Lines Matching defs:bio
11 #include <linux/bio.h>
134 * will unthrottle and is ready to dispatch more bios. It is used as
156 /* Number of bios dispatched in the current slice */
389 static inline unsigned int throtl_bio_data_size(struct bio *bio)
392 if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
394 return bio->bi_iter.bi_size;
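The listing only shows the discard check and the final return of bi_size; a minimal sketch of the whole helper, assuming discards are charged a small token size (the 512 here is an assumption) rather than the full range they describe:

static inline unsigned int throtl_bio_data_size(struct bio *bio)
{
	/* Discards describe a range rather than transferred payload;
	 * charge an assumed token amount so they are not free but also
	 * do not exhaust the bps budget. */
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
		return 512;

	return bio->bi_iter.bi_size;
}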
405 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
406 * @bio: bio being added
407 * @qn: qnode to add bio to
410 * Add @bio to @qn and put @qn on @queued if it's not already on.
414 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
417 bio_list_add(&qn->bios, bio);
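Only the bio_list_add() call is matched; based on the kerneldoc above, a sketch of what the rest of the function presumably does (the reference-taking detail is an assumption):

static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	/* First bio on this qnode: activate it by linking it onto the
	 * service queue's @queued list, and pin the owning group while
	 * it stays queued (assumed, not visible in the matches). */
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}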
425 * throtl_peek_queued - peek the first bio on a qnode list
428 static struct bio *throtl_peek_queued(struct list_head *queued)
431 struct bio *bio;
437 bio = bio_list_peek(&qn->bios);
438 WARN_ON_ONCE(!bio);
439 return bio;
443 * throtl_pop_queued - pop the first bio from a qnode list
444 * @queued: the qnode list to pop a bio from
447 * Pop the first bio from the qnode list @queued. After popping, the first
456 static struct bio *throtl_pop_queued(struct list_head *queued,
460 struct bio *bio;
466 bio = bio_list_pop(&qn->bios);
467 WARN_ON_ONCE(!bio);
479 return bio;
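A sketch of the pop side consistent with the kerneldoc and the matched lines; the round-robin rotation and the handling of the group reference are assumptions:

static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		/* drained: unlink it; the caller decides when to drop
		 * the group reference it was holding */
		list_del_init(&qn->node);
		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		/* still has bios: rotate to the tail so qnodes are
		 * serviced round-robin */
		list_move_tail(&qn->node, queued);
	}

	return bio;
}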
782 * bio dispatch. That means since start of last slice, we never used
853 * A bio has been dispatched. Also adjust slice_end. It might happen
855 * slice_end, but later limit was bumped up and bio was dispatched
896 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
899 bool rw = bio_data_dir(bio);
944 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
947 bool rw = bio_data_dir(bio);
950 unsigned int bio_size = throtl_bio_data_size(bio);
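The arithmetic of the bps check is not in the matches. Roughly, the group earns bps_limit bytes per second of elapsed slice, and the bio fits if it does not push bytes_disp past that allowance; otherwise the caller is told how long the excess takes to drain. A simplified sketch (slice rounding, the no-limit fast path and the overflow-careful math of the real helper are omitted):

static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 u64 bps_limit, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int bio_size = throtl_bio_data_size(bio);
	unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];
	u64 bytes_allowed, extra_bytes;

	/* allowance grows linearly with elapsed slice time */
	bytes_allowed = div_u64(bps_limit * jiffy_elapsed, HZ);

	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* over budget: wait long enough for the excess to drain */
	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
	if (wait)
		*wait = div64_u64(extra_bytes * HZ, bps_limit) + 1;
	return false;
}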
992 * Returns whether a bio can be dispatched. Also returns the approximate number
993 * of jiffies to wait before this bio is within the IO rate and can be dispatched
995 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
998 bool rw = bio_data_dir(bio);
1004 * Currently the whole state machine of the group depends on the first bio
1005 * queued in the group's bio list. So one should not call
1006 * this function with a different bio if there are other bios
1010 bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
1023 * If there is a queued bio, that means there should be an active
1038 if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
1039 tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
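When both checks pass the bio can be dispatched now; otherwise a wait is reported back. A condensed sketch of how the two waits are presumably combined, with the slice bookkeeping omitted (both budgets must recover, so the larger wait governs):

static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait;
	u64 bps_limit = tg_bps_limit(tg, rw);
	unsigned int iops_limit = tg_iops_limit(tg, rw);

	if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
		/* within both budgets: dispatch immediately */
		if (wait)
			*wait = 0;
		return true;
	}

	/* both budgets must recover before dispatch, so take the larger wait */
	max_wait = max(bps_wait, iops_wait);
	if (wait)
		*wait = max_wait;
	return false;
}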
1056 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
1058 bool rw = bio_data_dir(bio);
1059 unsigned int bio_size = throtl_bio_data_size(bio);
1061 /* Charge the bio to the group */
1068 * BIO_THROTTLED is used to prevent the same bio from being throttled
1069 * more than once, as a throttled bio will go through blk-throtl a
1070 * second time when it eventually gets issued. Set it when a bio
1073 if (!bio_flagged(bio, BIO_THROTTLED))
1074 bio_set_flag(bio, BIO_THROTTLED);
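A sketch of the charging step implied by the matches: account the bio against the per-direction byte and IO counters, then flag it so the eventual resubmission is not throttled again:

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	unsigned int bio_size = throtl_bio_data_size(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio_size;
	tg->io_disp[rw]++;

	/* a throttled bio passes through blk-throtl again when it is
	 * finally issued; the flag keeps it from being charged twice */
	if (!bio_flagged(bio, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
}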
1078 * throtl_add_bio_tg - add a bio to the specified throtl_grp
1079 * @bio: bio to add
1083 * Add @bio to @tg's service_queue using @qn. If @qn is not specified,
1086 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
1090 bool rw = bio_data_dir(bio);
1097 * direction, queueing @bio can change when @tg should be
1104 throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
1114 struct bio *bio;
1116 bio = throtl_peek_queued(&sq->queued[READ]);
1117 if (bio)
1118 tg_may_dispatch(tg, bio, &read_wait);
1120 bio = throtl_peek_queued(&sq->queued[WRITE]);
1121 if (bio)
1122 tg_may_dispatch(tg, bio, &write_wait);
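The two peeks at 1116-1122 feed tg_may_dispatch() to learn how long each direction still has to wait; presumably the group's dispatch time is then set from the smaller of the two and the group is re-sorted among its siblings. A condensed sketch (the enqueue/dequeue helper names are assumptions, they are not part of the matches):

static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = ULONG_MAX, write_wait = ULONG_MAX;
	unsigned long min_wait, disptime;
	struct bio *bio;

	bio = throtl_peek_queued(&sq->queued[READ]);
	if (bio)
		tg_may_dispatch(tg, bio, &read_wait);

	bio = throtl_peek_queued(&sq->queued[WRITE]);
	if (bio)
		tg_may_dispatch(tg, bio, &write_wait);

	/* the direction that unblocks first decides when to look again */
	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* re-sort the group in its parent's pending tree */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);
}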
1152 struct bio *bio;
1155 * @bio is being transferred from @tg to @parent_sq. Popping a bio
1158 * after @bio is transferred to @parent_sq.
1160 bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
1163 throtl_charge_bio(tg, bio);
1166 * If our parent is another tg, we just need to transfer @bio to
1168 * @td->service_queue, @bio is ready to be issued. Put it on its
1173 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1176 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
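The matches sketch the promotion step: a bio popped from the child queue is charged to this group and then either queued on the parent group for another round of throttling or, at the top of the hierarchy, placed on @td->service_queue ready for issue. A condensed sketch; locking and some of the slice bookkeeping are assumed:

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct throtl_grp *tg_to_put = NULL;
	struct bio *bio;

	/* keep the drained group alive until the transfer is complete */
	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	if (parent_tg) {
		/* parent is another tg: climb one level, throttled again there */
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
	} else {
		/* parent is @td->service_queue: the bio is ready to be issued */
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     &parent_sq->queued[rw]);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	if (tg_to_put)
		blkg_put(tg_to_blkg(tg_to_put));
}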
1194 struct bio *bio;
1198 while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1199 tg_may_dispatch(tg, bio, NULL)) {
1201 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1208 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1209 tg_may_dispatch(tg, bio, NULL)) {
1211 tg_dispatch_one_bio(tg, bio_data_dir(bio));
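The two loops drain reads and writes for as long as tg_may_dispatch() agrees; presumably each direction is also capped per pass so neither can starve the other. A sketch with an assumed per-pass split (the 6/2 quantum is a guess, not taken from the matches):

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, max_nr_reads = 6;	/* assumed cap */
	unsigned int nr_writes = 0, max_nr_writes = 2;	/* assumed cap */
	struct bio *bio;

	/* dispatch reads while the budget allows, up to the per-pass cap */
	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {
		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		if (++nr_reads >= max_nr_reads)
			break;
	}

	/* then writes, with a smaller cap so reads are favored */
	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {
		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		if (++nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}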
1260 * This timer is armed when a child throtl_grp with active bios becomes
1263 * dispatches bios from the children throtl_grps to the parent
1269 * kicked so that the ready bios are issued.
1346 struct bio *bio;
1354 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1355 bio_list_add(&bio_list_on_stack, bio);
1360 while ((bio = bio_list_pop(&bio_list_on_stack)))
1361 submit_bio_noacct(bio);
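The matches at 1346-1361 show the tail of the dispatch work: everything queued at the top-level service queue is popped onto an on-stack bio_list, and only afterwards issued with submit_bio_noacct(). A sketch of that pattern; q and td_sq come from the enclosing work function and the locking detail is an assumption:

	struct bio_list bio_list_on_stack;
	struct bio *bio;
	int rw;

	bio_list_init(&bio_list_on_stack);

	/* collect everything that reached the top-level queue under the lock */
	spin_lock_irq(&q->queue_lock);
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
			bio_list_add(&bio_list_on_stack, bio);
	spin_unlock_irq(&q->queue_lock);

	/* issue outside the lock; each bio carries BIO_THROTTLED, so it
	 * passes straight through blk_throtl_bio() on resubmission */
	while ((bio = bio_list_pop(&bio_list_on_stack)))
		submit_bio_noacct(bio);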
2221 void blk_throtl_charge_bio_split(struct bio *bio)
2223 struct blkcg_gq *blkg = bio->bi_blkg;
2226 bool rw = bio_data_dir(bio);
2240 bool blk_throtl_bio(struct bio *bio)
2242 struct request_queue *q = bio->bi_disk->queue;
2243 struct blkcg_gq *blkg = bio->bi_blkg;
2247 bool rw = bio_data_dir(bio);
2254 if (bio_flagged(bio, BIO_THROTTLED))
2258 blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
2259 bio->bi_iter.bi_size);
2260 blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
2285 if (!tg_may_dispatch(tg, bio, NULL)) {
2295 throtl_charge_bio(tg, bio);
2299 * otherwise it might happen that a bio is not queued for
2306 * So keep on trimming slice even if bio is not queued.
2311 * @bio passed through this layer without being throttled.
2323 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2325 tg->bytes_disp[rw], bio->bi_iter.bi_size,
2333 throtl_add_bio_tg(bio, qn, tg);
2338 * was empty before @bio. The forced scheduling isn't likely to
2339 * cause undue delay as @bio is likely to be dispatched directly if
2350 bio_set_flag(bio, BIO_THROTTLED);
2354 bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
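Taken together, the blk_throtl_bio() matches suggest the flow below: walk up from the bio's group, and at each level either charge and climb (within limits) or stop and queue. A heavily condensed sketch; locking, statistics and the low-limit logic are omitted, and the loop structure is partly assumed:

	struct throtl_qnode *qn = NULL;	/* NULL: queue on @tg's own qnode */
	struct throtl_service_queue *sq = &tg->service_queue;

	while (true) {
		if (!tg_may_dispatch(tg, bio, NULL))
			break;		/* over limit at this level */

		/* within limits here: charge, trim, and try the parent */
		throtl_charge_bio(tg, bio);
		throtl_trim_slice(tg, rw);

		qn = &tg->qnode_on_parent[rw];
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg) {
			/* cleared every level without being throttled */
			bio_set_flag(bio, BIO_THROTTLED);
			return false;
		}
	}

	/* over limit: park the bio on this group; the pending timer will
	 * move it toward the root once the budget recovers */
	throtl_add_bio_tg(bio, qn, tg);
	throttled = true;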
2389 void blk_throtl_bio_endio(struct bio *bio)
2397 int rw = bio_data_dir(bio);
2399 blkg = bio->bi_blkg;
2409 start_time = bio_issue_time(&bio->bi_issue) >> 10;
2415 /* this is only for bio-based drivers */
2416 if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
2417 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
2418 bio_op(bio), lat);
2424 bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
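The endio matches compute the bio's latency from the timestamp stashed in bi_issue and feed it, bucketed by request size, into the latency tracker; bios that took the fast path carry BIO_ISSUE_THROTL_SKIP_LATENCY and are ignored. A small sketch with the timestamp scaling simplified:

	u64 start_time, finish_time, lat;

	start_time = bio_issue_time(&bio->bi_issue) >> 10;
	/* simplified: the real code masks the finish timestamp the same
	 * way bi_issue does before comparing */
	finish_time = ktime_get_ns() >> 10;
	if (start_time && finish_time > start_time) {
		lat = finish_time - start_time;
		/* only bio-based drivers that were not flagged to skip */
		if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
			throtl_track_latency(tg->td,
					     bio_issue_size(&bio->bi_issue),
					     bio_op(bio), lat);
	}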