Lines Matching defs:bio
11 #include <linux/bio.h>
236 static inline unsigned int throtl_bio_data_size(struct bio *bio)
239 if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
241 return bio->bi_iter.bi_size;
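The body elided between these fragments is short: a discard is charged as a single 512-byte sector rather than by its full payload. A sketch of the helper as it appears in the mainline driver (the exact constant is the assumed part and may differ by kernel version):

    static inline unsigned int throtl_bio_data_size(struct bio *bio)
    {
            /* assume a discard costs one sector for bandwidth accounting */
            if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
                    return 512;
            return bio->bi_iter.bi_size;
    }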
252 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
253 * @bio: bio being added
254 * @qn: qnode to add bio to
257 * Add @bio to @qn and put @qn on @queued if it's not already on.
261 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
264 bio_list_add(&qn->bios, bio);
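The lines elided around 264 also activate the qnode, not just queue the bio. A sketch of the likely body, assuming the qnode pins its group's blkg while it sits on @queued (the blkg_get()/qn->tg details are taken from the mainline driver):

    static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
                                     struct list_head *queued)
    {
            bio_list_add(&qn->bios, bio);
            if (list_empty(&qn->node)) {
                    /* first bio on this qnode: activate it on @queued */
                    list_add_tail(&qn->node, queued);
                    blkg_get(tg_to_blkg(qn->tg));
            }
    }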
272 * throtl_peek_queued - peek the first bio on a qnode list
275 static struct bio *throtl_peek_queued(struct list_head *queued)
278 struct bio *bio;
284 bio = bio_list_peek(&qn->bios);
285 WARN_ON_ONCE(!bio);
286 return bio;
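Filling in the elided lines: the helper bails out on an empty list, otherwise peeks the head bio of the first active qnode. A sketch following the mainline structure (the list_first_entry() walk is the assumed part):

    static struct bio *throtl_peek_queued(struct list_head *queued)
    {
            struct throtl_qnode *qn;
            struct bio *bio;

            if (list_empty(queued))
                    return NULL;

            qn = list_first_entry(queued, struct throtl_qnode, node);
            bio = bio_list_peek(&qn->bios);
            WARN_ON_ONCE(!bio);
            return bio;
    }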
290 * throtl_pop_queued - pop the first bio from a qnode list
291 * @queued: the qnode list to pop a bio from
294 * Pop the first bio from the qnode list @queued. After popping, the first
303 static struct bio *throtl_pop_queued(struct list_head *queued,
307 struct bio *bio;
313 bio = bio_list_pop(&qn->bios);
314 WARN_ON_ONCE(!bio);
326 return bio;
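The elided remainder of this function implements the round-robin rotation the kerneldoc at 294 describes: a drained qnode is deactivated and its group reference handed back through @tg_to_put (or dropped directly), otherwise the qnode is rotated to the tail of @queued. A sketch along mainline lines:

    static struct bio *throtl_pop_queued(struct list_head *queued,
                                         struct throtl_grp **tg_to_put)
    {
            struct throtl_qnode *qn;
            struct bio *bio;

            if (list_empty(queued))
                    return NULL;

            qn = list_first_entry(queued, struct throtl_qnode, node);
            bio = bio_list_pop(&qn->bios);
            WARN_ON_ONCE(!bio);

            if (bio_list_empty(&qn->bios)) {
                    /* drained: deactivate and hand the blkg ref to the caller */
                    list_del_init(&qn->node);
                    if (tg_to_put)
                            *tg_to_put = qn->tg;
                    else
                            blkg_put(tg_to_blkg(qn->tg));
            } else {
                    /* rotate so pops are round-robin across qnodes */
                    list_move_tail(&qn->node, queued);
            }

            return bio;
    }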
643 * bio dispatch. That means since start of last slice, we never used
753 * A bio has been dispatched. Also adjust slice_end. It might happen
755 * slice_end, but later limit was bumped up and bio was dispatched
831 static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
834 bool rw = bio_data_dir(bio);
856 static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
859 bool rw = bio_data_dir(bio);
863 unsigned int bio_size = throtl_bio_data_size(bio);
865 /* no need to throttle if this bio's bytes have been accounted */
866 if (bps_limit == U64_MAX || bio_flagged(bio, BIO_BPS_THROTTLED)) {
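The quoted check returns early with no wait; the arithmetic elided after it converts elapsed slice time into a byte budget and, when the budget is exceeded, into a wait in jiffies. A simplified sketch of that fragment; local declarations are omitted and the rounding/overflow helpers (roundup(), mul_u64_u64_div_u64(), div64_u64()) are assumptions from the mainline driver:

    /* simplified sketch of the elided rate math, not version-exact */
    jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
    if (!jiffy_elapsed)
            jiffy_elapsed_rnd = tg->td->throtl_slice;
    jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

    bytes_allowed = mul_u64_u64_div_u64(bps_limit, jiffy_elapsed_rnd, (u64)HZ);
    if (tg->bytes_disp[rw] + bio_size <= bytes_allowed)
            return 0;                               /* dispatch now */

    /* over budget: approximate jiffies until the extra bytes fit */
    extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
    jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);
    if (!jiffy_wait)
            jiffy_wait = 1;
    return jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);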
898 * Returns whether one can dispatch a bio or not. Also returns approx number
899 * of jiffies to wait before this bio is within the IO rate and can be dispatched
901 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
904 bool rw = bio_data_dir(bio);
910 * Currently the whole state machine of the group depends on the first bio
911 * queued in the group's bio list, so one should not call
912 * this function with a different bio if there are other bios
916 bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
930 * If there is a queued bio, that means there should be an active
942 bps_wait = tg_within_bps_limit(tg, bio, bps_limit);
943 iops_wait = tg_within_iops_limit(tg, bio, iops_limit);
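Around these calls the function decides whether the bio may be dispatched right now. A condensed sketch of that flow; local declarations are elided, bps_limit/iops_limit are assumed to come from tg_bps_limit()/tg_iops_limit() in the elided lines, and the slice helpers (throtl_slice_used(), throtl_start_new_slice(), throtl_extend_slice()) keep their mainline names, whose signatures vary between versions:

    /* condensed sketch of the decision flow; slice handling is simplified */
    if (bps_limit == U64_MAX && iops_limit == UINT_MAX) {
            if (wait)
                    *wait = 0;
            return true;                            /* no limits configured */
    }

    /* start a fresh slice for an idle group, otherwise extend the active one */
    if (throtl_slice_used(tg, rw) && !tg->service_queue.nr_queued[rw])
            throtl_start_new_slice(tg, rw);
    else if (time_before(tg->slice_end[rw], jiffies + tg->td->throtl_slice))
            throtl_extend_slice(tg, rw, jiffies + tg->td->throtl_slice);

    bps_wait = tg_within_bps_limit(tg, bio, bps_limit);
    iops_wait = tg_within_iops_limit(tg, bio, iops_limit);
    if (bps_wait + iops_wait == 0) {
            if (wait)
                    *wait = 0;
            return true;                            /* within both limits */
    }

    max_wait = max(bps_wait, iops_wait);
    if (wait)
            *wait = max_wait;
    /* make sure the slice covers the projected dispatch time */
    if (time_before(tg->slice_end[rw], jiffies + max_wait))
            throtl_extend_slice(tg, rw, jiffies + max_wait);
    return false;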
961 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
963 bool rw = bio_data_dir(bio);
964 unsigned int bio_size = throtl_bio_data_size(bio);
966 /* Charge the bio to the group */
967 if (!bio_flagged(bio, BIO_BPS_THROTTLED)) {
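A sketch of the whole charge: bytes are charged only once per bio across the hierarchy (hence the BIO_BPS_THROTTLED check), while IOs are counted at every level. The last_* counters are an assumption from kernel versions that carry the low-limit heuristics:

    static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
    {
            bool rw = bio_data_dir(bio);
            unsigned int bio_size = throtl_bio_data_size(bio);

            /* charge bytes only once per bio across the hierarchy */
            if (!bio_flagged(bio, BIO_BPS_THROTTLED)) {
                    tg->bytes_disp[rw] += bio_size;
                    tg->last_bytes_disp[rw] += bio_size;    /* low-limit bookkeeping */
            }

            /* iops are charged at every level */
            tg->io_disp[rw]++;
            tg->last_io_disp[rw]++;
    }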
977 * throtl_add_bio_tg - add a bio to the specified throtl_grp
978 * @bio: bio to add
982 * Add @bio to @tg's service_queue using @qn. If @qn is not specified,
985 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
989 bool rw = bio_data_dir(bio);
996 * direction, queueing @bio can change when @tg should be
1003 throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
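A sketch of the surrounding body, assuming the mainline behaviour: fall back to the group's own qnode when @qn is not given, remember that the direction was empty (THROTL_TG_WAS_EMPTY) so the dispatch time gets recomputed, then queue the bio and enqueue the group:

    static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
                                  struct throtl_grp *tg)
    {
            struct throtl_service_queue *sq = &tg->service_queue;
            bool rw = bio_data_dir(bio);

            if (!qn)
                    qn = &tg->qnode_on_self[rw];

            /*
             * Queueing into an empty direction can change when @tg should
             * next be dispatched, so remember that it was empty.
             */
            if (!sq->nr_queued[rw])
                    tg->flags |= THROTL_TG_WAS_EMPTY;

            throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

            sq->nr_queued[rw]++;
            throtl_enqueue_tg(tg);
    }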
1013 struct bio *bio;
1015 bio = throtl_peek_queued(&sq->queued[READ]);
1016 if (bio)
1017 tg_may_dispatch(tg, bio, &read_wait);
1019 bio = throtl_peek_queued(&sq->queued[WRITE]);
1020 if (bio)
1021 tg_may_dispatch(tg, bio, &write_wait);
1051 struct bio *bio;
1054 * @bio is being transferred from @tg to @parent_sq. Popping a bio
1057 * after @bio is transferred to @parent_sq.
1059 bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
1062 throtl_charge_bio(tg, bio);
1065 * If our parent is another tg, we just need to transfer @bio to
1067 * @td->service_queue, @bio is ready to be issued. Put it on its
1072 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1075 bio_set_flag(bio, BIO_BPS_THROTTLED);
1076 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
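Put together, the fragments above form the single-bio dispatch step: pop from the child, charge it, then either hand it to the parent group or, at the top of the hierarchy, mark it throttled and park it on @td->service_queue. A condensed sketch; local declarations and some mainline bookkeeping are omitted:

    /* condensed sketch of the dispatch step the fragments belong to */
    bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
    sq->nr_queued[rw]--;

    throtl_charge_bio(tg, bio);

    if (parent_tg) {
            /* hand the bio to the parent group for further throttling */
            throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
    } else {
            /* top of the hierarchy: ready to issue, don't re-charge bytes */
            bio_set_flag(bio, BIO_BPS_THROTTLED);
            throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
                                 &parent_sq->queued[rw]);
            tg->td->nr_queued[rw]--;
    }

    throtl_trim_slice(tg, rw);

    if (tg_to_put)
            blkg_put(tg_to_blkg(tg_to_put));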
1094 struct bio *bio;
1098 while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1099 tg_may_dispatch(tg, bio, NULL)) {
1101 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1108 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1109 tg_may_dispatch(tg, bio, NULL)) {
1111 tg_dispatch_one_bio(tg, bio_data_dir(bio));
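These two loops sit inside throtl_dispatch_tg(), which drains up to a quantum of bios per pass, biased roughly 3:1 toward reads. A sketch of the loop structure; THROTL_GRP_QUANTUM and the 75/25 split are taken from the mainline driver:

    /* sketch of the surrounding loop structure */
    unsigned int nr_reads = 0, nr_writes = 0;
    unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
    unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;

    while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
           tg_may_dispatch(tg, bio, NULL)) {
            tg_dispatch_one_bio(tg, bio_data_dir(bio));
            if (++nr_reads >= max_nr_reads)
                    break;
    }

    while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
           tg_may_dispatch(tg, bio, NULL)) {
            tg_dispatch_one_bio(tg, bio_data_dir(bio));
            if (++nr_writes >= max_nr_writes)
                    break;
    }

    return nr_reads + nr_writes;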
1160 * This timer is armed when a child throtl_grp with active bios becomes
1163 * dispatches bios from the children throtl_grps to the parent
1169 * kicked so that the ready bios are issued.
1256 struct bio *bio;
1264 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1265 bio_list_add(&bio_list_on_stack, bio);
1270 while ((bio = bio_list_pop(&bio_list_on_stack)))
1271 submit_bio_noacct_nocheck(bio);
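The worker around these lines moves fully throttled bios from the top-level service queue onto an on-stack bio_list under the queue lock, then reissues them outside the lock. A sketch assuming the mainline locking and plugging; locals such as q, td_sq and plug are declared in the elided lines:

    /* sketch of the issue path */
    bio_list_init(&bio_list_on_stack);

    spin_lock_irq(&q->queue_lock);
    for (rw = READ; rw <= WRITE; rw++)
            while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
                    bio_list_add(&bio_list_on_stack, bio);
    spin_unlock_irq(&q->queue_lock);

    if (!bio_list_empty(&bio_list_on_stack)) {
            blk_start_plug(&plug);
            while ((bio = bio_list_pop(&bio_list_on_stack)))
                    submit_bio_noacct_nocheck(bio);
            blk_finish_plug(&plug);
    }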
2176 bool __blk_throtl_bio(struct bio *bio)
2178 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2179 struct blkcg_gq *blkg = bio->bi_blkg;
2183 bool rw = bio_data_dir(bio);
2208 if (!tg_may_dispatch(tg, bio, NULL)) {
2218 throtl_charge_bio(tg, bio);
2222 * otherwise it might happen that a bio is not queued for
2229 * So keep on trimming slice even if bio is not queued.
2234 * @bio passed through this layer without being throttled.
2242 bio_set_flag(bio, BIO_BPS_THROTTLED);
2248 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2250 tg->bytes_disp[rw], bio->bi_iter.bi_size,
2258 throtl_add_bio_tg(bio, qn, tg);
2263 * was empty before @bio. The forced scheduling isn't likely to
2264 * cause undue delay as @bio is likely to be dispatched directly if
2275 bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
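Taken together, the __blk_throtl_bio() fragments describe a walk up the cgroup hierarchy: each level either passes the bio through (charging it and trimming the slice) or queues it and stops. A heavily condensed sketch of that loop; locals, the unlock label, and the low-limit/upgrade logic are omitted, and tg_update_disptime()/throtl_schedule_next_dispatch() are the mainline names for the forced-rescheduling step:

    /* heavily condensed sketch of the hierarchy walk in __blk_throtl_bio() */
    while (true) {
            if (!tg_may_dispatch(tg, bio, NULL))
                    break;                          /* over limit at this level */

            /* within limits: charge it here and trim the slice */
            throtl_charge_bio(tg, bio);
            throtl_trim_slice(tg, rw);

            /* climb to the parent; at the top the bio can be issued directly */
            qn = &tg->qnode_on_parent[rw];
            sq = sq->parent_sq;
            tg = sq_to_tg(sq);
            if (!tg) {
                    bio_set_flag(bio, BIO_BPS_THROTTLED);
                    goto out_unlock;
            }
    }

    /* over limit: queue on @tg and reschedule dispatch if it was idle */
    throtl_add_bio_tg(bio, qn, tg);
    throttled = true;
    if (tg->flags & THROTL_TG_WAS_EMPTY) {
            tg_update_disptime(tg);
            throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
    }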
2313 void blk_throtl_bio_endio(struct bio *bio)
2321 int rw = bio_data_dir(bio);
2323 blkg = bio->bi_blkg;
2333 start_time = bio_issue_time(&bio->bi_issue) >> 10;
2339 /* this is only for bio based driver */
2340 if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
2341 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
2342 bio_op(bio), lat);
2348 bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
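The endio fragments compute a completion latency and feed it into the low-limit latency tracking unless the bio was marked to skip it; the bucket lookup on the last line feeds the per-size-bucket bad-latency counting. A rough sketch of the latency path (the >> 10 shifts approximate a nanosecond-to-microsecond conversion; __bio_issue_time() and the surrounding locals are assumptions from the mainline driver):

    /* sketch of the latency sampling around the quoted lines (≈ microseconds) */
    start_time = bio_issue_time(&bio->bi_issue) >> 10;
    finish_time = __bio_issue_time(ktime_get_ns()) >> 10;
    if (!start_time || finish_time <= start_time)
            return;

    lat = finish_time - start_time;
    /* latency samples are only meaningful for bio-based drivers */
    if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
            throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
                                 bio_op(bio), lat);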