Lines Matching refs:bio
7 #include <linux/bio.h>
21 static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
23 *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
26 static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
28 struct bvec_iter iter = bio->bi_iter;
31 bio_get_first_bvec(bio, bv);
32 if (bv->bv_len == bio->bi_iter.bi_size)
33 return; /* this bio only has a single bvec */
35 bio_advance_iter(bio, &iter, iter.bi_size);
42 *bv = bio->bi_io_vec[idx];
46 * if this bio ends in the middle of one io vector
53 struct request *prev_rq, struct bio *prev, struct bio *next)
61 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
66 bio_get_first_bvec(prev_rq->bio, &pb);
77 * - if 'pb' ends unaligned, the next bio must include
88 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
90 return bio_will_gap(req->q, req, req->biotail, bio);
93 static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
95 return bio_will_gap(req->q, NULL, bio, req->bio);
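The fragments above spell out the virt_boundary rule behind req_gap_back_merge()/req_gap_front_merge(): two bios may share a request only if no address gap appears between their vectors. A minimal userspace sketch of that test, assuming a simplified segment model (offset and length only) and a hypothetical 4K boundary mask:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Simplified model of the virt_boundary gap check: a segment may
     * follow another in the same request only if the previous one ends
     * on the boundary and the next one starts on it.
     */
    static bool segments_gap(unsigned int prev_offset, unsigned int prev_len,
                             unsigned int next_offset, unsigned int boundary_mask)
    {
            if (!boundary_mask)
                    return false;   /* no boundary restriction at all */
            return ((prev_offset + prev_len) & boundary_mask) ||
                   (next_offset & boundary_mask);
    }

    int main(void)
    {
            unsigned int mask = 4096 - 1;   /* hypothetical 4K virt boundary */

            printf("%d\n", segments_gap(0, 4096, 0, mask)); /* 0: mergeable */
            printf("%d\n", segments_gap(0, 512, 0, mask));  /* 1: would gap */
            return 0;
    }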
99 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
108 static struct bio *bio_split_discard(struct bio *bio,
130 if (bio_sectors(bio) <= max_discard_sectors)
139 tmp = bio->bi_iter.bi_sector + split_sectors -
146 return bio_split(bio, split_sectors, GFP_NOIO, bs);
149 static struct bio *bio_split_write_zeroes(struct bio *bio,
156 if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
158 return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
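bio_split_discard() above also has to keep the second half aligned: the split point is pulled back so the remainder starts on a discard_granularity boundary. A rough userspace model of that arithmetic, with made-up sector values:

    #include <stdio.h>

    /*
     * Rough model of picking a discard split point: cap at
     * max_discard_sectors, then pull the split back so the remainder
     * starts aligned to the discard granularity.  All values are in
     * 512-byte sectors and purely illustrative.
     */
    static unsigned int discard_split_sectors(unsigned long long sector,
                                              unsigned int nr_sectors,
                                              unsigned int max_discard_sectors,
                                              unsigned int granularity,
                                              unsigned int alignment)
    {
            unsigned int split_sectors = max_discard_sectors;
            unsigned long long tmp;

            if (nr_sectors <= max_discard_sectors)
                    return 0;               /* no split needed */

            tmp = (sector + split_sectors - alignment) % granularity;
            if (split_sectors > tmp)
                    split_sectors -= tmp;

            return split_sectors;
    }

    int main(void)
    {
            /* 32 MiB max discard, 1 MiB granularity, start 8 sectors off */
            printf("%u\n", discard_split_sectors(8, 1 << 20, 65536, 2048, 0));
            return 0;
    }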
162 * Return the maximum number of sectors from the start of a bio that may be
166 * requests that are submitted to a block device if the start of a bio is not
169 static inline unsigned get_max_io_size(struct bio *bio,
178 blk_chunk_sectors_left(bio->bi_iter.bi_sector,
182 start = bio->bi_iter.bi_sector & (pbs - 1);
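Together these fragments cap the split at the sectors left in the current chunk and then make the end land on a physical block boundary. A hedged userspace model of that calculation, assuming a power-of-two chunk size and eliding the logical-block fallback the in-kernel helper has:

    #include <stdio.h>

    /* Sectors left until the next chunk boundary (chunk_sectors must be a
     * power of two in this simplified model). */
    static unsigned int chunk_sectors_left(unsigned long long sector,
                                           unsigned int chunk_sectors)
    {
            return chunk_sectors - (sector & (chunk_sectors - 1));
    }

    /*
     * Model of capping the per-bio I/O size: stay inside the current
     * chunk and make the end land on a physical block boundary.
     * 'pbs' is the physical block size in sectors.
     */
    static unsigned int max_io_sectors(unsigned long long sector,
                                       unsigned int max_sectors,
                                       unsigned int chunk_sectors,
                                       unsigned int pbs)
    {
            unsigned int start, end;

            if (chunk_sectors &&
                chunk_sectors_left(sector, chunk_sectors) < max_sectors)
                    max_sectors = chunk_sectors_left(sector, chunk_sectors);

            start = sector & (pbs - 1);
            end = (start + max_sectors) & ~(pbs - 1);
            if (end > start)
                    return end - start;
            /* fallback; the in-kernel helper aligns to the logical block size here */
            return max_sectors & ~(pbs - 1);
    }

    int main(void)
    {
            /* 4K physical blocks (8 sectors), 128K chunks (256 sectors) */
            printf("%u\n", max_io_sectors(250, 1024, 256, 8));      /* 6 */
            return 0;
    }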
215 * @nsegs: [in,out] Number of segments in the bio being built. Incremented
217 * bio without exceeding @max_segs
218 * @bytes: [in,out] Number of bytes in the bio being built. Incremented
220 * bio without exceeding @max_bytes
224 * When splitting a bio, it can happen that a bvec is encountered that is too
227 * %true is returned if and only if appending the entire @bv to a bio with
228 * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
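These kerneldoc fragments belong to bvec_split_segs(): charge one bvec against the running segment and byte budgets and report whether the whole bvec fit. A hedged userspace model of that bookkeeping, with a single max_seg_size standing in for the full queue_limits and the virt_boundary handling left out:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Hedged model of accounting a single large bvec against the running
     * segment and byte budgets of a bio being built.
     */
    static bool split_one_bvec(unsigned int bv_len, unsigned int max_seg_size,
                               unsigned int max_segs, unsigned int max_bytes,
                               unsigned int *nsegs, unsigned int *bytes)
    {
            unsigned int room = max_bytes - *bytes;
            unsigned int len = bv_len < room ? bv_len : room;

            while (len && *nsegs < max_segs) {
                    unsigned int seg = len < max_seg_size ? len : max_seg_size;

                    (*nsegs)++;
                    *bytes += seg;
                    len -= seg;
            }

            /* true: the whole bvec did not fit, so the bio needs splitting */
            return len > 0 || bv_len > room;
    }

    int main(void)
    {
            unsigned int nsegs = 0, bytes = 0;
            bool split;

            /* a 1 MiB bvec against 64K segments, 8 segments, 512K total */
            split = split_one_bvec(1 << 20, 64 << 10, 8, 512 << 10,
                                   &nsegs, &bytes);
            printf("split=%d nsegs=%u bytes=%u\n", split, nsegs, bytes);
            return 0;
    }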
260 * bio_split_rw - split a bio in two bios
261 * @bio: [in] bio to be split
263 * @segs: [out] number of segments in the bio with the first half of the sectors
264 * @bs: [in] bio set to allocate the clone from
265 * @max_bytes: [in] maximum number of bytes per bio
267 * Clone @bio, update the bi_iter of the clone to represent the first sectors
268 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
269 * following is guaranteed for the cloned bio:
273 * Except for discard requests the cloned bio will point at the bi_io_vec of
274 * the original bio. It is the responsibility of the caller to ensure that the
275 * original bio is not freed before the cloned bio. The caller is also
277 * split bio has finished.
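The guarantee described above is easiest to see on the iterators alone: the clone covers the first chunk of the original, and the original is advanced to the remainder. A small userspace model of just that contract (real clones also share bi_io_vec with the original, which is not modelled here):

    #include <stdio.h>

    struct iter {
            unsigned long long sector;      /* like bvec_iter.bi_sector */
            unsigned int size;              /* like bvec_iter.bi_size, in bytes */
    };

    /*
     * Model of the split contract: the returned iterator covers the first
     * 'bytes' of the original, and the original is advanced to the rest.
     */
    static struct iter split_iter(struct iter *orig, unsigned int bytes)
    {
            struct iter head = { orig->sector, bytes };

            orig->sector += bytes >> 9;     /* bytes to 512-byte sectors */
            orig->size -= bytes;
            return head;
    }

    int main(void)
    {
            struct iter bio = { 2048, 1 << 20 };
            struct iter head = split_iter(&bio, 512 << 10);

            printf("clone %llu+%u, remainder %llu+%u\n",
                   head.sector, head.size, bio.sector, bio.size);
            return 0;
    }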
279 struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
286 bio_for_each_bvec(bv, bio, iter) {
313 * We can't sanely support splitting for a REQ_NOWAIT bio. End it
316 if (bio->bi_opf & REQ_NOWAIT) {
317 bio->bi_status = BLK_STS_AGAIN;
318 bio_endio(bio);
326 * split size so that each bio is properly block size aligned, even if
336 bio_clear_polled(bio);
337 return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
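Right before that bio_split() call, the accumulated byte count is rounded down to the logical block size so neither half ends mid-block, then converted to sectors. A minimal model of that rounding, with illustrative numbers:

    #include <stdio.h>

    #define SECTOR_SHIFT 9

    /* Round bytes down to a power-of-two logical block size (like
     * ALIGN_DOWN(bytes, lbs)), then convert to 512-byte sectors. */
    static unsigned int split_sectors(unsigned int bytes, unsigned int lbs)
    {
            bytes &= ~(lbs - 1);
            return bytes >> SECTOR_SHIFT;
    }

    int main(void)
    {
            /* 130000 bytes against 4K blocks -> 126976 bytes -> 248 sectors */
            printf("%u\n", split_sectors(130000, 4096));
            return 0;
    }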
342 * __bio_split_to_limits - split a bio to fit the queue limits
343 * @bio: bio to be split
345 * @nr_segs: returns the number of segments in the returned bio
347 * Check if @bio needs splitting based on the queue limits, and if so split off
348 * a bio fitting the limits from the beginning of @bio and return it. @bio is
351 * The split bio is allocated from @q->bio_split, which is provided by the
354 struct bio *__bio_split_to_limits(struct bio *bio,
358 struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
359 struct bio *split;
361 switch (bio_op(bio)) {
364 split = bio_split_discard(bio, lim, nr_segs, bs);
367 split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
370 split = bio_split_rw(bio, lim, nr_segs, bs,
371 get_max_io_size(bio, lim) << SECTOR_SHIFT);
378 /* there is no chance to merge the split bio */
382 bio_chain(split, bio);
383 trace_block_split(split, bio->bi_iter.bi_sector);
384 submit_bio_noacct(bio);
387 return bio;
391 * bio_split_to_limits - split a bio to fit the queue limits
392 * @bio: bio to be split
394 * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
395 * if so split off a bio fitting the limits from the beginning of @bio and
396 * return it. @bio is shortened to the remainder and re-submitted.
398 * The split bio is allocated from @q->bio_split, which is provided by the
401 struct bio *bio_split_to_limits(struct bio *bio)
403 const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
406 if (bio_may_exceed_limits(bio, lim))
407 return __bio_split_to_limits(bio, lim, &nr_segs);
408 return bio;
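bio_split_to_limits() is the exported entry point for bio-based stacking drivers. A hedged sketch of how such a driver might call it from its ->submit_bio handler; my_submit_bio() is hypothetical, only the call and its NULL-return convention come from the code above:

    #include <linux/bio.h>

    static void my_submit_bio(struct bio *bio)
    {
            /*
             * Split off a bio that fits the queue limits; the remainder has
             * already been re-submitted.  A NULL return means the bio was
             * ended instead (e.g. a REQ_NOWAIT bio that would need a split).
             */
            bio = bio_split_to_limits(bio);
            if (!bio)
                    return;

            /* ... hand the (possibly shortened) bio to the driver ... */
    }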
419 if (!rq->bio)
422 switch (bio_op(rq->bio)) {
426 struct bio *bio = rq->bio;
428 for_each_bio(bio)
527 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
536 for_each_bio(bio) {
537 bio_for_each_bvec(bvec, bio, iter) {
540 * have done bio internal merge when adding pages
541 * to bio
554 if (likely(bio->bi_iter.bi_size)) {
574 else if (rq->bio)
575 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
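These lines come from the scatterlist mapping path: walk every bvec of every bio in the request and emit one sg entry per hardware segment, coalescing contiguous pieces where the limits allow. A hedged userspace model of the coalescing step, with segments reduced to (address, length) pairs and all size/boundary limits ignored:

    #include <stdio.h>

    struct seg {
            unsigned long addr;
            unsigned int len;
    };

    /*
     * Hedged model of mapping bvec-like chunks to a scatter list:
     * physically contiguous chunks collapse into one entry.  As the
     * comment above notes, bvecs within one bio were already merged when
     * the pages were added, so this mostly matters across bios.
     */
    static unsigned int map_sg(const struct seg *in, unsigned int n,
                               struct seg *out)
    {
            unsigned int nsegs = 0, i;

            for (i = 0; i < n; i++) {
                    if (nsegs &&
                        out[nsegs - 1].addr + out[nsegs - 1].len == in[i].addr)
                            out[nsegs - 1].len += in[i].len;
                    else
                            out[nsegs++] = in[i];
            }
            return nsegs;
    }

    int main(void)
    {
            struct seg in[] = {
                    { 0x1000, 0x1000 }, { 0x2000, 0x1000 }, { 0x8000, 0x200 },
            };
            struct seg out[3];

            printf("%u\n", map_sg(in, 3, out));     /* 2 */
            return 0;
    }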
608 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
611 if (!blk_cgroup_mergeable(req, bio))
614 if (blk_integrity_merge_bio(req->q, req, bio) == false)
636 int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
638 if (req_gap_back_merge(req, bio))
641 integrity_req_gap_back_merge(req, bio))
643 if (!bio_crypt_ctx_back_mergeable(req, bio))
645 if (blk_rq_sectors(req) + bio_sectors(bio) >
651 return ll_new_hw_segment(req, bio, nr_segs);
654 static int ll_front_merge_fn(struct request *req, struct bio *bio,
657 if (req_gap_front_merge(req, bio))
660 integrity_req_gap_front_merge(req, bio))
662 if (!bio_crypt_ctx_front_mergeable(req, bio))
664 if (blk_rq_sectors(req) + bio_sectors(bio) >
665 blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
670 return ll_new_hw_segment(req, bio, nr_segs);
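Both ll_back_merge_fn() and ll_front_merge_fn() gate the merge on combined size first: if the request plus the new bio would exceed the per-request sector limit, the merge is refused and the request is marked no-merge. A tiny model of that check, with invented limits:

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of the size gate: refuse the merge if the combined request
     * would exceed the per-request sector limit. */
    static bool merge_fits(unsigned int rq_sectors, unsigned int bio_sectors,
                           unsigned int max_sectors)
    {
            return rq_sectors + bio_sectors <= max_sectors;
    }

    int main(void)
    {
            printf("%d\n", merge_fits(2040, 8, 2048));      /* 1: fits exactly */
            printf("%d\n", merge_fits(2048, 8, 2048));      /* 0: too large */
            return 0;
    }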
680 if (blk_rq_sectors(req) + bio_sectors(next->bio) >
696 if (req_gap_back_merge(req, next->bio))
710 if (!blk_cgroup_mergeable(req, next->bio))
730 * which can be mixed are set in each bio and mark @rq as mixed
736 struct bio *bio;
744 * Distributes the attributes to each bio.
746 for (bio = rq->bio; bio; bio = bio->bi_next) {
747 WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
748 (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
749 bio->bi_opf |= ff;
754 static inline blk_opf_t bio_failfast(const struct bio *bio)
756 if (bio->bi_opf & REQ_RAHEAD)
759 return bio->bi_opf & REQ_FAILFAST_MASK;
763 * After we are marked as MIXED_MERGE, any new RA bio has to be updated
768 struct bio *bio, bool front_merge)
771 if (bio->bi_opf & REQ_RAHEAD)
772 bio->bi_opf |= REQ_FAILFAST_MASK;
776 req->cmd_flags |= bio->bi_opf & REQ_FAILFAST_MASK;
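The lines above handle failfast accounting across merges: bio_failfast() treats a readahead bio as fully failfast, and once a request is marked mixed-merge every bio carries the same failfast bits. A hedged model of the bio_failfast() part, with stand-in flag values:

    #include <stdio.h>

    /* Stand-in flag values for illustration only; the real REQ_*
     * definitions live in include/linux/blk_types.h. */
    #define F_RAHEAD        (1u << 0)
    #define F_FAILFAST_MASK (0x7u << 1)

    /*
     * Model of bio_failfast(): a readahead bio is treated as if every
     * failfast bit were set, so mixing it with regular bios forces the
     * request into mixed-merge accounting.
     */
    static unsigned int bio_failfast_bits(unsigned int bio_flags)
    {
            if (bio_flags & F_RAHEAD)
                    return F_FAILFAST_MASK;
            return bio_flags & F_FAILFAST_MASK;
    }

    int main(void)
    {
            printf("%#x\n", bio_failfast_bits(F_RAHEAD));   /* 0xe */
            printf("%#x\n", bio_failfast_bits(1u << 1));    /* 0x2 */
            return 0;
    }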
821 * If we are allowed to merge, then append bio list
862 req->biotail->bi_next = next->bio;
880 * ownership of bio passed from next to req, return 'next' for
883 next->bio = NULL;
920 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
922 if (!rq_mergeable(rq) || !bio_mergeable(bio))
925 if (req_op(rq) != bio_op(bio))
929 if (bio_data_dir(bio) != rq_data_dir(rq))
933 if (!blk_cgroup_mergeable(rq, bio))
936 /* only merge integrity protected bio into ditto rq */
937 if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
941 if (!bio_crypt_rq_ctx_compatible(rq, bio))
944 if (rq->ioprio != bio_prio(bio))
950 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
954 else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
956 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
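blk_try_merge() decides the merge direction purely from positions: a bio that starts where the request ends is a back merge, one that ends where the request starts is a front merge. A small userspace model of that test, in sectors:

    #include <stdio.h>

    enum merge_pos { NO_MERGE = 0, BACK_MERGE = 1, FRONT_MERGE = 2 };

    /* Model of the positional test above: back merge if the bio starts
     * where the request ends, front merge if it ends where the request
     * starts. */
    static enum merge_pos try_merge(unsigned long long rq_pos,
                                    unsigned int rq_sectors,
                                    unsigned long long bio_pos,
                                    unsigned int bio_sectors)
    {
            if (rq_pos + rq_sectors == bio_pos)
                    return BACK_MERGE;
            if (rq_pos - bio_sectors == bio_pos)
                    return FRONT_MERGE;
            return NO_MERGE;
    }

    int main(void)
    {
            printf("%d\n", try_merge(100, 8, 108, 8));      /* 1: back merge */
            printf("%d\n", try_merge(100, 8, 92, 8));       /* 2: front merge */
            return 0;
    }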
978 struct bio *bio, unsigned int nr_segs)
980 const blk_opf_t ff = bio_failfast(bio);
982 if (!ll_back_merge_fn(req, bio, nr_segs))
985 trace_block_bio_backmerge(bio);
986 rq_qos_merge(req->q, req, bio);
991 blk_update_mixed_merge(req, bio, false);
993 req->biotail->bi_next = bio;
994 req->biotail = bio;
995 req->__data_len += bio->bi_iter.bi_size;
997 bio_crypt_free_ctx(bio);
1004 struct bio *bio, unsigned int nr_segs)
1006 const blk_opf_t ff = bio_failfast(bio);
1008 if (!ll_front_merge_fn(req, bio, nr_segs))
1011 trace_block_bio_frontmerge(bio);
1012 rq_qos_merge(req->q, req, bio);
1017 blk_update_mixed_merge(req, bio, true);
1019 bio->bi_next = req->bio;
1020 req->bio = bio;
1022 req->__sector = bio->bi_iter.bi_sector;
1023 req->__data_len += bio->bi_iter.bi_size;
1025 bio_crypt_do_front_merge(req, bio);
1032 struct request *req, struct bio *bio)
1038 if (blk_rq_sectors(req) + bio_sectors(bio) >
1042 rq_qos_merge(q, req, bio);
1044 req->biotail->bi_next = bio;
1045 req->biotail = bio;
1046 req->__data_len += bio->bi_iter.bi_size;
1058 struct bio *bio,
1062 if (!blk_rq_merge_ok(rq, bio))
1065 switch (blk_try_merge(rq, bio)) {
1067 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1068 return bio_attempt_back_merge(rq, bio, nr_segs);
1071 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1072 return bio_attempt_front_merge(rq, bio, nr_segs);
1075 return bio_attempt_discard_merge(q, rq, bio);
1085 * @q: request_queue new bio is being queued at
1086 * @bio: new bio being queued
1087 * @nr_segs: number of segments in @bio
1090 * Determine whether @bio being queued on @q can be merged with the previous
1103 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1109 plug = blk_mq_plug(bio);
1115 if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
1132 * Iterate list of requests and see if we can merge this bio with any
1136 struct bio *bio, unsigned int nr_segs)
1145 switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
1160 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
1165 switch (elv_merge(q, &rq, bio)) {
1167 if (!blk_mq_sched_allow_merge(q, rq, bio))
1169 if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1176 if (!blk_mq_sched_allow_merge(q, rq, bio))
1178 if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1185 return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;