// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-throttle.h"

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	bio_get_first_bvec(bio, bv);
	if (bv->bv_len == bio->bi_iter.bi_size)
		return;		/* this bio only has a single bvec */

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
	 * is quite difficult to respect the sg gap limit.  We work hard to
	 * merge a huge number of small single bios in case of mkfs.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends in an unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', and it has to be aligned to the logical
 * block size, which is the minimum accepted unit by hardware.
 */
static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
{
	return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
}
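
/*
 * For example (illustrative numbers): with a 4096-byte logical block size,
 * round_down(UINT_MAX, 4096) is 4294963200 bytes, so a bio is capped at
 * 4294963200 >> 9 = 8388600 sectors rather than UINT_MAX >> 9, which keeps
 * the byte count an exact multiple of the logical block size.
 */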

static struct bio *bio_split_discard(struct bio *bio,
				     const struct queue_limits *lim,
				     unsigned *nsegs, struct bio_set *bs)
{
	unsigned int max_discard_sectors, granularity;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same.  */
	granularity = max(lim->discard_granularity >> 9, 1U);

	max_discard_sectors =
		min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	tmp = bio->bi_iter.bi_sector + split_sectors -
		((lim->discard_alignment >> 9) % granularity);
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

static struct bio *bio_split_write_zeroes(struct bio *bio,
					  const struct queue_limits *lim,
					  unsigned *nsegs, struct bio_set *bs)
{
	*nsegs = 0;
	if (!lim->max_write_zeroes_sectors)
		return NULL;
	if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
		return NULL;
	return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
}

/*
 * Return the maximum number of sectors from the start of a bio that may be
 * submitted as a single request to a block device. If enough sectors remain,
 * align the end to the physical block size. Otherwise align the end to the
 * logical block size. This approach minimizes the number of non-aligned
 * requests that are submitted to a block device if the start of a bio is not
 * aligned to a physical block boundary.
 */
static inline unsigned get_max_io_size(struct bio *bio,
				       const struct queue_limits *lim)
{
	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
	unsigned max_sectors = lim->max_sectors, start, end;

	if (lim->chunk_sectors) {
		max_sectors = min(max_sectors,
			blk_chunk_sectors_left(bio->bi_iter.bi_sector,
					       lim->chunk_sectors));
	}

	start = bio->bi_iter.bi_sector & (pbs - 1);
	end = (start + max_sectors) & ~(pbs - 1);
	if (end > start)
		return end - start;
	return max_sectors & ~(lbs - 1);
}
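
/*
 * Worked example (illustrative numbers): with 4096-byte physical blocks
 * (pbs = 8 sectors), a bio starting at sector 3 with max_sectors = 255
 * gives start = 3 and end = (3 + 255) & ~7 = 256, so the first split is
 * capped at 256 - 3 = 253 sectors and ends exactly on a physical block
 * boundary.
 */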

/**
 * get_max_segment_size() - maximum number of bytes to add as a single segment
 * @lim: Request queue limits.
 * @start_page: See below.
 * @offset: Offset from @start_page where to add a segment.
 *
 * Returns the maximum number of bytes that can be added as a single segment.
 */
static inline unsigned get_max_segment_size(const struct queue_limits *lim,
		struct page *start_page, unsigned long offset)
{
	unsigned long mask = lim->seg_boundary_mask;

	offset = mask & (page_to_phys(start_page) + offset);

	/*
	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
	 * after having calculated the minimum.
	 */
	return min(mask - offset, (unsigned long)lim->max_segment_size - 1) + 1;
}
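
/*
 * Worked example (illustrative): with no segment boundary restriction
 * (seg_boundary_mask == ULONG_MAX) and a computed offset of 0, mask - offset
 * is ULONG_MAX.  Taking min() against max_segment_size - 1 first and only
 * then adding 1 yields max_segment_size without the ULONG_MAX + 1 overflow
 * that the more obvious min(mask - offset + 1, max_segment_size) would hit.
 */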

/**
 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 * @lim:      [in] queue limits to split based on
 * @bv:       [in] bvec to examine
 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 *            by the number of segments from @bv that may be appended to that
 *            bio without exceeding @max_segs
 * @bytes:    [in,out] Number of bytes in the bio being built. Incremented
 *            by the number of bytes from @bv that may be appended to that
 *            bio without exceeding @max_bytes
 * @max_segs: [in] upper bound for *@nsegs
 * @max_bytes: [in] upper bound for *@bytes
 *
 * When splitting a bio, it can happen that a bvec is encountered that is too
 * big to fit in a single segment and hence that it has to be split in the
 * middle. This function verifies whether or not that should happen. The value
 * %true is returned if and only if appending the entire @bv to a bio with
 * *@nsegs segments and *@bytes bytes would make that bio unacceptable for
 * the block driver.
 */
static bool bvec_split_segs(const struct queue_limits *lim,
		const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
		unsigned max_segs, unsigned max_bytes)
{
	unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
	unsigned len = min(bv->bv_len, max_len);
	unsigned total_len = 0;
	unsigned seg_size = 0;

	while (len && *nsegs < max_segs) {
		seg_size = get_max_segment_size(lim, bv->bv_page,
						bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		(*nsegs)++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
			break;
	}

	*bytes += total_len;

	/* tell the caller to split the bvec if it is too big to fit */
	return len > 0 || bv->bv_len > max_len;
}

/**
 * bio_split_rw - split a bio in two bios
 * @bio:  [in] bio to be split
 * @lim:  [in] queue limits to split based on
 * @segs: [out] number of segments in the bio with the first half of the sectors
 * @bs:   [in] bio set to allocate the clone from
 * @max_bytes: [in] maximum number of bytes per bio
 *
 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 * following is guaranteed for the cloned bio:
 * - That it has at most @max_bytes worth of data
 * - That it has at most @lim->max_segments segments.
 *
 * Except for discard requests the cloned bio will point at the bi_io_vec of
 * the original bio. It is the responsibility of the caller to ensure that the
 * original bio is not freed before the cloned bio. The caller is also
 * responsible for ensuring that @bs is only destroyed after processing of the
 * split bio has finished.
 */
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *segs, struct bio_set *bs, unsigned max_bytes)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, bytes = 0;

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
			goto split;

		if (nsegs < lim->max_segments &&
		    bytes + bv.bv_len <= max_bytes &&
		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			bytes += bv.bv_len;
		} else {
			if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
					lim->max_segments, max_bytes))
				goto split;
		}

		bvprv = bv;
		bvprvp = &bvprv;
	}

	*segs = nsegs;
	return NULL;
split:
	/*
	 * We can't sanely support splitting for a REQ_NOWAIT bio. End it
	 * with EAGAIN if splitting is required and return an error pointer.
	 */
	if (bio->bi_opf & REQ_NOWAIT) {
		bio->bi_status = BLK_STS_AGAIN;
		bio_endio(bio);
		return ERR_PTR(-EAGAIN);
	}

	*segs = nsegs;

	/*
	 * Individual bvecs might not be logical block aligned. Round down the
	 * split size so that each bio is properly block size aligned, even if
	 * we do not use the full hardware limits.
	 */
	bytes = ALIGN_DOWN(bytes, lim->logical_block_size);

	/*
	 * Bio splitting may cause subtle trouble such as hang when doing sync
	 * iopoll in direct IO routine. Given that the performance gain of
	 * iopoll for big IO can be trivial, disable iopoll when a split is
	 * needed.
	 */
	bio_clear_polled(bio);
	return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
}
EXPORT_SYMBOL_GPL(bio_split_rw);

/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio:     bio to be split
 * @lim:     queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it.  @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
struct bio *__bio_split_to_limits(struct bio *bio,
				  const struct queue_limits *lim,
				  unsigned int *nr_segs)
{
	struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
	struct bio *split;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = bio_split_discard(bio, lim, nr_segs, bs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
		break;
	default:
		split = bio_split_rw(bio, lim, nr_segs, bs,
				get_max_io_size(bio, lim) << SECTOR_SHIFT);
		if (IS_ERR(split))
			return NULL;
		break;
	}

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		blkcg_bio_issue_init(split);
		bio_chain(split, bio);
		trace_block_split(split, bio->bi_iter.bi_sector);
		submit_bio_noacct(bio);
		return split;
	}
	return bio;
}
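
/*
 * Illustrative walk-through: a 1 MiB bio on a queue that allows 256 KiB per
 * request comes back from the switch above as a 256 KiB clone.  The original
 * bio then describes the remaining 768 KiB, is chained behind the clone, and
 * is re-submitted via submit_bio_noacct(), where it may be split again until
 * it fits the limits.
 */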

/**
 * bio_split_to_limits - split a bio to fit the queue limits
 * @bio:  bio to be split
 *
 * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
 * if so split off a bio fitting the limits from the beginning of @bio and
 * return it.  @bio is shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
struct bio *bio_split_to_limits(struct bio *bio)
{
	const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
	unsigned int nr_segs;

	if (bio_may_exceed_limits(bio, lim))
		return __bio_split_to_limits(bio, lim, &nr_segs);
	return bio;
}
EXPORT_SYMBOL(bio_split_to_limits);

unsigned int blk_recalc_rq_segments(struct request *rq)
{
	unsigned int nr_phys_segs = 0;
	unsigned int bytes = 0;
	struct req_iterator iter;
	struct bio_vec bv;

	if (!rq->bio)
		return 0;

	switch (bio_op(rq->bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		if (queue_max_discard_segments(rq->q) > 1) {
			struct bio *bio = rq->bio;

			for_each_bio(bio)
				nr_phys_segs++;
			return nr_phys_segs;
		}
		return 1;
	case REQ_OP_WRITE_ZEROES:
		return 0;
	default:
		break;
	}

	rq_for_each_bvec(bv, rq, iter)
		bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
				UINT_MAX, UINT_MAX);
	return nr_phys_segs;
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping. We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}

static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = min(get_max_segment_size(&q->limits,
					bvec->bv_page, offset), nbytes);
		struct page *page = bvec->bv_page;

		/*
		 * Unfortunately a fair number of drivers barf on scatterlists
		 * that have an offset larger than PAGE_SIZE, despite other
		 * subsystems dealing with that invariant just fine. For now
		 * stick to the legacy format where we never present those from
		 * the block layer, but the code below should be removed once
		 * these offenders (mostly MMC/SD drivers) are fixed.
		 */
		page += (offset >> PAGE_SHIFT);
		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
		nsegs++;
	}

	return nsegs;
}

static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
			   struct bio_vec *bvprv, struct scatterlist **sg)
{
	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

	if ((*sg)->length + nbytes > queue_max_segment_size(q))
		return false;

	if (!biovec_phys_mergeable(q, bvprv, bvec))
		return false;

	(*sg)->length += nbytes;

	return true;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

	for_each_bio(bio) {
		bio_for_each_bvec(bvec, bio, iter) {
			/*
			 * Only try to merge bvecs from two bios given we
			 * have done bio internal merge when adding pages
			 * to bio
			 */
			if (new_bio &&
			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
				goto next_bvec;

			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
			else
				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
			new_bio = false;
		}
		if (likely(bio->bi_iter.bi_size)) {
			bvprv = bvec;
			new_bio = true;
		}
	}

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg)
{
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

	if (*last_sg)
		sg_mark_end(*last_sg);

	/*
	 * Something is wrong if the computed number of segments is greater
	 * than the request's number of physical segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);
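
/*
 * Typical caller pattern (an illustrative sketch, not a definition from this
 * file): a driver sizes its sg table by blk_rq_nr_phys_segments() and lets
 * the block layer fill it in.  The blk_rq_map_sg() convenience wrapper in
 * <linux/blkdev.h> is essentially:
 *
 *	struct scatterlist *last_sg = NULL;
 *
 *	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
 */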

static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;
	unsigned int max_sectors;

	if (blk_rq_is_passthrough(rq))
		return q->limits.max_hw_sectors;

	max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return max_sectors;
	return min(max_sectors,
		   blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
}

static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
		unsigned int nr_phys_segs)
{
	if (!blk_cgroup_mergeable(req, bio))
		goto no_merge;

	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/* discard request merge won't add new segment */
	if (req_op(req) == REQ_OP_DISCARD)
		return 1;

	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}

int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_back_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static int ll_front_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_front_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > blk_rq_get_max_segments(req))
		return 0;

	if (!blk_cgroup_mergeable(req, next->bio))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	if (!bio_crypt_ctx_merge_rq(req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

static inline blk_opf_t bio_failfast(const struct bio *bio)
{
	if (bio->bi_opf & REQ_RAHEAD)
		return REQ_FAILFAST_MASK;

	return bio->bi_opf & REQ_FAILFAST_MASK;
}

/*
 * After we are marked as MIXED_MERGE, any new RA bio has to be updated
 * as failfast, and the request's failfast has to be updated in case of
 * a front merge.
 */
static inline void blk_update_mixed_merge(struct request *req,
		struct bio *bio, bool front_merge)
{
	if (req->rq_flags & RQF_MIXED_MERGE) {
		if (bio->bi_opf & REQ_RAHEAD)
			bio->bi_opf |= REQ_FAILFAST_MASK;

		if (front_merge) {
			req->cmd_flags &= ~REQ_FAILFAST_MASK;
			req->cmd_flags |= bio->bi_opf & REQ_FAILFAST_MASK;
		}
	}
}

static void blk_account_io_merge_request(struct request *req)
{
	if (blk_do_io_stat(req)) {
		part_stat_lock();
		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
		part_stat_unlock();
	}
}

static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next))
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	blk_crypto_rq_put_keyslot(next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge_request(next);

	trace_block_rq_merge(next);

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}

static struct request *attempt_back_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

static struct request *attempt_front_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

/*
 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
 * otherwise. The caller is responsible for freeing 'next' if the merge
 * happened.
 */
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			   struct request *next)
{
	return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* don't merge across cgroup boundaries */
	if (!blk_cgroup_mergeable(rq, bio))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* Only merge if the crypt contexts are compatible */
	if (!bio_crypt_rq_ctx_compatible(rq, bio))
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	return true;
}

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
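
/*
 * Worked example (illustrative): for a request starting at sector 2048 and
 * spanning 8 sectors, a bio starting at sector 2056 passes the back-merge
 * check (2048 + 8 == 2056), while an 8-sector bio starting at sector 2040
 * passes the front-merge check (2048 - 8 == 2040).
 */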

static void blk_account_io_merge_bio(struct request *req)
{
	if (!blk_do_io_stat(req))
		return;

	part_stat_lock();
	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
	part_stat_unlock();
}

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

static enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const blk_opf_t ff = bio_failfast(bio);

	if (!ll_back_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_backmerge(bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	blk_update_mixed_merge(req, bio, false);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_free_ctx(bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_front_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const blk_opf_t ff = bio_failfast(bio);

	if (!ll_front_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_frontmerge(bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	blk_update_mixed_merge(req, bio, true);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_do_front_merge(req, bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
		struct request *req, struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	rq_qos_merge(q, req, bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->nr_phys_segments = segments + 1;

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
no_merge:
	req_set_nomerge(q, req);
	return BIO_MERGE_FAILED;
}

static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
						   struct request *rq,
						   struct bio *bio,
						   unsigned int nr_segs,
						   bool sched_allow_merge)
{
	if (!blk_rq_merge_ok(rq, bio))
		return BIO_MERGE_NONE;

	switch (blk_try_merge(rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_back_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_FRONT_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_front_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return BIO_MERGE_NONE;
	}

	return BIO_MERGE_FAILED;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @nr_segs: number of segments in @bio
 *
 * Determine whether @bio being queued on @q can be merged with the previous
 * request on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added to the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside the queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct blk_plug *plug;
	struct request *rq;

	plug = blk_mq_plug(bio);
	if (!plug || rq_list_empty(plug->mq_list))
		return false;

	rq_list_for_each(&plug->mq_list, rq) {
		if (rq->q == q) {
			if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
			    BIO_MERGE_OK)
				return true;
			break;
		}

		/*
		 * Only keep iterating plug list for merges if we have multiple
		 * queues
		 */
		if (!plug->multiple_queues)
			break;
	}
	return false;
}

/*
 * Iterate list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		if (!checked--)
			break;

		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
		case BIO_MERGE_NONE:
			continue;
		case BIO_MERGE_OK:
			return true;
		case BIO_MERGE_FAILED:
			return false;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_bio_list_merge);

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);