Lines Matching refs:bio

7 #include <linux/bio.h>
32 struct bio *free_list;
33 struct bio *free_list_irq;
68 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
93 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
115 return bs->front_pad + sizeof(struct bio) + bs->back_pad;
144 if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
214 void bio_uninit(struct bio *bio)
217 if (bio->bi_blkg) {
218 blkg_put(bio->bi_blkg);
219 bio->bi_blkg = NULL;
222 if (bio_integrity(bio))
223 bio_integrity_free(bio);
225 bio_crypt_free_ctx(bio);
229 static void bio_free(struct bio *bio)
231 struct bio_set *bs = bio->bi_pool;
232 void *p = bio;
236 bio_uninit(bio);
237 bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
242 * Users of this function have their own bio allocation. Subsequently,
244 * when IO has completed, or when the bio is released.
246 void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
249 bio->bi_next = NULL;
250 bio->bi_bdev = bdev;
251 bio->bi_opf = opf;
252 bio->bi_flags = 0;
253 bio->bi_ioprio = 0;
254 bio->bi_status = 0;
255 bio->bi_iter.bi_sector = 0;
256 bio->bi_iter.bi_size = 0;
257 bio->bi_iter.bi_idx = 0;
258 bio->bi_iter.bi_bvec_done = 0;
259 bio->bi_end_io = NULL;
260 bio->bi_private = NULL;
262 bio->bi_blkg = NULL;
263 bio->bi_issue.value = 0;
265 bio_associate_blkg(bio);
267 bio->bi_iocost_cost = 0;
271 bio->bi_crypt_context = NULL;
274 bio->bi_integrity = NULL;
276 bio->bi_vcnt = 0;
278 atomic_set(&bio->__bi_remaining, 1);
279 atomic_set(&bio->__bi_cnt, 1);
280 bio->bi_cookie = BLK_QC_T_NONE;
282 bio->bi_max_vecs = max_vecs;
283 bio->bi_io_vec = table;
284 bio->bi_pool = NULL;
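
The lines above show bio_init() resetting every field by hand. A minimal usage sketch (not taken from bio.c) of the common on-stack pattern, assuming a valid bdev and page and omitting error handling:

/* Hedged sketch: caller-owned, on-stack bio over one caller-provided bvec. */
static int example_onstack_read(struct block_device *bdev, struct page *page,
				sector_t sector)
{
	struct bio_vec bvec;
	struct bio bio;
	int ret;

	bio_init(&bio, bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = sector;
	__bio_add_page(&bio, page, PAGE_SIZE, 0);
	ret = submit_bio_wait(&bio);	/* synchronous; returns -errno or 0 */
	bio_uninit(&bio);		/* pairs with bio_init() */
	return ret;
}
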
289 * bio_reset - reinitialize a bio
290 * @bio: bio to reset
291 * @bdev: block device to use the bio for
292 * @opf: operation and flags for bio
295 * After calling bio_reset(), @bio will be in the same state as a freshly
296 * allocated bio returned by bio_alloc_bioset() - the only fields that are
298 * comment in struct bio.
300 void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)
302 bio_uninit(bio);
303 memset(bio, 0, BIO_RESET_BYTES);
304 atomic_set(&bio->__bi_remaining, 1);
305 bio->bi_bdev = bdev;
306 if (bio->bi_bdev)
307 bio_associate_blkg(bio);
308 bio->bi_opf = opf;
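
A minimal sketch of reusing one on-stack bio via bio_reset(): the bvec table pointer and bi_max_vecs survive the reset, but bi_vcnt does not, so the page has to be added again. Names and offsets are illustrative:

static void example_reset_reuse(struct block_device *bdev, struct page *page)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, bdev, &bvec, 1, REQ_OP_READ);
	__bio_add_page(&bio, page, PAGE_SIZE, 0);
	submit_bio_wait(&bio);

	/* back to a freshly initialized state; re-add the payload */
	bio_reset(&bio, bdev, REQ_OP_READ);
	bio.bi_iter.bi_sector = PAGE_SIZE >> SECTOR_SHIFT;	/* next page */
	__bio_add_page(&bio, page, PAGE_SIZE, 0);
	submit_bio_wait(&bio);

	bio_uninit(&bio);
}
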
312 static struct bio *__bio_chain_endio(struct bio *bio)
314 struct bio *parent = bio->bi_private;
316 if (bio->bi_status && !parent->bi_status)
317 parent->bi_status = bio->bi_status;
318 bio_put(bio);
322 static void bio_chain_endio(struct bio *bio)
324 bio_endio(__bio_chain_endio(bio));
328 * bio_chain - chain bio completions
329 * @bio: the target bio
330 * @parent: the parent bio of @bio
332 * The caller won't have a bi_end_io called when @bio completes - instead,
333 * @parent's bi_end_io won't be called until both @parent and @bio have
334 * completed; the chained bio will also be freed when it completes.
336 * The caller must not set bi_private or bi_end_io in @bio.
338 void bio_chain(struct bio *bio, struct bio *parent)
340 BUG_ON(bio->bi_private || bio->bi_end_io);
342 bio->bi_private = parent;
343 bio->bi_end_io = bio_chain_endio;
348 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
351 struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);
353 if (bio) {
354 bio_chain(bio, new);
355 submit_bio(bio);
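
blk_next_bio() above chains an existing bio to a freshly allocated one and submits the old one; the sketch below, with hypothetical names, shows the other common direction: a newly allocated child chained to a parent the caller still owns and will submit or complete later.

static void example_chain_child(struct bio *parent, struct page *page)
{
	/* GFP_NOIO allocation from fs_bio_set cannot fail */
	struct bio *child = bio_alloc(parent->bi_bdev, 1, parent->bi_opf,
				      GFP_NOIO);

	child->bi_iter.bi_sector = parent->bi_iter.bi_sector +
				   bio_sectors(parent);
	__bio_add_page(child, page, PAGE_SIZE, 0);
	bio_chain(child, parent);	/* parent's end_io waits for both */
	submit_bio(child);
	/* caller goes on to submit @parent; the child frees itself */
}
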
365 struct bio *bio;
369 bio = bio_list_pop(&bs->rescue_list);
372 if (!bio)
375 submit_bio_noacct(bio);
382 struct bio *bio;
388 * were allocated from this bio_set; otherwise, if there was a bio on
393 * Since bio lists are singly linked, pop them all instead of trying to
400 while ((bio = bio_list_pop(&current->bio_list[0])))
401 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
405 while ((bio = bio_list_pop(&current->bio_list[1])))
406 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
432 static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
437 struct bio *bio;
448 bio = cache->free_list;
449 cache->free_list = bio->bi_next;
453 bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf);
454 bio->bi_pool = bs;
455 return bio;
459 * bio_alloc_bioset - allocate a bio for I/O
460 * @bdev: block device to allocate the bio for (can be %NULL)
462 * @opf: operation and flags for bio
466 * Allocate a bio from the mempools in @bs.
469 * allocate a bio. This is due to the mempool guarantees. To make this work,
470 * callers must never allocate more than 1 bio at a time from the general pool.
471 * Callers that need to allocate more than 1 bio must always submit the
472 * previously allocated bio for IO before attempting to allocate a new one.
488 * for per bio allocations.
490 * Returns: Pointer to new bio on success, NULL on failure.
492 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
497 struct bio *bio;
506 bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
508 if (bio)
509 return bio;
511 * No cached bio available, bio returned below marked with
554 bio = p + bs->front_pad;
567 bio_init(bio, bdev, bvl, nr_vecs, opf);
569 bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
571 bio_init(bio, bdev, NULL, 0, opf);
574 bio->bi_pool = bs;
575 return bio;
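
A minimal sketch of a driver-private bio_set, so allocations get the mempool forward-progress guarantee described in the comment above. The names are illustrative, not an existing in-tree user:

static struct bio_set example_bs;

static int __init example_setup(void)
{
	/* small pool of bios and bvecs, no front pad */
	return bioset_init(&example_bs, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
}

static struct bio *example_alloc(struct block_device *bdev, sector_t sector)
{
	/* never hold more than one un-submitted bio from this pool */
	struct bio *bio = bio_alloc_bioset(bdev, 1, REQ_OP_WRITE, GFP_NOIO,
					   &example_bs);

	bio->bi_iter.bi_sector = sector;
	return bio;
}
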
584 * bio_kmalloc - kmalloc a bio
588 * Use kmalloc to allocate a bio (including bvecs). The bio must be initialized
589 * using bio_init() before use. To free a bio returned from this function use
590 * kfree() after calling bio_uninit(). A bio returned from this function can
597 * Returns: Pointer to new bio on success, NULL on failure.
599 struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
601 struct bio *bio;
605 return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask);
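
A minimal sketch of the bio_kmalloc() lifecycle described above: the returned memory has room for nr_vecs inline bvecs but is otherwise uninitialized, so it must be paired with bio_init() before use and bio_uninit()/kfree() afterwards:

static struct bio *example_kmalloc_bio(struct block_device *bdev,
				       unsigned short nr_vecs)
{
	struct bio *bio = bio_kmalloc(nr_vecs, GFP_KERNEL);

	if (!bio)
		return NULL;
	bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
	return bio;
}

static void example_kfree_bio(struct bio *bio)
{
	bio_uninit(bio);
	kfree(bio);
}
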
609 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
614 __bio_for_each_segment(bv, bio, iter, start)
620 * bio_truncate - truncate the bio to the smaller size @new_size
621 * @bio: the bio to be truncated
622 * @new_size: new size for truncating the bio
625 * Truncate the bio to the new size @new_size. If bio_op(bio) is
627 * be used for handling corner cases, such as bio eod.
629 static void bio_truncate(struct bio *bio, unsigned new_size)
636 if (new_size >= bio->bi_iter.bi_size)
639 if (bio_op(bio) != REQ_OP_READ)
642 bio_for_each_segment(bv, bio, iter) {
660 * fs bio user has to retrieve all pages via bio_for_each_segment_all
663 * It is enough to truncate bio by updating .bi_size since we can make
666 bio->bi_iter.bi_size = new_size;
671 * @bio: bio to truncate
676 * We'll just truncate the bio to the size of the device, and clear the end of
681 void guard_bio_eod(struct bio *bio)
683 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
693 if (unlikely(bio->bi_iter.bi_sector >= maxsector))
696 maxsector -= bio->bi_iter.bi_sector;
697 if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
700 bio_truncate(bio, maxsector << 9);
707 struct bio *bio;
709 while ((bio = cache->free_list) != NULL) {
710 cache->free_list = bio->bi_next;
712 bio_free(bio);
760 static inline void bio_put_percpu_cache(struct bio *bio)
764 cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
767 bio_free(bio);
771 bio_uninit(bio);
773 if ((bio->bi_opf & REQ_POLLED) && !WARN_ON_ONCE(in_interrupt())) {
774 bio->bi_next = cache->free_list;
775 bio->bi_bdev = NULL;
776 cache->free_list = bio;
782 bio->bi_next = cache->free_list_irq;
783 cache->free_list_irq = bio;
791 * bio_put - release a reference to a bio
792 * @bio: bio to release reference to
795 * Put a reference to a &struct bio, either one you have gotten with
796 * bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
798 void bio_put(struct bio *bio)
800 if (unlikely(bio_flagged(bio, BIO_REFFED))) {
801 BUG_ON(!atomic_read(&bio->__bi_cnt));
802 if (!atomic_dec_and_test(&bio->__bi_cnt))
805 if (bio->bi_opf & REQ_ALLOC_CACHE)
806 bio_put_percpu_cache(bio);
808 bio_free(bio);
812 static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
814 bio_set_flag(bio, BIO_CLONED);
815 bio->bi_ioprio = bio_src->bi_ioprio;
816 bio->bi_iter = bio_src->bi_iter;
818 if (bio->bi_bdev) {
819 if (bio->bi_bdev == bio_src->bi_bdev &&
821 bio_set_flag(bio, BIO_REMAPPED);
822 bio_clone_blkg_association(bio, bio_src);
825 if (bio_crypt_clone(bio, bio_src, gfp) < 0)
828 bio_integrity_clone(bio, bio_src, gfp) < 0)
834 * bio_alloc_clone - clone a bio that shares the original bio's biovec
836 * @bio_src: bio to clone from
840 * Allocate a new bio that is a clone of @bio_src. The caller owns the returned
841 * bio, but not the actual data it points to.
843 * The caller must ensure that the returned bio is not freed before @bio_src.
845 struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
848 struct bio *bio;
850 bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
851 if (!bio)
854 if (__bio_clone(bio, bio_src, gfp) < 0) {
855 bio_put(bio);
858 bio->bi_io_vec = bio_src->bi_io_vec;
860 return bio;
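
A minimal stacking-driver style sketch of bio_alloc_clone(): the clone shares @bio_src's biovec, is pointed at a lower device, and completes the original from its end_io. "lower_bdev" and both helpers are hypothetical:

static void example_clone_endio(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	orig->bi_status = clone->bi_status;
	bio_put(clone);
	bio_endio(orig);
}

static void example_remap(struct bio *orig, struct block_device *lower_bdev)
{
	struct bio *clone = bio_alloc_clone(lower_bdev, orig, GFP_NOIO,
					    &fs_bio_set);

	if (!clone) {
		bio_io_error(orig);
		return;
	}
	clone->bi_private = orig;
	clone->bi_end_io = example_clone_endio;
	submit_bio_noacct(clone);
}
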
865 * bio_init_clone - clone a bio that shares the original bio's biovec
867 * @bio: bio to clone into
868 * @bio_src: bio to clone from
871 * Initialize a new bio in caller provided memory that is a clone of @bio_src.
872 * The caller owns the returned bio, but not the actual data it points to.
874 * The caller must ensure that @bio_src is not freed before @bio.
876 int bio_init_clone(struct block_device *bdev, struct bio *bio,
877 struct bio *bio_src, gfp_t gfp)
881 bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
882 ret = __bio_clone(bio, bio_src, gfp);
884 bio_uninit(bio);
890 * bio_full - check if the bio is full
891 * @bio: bio to check
894 * Return true if @bio is full and one segment with @len bytes can't be
895 * added to the bio, otherwise return false
897 static inline bool bio_full(struct bio *bio, unsigned len)
899 if (bio->bi_vcnt >= bio->bi_max_vecs)
901 if (bio->bi_iter.bi_size > UINT_MAX - len)
953 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
955 * @bio: destination bio
962 * Add a page to a bio while respecting the hardware max_sectors, max_segment
965 int bio_add_hw_page(struct request_queue *q, struct bio *bio,
969 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
972 if (((bio->bi_iter.bi_size + len) >> SECTOR_SHIFT) > max_sectors)
975 if (bio->bi_vcnt > 0) {
976 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
980 bio->bi_iter.bi_size += len;
984 if (bio->bi_vcnt >=
985 min(bio->bi_max_vecs, queue_max_segments(q)))
996 bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, offset);
997 bio->bi_vcnt++;
998 bio->bi_iter.bi_size += len;
1003 * bio_add_pc_page - attempt to add page to passthrough bio
1005 * @bio: destination bio
1011 * number of reasons, such as the bio being full or target block device
1012 limitations. The target block device must allow bios up to PAGE_SIZE,
1013 * so it is always possible to add a single page to an empty bio.
1017 int bio_add_pc_page(struct request_queue *q, struct bio *bio,
1021 return bio_add_hw_page(q, bio, page, len, offset,
1027 * bio_add_zone_append_page - attempt to add page to zone-append bio
1028 * @bio: destination bio
1033 * Attempt to add a page to the bio_vec maplist of a bio that will be submitted
1035 * bio being full or the target block device is not a zoned block device or
1037 allow bios up to PAGE_SIZE, so it is always possible to add a single page
1038 * to an empty bio.
1040 * Returns: number of bytes added to the bio, or 0 in case of a failure.
1042 int bio_add_zone_append_page(struct bio *bio, struct page *page,
1045 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1048 if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
1051 if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev)))
1054 return bio_add_hw_page(q, bio, page, len, offset,
1060 * __bio_add_page - add page(s) to a bio in a new segment
1061 * @bio: destination bio
1066 * Add the data at @page + @off to @bio as a new bvec. The caller must ensure
1067 * that @bio has space for another bvec.
1069 void __bio_add_page(struct bio *bio, struct page *page,
1072 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
1073 WARN_ON_ONCE(bio_full(bio, len));
1075 bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off);
1076 bio->bi_iter.bi_size += len;
1077 bio->bi_vcnt++;
1082 * bio_add_page - attempt to add page(s) to bio
1083 * @bio: destination bio
1089 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
1091 int bio_add_page(struct bio *bio, struct page *page,
1096 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1098 if (bio->bi_iter.bi_size > UINT_MAX - len)
1101 if (bio->bi_vcnt > 0 &&
1102 bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
1104 bio->bi_iter.bi_size += len;
1108 if (bio->bi_vcnt >= bio->bi_max_vecs)
1110 __bio_add_page(bio, page, len, offset);
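
A minimal sketch of the usual fill loop around bio_add_page(), stopping as soon as it reports that no more data fits; "pages" and "nr" are caller-provided:

static unsigned int example_fill_bio(struct bio *bio, struct page **pages,
				     unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++)
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
			break;	/* bio full (or cloned): submit, then retry */
	return i;		/* number of pages actually added */
}
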
1115 void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
1120 __bio_add_page(bio, &folio->page, len, off);
1124 * bio_add_folio - Attempt to add part of a folio to a bio.
1125 * @bio: BIO to add to.
1137 bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
1142 return bio_add_page(bio, &folio->page, len, off) > 0;
1146 void __bio_release_pages(struct bio *bio, bool mark_dirty)
1150 bio_for_each_folio_all(fi, bio) {
1163 bio_release_page(bio, page++);
1169 void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
1173 WARN_ON_ONCE(bio->bi_max_vecs);
1175 if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
1176 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1182 bio->bi_vcnt = iter->nr_segs;
1183 bio->bi_io_vec = (struct bio_vec *)iter->bvec;
1184 bio->bi_iter.bi_bvec_done = iter->iov_offset;
1185 bio->bi_iter.bi_size = size;
1186 bio_set_flag(bio, BIO_CLONED);
1189 static int bio_iov_add_page(struct bio *bio, struct page *page,
1194 if (WARN_ON_ONCE(bio->bi_iter.bi_size > UINT_MAX - len))
1197 if (bio->bi_vcnt > 0 &&
1198 bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
1200 bio->bi_iter.bi_size += len;
1202 bio_release_page(bio, page);
1205 __bio_add_page(bio, page, len, offset);
1209 static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page,
1212 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1215 if (bio_add_hw_page(q, bio, page, len, offset,
1219 bio_release_page(bio, page);
1226 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
1227 * @bio: bio to add pages to
1230 * Extracts pages from *iter and appends them to @bio's bvec array. The pages
1235 static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1238 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
1239 unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
1240 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1248 * Move page array up in the allocated memory for the bio vecs as far as
1255 if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
1262 * result to ensure the bio's total size is correct. The remainder of
1263 * the iov data will be picked up in the next bio iteration.
1266 UINT_MAX - bio->bi_iter.bi_size,
1273 if (bio->bi_bdev) {
1274 size_t trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1);
1288 if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
1289 ret = bio_iov_add_zone_append_page(bio, page, len,
1294 bio_iov_add_page(bio, page, len, offset);
1302 bio_release_page(bio, pages[i++]);
1308 * bio_iov_iter_get_pages - add user or kernel pages to a bio
1309 * @bio: bio to add pages to
1319 * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
1323 * fit into the bio, or are requested in @iter, whatever is smaller. If
1327 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1331 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1335 bio_iov_bvec_set(bio, iter);
1336 iov_iter_advance(iter, bio->bi_iter.bi_size);
1341 bio_set_flag(bio, BIO_PAGE_PINNED);
1343 ret = __bio_iov_iter_get_pages(bio, iter);
1344 } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
1346 return bio->bi_vcnt ? 0 : ret;
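
A minimal sketch of the direct-I/O pattern around bio_iov_iter_get_pages(): pin user pages into a freshly allocated bio, submit synchronously, then drop the page references. Error handling is reduced to the minimum and the helper name is hypothetical:

static int example_dio_write(struct block_device *bdev, struct iov_iter *iter,
			     loff_t pos)
{
	struct bio *bio = bio_alloc(bdev,
				    bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
				    REQ_OP_WRITE | REQ_SYNC, GFP_KERNEL);
	int ret;

	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	ret = bio_iov_iter_get_pages(bio, iter);
	if (ret) {
		bio_put(bio);
		return ret;
	}
	ret = submit_bio_wait(bio);
	bio_release_pages(bio, false);	/* unpin; no dirtying for writes */
	bio_put(bio);
	return ret;
}
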
1350 static void submit_bio_wait_endio(struct bio *bio)
1352 complete(bio->bi_private);
1356 * submit_bio_wait - submit a bio, and wait until it completes
1357 * @bio: The &struct bio which describes the I/O
1363 * result in the bio reference being consumed. The caller must drop the reference
1366 int submit_bio_wait(struct bio *bio)
1369 bio->bi_bdev->bd_disk->lockdep_map);
1372 bio->bi_private = &done;
1373 bio->bi_end_io = submit_bio_wait_endio;
1374 bio->bi_opf |= REQ_SYNC;
1375 submit_bio(bio);
1386 return blk_status_to_errno(bio->bi_status);
1390 void __bio_advance(struct bio *bio, unsigned bytes)
1392 if (bio_integrity(bio))
1393 bio_integrity_advance(bio, bytes);
1395 bio_crypt_advance(bio, bytes);
1396 bio_advance_iter(bio, &bio->bi_iter, bytes);
1400 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
1401 struct bio *src, struct bvec_iter *src_iter)
1422 * bio_copy_data - copy contents of data buffers from one bio to another
1423 * @src: source bio
1424 * @dst: destination bio
1429 void bio_copy_data(struct bio *dst, struct bio *src)
1438 void bio_free_pages(struct bio *bio)
1443 bio_for_each_segment_all(bvec, bio, iter_all)
1465 * deferred bio dirtying paths.
1469 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1471 void bio_set_pages_dirty(struct bio *bio)
1475 bio_for_each_folio_all(fi, bio) {
1498 static struct bio *bio_dirty_list;
1505 struct bio *bio, *next;
1512 while ((bio = next) != NULL) {
1513 next = bio->bi_private;
1515 bio_release_pages(bio, true);
1516 bio_put(bio);
1520 void bio_check_pages_dirty(struct bio *bio)
1525 bio_for_each_folio_all(fi, bio) {
1530 bio_release_pages(bio, false);
1531 bio_put(bio);
1535 bio->bi_private = bio_dirty_list;
1536 bio_dirty_list = bio;
1542 static inline bool bio_remaining_done(struct bio *bio)
1548 if (!bio_flagged(bio, BIO_CHAIN))
1551 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1553 if (atomic_dec_and_test(&bio->__bi_remaining)) {
1554 bio_clear_flag(bio, BIO_CHAIN);
1562 * bio_endio - end I/O on a bio
1563 * @bio: bio
1566 * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1567 * way to end I/O on a bio. No one should call bi_end_io() directly on a
1568 * bio unless they own it and thus know that it has an end_io function.
1570 * bio_endio() can be called several times on a bio that has been chained
1574 void bio_endio(struct bio *bio)
1577 if (!bio_remaining_done(bio))
1579 if (!bio_integrity_endio(bio))
1582 rq_qos_done_bio(bio);
1584 if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1585 trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
1586 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1597 if (bio->bi_end_io == bio_chain_endio) {
1598 bio = __bio_chain_endio(bio);
1602 blk_throtl_bio_endio(bio);
1604 bio_uninit(bio);
1605 if (bio->bi_end_io)
1606 bio->bi_end_io(bio);
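
A minimal sketch of a driver completing a bio it received through submit_bio(): set bi_status first, then let bio_endio() propagate the result to the owner's bi_end_io (or to a chained parent):

static void example_complete(struct bio *bio, bool failed)
{
	if (failed)
		bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);		/* never call bi_end_io() directly */
}
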
1611 * bio_split - split a bio
1612 * @bio: bio to split
1613 * @sectors: number of sectors to split from the front of @bio
1615 * @bs: bio set to allocate from
1617 * Allocates and returns a new bio which represents @sectors from the start of
1618 * @bio, and updates @bio to represent the remaining sectors.
1620 * Unless this is a discard request the newly allocated bio will point
1621 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1622 * neither @bio nor @bs are freed before the split bio.
1624 struct bio *bio_split(struct bio *bio, int sectors,
1627 struct bio *split;
1630 BUG_ON(sectors >= bio_sectors(bio));
1633 if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
1636 split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
1645 bio_advance(bio, split->bi_iter.bi_size);
1647 if (bio_flagged(bio, BIO_TRACE_COMPLETION))
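
A minimal sketch modelled on how the block layer splits oversized bios: the front @sectors are carved into a new bio, chained back to the remainder, and submitted; the caller continues with the remainder. The caller must guarantee 0 < sectors < bio_sectors(bio):

static struct bio *example_split_and_submit(struct bio *bio, int sectors,
					    struct bio_set *bs)
{
	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

	if (!split)
		return bio;		/* clone failed; keep the original */
	bio_chain(split, bio);		/* remainder completes after the front */
	submit_bio_noacct(split);
	return bio;			/* caller continues with the remainder */
}
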
1655 * bio_trim - trim a bio
1656 * @bio: bio to trim
1657 * @offset: number of sectors to trim from the front of @bio
1658 * @size: size we want to trim @bio to, in sectors
1663 void bio_trim(struct bio *bio, sector_t offset, sector_t size)
1666 offset + size > bio_sectors(bio)))
1670 if (offset == 0 && size == bio->bi_iter.bi_size)
1673 bio_advance(bio, offset << 9);
1674 bio->bi_iter.bi_size = size;
1676 if (bio_integrity(bio))
1677 bio_integrity_trim(bio);
1718 * @pool_size: Number of bio and bio_vecs to cache in the mempool
1719 * @front_pad: Number of bytes to allocate in front of the returned bio
1725 * to ask for a number of bytes to be allocated in front of the bio.
1726 * Front pad allocation is useful for embedding the bio inside
1727 * another structure, to avoid allocating extra data to go with the bio.
1728 * Note that the bio must be embedded at the END of that structure always,
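
A minimal sketch of the front_pad embedding described above: the bio sits at the end of a hypothetical per-I/O structure, front_pad is sized with offsetof(), and container_of() recovers the context from the bio:

struct example_io {
	void		*private_data;
	struct bio	bio;		/* must be the last member */
};

static struct bio_set example_pad_bs;

static int example_pad_init(void)
{
	return bioset_init(&example_pad_bs, BIO_POOL_SIZE,
			   offsetof(struct example_io, bio),
			   BIOSET_NEED_BVECS);
}

static struct example_io *example_io_from_bio(struct bio *bio)
{
	return container_of(bio, struct example_io, bio);
}
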
1786 BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags));
1798 cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
1803 panic("bio: can't allocate bios\n");
1806 panic("bio: can't create integrity pool\n");