Lines Matching refs:bio

7 #include <linux/bio.h>
28 * Test patch to inline a certain number of bi_io_vec's inside the bio
29 * itself, to shrink a bio data allocation from two mempool calls to one
45 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
66 unsigned int sz = sizeof(struct bio) + extra_size;
106 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
134 if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
235 void bio_uninit(struct bio *bio)
238 if (bio->bi_blkg) {
239 blkg_put(bio->bi_blkg);
240 bio->bi_blkg = NULL;
243 if (bio_integrity(bio))
244 bio_integrity_free(bio);
246 bio_crypt_free_ctx(bio);
250 static void bio_free(struct bio *bio)
252 struct bio_set *bs = bio->bi_pool;
255 bio_uninit(bio);
258 bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
261 * If we have front padding, adjust the bio pointer before freeing
263 p = bio;
269 kfree(bio);
274 * Users of this function have their own bio allocation. Subsequently,
276 * when IO has completed, or when the bio is released.
278 void bio_init(struct bio *bio, struct bio_vec *table,
281 memset(bio, 0, sizeof(*bio));
282 atomic_set(&bio->__bi_remaining, 1);
283 atomic_set(&bio->__bi_cnt, 1);
285 bio->bi_io_vec = table;
286 bio->bi_max_vecs = max_vecs;
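The lines above cover bio_init(), which prepares a caller-owned bio (often on the stack) around a caller-supplied bio_vec table. A minimal sketch of that usage, not taken from bio.c: a synchronous single-page read with one inline bio_vec. The names example_read_page, bdev, page and sector are assumptions for illustration.

#include <linux/bio.h>
#include <linux/blkdev.h>

static int example_read_page(struct block_device *bdev, struct page *page,
			     sector_t sector)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, &bvec, 1);		/* caller owns the bio and its bvec table */
	bio_set_dev(&bio, bdev);
	bio.bi_iter.bi_sector = sector;
	bio.bi_opf = REQ_OP_READ;
	__bio_add_page(&bio, page, PAGE_SIZE, 0);

	/* On-stack bios are not reference counted; do not bio_put() them. */
	return submit_bio_wait(&bio);
}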
291 * bio_reset - reinitialize a bio
292 * @bio: bio to reset
295 * After calling bio_reset(), @bio will be in the same state as a freshly
296 * allocated bio returned by bio_alloc_bioset() - the only fields that are
298 * comment in struct bio.
300 void bio_reset(struct bio *bio)
302 unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
304 bio_uninit(bio);
306 memset(bio, 0, BIO_RESET_BYTES);
307 bio->bi_flags = flags;
308 atomic_set(&bio->__bi_remaining, 1);
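Because bio_reset() returns a bio to its freshly-allocated state while preserving its bvec table and bio_set, one bio can be recycled across submissions. A hedged sketch of that reuse (example_read_pages, bdev and pages[] are hypothetical):

static int example_read_pages(struct block_device *bdev, sector_t sector,
			      struct page **pages, unsigned int nr)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);	/* one bvec, reused throughout */
	unsigned int i;
	int ret = 0;

	if (!bio)
		return -ENOMEM;

	for (i = 0; i < nr && !ret; i++) {
		bio_reset(bio);			/* back to freshly-allocated state */
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = sector + i * (PAGE_SIZE >> SECTOR_SHIFT);
		bio->bi_opf = REQ_OP_READ;
		__bio_add_page(bio, pages[i], PAGE_SIZE, 0);
		ret = submit_bio_wait(bio);
	}

	bio_put(bio);
	return ret;
}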
312 static struct bio *__bio_chain_endio(struct bio *bio)
314 struct bio *parent = bio->bi_private;
316 if (bio->bi_status && !parent->bi_status)
317 parent->bi_status = bio->bi_status;
318 bio_put(bio);
322 static void bio_chain_endio(struct bio *bio)
324 bio_endio(__bio_chain_endio(bio));
328 * bio_chain - chain bio completions
329 * @bio: the target bio
330 * @parent: the parent bio of @bio
332 * The caller won't have a bi_end_io called when @bio completes - instead,
333 * @parent's bi_end_io won't be called until both @parent and @bio have
334 * completed; the chained bio will also be freed when it completes.
336 * The caller must not set bi_private or bi_end_io in @bio.
338 void bio_chain(struct bio *bio, struct bio *parent)
340 BUG_ON(bio->bi_private || bio->bi_end_io);
342 bio->bi_private = parent;
343 bio->bi_end_io = bio_chain_endio;
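A hedged sketch of the pattern bio_chain() enables: carve extra work into a child bio, chain it to @parent, and submit it; @parent's completion is simply deferred until the child finishes. example_submit_chained and its parameters are illustrative names.

static void example_submit_chained(struct bio *parent, struct block_device *bdev,
				   struct page *page, sector_t sector)
{
	struct bio *child = bio_alloc(GFP_NOIO, 1);

	bio_set_dev(child, bdev);
	child->bi_iter.bi_sector = sector;
	child->bi_opf = parent->bi_opf;
	__bio_add_page(child, page, PAGE_SIZE, 0);

	bio_chain(child, parent);	/* do not touch the child's bi_private/bi_end_io */
	submit_bio(child);
}

The owner of @parent still calls bio_endio() on it as usual; the chained child merely holds that completion back until it has finished too.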
351 struct bio *bio;
355 bio = bio_list_pop(&bs->rescue_list);
358 if (!bio)
361 submit_bio_noacct(bio);
368 struct bio *bio;
374 * were allocated from this bio_set; otherwise, if there was a bio on
379 * Since bio lists are singly linked, pop them all instead of trying to
386 while ((bio = bio_list_pop(&current->bio_list[0])))
387 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
391 while ((bio = bio_list_pop(&current->bio_list[1])))
392 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
403 * bio_alloc_bioset - allocate a bio for I/O
409 * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
413 * always be able to allocate a bio. This is due to the mempool guarantees.
414 * To make this work, callers must never allocate more than 1 bio at a time
415 * from this pool. Callers that need to allocate more than 1 bio must always
416 * submit the previously allocated bio for IO before attempting to allocate
432 * for per bio allocations.
435 * Pointer to new bio on success, NULL on failure.
437 struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
444 struct bio *bio;
451 p = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
500 bio = p + front_pad;
501 bio_init(bio, NULL, 0);
516 bio->bi_flags |= idx << BVEC_POOL_OFFSET;
518 bvl = bio->bi_inline_vecs;
521 bio->bi_pool = bs;
522 bio->bi_max_vecs = nr_iovecs;
523 bio->bi_io_vec = bvl;
524 return bio;
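A hedged sketch of the allocation rule spelled out above: in a loop, every bio drawn from a bio_set is submitted before the next one is allocated, which is what keeps the mempool's forward-progress guarantee valid. The function and parameter names here are hypothetical.

static void example_write_pages(struct block_device *bdev, struct bio_set *bs,
				struct page **pages, unsigned int nr,
				sector_t sector, bio_end_io_t *end_io)
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, bs);

		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = sector + i * (PAGE_SIZE >> SECTOR_SHIFT);
		bio->bi_opf = REQ_OP_WRITE;
		bio->bi_end_io = end_io;
		__bio_add_page(bio, pages[i], PAGE_SIZE, 0);

		submit_bio(bio);	/* submit before allocating the next bio from @bs */
	}
}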
532 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
538 __bio_for_each_segment(bv, bio, iter, start) {
548 * bio_truncate - truncate the bio to the smaller size @new_size
549 * @bio: the bio to be truncated
550 * @new_size: new size for truncating the bio
553 * Truncate the bio to the new size @new_size. If bio_op(bio) is
555 * be used for handling corner cases, such as bio eod.
557 void bio_truncate(struct bio *bio, unsigned new_size)
564 if (new_size >= bio->bi_iter.bi_size)
567 if (bio_op(bio) != REQ_OP_READ)
570 bio_for_each_segment(bv, bio, iter) {
588 * fs bio user has to retrieve all pages via bio_for_each_segment_all
591 * It is enough to truncate bio by updating .bi_size since we can make
594 bio->bi_iter.bi_size = new_size;
599 * @bio: bio to truncate
604 * We'll just truncate the bio to the size of the device, and clear the end of
609 void guard_bio_eod(struct bio *bio)
615 part = __disk_get_part(bio->bi_disk, bio->bi_partno);
619 maxsector = get_capacity(bio->bi_disk);
630 if (unlikely(bio->bi_iter.bi_sector >= maxsector))
633 maxsector -= bio->bi_iter.bi_sector;
634 if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
637 bio_truncate(bio, maxsector << 9);
641 * bio_put - release a reference to a bio
642 * @bio: bio to release reference to
645 * Put a reference to a &struct bio, either one you have gotten with
646 * bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
648 void bio_put(struct bio *bio)
650 if (!bio_flagged(bio, BIO_REFFED))
651 bio_free(bio);
653 BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
658 if (atomic_dec_and_test(&bio->__bi_cnt))
659 bio_free(bio);
665 * __bio_clone_fast - clone a bio that shares the original bio's biovec
666 * @bio: destination bio
667 * @bio_src: bio to clone
669 * Clone a &bio. Caller will own the returned bio, but not
671 * bio will be one.
673 * Caller must ensure that @bio_src is not freed before @bio.
675 void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
677 BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
683 bio->bi_disk = bio_src->bi_disk;
684 bio->bi_partno = bio_src->bi_partno;
685 bio_set_flag(bio, BIO_CLONED);
687 bio_set_flag(bio, BIO_THROTTLED);
688 bio->bi_opf = bio_src->bi_opf;
689 bio->bi_ioprio = bio_src->bi_ioprio;
690 bio->bi_write_hint = bio_src->bi_write_hint;
691 bio->bi_iter = bio_src->bi_iter;
692 bio->bi_io_vec = bio_src->bi_io_vec;
694 bio_clone_blkg_association(bio, bio_src);
695 blkcg_bio_issue_init(bio);
700 * bio_clone_fast - clone a bio that shares the original bio's biovec
701 * @bio: bio to clone
705 * Like __bio_clone_fast, but also allocates the returned bio
707 struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
709 struct bio *b;
715 __bio_clone_fast(b, bio);
717 if (bio_crypt_clone(b, bio, gfp_mask) < 0)
720 if (bio_integrity(bio) &&
721 bio_integrity_clone(b, bio, gfp_mask) < 0)
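A hedged sketch of how a stacking driver might use bio_clone_fast(): clone the incoming bio, point the clone at a lower device, and complete the original from the clone's endio. example_remap, example_clone_endio, lower and clone_set are illustrative names; a real driver would also handle crypt/integrity clone failures.

static void example_clone_endio(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	if (clone->bi_status)
		orig->bi_status = clone->bi_status;
	bio_put(clone);
	bio_endio(orig);
}

static void example_remap(struct bio *orig, struct block_device *lower,
			  struct bio_set *clone_set)
{
	struct bio *clone = bio_clone_fast(orig, GFP_NOIO, clone_set);

	bio_set_dev(clone, lower);
	clone->bi_private = orig;
	clone->bi_end_io = example_clone_endio;
	submit_bio_noacct(clone);	/* clone shares orig's biovec; orig must outlive it */
}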
732 const char *bio_devname(struct bio *bio, char *buf)
734 return disk_name(bio->bi_disk, bio->bi_partno, buf);
762 static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
766 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
775 return __bio_try_merge_page(bio, page, len, offset, same_page);
779 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
781 * @bio: destination bio
788 * Add a page to a bio while respecting the hardware max_sectors, max_segment
791 int bio_add_hw_page(struct request_queue *q, struct bio *bio,
797 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
800 if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
803 if (bio->bi_vcnt > 0) {
804 if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
811 bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
816 if (bio_full(bio, len))
819 if (bio->bi_vcnt >= queue_max_segments(q))
822 bvec = &bio->bi_io_vec[bio->bi_vcnt];
826 bio->bi_vcnt++;
827 bio->bi_iter.bi_size += len;
832 * bio_add_pc_page - attempt to add page to passthrough bio
834 * @bio: destination bio
840 * number of reasons, such as the bio being full or target block device
841 * limitations. The target block device must allow bios up to PAGE_SIZE,
842 * so it is always possible to add a single page to an empty bio.
846 int bio_add_pc_page(struct request_queue *q, struct bio *bio,
850 return bio_add_hw_page(q, bio, page, len, offset,
857 * @bio: destination bio
863 * Try to add the data at @page + @off to the last bvec of @bio. This is a
871 bool __bio_try_merge_page(struct bio *bio, struct page *page,
874 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
877 if (bio->bi_vcnt > 0) {
878 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
881 if (bio->bi_iter.bi_size > UINT_MAX - len) {
886 bio->bi_iter.bi_size += len;
895 * __bio_add_page - add page(s) to a bio in a new segment
896 * @bio: destination bio
901 * Add the data at @page + @off to @bio as a new bvec. The caller must ensure
902 * that @bio has space for another bvec.
904 void __bio_add_page(struct bio *bio, struct page *page,
907 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
909 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
910 WARN_ON_ONCE(bio_full(bio, len));
916 bio->bi_iter.bi_size += len;
917 bio->bi_vcnt++;
919 if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
920 bio_set_flag(bio, BIO_WORKINGSET);
925 * bio_add_page - attempt to add page(s) to bio
926 * @bio: destination bio
932 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
934 int bio_add_page(struct bio *bio, struct page *page,
939 if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
940 if (bio_full(bio, len))
942 __bio_add_page(bio, page, len, offset);
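Because bio_add_page() reports the number of bytes actually appended, callers typically loop until it refuses. A hedged sketch (hypothetical helper) that adds whole pages and stops cleanly when the bio is full:

static unsigned int example_add_pages(struct bio *bio, struct page **pages,
				      unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++)
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
			break;	/* full or cloned bio: submit it and start a new one */

	return i;	/* number of pages actually added */
}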
948 void bio_release_pages(struct bio *bio, bool mark_dirty)
953 if (bio_flagged(bio, BIO_NO_PAGE_REF))
956 bio_for_each_segment_all(bvec, bio, iter_all) {
964 static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
974 size = bio_add_page(bio, bv->bv_page, len,
993 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
994 * @bio: bio to add pages to
997 * Pins pages from *iter and appends them to @bio's bvec array. The
1002 static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1004 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
1005 unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
1006 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1014 * Move page array up in the allocated memory for the bio vecs as far as
1030 if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
1034 if (WARN_ON_ONCE(bio_full(bio, len))) {
1038 __bio_add_page(bio, page, len, offset);
1047 static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
1049 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
1050 unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
1051 struct request_queue *q = bio->bi_disk->queue;
1053 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1061 * Move page array up in the allocated memory for the bio vecs as far as
1077 if (bio_add_hw_page(q, bio, page, len, offset,
1093 * bio_iov_iter_get_pages - add user or kernel pages to a bio
1094 * @bio: bio to add pages to
1101 * do so, we just have to add the pages to the bio directly. We don't grab an
1103 * don't put the page on IO completion. The caller needs to check if the bio is
1108 * fit into the bio, or are requested in @iter, whichever is smaller. If
1112 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1117 if (WARN_ON_ONCE(bio->bi_vcnt))
1121 if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
1124 ret = __bio_iov_append_get_pages(bio, iter);
1127 ret = __bio_iov_bvec_add_pages(bio, iter);
1129 ret = __bio_iov_iter_get_pages(bio, iter);
1131 } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
1134 bio_set_flag(bio, BIO_NO_PAGE_REF);
1135 return bio->bi_vcnt ? 0 : ret;
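A hedged sketch of the direct-I/O style caller, with hypothetical names and the assumption that @pos is 512-byte aligned: pin the pages described by an iov_iter, do a synchronous read, then release the page references.

static int example_dio_read(struct block_device *bdev, struct iov_iter *iter,
			    loff_t pos)
{
	unsigned int nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
	struct bio *bio = bio_alloc(GFP_KERNEL, nr_pages);
	int ret;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_opf = REQ_OP_READ;

	ret = bio_iov_iter_get_pages(bio, iter);
	if (ret) {
		bio_put(bio);
		return ret;
	}

	ret = submit_bio_wait(bio);
	bio_release_pages(bio, true);	/* the read dirtied the pinned pages */
	bio_put(bio);
	return ret;
}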
1139 static void submit_bio_wait_endio(struct bio *bio)
1141 complete(bio->bi_private);
1145 * submit_bio_wait - submit a bio, and wait until it completes
1146 * @bio: The &struct bio which describes the I/O
1152 * result in the bio reference being consumed. The caller must drop the reference
1155 int submit_bio_wait(struct bio *bio)
1157 DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
1160 bio->bi_private = &done;
1161 bio->bi_end_io = submit_bio_wait_endio;
1162 bio->bi_opf |= REQ_SYNC;
1163 submit_bio(bio);
1174 return blk_status_to_errno(bio->bi_status);
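For example, an empty preflush can be issued synchronously in a few lines, roughly the way blkdev_issue_flush() does it (sketch; example_flush and bdev are assumed names):

static int example_flush(struct block_device *bdev)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);
	int ret;

	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	ret = submit_bio_wait(bio);
	bio_put(bio);		/* submit_bio_wait() does not drop our reference */
	return ret;
}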
1179 * bio_advance - increment/complete a bio by some number of bytes
1180 * @bio: bio to advance
1187 * @bio will then represent the remaining, uncompleted portion of the io.
1189 void bio_advance(struct bio *bio, unsigned bytes)
1191 if (bio_integrity(bio))
1192 bio_integrity_advance(bio, bytes);
1194 bio_crypt_advance(bio, bytes);
1195 bio_advance_iter(bio, &bio->bi_iter, bytes);
1199 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
1200 struct bio *src, struct bvec_iter *src_iter)
1231 * bio_copy_data - copy contents of data buffers from one bio to another
1232 * @src: source bio
1233 * @dst: destination bio
1238 void bio_copy_data(struct bio *dst, struct bio *src)
1250 * @src: source bio list
1251 * @dst: destination bio list
1257 void bio_list_copy_data(struct bio *dst, struct bio *src)
1284 void bio_free_pages(struct bio *bio)
1289 bio_for_each_segment_all(bvec, bio, iter_all)
1317 * deferred bio dirtying paths.
1321 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1323 void bio_set_pages_dirty(struct bio *bio)
1328 bio_for_each_segment_all(bvec, bio, iter_all) {
1348 static struct bio *bio_dirty_list;
1355 struct bio *bio, *next;
1362 while ((bio = next) != NULL) {
1363 next = bio->bi_private;
1365 bio_release_pages(bio, true);
1366 bio_put(bio);
1370 void bio_check_pages_dirty(struct bio *bio)
1376 bio_for_each_segment_all(bvec, bio, iter_all) {
1381 bio_release_pages(bio, false);
1382 bio_put(bio);
1386 bio->bi_private = bio_dirty_list;
1387 bio_dirty_list = bio;
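A hedged sketch of the deferred-dirtying pattern these helpers implement, with hypothetical names: for a read into user pages, dirty the pages before submission, then let bio_check_pages_dirty() re-dirty and release them at completion, punting to a workqueue when that cannot be done in the current context.

static void example_dio_end_io(struct bio *bio)
{
	if (bio_data_dir(bio) == READ) {
		bio_check_pages_dirty(bio);	/* releases the pages and the bio */
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

static void example_dio_submit(struct bio *bio)
{
	if (bio_data_dir(bio) == READ)
		bio_set_pages_dirty(bio);
	bio->bi_end_io = example_dio_end_io;
	submit_bio(bio);
}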
1392 static inline bool bio_remaining_done(struct bio *bio)
1398 if (!bio_flagged(bio, BIO_CHAIN))
1401 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1403 if (atomic_dec_and_test(&bio->__bi_remaining)) {
1404 bio_clear_flag(bio, BIO_CHAIN);
1412 * bio_endio - end I/O on a bio
1413 * @bio: bio
1416 * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1417 * way to end I/O on a bio. No one should call bi_end_io() directly on a
1418 * bio unless they own it and thus know that it has an end_io function.
1420 * bio_endio() can be called several times on a bio that has been chained
1425 void bio_endio(struct bio *bio)
1428 if (!bio_remaining_done(bio))
1430 if (!bio_integrity_endio(bio))
1433 if (bio->bi_disk && bio_flagged(bio, BIO_TRACKED))
1434 rq_qos_done_bio(bio->bi_disk->queue, bio);
1444 if (bio->bi_end_io == bio_chain_endio) {
1445 bio = __bio_chain_endio(bio);
1449 if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1450 trace_block_bio_complete(bio->bi_disk->queue, bio);
1451 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1454 blk_throtl_bio_endio(bio);
1456 bio_uninit(bio);
1457 if (bio->bi_end_io)
1458 bio->bi_end_io(bio);
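From the owner's side, bio_endio() is how a driver hands a finished bio back to its issuer. A hedged sketch of a toy ->submit_bio() style handler (the 5.9-era blk_qc_t signature is assumed) that completes every bio immediately with success:

static blk_qc_t example_submit_bio(struct bio *bio)
{
	/* a real driver would perform or queue the I/O here */
	bio->bi_status = BLK_STS_OK;
	bio_endio(bio);
	return BLK_QC_T_NONE;
}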
1463 * bio_split - split a bio
1464 * @bio: bio to split
1465 * @sectors: number of sectors to split from the front of @bio
1467 * @bs: bio set to allocate from
1469 * Allocates and returns a new bio which represents @sectors from the start of
1470 * @bio, and updates @bio to represent the remaining sectors.
1472 * Unless this is a discard request the newly allocated bio will point
1473 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1474 * neither @bio nor @bs are freed before the split bio.
1476 struct bio *bio_split(struct bio *bio, int sectors,
1479 struct bio *split;
1482 BUG_ON(sectors >= bio_sectors(bio));
1485 if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
1488 split = bio_clone_fast(bio, gfp, bs);
1497 bio_advance(bio, split->bi_iter.bi_size);
1499 if (bio_flagged(bio, BIO_TRACE_COMPLETION))
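A hedged sketch of the canonical split-and-chain pattern in a stacking driver's submit path (max_sectors and split_set are assumptions): carve off what fits, hand the remainder back to the block layer, and keep working on the front part.

	if (bio_sectors(bio) > max_sectors) {
		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, split_set);

		bio_chain(split, bio);		/* @bio completes only after @split */
		submit_bio_noacct(bio);		/* requeue the remaining sectors */
		bio = split;			/* continue with the front part */
	}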
1507 * bio_trim - trim a bio
1508 * @bio: bio to trim
1509 * @offset: number of sectors to trim from the front of @bio
1510 * @size: size we want to trim @bio to, in sectors
1512 void bio_trim(struct bio *bio, int offset, int size)
1514 /* 'bio' is a cloned bio which we need to trim to match
1519 if (offset == 0 && size == bio->bi_iter.bi_size)
1522 bio_advance(bio, offset << 9);
1523 bio->bi_iter.bi_size = size;
1525 if (bio_integrity(bio))
1526 bio_integrity_trim(bio);
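Both arguments are in 512-byte sectors, so a sketch with hypothetical numbers looks like this; the trimmed front is consumed via bio_advance(), which also moves bi_sector forward:

	/* keep 128 sectors of the clone, skipping its first 8 sectors */
	bio_trim(clone, 8, 128);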
1567 * @pool_size: Number of bios and bio_vecs to cache in the mempool
1568 * @front_pad: Number of bytes to allocate in front of the returned bio
1574 * to ask for a number of bytes to be allocated in front of the bio.
1575 * Front pad allocation is useful for embedding the bio inside
1576 * another structure, to avoid allocating extra data to go with the bio.
1577 * Note that the bio must always be embedded at the END of that structure,
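A hedged sketch of the front-pad embedding described above, with hypothetical names: the bio sits at the end of a private structure, front_pad covers everything in front of it, and container_of() recovers the wrapper from the bio returned by bio_alloc_bioset().

#include <linux/bio.h>

struct example_io {
	void *private_data;
	struct bio bio;			/* must be the last member */
};

static struct bio_set example_bio_set;

static int example_bioset_setup(void)
{
	return bioset_init(&example_bio_set, BIO_POOL_SIZE,
			   offsetof(struct example_io, bio),
			   BIOSET_NEED_BVECS);
}

static struct example_io *example_io_alloc(gfp_t gfp)
{
	struct bio *bio = bio_alloc_bioset(gfp, 1, &example_bio_set);

	return bio ? container_of(bio, struct example_io, bio) : NULL;
}

static void example_bioset_teardown(void)
{
	bioset_exit(&example_bio_set);
}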
1670 panic("bio: can't allocate bios\n");
1676 panic("bio: can't allocate bios\n");
1679 panic("bio: can't create integrity pool\n");