// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/blk-cgroup.h>
#include <linux/highmem.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"

/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS		4

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
	BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
					 GFP_KERNEL);
		if (!new_bio_slabs)
			goto out_unlock;
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[--idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	if (!idx)
		return;
	idx--;

	BIO_BUG_ON(idx >= BVEC_POOL_NR);

	if (idx == BVEC_POOL_MAX) {
		mempool_free(bv, pool);
	} else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}

struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * see comment near bvec_array define!
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. only the
	 * 1-vec entry pool is mempool backed.
	 */
	if (*idx == BVEC_POOL_MAX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fallback to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
		 * is set, retry with the 1-entry mempool
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
			*idx = BVEC_POOL_MAX;
			goto fallback;
		}
	}

	(*idx)++;
	return bvl;
}

void bio_uninit(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
#endif
	if (bio_integrity(bio))
		bio_integrity_free(bio);

	bio_crypt_free_ctx(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	bio_uninit(bio);

	if (bs) {
		bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, &bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct bio_vec *table,
	      unsigned short max_vecs)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);

	bio->bi_io_vec = table;
	bio->bi_max_vecs = max_vecs;
}
EXPORT_SYMBOL(bio_init);
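
/*
 * Illustrative sketch (not part of this file): a caller that provides its
 * own bio storage, e.g. an on-stack bio with a single inline bio_vec for a
 * small synchronous read, pairs bio_init() with bio_uninit().  "bdev",
 * "sector" and "page" stand in for hypothetical caller state.
 *
 *	struct bio bio;
 *	struct bio_vec bvec;
 *	int ret;
 *
 *	bio_init(&bio, &bvec, 1);
 *	bio_set_dev(&bio, bdev);
 *	bio.bi_iter.bi_sector = sector;
 *	bio.bi_opf = REQ_OP_READ;
 *	bio_add_page(&bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(&bio);
 *	bio_uninit(&bio);
 */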

/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	bio_uninit(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (bio->bi_status && !parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the parent bio of @bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io	= bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
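
/*
 * Illustrative sketch (not part of this file): a driver that carves a child
 * bio off a parent but wants the parent's completion to wait for it.  The
 * bios shown ("split", "parent") are hypothetical caller state.
 *
 *	struct bio *split = bio_split(parent, sectors, GFP_NOIO, &fs_bio_set);
 *
 *	bio_chain(split, parent);
 *	submit_bio_noacct(split);
 *	submit_bio_noacct(parent);
 *
 * parent's ->bi_end_io() only runs once both bios have completed, and the
 * chained bio is freed from bio_chain_endio() when it finishes.
 */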

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		submit_bio_noacct(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 *   always be able to allocate a bio. This is due to the mempool guarantees.
 *   To make this work, callers must never allocate more than 1 bio at a time
 *   from this pool. Callers that need to allocate more than 1 bio must always
 *   submit the previously allocated bio for IO before attempting to allocate
 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under submit_bio_noacct() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   submit_bio_noacct() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   submit_bio_noacct() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
 *   for per bio allocations.
 *
 *   RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
		if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
				 nr_iovecs > 0))
			return NULL;
		/*
		 * submit_bio_noacct() converts recursion to iteration; this
		 * means if we're running beneath it, any bios we allocate and
		 * submit will not be submitted (and thus freed) until after we
		 * return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set() while running
		 * underneath submit_bio_noacct(). If we were to allocate
		 * multiple bios (say a stacking block driver that was splitting
		 * bios), we would deadlock if we exhausted the mempool's
		 * reserve.
		 *
		 * We solve this, and guarantee forward progress, with a rescuer
		 * workqueue per bio_set. If we go to allocate and there are
		 * bios on current->bio_list, we first try the allocation
		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
		 * bios we would be blocking to the rescuer workqueue before
		 * we retry with the original gfp_flags.
		 */
		if (current->bio_list &&
		    (!bio_list_empty(&current->bio_list[0]) ||
		     !bio_list_empty(&current->bio_list[1])) &&
		    bs->rescue_workqueue)
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(&bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(&bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio, NULL, 0);

	if (nr_iovecs > inline_vecs) {
		unsigned long idx = 0;

		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
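
/*
 * Illustrative sketch (not part of this file): a driver with a private
 * bio_set ("my_bio_set", "bdev", "sector" and "page" are hypothetical)
 * allocating a single-vec bio.  With __GFP_DIRECT_RECLAIM in the mask the
 * mempool guarantees the allocation succeeds, provided only one bio is
 * held unsubmitted at a time.
 *
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &my_bio_set);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(bio);
 *
 * Callers passing a mask without __GFP_DIRECT_RECLAIM (or @bs == NULL) must
 * handle a NULL return.
 */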

void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, bio, iter, start) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio_iter);

/**
 * bio_truncate - truncate the bio to @new_size
 * @bio:	the bio to be truncated
 * @new_size:	new size for truncating the bio
 *
 * Description:
 *   Truncate the bio to the new size of @new_size. If bio_op(bio) is
 *   REQ_OP_READ, zero the truncated part. This function should only
 *   be used for handling corner cases, such as bio eod.
 */
void bio_truncate(struct bio *bio, unsigned new_size)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int done = 0;
	bool truncated = false;

	if (new_size >= bio->bi_iter.bi_size)
		return;

	if (bio_op(bio) != REQ_OP_READ)
		goto exit;

	bio_for_each_segment(bv, bio, iter) {
		if (done + bv.bv_len > new_size) {
			unsigned offset;

			if (!truncated)
				offset = new_size - done;
			else
				offset = 0;
			zero_user(bv.bv_page, bv.bv_offset + offset,
				  bv.bv_len - offset);
			truncated = true;
		}
		done += bv.bv_len;
	}

 exit:
	/*
	 * Don't touch bvec table here and make it really immutable, since
	 * fs bio user has to retrieve all pages via bio_for_each_segment_all
	 * in its .bi_end_io() callback.
	 *
	 * It is enough to truncate bio by updating .bi_size since we can make
	 * correct bvec with the updated .bi_size for drivers.
	 */
	bio->bi_iter.bi_size = new_size;
}

/**
 * guard_bio_eod - truncate a BIO to fit the block device
 * @bio:	bio to truncate
 *
 * This allows us to do IO even on the odd last sectors of a device, even if the
 * block size is some multiple of the physical sector size.
 *
 * We'll just truncate the bio to the size of the device, and clear the end of
 * the buffer head manually. Truly out-of-range accesses will turn into actual
 * I/O errors, this only handles the "we need to be able to do I/O at the final
 * sector" case.
 */
void guard_bio_eod(struct bio *bio)
{
	sector_t maxsector;
	struct hd_struct *part;

	rcu_read_lock();
	part = __disk_get_part(bio->bi_disk, bio->bi_partno);
	if (part)
		maxsector = part_nr_sects_read(part);
	else
		maxsector = get_capacity(bio->bi_disk);
	rcu_read_unlock();

	if (!maxsector)
		return;

	/*
	 * If the *whole* IO is past the end of the device,
	 * let it through, and the IO layer will turn it into
	 * an EIO.
	 */
	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
		return;

	maxsector -= bio->bi_iter.bi_sector;
	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
		return;

	bio_truncate(bio, maxsector << 9);
}

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

/**
 * __bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: destination bio
 * @bio_src: bio to clone
 *
 * Clone a &bio. The caller owns the cloned bio, but not the actual data it
 * points to. The reference count of the cloned bio will be one.
 *
 * Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

	/*
	 * most users will be overriding ->bi_disk with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_disk = bio_src->bi_disk;
	bio->bi_partno = bio_src->bi_partno;
	bio_set_flag(bio, BIO_CLONED);
	if (bio_flagged(bio_src, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 * bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_crypt_clone(b, bio, gfp_mask) < 0)
		goto err_put;

	if (bio_integrity(bio) &&
	    bio_integrity_clone(b, bio, gfp_mask) < 0)
		goto err_put;

	return b;

err_put:
	bio_put(b);
	return NULL;
}
EXPORT_SYMBOL(bio_clone_fast);
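
/*
 * Illustrative sketch (not part of this file): a stacking driver remapping
 * a bio to a lower device usually clones it so the bvec table is shared.
 * "my_bio_set", "lower_bdev" and "clone_end_io" are hypothetical.
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, &my_bio_set);
 *
 *	if (!clone)
 *		return -ENOMEM;
 *	bio_set_dev(clone, lower_bdev);
 *	clone->bi_end_io = clone_end_io;
 *	clone->bi_private = bio;
 *	submit_bio_noacct(clone);
 *
 * The original bio must stay alive until the clone completes, since both
 * point at the same bio_vec array.
 */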

const char *bio_devname(struct bio *bio, char *buf)
{
	return disk_name(bio->bi_disk, bio->bi_partno, buf);
}
EXPORT_SYMBOL(bio_devname);

static inline bool page_is_mergeable(const struct bio_vec *bv,
		struct page *page, unsigned int len, unsigned int off,
		bool *same_page)
{
	size_t bv_end = bv->bv_offset + bv->bv_len;
	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
	phys_addr_t page_addr = page_to_phys(page);

	if (vec_end_addr + 1 != page_addr + off)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
		return false;

	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
	if (*same_page)
		return true;
	return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
}

/*
 * Try to merge a page into a segment, while obeying the hardware segment
 * size limit. This is not for normal read/write bios, but for passthrough
 * or Zone Append operations that we can't split.
 */
static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned len,
		unsigned offset, bool *same_page)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;

	if ((addr1 | mask) != (addr2 | mask))
		return false;
	if (len > queue_max_segment_size(q) - bv->bv_len)
		return false;
	return __bio_try_merge_page(bio, page, len, offset, same_page);
}

/**
 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 * @max_sectors: maximum number of sectors that can be added
 * @same_page: return if the segment has been merged inside the same page
 *
 * Add a page to a bio while respecting the hardware max_sectors, max_segment
 * and gap limitations.
 */
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page)
{
	struct bio_vec *bvec;

	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
		return 0;

	if (bio->bi_vcnt > 0) {
		if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
			return len;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
		if (bvec_gap_to_prev(q, bvec, offset))
			return 0;
	}

	if (bio_full(bio, len))
		return 0;

	if (bio->bi_vcnt >= queue_max_segments(q))
		return 0;

	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_iter.bi_size += len;
	return len;
}

/**
 * bio_add_pc_page - attempt to add page to passthrough bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by passthrough bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset)
{
	bool same_page = false;
	return bio_add_hw_page(q, bio, page, len, offset,
			queue_max_hw_sectors(q), &same_page);
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * __bio_try_merge_page - try appending data to an existing bvec.
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add
 * @off: offset of the data relative to @page
 * @same_page: return if the segment has been merged inside the same page
 *
 * Try to add the data at @page + @off to the last bvec of @bio.  This is a
 * useful optimisation for file systems with a block size smaller than the
 * page size.
 *
 * Warn if (@len, @off) crosses pages in case that @same_page is true.
 *
 * Return %true on success or %false on failure.
 */
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return false;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page_is_mergeable(bv, page, len, off, same_page)) {
			if (bio->bi_iter.bi_size > UINT_MAX - len) {
				*same_page = false;
				return false;
			}
			bv->bv_len += len;
			bio->bi_iter.bi_size += len;
			return true;
		}
	}
	return false;
}
EXPORT_SYMBOL_GPL(__bio_try_merge_page);

/**
 * __bio_add_page - add page(s) to a bio in a new segment
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add, may cross pages
 * @off: offset of the data relative to @page, may cross pages
 *
 * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
 * that @bio has space for another bvec.
 */
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio, len));

	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;

	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;

	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
		bio_set_flag(bio, BIO_WORKINGSET);
}
EXPORT_SYMBOL_GPL(__bio_add_page);

/**
 * bio_add_page - attempt to add page(s) to bio
 * @bio: destination bio
 * @page: start page to add
 * @len: vec entry length, may cross pages
 * @offset: vec entry offset relative to @page, may cross pages
 *
 * Attempt to add page(s) to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	bool same_page = false;

	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
		if (bio_full(bio, len))
			return 0;
		__bio_add_page(bio, page, len, offset);
	}
	return len;
}
EXPORT_SYMBOL(bio_add_page);
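
/*
 * Illustrative sketch (not part of this file): filling a freshly allocated
 * bio one page at a time.  "pages" and "nr_pages" are hypothetical caller
 * state; because bio_alloc() reserves nr_pages vecs up front, the adds
 * below cannot fail, but callers adding to hw-limited or partially filled
 * bios must check the return value.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, nr_pages);
 *	int i;
 *
 *	for (i = 0; i < nr_pages; i++)
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
 *			break;
 */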

void bio_release_pages(struct bio *bio, bool mark_dirty)
{
	struct bvec_iter_all iter_all;
	struct bio_vec *bvec;

	if (bio_flagged(bio, BIO_NO_PAGE_REF))
		return;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (mark_dirty)
			set_page_dirty_lock(bvec->bv_page);
		put_page(bvec->bv_page);
	}
}
EXPORT_SYMBOL_GPL(bio_release_pages);

static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
{
	const struct bio_vec *bv = iter->bvec;
	unsigned int len;
	size_t size;

	if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
		return -EINVAL;

	len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
	size = bio_add_page(bio, bv->bv_page, len,
				bv->bv_offset + iter->iov_offset);
	if (unlikely(size != len))
		return -EINVAL;
	iov_iter_advance(iter, size);
	return 0;
}

static void bio_put_pages(struct page **pages, size_t size, size_t off)
{
	size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);

	for (i = 0; i < nr; i++)
		put_page(pages[i]);
}

#define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))

/**
 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins pages from *iter and appends them to @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 * For multi-segment *iter, this function only adds pages from the
 * next non-empty segment of the iov iterator.
 */
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	bool same_page = false;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;
	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];

		len = min_t(size_t, PAGE_SIZE - offset, left);

		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
			if (same_page)
				put_page(page);
		} else {
			if (WARN_ON_ONCE(bio_full(bio, len))) {
				bio_put_pages(pages + i, left, offset);
				return -EINVAL;
			}
			__bio_add_page(bio, page, len, offset);
		}
		offset = 0;
	}

	iov_iter_advance(iter, size);
	return 0;
}

static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct request_queue *q = bio->bi_disk->queue;
	unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;
	int ret = 0;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];
		bool same_page = false;

		len = min_t(size_t, PAGE_SIZE - offset, left);
		if (bio_add_hw_page(q, bio, page, len, offset,
				max_append_sectors, &same_page) != len) {
			bio_put_pages(pages + i, left, offset);
			ret = -EINVAL;
			break;
		}
		if (same_page)
			put_page(page);
		offset = 0;
	}

	iov_iter_advance(iter, size - left);
	return ret;
}

/**
 * bio_iov_iter_get_pages - add user or kernel pages to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be added
 *
 * This takes either an iterator pointing to user memory, or one pointing to
 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
 * map them into the kernel. On IO completion, the caller should put those
 * pages. If we're adding kernel pages, and the caller told us it's safe to
 * do so, we just have to add the pages to the bio directly. We don't grab an
 * extra reference to those pages (the user should already have that), and we
 * don't put the page on IO completion. The caller needs to check if the bio is
 * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
 * released.
 *
 * The function tries, but does not guarantee, to pin as many pages as
 * fit into the bio, or are requested in @iter, whatever is smaller. If
 * MM encounters an error pinning the requested pages, it stops. Error
 * is returned only if 0 pages could be pinned.
 */
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	const bool is_bvec = iov_iter_is_bvec(iter);
	int ret;

	if (WARN_ON_ONCE(bio->bi_vcnt))
		return -EINVAL;

	do {
		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
			if (WARN_ON_ONCE(is_bvec))
				return -EINVAL;
			ret = __bio_iov_append_get_pages(bio, iter);
		} else {
			if (is_bvec)
				ret = __bio_iov_bvec_add_pages(bio, iter);
			else
				ret = __bio_iov_iter_get_pages(bio, iter);
		}
	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));

	if (is_bvec)
		bio_set_flag(bio, BIO_NO_PAGE_REF);
	return bio->bi_vcnt ? 0 : ret;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);

static void submit_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * result in the bio reference being consumed. The caller must drop the
 * reference on its own.
 */
int submit_bio_wait(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
	unsigned long hang_check;

	bio->bi_private = &done;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&done,
					hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&done);

	return blk_status_to_errno(bio->bi_status);
}
EXPORT_SYMBOL(submit_bio_wait);
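
/*
 * Illustrative sketch (not part of this file): one page of synchronous read
 * I/O.  "bdev", "sector" and "page" are hypothetical caller state.  Note the
 * explicit bio_put(): unlike plain submit_bio() usage, submit_bio_wait()
 * leaves the caller's reference alone.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *	int err;
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	err = submit_bio_wait(bio);
 *	bio_put(bio);
 */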

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @bytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_crypt_advance(bio, bytes);
	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);

void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			struct bio *src, struct bvec_iter *src_iter)
{
	struct bio_vec src_bv, dst_bv;
	void *src_p, *dst_p;
	unsigned bytes;

	while (src_iter->bi_size && dst_iter->bi_size) {
		src_bv = bio_iter_iovec(src, *src_iter);
		dst_bv = bio_iter_iovec(dst, *dst_iter);

		bytes = min(src_bv.bv_len, dst_bv.bv_len);

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
		       src_p + src_bv.bv_offset,
		       bytes);

		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);

		flush_dcache_page(dst_bv.bv_page);

		bio_advance_iter(src, src_iter, bytes);
		bio_advance_iter(dst, dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data_iter);

/**
 * bio_copy_data - copy contents of data buffers from one bio to another
 * @src: source bio
 * @dst: destination bio
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
}
EXPORT_SYMBOL(bio_copy_data);

/**
 * bio_list_copy_data - copy contents of data buffers from one chain of bios to
 *			another
 * @src: source bio list
 * @dst: destination bio list
 *
 * Stops when it reaches the end of either the @src list or @dst list - that is,
 * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
 * bios).
 */
void bio_list_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
	}
}
EXPORT_SYMBOL(bio_list_copy_data);

void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.  If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages.  The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all.  So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages().  This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, flusher threads) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		set_page_dirty_lock(bvec->bv_page);
	}
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will run one put_page() against each page and will run one
 * bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	struct bio *bio, *next;

	spin_lock_irq(&bio_dirty_lock);
	next = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irq(&bio_dirty_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_private;

		bio_release_pages(bio, true);
		bio_put(bio);
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	unsigned long flags;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (!PageDirty(bvec->bv_page))
			goto defer;
	}

	bio_release_pages(bio, false);
	bio_put(bio);
	return;
defer:
	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio->bi_private = bio_dirty_list;
	bio_dirty_list = bio;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);
	schedule_work(&bio_dirty_work);
}

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 *
 *   bio_endio() can be called several times on a bio that has been chained
 *   using bio_chain().  The ->bi_end_io() function will only be called the
 *   last time.  At this point the BLK_TA_COMPLETE tracing event will be
 *   generated if BIO_TRACE_COMPLETION is set.
 **/
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;
	if (!bio_integrity_endio(bio))
		return;

	if (bio->bi_disk && bio_flagged(bio, BIO_TRACKED))
		rq_qos_done_bio(bio->bi_disk->queue, bio);

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack.  Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_complete(bio->bi_disk->queue, bio);
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
	}

	blk_throtl_bio_endio(bio);
	/* release cgroup info */
	bio_uninit(bio);
	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);

/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * Unless this is a discard request the newly allocated bio will point
 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
 * neither @bio nor @bs are freed before the split bio.
 */
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	/* Zone append commands cannot be split */
	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
		return NULL;

	split = bio_clone_fast(bio, gfp, bs);
	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split);

	bio_advance(bio, split->bi_iter.bi_size);

	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
		bio_set_flag(split, BIO_TRACE_COMPLETION);

	return split;
}
EXPORT_SYMBOL(bio_split);
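
/*
 * Illustrative sketch (not part of this file): the split-and-resubmit
 * pattern used when a bio exceeds some per-device limit ("max_sectors" is
 * hypothetical).  The front part ("split") is processed now; the remainder
 * stays in "bio", becomes the parent of the split via bio_chain(), and is
 * resubmitted so it can be split again if it is still too large.
 *
 *	if (bio_sectors(bio) > max_sectors) {
 *		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO,
 *					      &fs_bio_set);
 *
 *		bio_chain(split, bio);
 *		submit_bio_noacct(bio);
 *		bio = split;
 *	}
 */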

/**
 * bio_trim - trim a bio
 * @bio:	bio to trim
 * @offset:	number of sectors to trim from the front of @bio
 * @size:	size we want to trim @bio to, in sectors
 */
void bio_trim(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio which we need to trim to match
	 * the given offset and size.
	 */

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_advance(bio, offset << 9);
	bio->bi_iter.bi_size = size;

	if (bio_integrity(bio))
		bio_integrity_trim(bio);

}
EXPORT_SYMBOL_GPL(bio_trim);

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
int biovec_init_pool(mempool_t *pool, int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;

	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
}

/*
 * bioset_exit - exit a bioset initialized with bioset_init()
 *
 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
 * kzalloc()).
 */
void bioset_exit(struct bio_set *bs)
{
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);
	bs->rescue_workqueue = NULL;

	mempool_exit(&bs->bio_pool);
	mempool_exit(&bs->bvec_pool);

	bioset_integrity_free(bs);
	if (bs->bio_slab)
		bio_put_slab(bs);
	bs->bio_slab = NULL;
}
EXPORT_SYMBOL(bioset_exit);

/**
 * bioset_init - Initialize a bio_set
 * @bs:		pool to initialize
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
 *		and %BIOSET_NEED_RESCUER
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
 *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
 *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
 *    dispatch queued requests when the mempool runs out of space.
 *
 */
int bioset_init(struct bio_set *bs,
		unsigned int pool_size,
		unsigned int front_pad,
		int flags)
{
	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);

	bs->front_pad = front_pad;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
	if (!bs->bio_slab)
		return -ENOMEM;

	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
		goto bad;

	if ((flags & BIOSET_NEED_BVECS) &&
	    biovec_init_pool(&bs->bvec_pool, pool_size))
		goto bad;

	if (!(flags & BIOSET_NEED_RESCUER))
		return 0;

	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
	if (!bs->rescue_workqueue)
		goto bad;

	return 0;
bad:
	bioset_exit(bs);
	return -ENOMEM;
}
EXPORT_SYMBOL(bioset_init);
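
/*
 * Illustrative sketch (not part of this file): a driver that wants its
 * per-I/O context allocated together with the bio can embed the bio at the
 * end of its own structure and pass a matching front_pad.  "struct my_io"
 * and "my_bio_set" are hypothetical.
 *
 *	struct my_io {
 *		void *private_data;
 *		struct bio bio;		(must be the last member)
 *	};
 *
 *	ret = bioset_init(&my_bio_set, BIO_POOL_SIZE,
 *			  offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
 *
 * A bio allocated from my_bio_set can then be mapped back to its container
 * with container_of(bio, struct my_io, bio).
 */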

/*
 * Initialize and setup a new bio_set, based on the settings from
 * another bio_set.
 */
int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
{
	int flags;

	flags = 0;
	if (src->bvec_pool.min_nr)
		flags |= BIOSET_NEED_BVECS;
	if (src->rescue_workqueue)
		flags |= BIOSET_NEED_RESCUER;

	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
}
EXPORT_SYMBOL(bioset_init_from_src);

static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BVEC_POOL_NR; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
			bvs->slab = NULL;
			continue;
		}

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}

static int __init init_bio(void)
{
	bio_slab_max = 2;
	bio_slab_nr = 0;
	bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
			    GFP_KERNEL);

	BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);

	if (!bio_slabs)
		panic("bio: can't allocate bios\n");

	bio_integrity_init();
	biovec_init_slabs();

	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);