Lines matching references to bs in block/bio.c

113 static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
115 return bs->front_pad + sizeof(struct bio) + bs->back_pad;
118 static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
120 unsigned int size = bs_bio_slab_size(bs);
136 static void bio_put_slab(struct bio_set *bs)
139 unsigned int slab_size = bs_bio_slab_size(bs);
147 WARN_ON_ONCE(bslab->slab != bs->bio_slab);
231 struct bio_set *bs = bio->bi_pool;
234 WARN_ON_ONCE(!bs);
237 bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
238 mempool_free(p - bs->front_pad, &bs->bio_pool);
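
The free path at lines 237-238 undoes the layout computed by bs_bio_slab_size(): each slab element is front_pad bytes of caller-owned space, then the struct bio, then back_pad for the inline bio_vecs, which is why bio_free() hands p - bs->front_pad back to the mempool. A minimal sketch of the embedding pattern this enables, assuming the usual container_of() idiom; my_io, my_bio_set and my_pool_init are hypothetical names:

#include <linux/kernel.h>
#include <linux/bio.h>

/* Hypothetical driver-private per-I/O state living in the front_pad area. */
struct my_io {
	sector_t	orig_sector;	/* driver bookkeeping */
	struct bio	bio;		/* must be last: the bio follows the pad */
};

static struct bio_set my_bio_set;

static int my_pool_init(void)
{
	/* front_pad = bytes reserved in front of each struct bio */
	return bioset_init(&my_bio_set, 4, offsetof(struct my_io, bio),
			   BIOSET_NEED_BVECS);
}

static struct my_io *my_io_from_bio(struct bio *bio)
{
	/* inverse of "bio = p + bs->front_pad" in bio_alloc_bioset() */
	return container_of(bio, struct my_io, bio);
}
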
364 struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
368 spin_lock(&bs->rescue_lock);
369 bio = bio_list_pop(&bs->rescue_list);
370 spin_unlock(&bs->rescue_lock);
379 static void punt_bios_to_rescuer(struct bio_set *bs)
384 if (WARN_ON_ONCE(!bs->rescue_workqueue))
401 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
406 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
409 spin_lock(&bs->rescue_lock);
410 bio_list_merge(&bs->rescue_list, &punt);
411 spin_unlock(&bs->rescue_lock);
413 queue_work(bs->rescue_workqueue, &bs->rescue_work);
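
punt_bios_to_rescuer() (lines 379-413) sorts current->bio_list into bios owned by this bio_set, which are punted to bs->rescue_list and resubmitted by bio_alloc_rescue() on bs->rescue_workqueue, and everything else, which stays put; the WARN at line 384 shows it is only legal for sets that own a rescuer. A hedged sketch of a stacking driver asking for one at init time; stack_bs and stack_pools_init are illustrative, and BIOSET_NEED_RESCUER is the flag that makes bioset_init() allocate the workqueue:

#include <linux/bio.h>

static struct bio_set stack_bs;

static int stack_pools_init(void)
{
	/* the rescuer lets allocations made under current->bio_list make progress */
	return bioset_init(&stack_bs, BIO_POOL_SIZE, 0,
			   BIOSET_NEED_BVECS | BIOSET_NEED_RESCUER);
}
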
434 struct bio_set *bs)
439 cache = per_cpu_ptr(bs->cache, get_cpu());
454 bio->bi_pool = bs;
464 * @bs: the bio_set to allocate from.
466 * Allocate a bio from the mempools in @bs.
494 struct bio_set *bs)
501 if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
505 if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
507 gfp_mask, bs);
540 bs->rescue_workqueue)
543 p = mempool_alloc(&bs->bio_pool, gfp_mask);
545 punt_bios_to_rescuer(bs);
547 p = mempool_alloc(&bs->bio_pool, gfp_mask);
551 if (!mempool_is_saturated(&bs->bio_pool))
554 bio = p + bs->front_pad;
558 bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
560 punt_bios_to_rescuer(bs);
562 bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
574 bio->bi_pool = bs;
578 mempool_free(p, &bs->bio_pool);
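
bio_alloc_bioset() (line 494 onward) is the central allocator: it tries the per-cpu cache when one exists and the request fits the inline vectors, otherwise takes a bio from the mempool (punting to the rescuer and retrying if the first attempt fails), attaches a bio_vec array when nr_vecs exceeds the inline ones, and records bs in bio->bi_pool (line 574) so the final bio_put() returns the bio to the same set. A usage sketch, assuming the current five-argument signature; my_bio_set, my_end_io and my_read_page are hypothetical:

#include <linux/bio.h>
#include <linux/completion.h>

static struct bio_set my_bio_set;	/* initialized elsewhere via bioset_init() */

static void my_end_io(struct bio *bio)
{
	struct completion *done = bio->bi_private;

	/* real code would inspect bio->bi_status here */
	complete(done);
	bio_put(bio);		/* bi_pool routes the bio back into my_bio_set */
}

static int my_read_page(struct block_device *bdev, struct page *page,
			sector_t sector)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio *bio;

	bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ, GFP_NOIO, &my_bio_set);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = my_end_io;
	bio->bi_private = &done;

	submit_bio(bio);
	wait_for_completion(&done);
	return 0;
}
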
731 struct bio_set *bs;
733 bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
734 if (bs->cache) {
735 struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);
742 static void bio_alloc_cache_destroy(struct bio_set *bs)
746 if (!bs->cache)
749 cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
753 cache = per_cpu_ptr(bs->cache, cpu);
756 free_percpu(bs->cache);
757 bs->cache = NULL;
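
The per-cpu front end only exists when the set was created with BIOSET_PERCPU_CACHE: bioset_init() then allocates bs->cache and registers the CPUHP_BIO_DEAD callback (lines 1769-1772), and bio_alloc_cache_destroy() tears both down again. A sketch of opting in, with poll_bs and poll_bio_get as illustrative names; note that recent kernels additionally gate the cached fast path on REQ_ALLOC_CACHE being set in the request flags:

#include <linux/bio.h>

static struct bio_set poll_bs;

static int poll_pool_init(void)
{
	return bioset_init(&poll_bs, BIO_POOL_SIZE, 0,
			   BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
}

static struct bio *poll_bio_get(struct block_device *bdev)
{
	/* REQ_ALLOC_CACHE asks bio_alloc_bioset() to use the per-cpu cache */
	return bio_alloc_bioset(bdev, 1, REQ_OP_READ | REQ_ALLOC_CACHE,
				GFP_KERNEL, &poll_bs);
}
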
838 * @bs: bio_set to allocate from
846 gfp_t gfp, struct bio_set *bs)
850 bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
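
bio_alloc_clone() (line 846 onward) allocates the clone from @bs rather than the global fs_bio_set and shares the source bio's data pages, duplicating only the bio and its iterator. A sketch of the common pass-through pattern in a stacking driver, with clone_bs, relay_bio and relay_end_io as hypothetical names:

#include <linux/bio.h>

static struct bio_set clone_bs;	/* initialized elsewhere via bioset_init() */

static void relay_end_io(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	orig->bi_status = clone->bi_status;	/* propagate the result */
	bio_put(clone);				/* back into clone_bs */
	bio_endio(orig);
}

static void relay_bio(struct block_device *lower, struct bio *orig)
{
	struct bio *clone;

	clone = bio_alloc_clone(lower, orig, GFP_NOIO, &clone_bs);
	if (!clone) {
		bio_io_error(orig);
		return;
	}

	clone->bi_private = orig;
	clone->bi_end_io = relay_end_io;
	submit_bio_noacct(clone);
}
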
1615 * @bs: bio set to allocate from
1622 * neither @bio nor @bs are freed before the split bio.
1625 gfp_t gfp, struct bio_set *bs)
1636 split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
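
bio_split() (line 1625 onward) carves the first @sectors off @bio into a clone taken from @bs and leaves @bio holding the remainder; as the comment at line 1622 notes, neither @bio nor @bs may be freed before the split bio completes, and chaining the split to @bio covers the former. A sketch of the usual split-and-chain loop; split_bs, submit_in_chunks and max_sectors are hypothetical, and the error check covers both the NULL return of older kernels and the ERR_PTR return of newer ones:

#include <linux/bio.h>
#include <linux/err.h>

static struct bio_set split_bs;	/* initialized elsewhere via bioset_init() */

static void submit_in_chunks(struct bio *bio, unsigned int max_sectors)
{
	while (bio_sectors(bio) > max_sectors) {
		struct bio *split;

		split = bio_split(bio, max_sectors, GFP_NOIO, &split_bs);
		if (IS_ERR_OR_NULL(split))
			break;			/* submit whatever is left below */

		bio_chain(split, bio);		/* bio completes only after split */
		submit_bio_noacct(split);
	}
	submit_bio_noacct(bio);
}
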
1698 void bioset_exit(struct bio_set *bs)
1700 bio_alloc_cache_destroy(bs);
1701 if (bs->rescue_workqueue)
1702 destroy_workqueue(bs->rescue_workqueue);
1703 bs->rescue_workqueue = NULL;
1705 mempool_exit(&bs->bio_pool);
1706 mempool_exit(&bs->bvec_pool);
1708 bioset_integrity_free(bs);
1709 if (bs->bio_slab)
1710 bio_put_slab(bs);
1711 bs->bio_slab = NULL;
1717 * @bs: pool to initialize
1736 int bioset_init(struct bio_set *bs,
1741 bs->front_pad = front_pad;
1743 bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1745 bs->back_pad = 0;
1747 spin_lock_init(&bs->rescue_lock);
1748 bio_list_init(&bs->rescue_list);
1749 INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1751 bs->bio_slab = bio_find_or_create_slab(bs);
1752 if (!bs->bio_slab)
1755 if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1759 biovec_init_pool(&bs->bvec_pool, pool_size))
1763 bs->rescue_workqueue = alloc_workqueue("bioset",
1765 if (!bs->rescue_workqueue)
1769 bs->cache = alloc_percpu(struct bio_alloc_cache);
1770 if (!bs->cache)
1772 cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
1777 bioset_exit(bs);
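
bioset_init() wires up front_pad/back_pad, the rescuer state, the slab, the bio and bvec mempools, and optionally the rescuer workqueue and per-cpu cache; on any failure it unwinds through bioset_exit() (line 1777), which is also safe to call on a set whose pools were only partially set up. A lifecycle sketch pairing the two, with my_dev and its helpers as hypothetical names:

#include <linux/bio.h>

struct my_dev {
	struct bio_set	bs;
	/* ... other per-device state ... */
};

static int my_dev_init_pools(struct my_dev *dev, unsigned int front_pad)
{
	/* 4 bios kept in reserve; bvec pool and rescuer workqueue requested */
	return bioset_init(&dev->bs, 4, front_pad,
			   BIOSET_NEED_BVECS | BIOSET_NEED_RESCUER);
}

static void my_dev_free_pools(struct my_dev *dev)
{
	/* mirrors the error path at line 1777: exit tolerates partial init */
	bioset_exit(&dev->bs);
}
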