Lines Matching refs:bv
163 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
168 mempool_free(bv, pool);
170 kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
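
The three matches above read like bvec_free() from the block layer's bio.c: a biovec array is handed back either to a reserved mempool or to a per-size kmem_cache depending on how many vectors it holds, and anything small enough to live inline in the bio is presumably not freed at all. A small user-space sketch of that tiered-free pattern; the struct, the thresholds, and the helper names below are illustrative stand-ins, not the kernel's mempool_t/kmem_cache interfaces:

#include <stdlib.h>

#define MAX_VECS     256   /* illustrative cap on vectors per I/O          */
#define INLINE_VECS  4     /* illustrative "stored inline, never freed"    */

struct vec { void *page; unsigned int len, offset; };

/* Stand-ins: the kernel frees into a mempool_t or a per-size kmem_cache;
 * plain free() is used here only to keep the sketch self-contained. */
static void pool_free(struct vec *v)  { free(v); }
static void cache_free(struct vec *v) { free(v); }

/* Tiered free with the same shape as the matched lines: the largest
 * arrays go back to the reserved pool, mid-sized ones to a size cache,
 * and anything small enough to live inline is left alone. */
static void vec_free(struct vec *v, unsigned short nr_vecs)
{
	if (nr_vecs == MAX_VECS)
		pool_free(v);
	else if (nr_vecs > INLINE_VECS)
		cache_free(v);
}

int main(void)
{
	vec_free(calloc(MAX_VECS, sizeof(struct vec)), MAX_VECS);
	return 0;
}

The split matters in the real code because the pool tier is reserved memory, so the largest allocations can still make forward progress under memory pressure.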
611 struct bio_vec bv;
614 __bio_for_each_segment(bv, bio, iter, start)
615 memzero_bvec(&bv);
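
These two hits show the canonical segment walk: iterate every bio_vec the iterator covers and clear it with memzero_bvec(). The same walk-and-zero shape over a plain segment array rather than a bvec iterator (names here are illustrative):

#include <stdio.h>
#include <string.h>

struct seg { unsigned char *buf; size_t len; };

/* Clear every segment, mirroring the __bio_for_each_segment() +
 * memzero_bvec() pairing in the listing. */
static void zero_fill(struct seg *segs, size_t nr)
{
	for (size_t i = 0; i < nr; i++)
		memset(segs[i].buf, 0, segs[i].len);
}

int main(void)
{
	unsigned char a[8] = "AAAAAAA", b[4] = "BBB";
	struct seg segs[] = { { a, sizeof(a) }, { b, sizeof(b) } };

	zero_fill(segs, 2);
	printf("%d %d\n", a[0], b[0]);   /* prints: 0 0 */
	return 0;
}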
631 struct bio_vec bv;
642 bio_for_each_segment(bv, bio, iter) {
643 if (done + bv.bv_len > new_size) {
650 zero_user(bv.bv_page, bv.bv_offset + offset,
651 bv.bv_len - offset);
654 done += bv.bv_len;
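
The bio_truncate() hits carry the interesting arithmetic: done counts bytes already walked, and once a segment crosses new_size it is zeroed from new_size - done onward, while every segment after that is zeroed in full (offset 0), so nothing past the new size reaches the reader as stale data. The same loop over a plain segment list (struct and names are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct seg { unsigned char *buf; size_t len; };

/* Zero every byte at or beyond new_size: the segment that straddles
 * new_size is partially cleared, all later segments are cleared whole,
 * exactly the done/offset bookkeeping in the matched lines. */
static void truncate_zero(struct seg *segs, size_t nr, size_t new_size)
{
	size_t done = 0;
	bool truncated = false;

	for (size_t i = 0; i < nr; i++) {
		if (done + segs[i].len > new_size) {
			size_t offset = truncated ? 0 : new_size - done;

			memset(segs[i].buf + offset, 0, segs[i].len - offset);
			truncated = true;
		}
		done += segs[i].len;
	}
}

int main(void)
{
	unsigned char a[4] = {1, 1, 1, 1}, b[4] = {2, 2, 2, 2};
	struct seg segs[] = { { a, 4 }, { b, 4 } };

	truncate_zero(segs, 2, 6);                          /* keep first 6 bytes */
	printf("%d %d %d %d\n", b[0], b[1], b[2], b[3]);    /* prints: 2 2 0 0 */
	return 0;
}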
906 static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
909 size_t bv_end = bv->bv_offset + bv->bv_len;
910 phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
915 if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
917 if (!zone_device_pages_have_same_pgmap(bv->bv_page, page))
924 if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE)
928 bv->bv_len += len;
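
bvec_try_merge_page() is where a new page either folds into the previous vector or does not: the physical address one byte past the existing vector must equal the physical address of the new data, and if it does, growing bv_len is the whole merge. The other matched lines are vetoes layered on top (Xen biovec mergeability, zone-device pgmap matching, and the cross-page bookkeeping around bv_end / PAGE_SIZE). A reduced model of the contiguity check, with a flat physical address standing in for page_to_phys():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct vec { uint64_t page_phys; unsigned int offset, len; };

/* Merge only when the new bytes begin at the physical byte immediately
 * after the vector's current end; the merge itself is just growing len. */
static bool try_merge(struct vec *v, uint64_t page_phys,
		      unsigned int len, unsigned int off)
{
	uint64_t vec_end = v->page_phys + v->offset + v->len;

	if (vec_end != page_phys + off)
		return false;
	v->len += len;
	return true;
}

int main(void)
{
	struct vec v = { 0x10000, 0, PAGE_SIZE };

	/* Data starting at the next physical page is contiguous: merges. */
	bool merged = try_merge(&v, 0x10000 + PAGE_SIZE, 512, 0);

	printf("%d %u\n", merged, v.len);   /* prints: 1 4608 */
	return 0;
}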
937 bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
942 phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
947 if (len > queue_max_segment_size(q) - bv->bv_len)
949 return bvec_try_merge_page(bv, page, len, offset, same_page);
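
bvec_try_merge_hw_page() is the same merge gated by hardware limits first: the addr1 computation hints at a segment-boundary comparison in the surrounding code (not all of it captured by these matches), and the length test visible here is written against the remaining headroom, len > queue_max_segment_size(q) - bv->bv_len, rather than against the sum. A sketch of those two gates with the limits passed as plain parameters; the mask is assumed to be boundary - 1 for a power-of-two boundary, in the spirit of queue_segment_boundary():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Both ends of the would-be merged segment must share one boundary-sized
 * window, and the grown segment must stay within max_seg_size. */
static bool hw_merge_allowed(uint64_t seg_start, unsigned int seg_len,
			     uint64_t add_end, unsigned int add_len,
			     uint64_t boundary_mask, unsigned int max_seg_size)
{
	if ((seg_start | boundary_mask) != (add_end | boundary_mask))
		return false;             /* crosses a DMA segment boundary */
	if (add_len > max_seg_size - seg_len)
		return false;             /* exceeds the per-segment size cap */
	return true;
}

int main(void)
{
	/* 64K boundary (mask 0xffff), 64K max segment size. */
	printf("%d\n", hw_merge_allowed(0x10000, 4096, 0x10fff, 4096,
					0xffff, 0x10000));   /* 1: allowed */
	printf("%d\n", hw_merge_allowed(0x1f000, 4096, 0x20fff, 4096,
					0xffff, 0x10000));   /* 0: straddles 0x20000 */
	return 0;
}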
976 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
978 if (bvec_try_merge_hw_page(q, bv, page, len, offset,
992 if (bvec_gap_to_prev(&q->limits, bv, offset))
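
The next three hits are the append path itself (what looks like bio_add_hw_page()): take the last vector already in the bio, try the hardware-aware merge first, and only consider starting a new vector if bvec_gap_to_prev() says the queue's gap rules allow another segment at that offset. A compressed outline with stubbed checks standing in for the real helpers:

#include <stdbool.h>
#include <stddef.h>

struct vec { void *page; unsigned int len, offset; };

/* Stubs in place of bvec_try_merge_hw_page()/bvec_gap_to_prev(); the real
 * checks are sketched after the earlier groups of matches. */
static bool try_merge(struct vec *v, void *page, unsigned int len, unsigned int off)
{ (void)v; (void)page; (void)len; (void)off; return false; }
static bool gap_to_prev(const struct vec *v, unsigned int off)
{ (void)v; (void)off; return false; }

/* Append: extend the last vector when possible, otherwise respect the
 * gap rule and the table size before opening a new vector. */
static int add_page(struct vec *vecs, unsigned int *vcnt, unsigned int max_vecs,
		    void *page, unsigned int len, unsigned int off)
{
	if (*vcnt) {
		struct vec *last = &vecs[*vcnt - 1];

		if (try_merge(last, page, len, off))
			return 0;
		if (gap_to_prev(last, off))
			return -1;
	}
	if (*vcnt >= max_vecs)
		return -1;
	vecs[*vcnt] = (struct vec){ .page = page, .len = len, .offset = off };
	(*vcnt)++;
	return 0;
}

int main(void)
{
	struct vec table[2];
	unsigned int vcnt = 0;
	char buf[512];

	return add_page(table, &vcnt, 2, buf, sizeof(buf), 0) ? 1 : 0;
}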
1240 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1241 struct page **pages = (struct page **)bv;
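
The last two matches are a storage trick rather than a merge rule: the not-yet-used tail of the bio's bio_vec array is reinterpreted, via the cast, as a temporary struct page * array, presumably for the page-gathering code to fill. That works because a page pointer is smaller than a bio_vec, and (in surrounding code these matches do not show) the temporary pointers are parked at the tail of the array so that converting them into bio_vecs front-to-back never overwrites a pointer that has not been read yet. A user-space illustration of the same aliasing-and-offset trick; malloc()ed storage is used so the pointer/struct punning stays well-defined in portable C:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct bio_vec: bigger than a pointer, so pointer-sized
 * scratch entries can hide in the same storage. */
struct big { void *p; unsigned int len, off; };

int main(void)
{
	enum { N = 4 };
	struct big *table = malloc(N * sizeof(*table));
	const char *names[N] = { "a", "b", "c", "d" };

	if (!table)
		return 1;

	/* Park N pointer-sized scratch slots at the very end of the same
	 * allocation, as the (struct page **)bv cast does in the listing. */
	void **scratch = (void **)(table + N) - N;
	for (int i = 0; i < N; i++)
		scratch[i] = (void *)names[i];

	/* Convert front-to-back: because the scratch slots sit at the tail,
	 * table[i] is always written after scratch[i] has been read. */
	for (int i = 0; i < N; i++) {
		void *p = scratch[i];
		table[i] = (struct big){ .p = p, .len = 1, .off = 0 };
	}

	for (int i = 0; i < N; i++)
		printf("%s\n", (char *)table[i].p);
	free(table);
	return 0;
}

The payoff in the real code is that building the vector table needs no separate page-pointer allocation on the hot path; the bio's own, still-empty vector slots double as the scratch space.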