/kernel/linux/linux-6.6/include/linux/
bvec.h
     23:  * @bv_offset: Start of the address range relative to the start of @bv_page.
     25:  * The following holds for a bvec if n * PAGE_SIZE < bv_offset + bv_len:
     34:  unsigned int bv_offset;  (struct member)
     49:  bv->bv_offset = offset;  in bvec_set_page()
    109:  (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
    118:  .bv_offset = mp_bvec_iter_offset((bvec), (iter)), \
    137:  .bv_offset = bvec_iter_offset((bvec), (iter)), \
    211:  bv->bv_offset = 0;  in bvec_advance()
    213:  bv->bv_page = bvec->bv_page + (bvec->bv_offset >> PAGE_SHIFT);  in bvec_advance()
    214:  bv->bv_offset …  in bvec_advance()
    [all...]
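The bvec.h matches are worth reading together: the struct member at line 34 plus the bvec_advance() arithmetic at 211-214 show that bv_offset may legitimately exceed PAGE_SIZE and gets folded back into a (page, offset) pair during iteration. A minimal userspace model of that folding, assuming 4 KiB pages; struct page, struct bio_vec, and fold_offset() here are illustrative stand-ins, not the kernel definitions:

```c
#include <assert.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Illustrative stand-ins for the kernel's struct page / struct bio_vec. */
struct page { unsigned char data[PAGE_SIZE]; };

struct bio_vec {
	struct page  *bv_page;
	unsigned int  bv_len;
	unsigned int  bv_offset;
};

/*
 * Fold an offset that may exceed PAGE_SIZE back into a (page, offset)
 * pair, mirroring the bvec_advance() lines quoted above.
 */
static void fold_offset(const struct bio_vec *src, struct bio_vec *bv)
{
	bv->bv_page   = src->bv_page + (src->bv_offset >> PAGE_SHIFT);
	bv->bv_offset = src->bv_offset & ~PAGE_MASK;
	bv->bv_len    = src->bv_len;
}

int main(void)
{
	struct page pages[4];
	struct bio_vec src = { pages, 512, PAGE_SIZE + 100 }, bv;

	fold_offset(&src, &bv);
	assert(bv.bv_page == &pages[1]);	/* one full page consumed */
	assert(bv.bv_offset == 100);		/* remainder within that page */
	return 0;
}
```

The shift consumes whole pages and the mask keeps the in-page remainder, which is why the comment at line 25 states its invariant in terms of n * PAGE_SIZE.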
/kernel/linux/linux-5.10/include/linux/
bvec.h
     23:  * @bv_offset: Start of the address range relative to the start of @bv_page.
     25:  * The following holds for a bvec if n * PAGE_SIZE < bv_offset + bv_len:
     34:  unsigned int bv_offset;  (struct member)
     69:  (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
     78:  .bv_offset = mp_bvec_iter_offset((bvec), (iter)), \
     97:  .bv_offset = bvec_iter_offset((bvec), (iter)), \
    161:  bv->bv_offset = 0;  in bvec_advance()
    163:  bv->bv_page = bvec->bv_page + (bvec->bv_offset >> PAGE_SHIFT);  in bvec_advance()
    164:  bv->bv_offset = bvec->bv_offset …  in bvec_advance()
    [all...]
/kernel/linux/linux-5.10/block/
bounce.c
     81:  memcpy(vto + to->bv_offset, vfrom, to->bv_len);  in bounce_copy_vec()
     88:  memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
    148:  * fromvec->bv_offset and fromvec->bv_len might have …  in copy_to_high_bio_irq()
    153:  tovec.bv_offset;  in copy_to_high_bio_irq()
    336:  vto = page_address(to->bv_page) + to->bv_offset;  in __blk_queue_bounce()
    337:  vfrom = kmap_atomic(page) + to->bv_offset;  in __blk_queue_bounce()
blk-merge.c
     34:  if (pb.bv_offset & queue_virt_boundary(q))  in bio_will_gap()
     50:  return __bvec_gap_to_prev(q, &pb, nb.bv_offset);  in bio_will_gap()
    210:  bv->bv_offset + total_len);  in bvec_split_segs()
    217:  if ((bv->bv_offset + total_len) & queue_virt_boundary(q))  in bvec_split_segs()
    262:  if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))  in blk_bio_segment_split()
    267:  bv.bv_offset + bv.bv_len <= PAGE_SIZE) {  in blk_bio_segment_split()
    329:  (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {  in __blk_queue_split()
    426:  unsigned offset = bvec->bv_offset + total;  in blk_bvec_map_sg()
    457:  sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);  in __blk_bvec_map_sg()
    503:  if (bvec.bv_offset …  in __blk_bios_map_sg()
    [all...]
blk.h
     58:  phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;  in biovec_phys_mergeable()
     59:  phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;  in biovec_phys_mergeable()
     74:  ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));  in __bvec_gap_to_prev()
    124:  bip_next->bip_vec[0].bv_offset);  in integrity_req_gap_back_merge()
    134:  bip_next->bip_vec[0].bv_offset);  in integrity_req_gap_front_merge()
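These blk.h lines are the two primitive merge tests the blk-merge.c paths above funnel into: physical adjacency of consecutive vectors, and the virt-boundary rule computed from bv_offset + bv_len. A hedged userspace rendering of just the arithmetic; the kernel versions take request_queue and bio_vec arguments and handle extra cases omitted here:

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

/*
 * biovec_phys_mergeable() in miniature: two vectors can merge only if
 * the first ends at the physical address where the second begins.
 */
static bool phys_mergeable(phys_addr_t addr1, unsigned int len1,
			   phys_addr_t addr2)
{
	return addr1 + len1 == addr2;
}

/*
 * __bvec_gap_to_prev() in miniature: with a virt boundary mask set, the
 * previous vector must end on the boundary and the next must start on
 * it, or the hardware would see a gap mid-segment.
 */
static bool gap_to_prev(unsigned int prv_offset, unsigned int prv_len,
			unsigned int nxt_offset, unsigned long mask)
{
	return (nxt_offset & mask) || ((prv_offset + prv_len) & mask);
}

int main(void)
{
	/* 4 KiB virt boundary: a 512-byte tail leaves a gap. */
	assert(gap_to_prev(0, 512, 0, 0xfff));
	/* A full page up to the boundary, next starting at 0: no gap. */
	assert(!gap_to_prev(0, 4096, 0, 0xfff));
	assert(phys_mergeable(0x10000, 512, 0x10200));
	return 0;
}
```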
bio-integrity.c
    113:  bip->bip_vec->bv_offset);  in bio_integrity_free()
    149:  iv->bv_offset = offset;  in bio_integrity_add_page()
    172:  bip->bip_vec->bv_offset;  in bio_integrity_process()
    182:  iter.data_buf = kaddr + bv.bv_offset;  in bio_integrity_process()
t10-pi.c
    154:  p = pmap + iv.bv_offset;  in t10_pi_type1_prepare()
    202:  p = pmap + iv.bv_offset;  in t10_pi_type1_complete()
bio.c
    578:  zero_user(bv.bv_page, bv.bv_offset + offset,  in bio_truncate()
    742:  size_t bv_end = bv->bv_offset + bv->bv_len;  in page_is_mergeable()
    768:  phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;  in bio_try_merge_hw_seg()
    825:  bvec->bv_offset = offset;  in bio_add_hw_page()
    913:  bv->bv_offset = off;  in __bio_add_page()
    975:  bv->bv_offset + iter->iov_offset);  in __bio_iov_bvec_add_pages()
   1184:  * complete doesn't align with a bvec boundary, then bv_len and bv_offset will …
   1215:  memcpy(dst_p + dst_bv.bv_offset,  in bio_copy_data_iter()
   1216:  src_p + src_bv.bv_offset,  in bio_copy_data_iter()
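page_is_mergeable() (line 742) and bio_try_merge_hw_seg() (line 768) both reduce to one contiguity test: the incoming range must begin at the physical byte immediately after the last vector's end. A sketch of that test with page_to_phys() replaced by a plain base address; the names are hypothetical, not the kernel's:

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

/*
 * Contiguity test behind page_is_mergeable(): a new (page, off) range
 * can extend the bio's last vector only if it starts at the byte
 * immediately after that vector's last byte.
 */
static bool can_extend(phys_addr_t vec_page, unsigned int vec_off,
		       unsigned int vec_len,
		       phys_addr_t new_page, unsigned int new_off)
{
	phys_addr_t vec_end_addr = vec_page + vec_off + vec_len - 1;

	return vec_end_addr + 1 == new_page + new_off;
}

int main(void)
{
	/* Vector covers [0x1200, 0x1400); a range starting at 0x1400 extends it. */
	assert(can_extend(0x1000, 0x200, 0x200, 0x1000, 0x400));
	assert(!can_extend(0x1000, 0x200, 0x200, 0x2000, 0x0));
	return 0;
}
```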
blk-crypto-fallback.c
    327:  enc_bvec->bv_offset);  in blk_crypto_fallback_encrypt_bio()
    329:  enc_bvec->bv_offset);  in blk_crypto_fallback_encrypt_bio()
    416:  sg_set_page(&sg, page, data_unit_size, bv.bv_offset);  in blk_crypto_fallback_decrypt_bio()
/kernel/linux/linux-6.6/block/
blk.h
     86:  phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;  in biovec_phys_mergeable()
     87:  phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;  in biovec_phys_mergeable()
    109:  ((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);  in __bvec_gap_to_prev()
    206:  bip_next->bip_vec[0].bv_offset);  in integrity_req_gap_back_merge()
    217:  bip_next->bip_vec[0].bv_offset);  in integrity_req_gap_front_merge()
    317:  bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;  in bio_may_exceed_limits()
blk-merge.c
     69:  if (pb.bv_offset & queue_virt_boundary(q))  in bio_will_gap()
     85:  return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);  in bio_will_gap()
    242:  bv->bv_offset + total_len);  in bvec_split_segs()
    249:  if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)  in bvec_split_segs()
    291:  if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))  in bio_split_rw()
    296:  bv.bv_offset + bv.bv_len <= PAGE_SIZE) {  in bio_split_rw()
    470:  unsigned offset = bvec->bv_offset + total;  in blk_bvec_map_sg()
    501:  sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);  in __blk_bvec_map_sg()
    547:  if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)  in __blk_bios_map_sg()
blk-map.c
     57:  bvec->bv_offset,  in bio_copy_from_iter()
     88:  bvec->bv_offset,  in bio_copy_to_iter()
    595:  if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {  in blk_rq_map_user_bvec()
    604:  if (bv->bv_offset + bv->bv_len > PAGE_SIZE)  in blk_rq_map_user_bvec()
blk-crypto-fallback.c
    330:  enc_bvec->bv_offset);  in blk_crypto_fallback_encrypt_bio()
    332:  enc_bvec->bv_offset);  in blk_crypto_fallback_encrypt_bio()
    420:  sg_set_page(&sg, page, data_unit_size, bv.bv_offset);  in blk_crypto_fallback_decrypt_bio()
/kernel/linux/linux-5.10/drivers/md/bcache/
util.c
    239:  bv->bv_offset = base ? offset_in_page(base) : 0;  in bch_bio_map()
    243:  bv->bv_offset = 0;  in bch_bio_map()
    244:  start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,  in bch_bio_map()
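bch_bio_map() carves a flat kernel buffer into per-page vectors: the first vector starts at offset_in_page(base), every later one at offset 0, and each bv_len is clamped to the remainder of its page, exactly the three lines quoted. A standalone sketch of that loop under a 4 KiB page assumption (offset_in_page and min_ul here are local helpers, not kernel exports):

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define offset_in_page(p) ((unsigned long)(p) & (PAGE_SIZE - 1))

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* Print the (offset, len) pairs bch_bio_map()-style chunking produces. */
static void map_buffer(unsigned long base, unsigned long size)
{
	unsigned int offset = offset_in_page(base);

	while (size) {
		unsigned long len = min_ul(PAGE_SIZE - offset, size);

		printf("bv_offset=%u bv_len=%lu\n", offset, len);
		size -= len;
		offset = 0;	/* later pages start at the top */
	}
}

int main(void)
{
	map_buffer(0x1000f00, 9000);	/* starts 0xf00 into a page */
	return 0;
}
```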
debug.c
    136:  cache_set_err_on(memcmp(p1 + bv.bv_offset,  in bch_data_verify()
    137:  p2 + bv.bv_offset,  in bch_data_verify()
/kernel/linux/linux-5.10/fs/squashfs/
block.c
     53:  page_address(bvec->bv_page) + bvec->bv_offset + offset,  in copy_bio_to_actor()
    180:  data = page_address(bvec->bv_page) + bvec->bv_offset;  in squashfs_read_data()
    189:  data = page_address(bvec->bv_page) + bvec->bv_offset;  in squashfs_read_data()
lz4_wrapper.c
    104:  data = page_address(bvec->bv_page) + bvec->bv_offset;  in lz4_uncompress()
lzo_wrapper.c
     79:  data = page_address(bvec->bv_page) + bvec->bv_offset;  in lzo_uncompress()
/kernel/linux/linux-6.6/drivers/md/bcache/
util.c
    239:  bv->bv_offset = base ? offset_in_page(base) : 0;  in bch_bio_map()
    243:  bv->bv_offset = 0;  in bch_bio_map()
    244:  start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,  in bch_bio_map()
/kernel/linux/linux-5.10/lib/
iov_iter.c
    627:  memcpy_to_page(v.bv_page, v.bv_offset,  in _copy_to_iter()
    729:  rem = copy_mc_to_page(v.bv_page, v.bv_offset,  in _copy_mc_to_iter()
    765:  v.bv_offset, v.bv_len),  in _copy_from_iter()
    791:  v.bv_offset, v.bv_len),  in _copy_from_iter_full()
    811:  v.bv_offset, v.bv_len),  in _copy_from_iter_nocache()
    845:  v.bv_offset, v.bv_len),  in _copy_from_iter_flushcache()
    870:  v.bv_offset, v.bv_len),  in _copy_from_iter_full_nocache()
    977:  memzero_page(v.bv_page, v.bv_offset, v.bv_len),  in iov_iter_zero()
   1001:  v.bv_offset, v.bv_len),  in iov_iter_copy_from_user_atomic()
   1236:  res |= v.bv_offset | …  in iov_iter_alignment()
   [all...]
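The fragment at line 1236 (res |= v.bv_offset | ...) is the whole of iov_iter_alignment()'s trick: OR every segment's offset and length into one word, then inspect the low bits, since any misaligned segment leaves a low bit set. A small model with hypothetical names:

```c
#include <assert.h>
#include <stddef.h>

struct seg {
	unsigned int offset;	/* bv_offset analogue */
	unsigned int len;	/* bv_len analogue */
};

/*
 * iov_iter_alignment()-style reduction: any low bit set in the result
 * means at least one segment breaks that power-of-two alignment.
 */
static unsigned long worst_alignment(const struct seg *segs, size_t n)
{
	unsigned long res = 0;

	for (size_t i = 0; i < n; i++)
		res |= segs[i].offset | segs[i].len;
	return res;
}

int main(void)
{
	struct seg ok[]  = { { 0, 4096 }, { 512, 512 } };
	struct seg bad[] = { { 0, 4096 }, { 3, 509 } };

	assert((worst_alignment(ok, 2) & 511) == 0);	/* 512-byte aligned */
	assert((worst_alignment(bad, 2) & 511) != 0);	/* offset 3 breaks it */
	return 0;
}
```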
/kernel/linux/linux-5.10/drivers/xen/
biomerge.c
     15:  return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;  in xen_biovec_phys_mergeable()
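The single Xen line deserves a gloss: on Xen, virtually contiguous pages need not be backed by adjacent machine frames, so mergeability is decided on backend frame numbers, advancing bfn1 by the whole frames that vec1's offset plus length cover. A hedged model of that one comparison:

```c
#include <assert.h>
#include <stdbool.h>

#define PAGE_SHIFT 12
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

/*
 * xen_biovec_phys_mergeable() in miniature: vec1's backend frame,
 * advanced past its data, must land exactly on vec2's first frame.
 */
static bool frames_adjacent(unsigned long bfn1, unsigned int off1,
			    unsigned int len1, unsigned long bfn2)
{
	return bfn1 + PFN_DOWN(off1 + len1) == bfn2;
}

int main(void)
{
	/* One full 4 KiB frame of data at offset 0 ends in frame bfn1 + 1. */
	assert(frames_adjacent(100, 0, 4096, 101));
	assert(!frames_adjacent(100, 0, 4096, 102));
	return 0;
}
```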
/kernel/linux/linux-6.6/drivers/xen/
biomerge.c
     15:  return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;  in xen_biovec_phys_mergeable()
/kernel/linux/linux-6.6/fs/btrfs/
raid56.c
   1123:  sector->pgoff = bvec.bv_offset + bvec_offset;  in index_one_bio()
   1401:  for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len;  in set_bio_pages_uptodate()
   1420:  if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)  in get_bio_sector_nr()
   1423:  if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)  in get_bio_sector_nr()
   1469:  int bv_offset;  (local variable, in verify_bio_data_sectors())
   1471:  for (bv_offset = bvec->bv_offset;  in verify_bio_data_sectors()
   1472:  bv_offset < bvec->bv_offset …  in verify_bio_data_sectors()
   [all...]
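The raid56.c loops at 1401 and 1471 share one shape: step a page offset from bv_offset in sectorsize increments so each iteration addresses one checksummed sector inside the bvec. A sketch of that loop shape, with a 4 KiB sectorsize assumed for the demo:

```c
#include <stdio.h>

/*
 * Walk a bvec in sectorsize steps, starting at bv_offset, the way
 * set_bio_pages_uptodate() and verify_bio_data_sectors() do.
 */
static void for_each_sector(unsigned int bv_offset, unsigned int bv_len,
			    unsigned int sectorsize)
{
	unsigned int pgoff;

	for (pgoff = bv_offset; pgoff - bv_offset < bv_len; pgoff += sectorsize)
		printf("sector at page offset %u\n", pgoff);
}

int main(void)
{
	for_each_sector(1024, 8192, 4096);	/* two 4 KiB sectors */
	return 0;
}
```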
/kernel/linux/linux-5.10/drivers/nvdimm/
blk.c
     87:  * .bv_offset already adjusted for iter->bi_bvec_done, and we …  in nd_blk_rw_integrity()
     93:  err = ndbr->do_io(ndbr, dev_offset, iobuf + bv.bv_offset,  in nd_blk_rw_integrity()
    188:  bvec.bv_offset, rw, iter.bi_sector);  in nd_blk_submit_bio()
/kernel/linux/linux-5.10/drivers/block/zram/
zram_drv.c
    552:  if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {  in read_from_bdev_async()
    614:  bvec.bv_offset = 0;  in writeback_store()
    669:  bvec.bv_offset);  in writeback_store()
   1256:  bvec.bv_offset = 0;  in __zram_bvec_read()
   1337:  memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);  in zram_bvec_read()
   1492:  memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);  in zram_bvec_write()
   1498:  vec.bv_offset = 0;  in zram_bvec_write()
   1615:  bv.bv_offset += bv.bv_len;  in __zram_make_request()
   1687:  bv.bv_offset = 0;  in zram_rw_page()
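zram stores data in whole-page units, so __zram_make_request() (line 1615) consumes each segment in chunks that never cross the current in-page offset, bumping bv_offset by the chunk length each round. A userspace sketch of that split, with hypothetical helper names:

```c
#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/*
 * __zram_make_request()-style split: each chunk is clamped to the
 * remainder of the zram page, and bv_offset advances by the chunk
 * length (the bv.bv_offset += bv.bv_len step quoted above).
 */
static void split_segment(unsigned int bv_offset, unsigned int bv_len,
			  unsigned int start_in_page)
{
	unsigned int offset = start_in_page;	/* offset within the zram page */
	unsigned int unwritten = bv_len;

	while (unwritten) {
		unsigned int len = min_u(PAGE_SIZE - offset, unwritten);

		printf("chunk: bv_offset=%u len=%u\n", bv_offset, len);
		bv_offset += len;
		unwritten -= len;
		offset = (offset + len) % PAGE_SIZE;
	}
}

int main(void)
{
	/* 6000 bytes landing 2048 bytes into a zram page: 2048 + 3952. */
	split_segment(0, 6000, 2048);
	return 0;
}
```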