/kernel/linux/linux-6.6/lib/
H A D | iov_iter.c
    132   i->nr_segs -= iov - iter_iov(i); \
    140   i->nr_segs -= bvec - i->bvec; \
    148   i->nr_segs -= kvec - i->kvec; \
    285   const struct iovec *iov, unsigned long nr_segs,   in iov_iter_init()
    296   .nr_segs = nr_segs,   in iov_iter_init()
    613   for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {   in iov_iter_bvec_advance()
    619   i->nr_segs -= bvec - i->bvec;   in iov_iter_bvec_advance()
    632   for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {   in iov_iter_iovec_advance()
    638   i->nr_segs ...   in iov_iter_iovec_advance()
    284   iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov, unsigned long nr_segs, size_t count)   iov_iter_init() argument
    722   iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec, unsigned long nr_segs, size_t count)   iov_iter_kvec() argument
    739   iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec, unsigned long nr_segs, size_t count)   iov_iter_bvec() argument
    1333  copy_compat_iovec_from_user(struct iovec *iov, const struct iovec __user *uvec, unsigned long nr_segs)   copy_compat_iovec_from_user() argument
    1365  copy_iovec_from_user(struct iovec *iov, const struct iovec __user *uiov, unsigned long nr_segs)   copy_iovec_from_user() argument
    1397  iovec_from_user(const struct iovec __user *uvec, unsigned long nr_segs, unsigned long fast_segs, struct iovec *fast_iov, bool compat)   iovec_from_user() argument
    1456  __import_iovec(int type, const struct iovec __user *uvec, unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, struct iov_iter *i, bool compat)   __import_iovec() argument
    1528  import_iovec(int type, const struct iovec __user *uvec, unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, struct iov_iter *i)   import_iovec() argument
    ... (further matches elided)
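The iov_iter_kvec()/copy_to_iter() pair listed above is the common way nr_segs shows up for in-kernel buffers: the caller hands the iterator an array of segments plus a segment count, and the copy helpers walk and shrink nr_segs as data is consumed. A minimal sketch follows, with invented buffer names (hdr, payload, src); only iov_iter_kvec() and copy_to_iter() are the real 6.6 interfaces.

    #include <linux/uio.h>

    /* Scatter one contiguous source buffer into two kernel segments. */
    static size_t scatter_example(void *hdr, size_t hdr_len,
                                  void *payload, size_t payload_len,
                                  const void *src)
    {
            struct kvec kv[2] = {
                    { .iov_base = hdr,     .iov_len = hdr_len     },
                    { .iov_base = payload, .iov_len = payload_len },
            };
            struct iov_iter iter;

            /* nr_segs = 2: the iterator walks both kvec entries in order. */
            iov_iter_kvec(&iter, ITER_DEST, kv, 2, hdr_len + payload_len);

            /* Fills hdr first, then payload, advancing the iterator's
             * iov_offset/nr_segs bookkeeping shown in the excerpts above. */
            return copy_to_iter(src, hdr_len + payload_len, &iter);
    }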
/kernel/linux/linux-5.10/include/linux/ |
H A D | uio.h
    32    unsigned long nr_segs;   member
    51    unsigned long nr_segs;   member
    69    state->nr_segs = iter->nr_segs;   in iov_iter_save_state()
    109   static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)   in iov_length() argument
    114   for (seg = 0; seg < nr_segs; seg++)   in iov_length()
    230   unsigned long nr_segs, size_t count);
    232   unsigned long nr_segs, size_t count);
    234   unsigned long nr_segs, size_t count);
    291   unsigned long nr_segs, unsigned ...
    ... (further matches elided)
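iov_length() above is the small helper that turns an iovec array plus nr_segs into a byte total by summing iov_len over the segments. A minimal sketch with invented function and parameter names, just to show the call shape:

    #include <linux/uio.h>

    /* Total payload described by an iovec array of nr_segs entries,
     * e.g. two segments of 16 and 64 bytes give 80. */
    static size_t total_bytes_example(const struct iovec *vec,
                                      unsigned long nr_segs)
    {
            return iov_length(vec, nr_segs);
    }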
/kernel/linux/linux-5.10/block/ |
H A D | blk-merge.c
    289   * @nr_segs: [out] number of segments in the first bio
    299   void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)   in __blk_queue_split() argument
    307   split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);   in __blk_queue_split()
    311   nr_segs);   in __blk_queue_split()
    315   nr_segs);   in __blk_queue_split()
    330   *nr_segs = 1;   in __blk_queue_split()
    333   split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);   in __blk_queue_split()
    362   unsigned int nr_segs;   in blk_queue_split() local
    364   __blk_queue_split(bio, &nr_segs);   in blk_queue_split()
    583   int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)   in ll_back_merge_fn() argument
    601   ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)   ll_front_merge_fn() argument
    927   bio_attempt_back_merge(struct request *req, struct bio *bio, unsigned int nr_segs)   bio_attempt_back_merge() argument
    951   bio_attempt_front_merge(struct request *req, struct bio *bio, unsigned int nr_segs)   bio_attempt_front_merge() argument
    1002  blk_attempt_bio_merge(struct request_queue *q, struct request *rq, struct bio *bio, unsigned int nr_segs, bool sched_allow_merge)   blk_attempt_bio_merge() argument
    1051  blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs, struct request **same_queue_rq)   blk_attempt_plug_merge() argument
    1089  blk_bio_list_merge(struct request_queue *q, struct list_head *list, struct bio *bio, unsigned int nr_segs)   blk_bio_list_merge() argument
    1114  blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs, struct request **merged_request)   blk_mq_sched_try_merge() argument
    ... (further matches elided)
H A D | blk-mq-sched.h | 12 unsigned int nr_segs, struct request **merged_request); 14 unsigned int nr_segs); 34 unsigned int nr_segs) in blk_mq_sched_bio_merge() 39 return __blk_mq_sched_bio_merge(q, bio, nr_segs); in blk_mq_sched_bio_merge() 33 blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) blk_mq_sched_bio_merge() argument
|
H A D | blk-map.c | 26 if (data->nr_segs > UIO_MAXIOV) in bio_alloc_map_data() 29 bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask); in bio_alloc_map_data() 32 memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs); in bio_alloc_map_data() 529 unsigned int nr_segs = 0; in blk_rq_append_bio() local 534 nr_segs++; in blk_rq_append_bio() 537 blk_rq_bio_prep(rq, *bio, nr_segs); in blk_rq_append_bio() 539 if (!ll_back_merge_fn(rq, *bio, nr_segs)) { in blk_rq_append_bio()
|
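The blk-map.c excerpts above show blk_rq_append_bio() deriving nr_segs by walking the bio once and incrementing a counter per bvec before handing the total to blk_rq_bio_prep(). A minimal sketch of just that counting step, assuming a plain bio_for_each_bvec() walk is what the loop around line 534 performs; the helper name is invented:

    #include <linux/bio.h>

    static unsigned int count_bio_segments(struct bio *bio)
    {
            struct bvec_iter iter;
            struct bio_vec bv;
            unsigned int nr_segs = 0;

            /* one segment per (multi-page) bvec in the bio */
            bio_for_each_bvec(bv, bio, iter)
                    nr_segs++;

            return nr_segs;
    }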
H A D | blk.h | 90 unsigned int nr_segs) in blk_rq_bio_prep() 92 rq->nr_phys_segments = nr_segs; in blk_rq_bio_prep() 183 unsigned int nr_segs, struct request **same_queue_rq); 185 struct bio *bio, unsigned int nr_segs); 235 void __blk_queue_split(struct bio **bio, unsigned int *nr_segs); 237 unsigned int nr_segs); 89 blk_rq_bio_prep(struct request *rq, struct bio *bio, unsigned int nr_segs) blk_rq_bio_prep() argument
|
H A D | blk-mq-sched.c | 355 unsigned int nr_segs) in __blk_mq_sched_bio_merge() 364 return e->type->ops.bio_merge(q, bio, nr_segs); in __blk_mq_sched_bio_merge() 380 if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) { in __blk_mq_sched_bio_merge() 354 __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) __blk_mq_sched_bio_merge() argument
|
/kernel/linux/linux-6.6/block/ |
H A D | blk-merge.c
    345   * @nr_segs: returns the number of segments in the returned bio
    356   unsigned int *nr_segs)   in __bio_split_to_limits()
    364   split = bio_split_discard(bio, lim, nr_segs, bs);   in __bio_split_to_limits()
    367   split = bio_split_write_zeroes(bio, lim, nr_segs, bs);   in __bio_split_to_limits()
    370   split = bio_split_rw(bio, lim, nr_segs, bs,   in __bio_split_to_limits()
    404   unsigned int nr_segs;   in bio_split_to_limits() local
    407   return __bio_split_to_limits(bio, lim, &nr_segs);   in bio_split_to_limits()
    636   int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)   in ll_back_merge_fn() argument
    651   return ll_new_hw_segment(req, bio, nr_segs);   in ll_back_merge_fn()
    655   unsigned int nr_segs)   in ll_front_merge_fn()
    354   __bio_split_to_limits(struct bio *bio, const struct queue_limits *lim, unsigned int *nr_segs)   __bio_split_to_limits() argument
    654   ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)   ll_front_merge_fn() argument
    977   bio_attempt_back_merge(struct request *req, struct bio *bio, unsigned int nr_segs)   bio_attempt_back_merge() argument
    1003  bio_attempt_front_merge(struct request *req, struct bio *bio, unsigned int nr_segs)   bio_attempt_front_merge() argument
    1056  blk_attempt_bio_merge(struct request_queue *q, struct request *rq, struct bio *bio, unsigned int nr_segs, bool sched_allow_merge)   blk_attempt_bio_merge() argument
    1103  blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs)   blk_attempt_plug_merge() argument
    1135  blk_bio_list_merge(struct request_queue *q, struct list_head *list, struct bio *bio, unsigned int nr_segs)   blk_bio_list_merge() argument
    1160  blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs, struct request **merged_request)   blk_mq_sched_try_merge() argument
    ... (further matches elided)
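bio_split_to_limits() above is the 6.6 entry point that bio-based drivers call so a too-large bio is cut down and nr_segs reflects only the piece that continues down the stack. A minimal caller sketch, assuming an invented my_submit_bio() driver hook; bio_split_to_limits() itself is the real interface and may return NULL when the original bio was already completed (e.g. on error):

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    static void my_submit_bio(struct bio *bio)
    {
            /* Split off a piece that fits the queue limits; the remainder
             * is resubmitted internally, only the first piece comes back. */
            bio = bio_split_to_limits(bio);
            if (!bio)
                    return;

            /* ... handle the now limit-conforming bio ... */
            bio_endio(bio);
    }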
H A D | blk-map.c
    26    if (data->nr_segs > UIO_MAXIOV)   in bio_alloc_map_data()
    29    bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);   in bio_alloc_map_data()
    34    memcpy(bmd->iov, iter_iov(data), sizeof(struct iovec) * data->nr_segs);   in bio_alloc_map_data()
    541   unsigned int nr_segs = 0;   in blk_rq_append_bio() local
    544   nr_segs++;   in blk_rq_append_bio()
    547   blk_rq_bio_prep(rq, bio, nr_segs);   in blk_rq_append_bio()
    549   if (!ll_back_merge_fn(rq, bio, nr_segs))   in blk_rq_append_bio()
    566   size_t nr_segs = iter->nr_segs;   in blk_rq_map_user_bvec() local
    575   if (nr_segs > queue_max_segment ...   in blk_rq_map_user_bvec()
    ... (further matches elided)
H A D | blk-mq-sched.h | 11 unsigned int nr_segs, struct request **merged_request); 13 unsigned int nr_segs);
|
H A D | blk.h | 259 unsigned int nr_segs); 261 struct bio *bio, unsigned int nr_segs); 322 unsigned int *nr_segs); 324 unsigned int nr_segs);
|
H A D | blk-mq-sched.c | 340 unsigned int nr_segs) in blk_mq_sched_bio_merge() 349 ret = e->type->ops.bio_merge(q, bio, nr_segs); in blk_mq_sched_bio_merge() 367 if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) in blk_mq_sched_bio_merge() 339 blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) blk_mq_sched_bio_merge() argument
|
H A D | blk-crypto-fallback.c | 162 unsigned int nr_segs = bio_segments(bio_src); in blk_crypto_fallback_clone_bio() local 167 bio = bio_kmalloc(nr_segs, GFP_NOIO); in blk_crypto_fallback_clone_bio() 170 bio_init(bio, bio_src->bi_bdev, bio->bi_inline_vecs, nr_segs, in blk_crypto_fallback_clone_bio()
|
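blk-crypto-fallback.c above (and bcache's debug.c later in this list) sizes a cloned bio from bio_segments() of the source and then initialises it with that nr_segs as the inline-vec count. A minimal sketch of that allocation skeleton against the 6.6 API; the function name is invented, and copying the source's bvecs and data is deliberately left out:

    #include <linux/bio.h>

    static struct bio *clone_skeleton(struct bio *bio_src)
    {
            unsigned int nr_segs = bio_segments(bio_src);
            struct bio *bio;

            /* bio_kmalloc() only allocates; bio_init() wires up the
             * inline bio_vec table sized for nr_segs segments. */
            bio = bio_kmalloc(nr_segs, GFP_NOIO);
            if (!bio)
                    return NULL;

            bio_init(bio, bio_src->bi_bdev, bio->bi_inline_vecs, nr_segs,
                     bio_src->bi_opf);
            bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;

            return bio;
    }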
/kernel/linux/linux-6.6/include/linux/ |
H A D | uio.h
    38    unsigned long nr_segs;   member
    81    unsigned long nr_segs;   member
    106   state->nr_segs = iter->nr_segs;   in iov_iter_save_state()
    156   static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)   in iov_length() argument
    161   for (seg = 0; seg < nr_segs; seg++)   in iov_length()
    279   unsigned long nr_segs, size_t count);
    281   unsigned long nr_segs, size_t count);
    283   unsigned long nr_segs, size_t count);
    367   unsigned long nr_segs, unsigned ...
    ... (further matches elided)
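The iov_iter_save_state() hit above records count, nr_segs and iov_offset so a caller can rewind an iterator after a failed or partial transfer. A minimal sketch of the save/restore pairing, assuming an invented copy_with_retry() wrapper; iov_iter_save_state(), iov_iter_restore() and copy_from_iter() are the real interfaces:

    #include <linux/uio.h>

    static size_t copy_with_retry(void *dst, size_t len, struct iov_iter *from)
    {
            struct iov_iter_state state;
            size_t copied;

            iov_iter_save_state(from, &state);   /* snapshot count, nr_segs, iov_offset */

            copied = copy_from_iter(dst, len, from);
            if (copied != len) {
                    /* Short copy: put the iterator back where it was. */
                    iov_iter_restore(from, &state);
                    return 0;
            }
            return copied;
    }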
/kernel/linux/linux-5.10/lib/ |
H A D | iov_iter.c
    112   i->nr_segs -= i->bvec - bvec; \
    122   i->nr_segs -= kvec - i->kvec; \
    134   i->nr_segs -= iov - i->iov; \
    242   i->nr_segs -= iov - i->iov;   in copy_page_to_iter_iovec()
    326   i->nr_segs -= iov - i->iov;   in copy_page_from_iter_iovec()
    450   const struct iovec *iov, unsigned long nr_segs,   in iov_iter_init()
    464   i->nr_segs = nr_segs;   in iov_iter_init()
    1121  i->nr_segs++;   in iov_iter_revert()
    1133  i->nr_segs ...   in iov_iter_revert()
    449   iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov, unsigned long nr_segs, size_t count)   iov_iter_init() argument
    1163  iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec, unsigned long nr_segs, size_t count)   iov_iter_kvec() argument
    1176  iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec, unsigned long nr_segs, size_t count)   iov_iter_bvec() argument
    1650  copy_compat_iovec_from_user(struct iovec *iov, const struct iovec __user *uvec, unsigned long nr_segs)   copy_compat_iovec_from_user() argument
    1682  copy_iovec_from_user(struct iovec *iov, const struct iovec __user *uvec, unsigned long nr_segs)   copy_iovec_from_user() argument
    1697  iovec_from_user(const struct iovec __user *uvec, unsigned long nr_segs, unsigned long fast_segs, struct iovec *fast_iov, bool compat)   iovec_from_user() argument
    1732  __import_iovec(int type, const struct iovec __user *uvec, unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, struct iov_iter *i, bool compat)   __import_iovec() argument
    1801  import_iovec(int type, const struct iovec __user *uvec, unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, struct iov_iter *i)   import_iovec() argument
    ... (further matches elided)
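import_iovec() listed above is how read/write syscall paths turn a user-space iovec array plus nr_segs into an initialised iov_iter. A minimal sketch of the usual calling convention in this 5.10 tree (direction given as READ); the wrapper name is invented, and the unconditional kfree() assumes import_iovec() leaves the pointer NULL when the on-stack fast array sufficed, which is believed to match this version's behaviour:

    #include <linux/uio.h>
    #include <linux/slab.h>

    static ssize_t import_example(const struct iovec __user *uvec,
                                  unsigned long nr_segs, struct iov_iter *iter)
    {
            struct iovec iovstack[UIO_FASTIOV];
            struct iovec *iov = iovstack;
            ssize_t ret;

            /* Copies up to nr_segs iovecs from user space, switching to a
             * kmalloc'ed array when nr_segs > UIO_FASTIOV; on success it
             * returns the total byte count and fills in *iter. */
            ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, iter);
            if (ret < 0)
                    return ret;

            /* ... consume iter, e.g. with copy_to_iter() ... */

            kfree(iov);   /* NULL (a no-op) when iovstack was used */
            return ret;
    }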
/kernel/linux/linux-6.6/fs/smb/server/ |
H A D | transport_tcp.c
    111   * @nr_segs: number of segments in base iov
    117   unsigned int nr_segs, size_t bytes)   in kvec_array_init()
    128   nr_segs--;   in kvec_array_init()
    133   memcpy(new, iov, sizeof(*iov) * nr_segs);   in kvec_array_init()
    136   return nr_segs;   in kvec_array_init()
    142   * @nr_segs: number of segments in iov
    146   static struct kvec *get_conn_iovec(struct tcp_transport *t, unsigned int nr_segs)   in get_conn_iovec() argument
    150   if (t->iov && nr_segs <= t->nr_iov)   in get_conn_iovec()
    154   new_iov = kmalloc_array(nr_segs, sizeof(*new_iov), GFP_KERNEL);   in get_conn_iovec()
    158   t->nr_iov = nr_segs;   in get_conn_iovec()
    116   kvec_array_init(struct kvec *new, struct kvec *iov, unsigned int nr_segs, size_t bytes)   kvec_array_init() argument
    304   ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig, unsigned int nr_segs, unsigned int to_read, int max_retries)   ksmbd_tcp_readv() argument
    ... (further matches elided)
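ksmbd_tcp_readv() above receives a fixed number of bytes into an nr_segs-long kvec array, and kvec_array_init() rebuilds that array past whatever was already read when the socket returns short. A minimal, hedged sketch of the underlying receive call only (one shot, no retry logic); the helper name is invented, while kernel_recvmsg() and MSG_WAITALL are the real interfaces:

    #include <linux/net.h>
    #include <linux/socket.h>
    #include <linux/uio.h>

    static int recv_into_kvec(struct socket *sock, struct kvec *vec,
                              unsigned int nr_segs, size_t to_read)
    {
            struct msghdr msg = { .msg_flags = MSG_WAITALL };

            /* Asks the socket layer to fill the nr_segs segments with
             * to_read bytes; a short return still has to be handled,
             * which is what the kvec_array_init()/retry loop above does. */
            return kernel_recvmsg(sock, &msg, vec, nr_segs, to_read,
                                  MSG_WAITALL);
    }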
/kernel/linux/linux-5.10/drivers/hwtracing/intel_th/ |
H A D | msu.c
    63    * @nr_segs: number of segments in this window (<= @nr_blocks)
    73    unsigned int nr_segs;   member
    330   for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {   in msc_win_total_sz()
    425   for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {   in msc_win_oldest_sg()
    664   for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {   in msc_buffer_clear_hw_header()
    994   unsigned int nr_segs)   in __msc_buffer_win_alloc()
    1000  ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);   in __msc_buffer_win_alloc()
    1004  for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {   in __msc_buffer_win_alloc()
    1014  return nr_segs;   in __msc_buffer_win_alloc()
    1027  static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs)   in msc_buffer_set_uc() argument
    993   __msc_buffer_win_alloc(struct msc_window *win, unsigned int nr_segs)   __msc_buffer_win_alloc() argument
    1052  msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs)   msc_buffer_set_uc() argument
    ... (further matches elided)
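__msc_buffer_win_alloc() above builds a capture window as an sg_table with nr_segs entries and then walks it with for_each_sg(). A minimal sketch of that allocation shape, with invented names (dev, seg_size) and deliberately simplified error unwinding; sg_alloc_table(), for_each_sg(), sg_set_buf() and dma_alloc_coherent() are the real interfaces:

    #include <linux/scatterlist.h>
    #include <linux/dma-mapping.h>

    static int win_alloc_sketch(struct device *dev, struct sg_table *sgt,
                                unsigned int nr_segs, size_t seg_size)
    {
            struct scatterlist *sg;
            unsigned int i;
            int ret;

            ret = sg_alloc_table(sgt, nr_segs, GFP_KERNEL);
            if (ret)
                    return ret;

            for_each_sg(sgt->sgl, sg, nr_segs, i) {
                    dma_addr_t dma;
                    void *buf = dma_alloc_coherent(dev, seg_size, &dma,
                                                   GFP_KERNEL);

                    if (!buf) {
                            /* the real code also frees the chunks already
                             * allocated before giving up */
                            sg_free_table(sgt);
                            return -ENOMEM;
                    }
                    sg_set_buf(sg, buf, seg_size);
                    sg_dma_address(sg) = dma;
            }

            return nr_segs;
    }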
/kernel/linux/linux-5.10/arch/powerpc/mm/ |
H A D | dma-noncoherent.c | 65 int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE; in __dma_sync_page_highmem() local 84 } while (seg_nr < nr_segs); in __dma_sync_page_highmem()
|
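The powerpc highmem sync code above sizes its loop as one partial first segment plus a rounded-up page count for the remainder. That expression is just a round-up division, which a short sketch can restate with DIV_ROUND_UP(); the helper name here is invented, the arithmetic is the point:

    #include <linux/kernel.h>
    #include <asm/page.h>

    /* e.g. size = 2*PAGE_SIZE + 300 with seg_size = 300 gives 1 + 2 = 3 */
    static inline int highmem_nr_segs(size_t size, size_t seg_size)
    {
            return 1 + DIV_ROUND_UP(size - seg_size, PAGE_SIZE);
    }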
/kernel/linux/linux-6.6/arch/powerpc/mm/ |
H A D | dma-noncoherent.c | 65 int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE; in __dma_sync_page_highmem() local 84 } while (seg_nr < nr_segs); in __dma_sync_page_highmem()
|
/kernel/linux/linux-5.10/drivers/scsi/ |
H A D | xen-scsifront.c
    1057  unsigned int sg_grant, nr_segs;   in scsifront_read_backend_params() local
    1061  nr_segs = min_t(unsigned int, sg_grant, SG_ALL);   in scsifront_read_backend_params()
    1062  nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);   in scsifront_read_backend_params()
    1063  nr_segs = min_t(unsigned int, nr_segs,   in scsifront_read_backend_params()
    1068  dev_info(&dev->dev, "using up to %d SG entries\n", nr_segs);   in scsifront_read_backend_params()
    1069  else if (info->pause && nr_segs < host->sg_tablesize)   in scsifront_read_backend_params()
    1072  host->sg_tablesize, nr_segs);   in scsifront_read_backend_params()
    1074  host->sg_tablesize = nr_segs;   in scsifront_read_backend_params()
    ... (further matches elided)
/kernel/linux/linux-6.6/drivers/scsi/ |
H A D | xen-scsifront.c
    1132  unsigned int sg_grant, nr_segs;   in scsifront_read_backend_params() local
    1136  nr_segs = min_t(unsigned int, sg_grant, SG_ALL);   in scsifront_read_backend_params()
    1137  nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);   in scsifront_read_backend_params()
    1138  nr_segs = min_t(unsigned int, nr_segs,   in scsifront_read_backend_params()
    1143  dev_info(&dev->dev, "using up to %d SG entries\n", nr_segs);   in scsifront_read_backend_params()
    1144  else if (info->pause && nr_segs < host->sg_tablesize)   in scsifront_read_backend_params()
    1147  host->sg_tablesize, nr_segs);   in scsifront_read_backend_params()
    1149  host->sg_tablesize = nr_segs;   in scsifront_read_backend_params()
    ... (further matches elided)
/kernel/linux/linux-6.6/drivers/hwtracing/intel_th/ |
H A D | msu.c
    63    * @nr_segs: number of segments in this window (<= @nr_blocks)
    73    unsigned int nr_segs;   member
    330   for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {   in msc_win_total_sz()
    425   for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {   in msc_win_oldest_sg()
    662   for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {   in msc_buffer_clear_hw_header()
    992   unsigned int nr_segs)   in __msc_buffer_win_alloc()
    998   ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);   in __msc_buffer_win_alloc()
    1002  for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {   in __msc_buffer_win_alloc()
    1012  return nr_segs;   in __msc_buffer_win_alloc()
    1037  for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, ...   in msc_buffer_set_uc()
    991   __msc_buffer_win_alloc(struct msc_window *win, unsigned int nr_segs)   __msc_buffer_win_alloc() argument
    ... (further matches elided)
/kernel/linux/linux-6.6/drivers/md/bcache/ |
H A D | debug.c | 110 unsigned int nr_segs = bio_segments(bio); in bch_data_verify() local 115 check = bio_kmalloc(nr_segs, GFP_NOIO); in bch_data_verify() 118 bio_init(check, bio->bi_bdev, check->bi_inline_vecs, nr_segs, in bch_data_verify()
|
/kernel/linux/linux-5.10/fs/fuse/ |
H A D | dev.c
    657   unsigned long nr_segs;   member
    713   BUG_ON(!cs->nr_segs);   in fuse_copy_fill()
    719   cs->nr_segs--;   in fuse_copy_fill()
    721   if (cs->nr_segs >= cs->pipe->max_usage)   in fuse_copy_fill()
    737   cs->nr_segs++;   in fuse_copy_fill()
    813   BUG_ON(!cs->nr_segs);   in fuse_try_move_page()
    817   cs->nr_segs--;   in fuse_try_move_page()
    909   if (cs->nr_segs >= cs->pipe->max_usage)   in fuse_ref_page()
    927   cs->nr_segs++;   in fuse_ref_page()
    1404  if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pip ...   in fuse_dev_splice_read()
    ... (further matches elided)
/kernel/linux/linux-6.6/fs/fuse/ |
H A D | dev.c
    651   unsigned long nr_segs;   member
    707   BUG_ON(!cs->nr_segs);   in fuse_copy_fill()
    713   cs->nr_segs--;   in fuse_copy_fill()
    715   if (cs->nr_segs >= cs->pipe->max_usage)   in fuse_copy_fill()
    731   cs->nr_segs++;   in fuse_copy_fill()
    807   BUG_ON(!cs->nr_segs);   in fuse_try_move_page()
    811   cs->nr_segs--;   in fuse_try_move_page()
    900   if (cs->nr_segs >= cs->pipe->max_usage)   in fuse_ref_page()
    918   cs->nr_segs++;   in fuse_ref_page()
    1395  if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pip ...   in fuse_dev_splice_read()
    ... (further matches elided)