Lines matching references to "iov" (apparently drivers/vhost/vringh.c in the Linux kernel; the leading number on each line is its line number in that file):
80 struct vringh_kiov *iov,
88 while (len && iov->i < iov->used) {
91 partlen = min(iov->iov[iov->i].iov_len, len);
92 err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
98 iov->consumed += partlen;
99 iov->iov[iov->i].iov_len -= partlen;
100 iov->iov[iov->i].iov_base += partlen;
102 if (!iov->iov[iov->i].iov_len) {
103 /* Fix up old iov element then increment. */
104 iov->iov[iov->i].iov_len = iov->consumed;
105 iov->iov[iov->i].iov_base -= iov->consumed;
108 iov->consumed = 0;
109 iov->i++;
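
Lines 80-109 above are from vringh's iov-consumption helper (vringh_iov_xfer() in recent kernels): each partial transfer advances iov_base/iov_len of the current element, and once an element is drained its original base and length are restored from the consumed counter before moving on, apparently so the array still describes the original buffers afterwards. A minimal userspace sketch of that bookkeeping, with a hypothetical struct kiov in place of struct vringh_kiov and memcpy() standing in for the xfer callback:

    #include <stddef.h>
    #include <string.h>
    #include <sys/uio.h>

    /* Hypothetical userspace analogue of struct vringh_kiov. */
    struct kiov {
        struct iovec *iov;
        size_t consumed;      /* bytes consumed in the current element */
        unsigned int i;       /* current element */
        unsigned int used;    /* elements filled in */
        unsigned int max_num; /* capacity (top bit reused as a flag below) */
    };

    /* Copy up to len bytes out of the kiov. Each element is modified
     * as it drains, then restored from 'consumed' once empty, so the
     * array still describes the original buffers afterwards. */
    static size_t kiov_copy_out(struct kiov *iov, void *ptr, size_t len)
    {
        size_t done = 0;

        while (len && iov->i < iov->used) {
            struct iovec *cur = &iov->iov[iov->i];
            size_t partlen = cur->iov_len < len ? cur->iov_len : len;

            memcpy((char *)ptr + done, cur->iov_base, partlen);
            done += partlen;
            len -= partlen;
            iov->consumed += partlen;
            cur->iov_len -= partlen;
            cur->iov_base = (char *)cur->iov_base + partlen;

            if (!cur->iov_len) {
                /* Fix up the drained element, then advance. */
                cur->iov_len = iov->consumed;
                cur->iov_base = (char *)cur->iov_base - iov->consumed;
                iov->consumed = 0;
                iov->i++;
            }
        }
        return done;
    }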
191 static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
194 unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;
199 flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
201 new = krealloc(iov->iov, new_num * sizeof(struct iovec), gfp);
205 memcpy(new, iov->iov,
206 iov->max_num * sizeof(struct iovec));
212 iov->iov = new;
213 iov->max_num = (new_num | flag);
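
resize_iovec() (191-213) doubles capacity on demand. The VRINGH_IOV_ALLOCATED flag is folded into the top bit of max_num: if set, the array was allocated by vringh itself and can be krealloc()ed in place; if clear, the array is caller-provided, so a fresh allocation plus memcpy() is needed and the flag is set on the copy. A hedged userspace model of the flag-in-capacity trick, reusing the hypothetical struct kiov above (its max_num field carries the flag in the top bit):

    #include <stdlib.h>
    #include <string.h>
    #include <sys/uio.h>

    #define IOV_ALLOCATED (1u << 31)  /* stand-in for VRINGH_IOV_ALLOCATED */

    /* Grow the array, preserving the "who owns this memory" flag that
     * lives in the top bit of max_num. Returns 0 on success. */
    static int kiov_resize(struct kiov *iov)
    {
        unsigned int flag = iov->max_num & IOV_ALLOCATED;
        unsigned int new_num = (iov->max_num & ~IOV_ALLOCATED) * 2;
        struct iovec *new;

        if (new_num < 8)
            new_num = 8;

        if (flag) {
            /* We allocated it: realloc in place. */
            new = realloc(iov->iov, new_num * sizeof(*new));
        } else {
            /* Caller-provided storage: allocate our own and copy.
             * max_num has no flag bit here, so it is the plain count. */
            new = malloc(new_num * sizeof(*new));
            if (new) {
                memcpy(new, iov->iov, iov->max_num * sizeof(*new));
                flag = IOV_ALLOCATED;
            }
        }
        if (!new)
            return -1;
        iov->iov = new;
        iov->max_num = new_num | flag;
        return 0;
    }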
298 struct vringh_kiov *iov;
346 iov = wiov;
348 iov = riov;
357 if (!iov) {
375 if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
376 err = resize_iovec(iov, gfp);
381 iov->iov[iov->used].iov_base = addr;
382 iov->iov[iov->used].iov_len = len;
383 iov->used++;
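
Lines 298-383 are from the descriptor walk (apparently __vringh_iov()): each descriptor is steered into either the readable iov (riov) or the writable iov (wiov) and appended, growing the array first when used reaches the masked capacity. The append path, continuing the sketch above:

    /* Append one (addr, len) range, growing on demand; continues the
     * hypothetical struct kiov / kiov_resize() sketch above. */
    static int kiov_append(struct kiov *iov, void *addr, size_t len)
    {
        if (iov->used == (iov->max_num & ~IOV_ALLOCATED)) {
            if (kiov_resize(iov))
                return -1;  /* out of memory */
        }
        iov->iov[iov->used].iov_base = addr;
        iov->iov[iov->used].iov_len = len;
        iov->used++;
        return 0;
    }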
692 BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
693 offsetof(struct vringh_iov, iov));
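
The BUILD_BUG_ON at 692-693 (apparently in vringh_getdesc_user()) asserts at compile time that struct vringh_iov and struct vringh_kiov keep their iov member at the same offset, which is what lets the user-facing functions cast one to the other. The same idiom in plain C11, with hypothetical model structs:

    #include <stddef.h>
    #include <sys/uio.h>

    /* Hypothetical stand-ins for the two vringh iov flavours. */
    struct iov_model  { struct iovec *iov; size_t consumed; unsigned int i, used; };
    struct kiov_model { struct iovec *iov; size_t consumed; unsigned int i, used; };

    /* C11 equivalent of the kernel's BUILD_BUG_ON(): compilation
     * fails if the layouts ever diverge at this member. */
    _Static_assert(offsetof(struct iov_model, iov) ==
                   offsetof(struct kiov_model, iov),
                   "iov member must sit at the same offset in both structs");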
1074 u64 addr, u64 len, struct bio_vec iov[],
1102 iov[ret].bv_page = pfn_to_page(pfn);
1103 iov[ret].bv_len = min(len - s, size);
1104 iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
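
iotlb_translate() (1074-1104) resolves a guest address range through the IOTLB into an array of struct bio_vec, one entry per physical-page fragment: bv_page from the translated PFN, bv_len capped at the smaller of the remaining length and the space left in the page, bv_offset the offset within the page. A userspace sketch of the same page-splitting arithmetic (identity mapping stands in for the real IOTLB lookup; all names are hypothetical):

    #include <stdint.h>

    #define PAGE_SIZE 4096u  /* assumed page size for the sketch */

    struct seg { uint64_t page; uint32_t len, offset; };

    /* Split [addr, addr + len) into page-granular segments, the way
     * iotlb_translate() fills one bio_vec per translated page. */
    static int split_range(uint64_t addr, uint64_t len,
                           struct seg segs[], int max_segs)
    {
        uint64_t s = 0;
        int ret = 0;

        while (s < len) {
            uint64_t pa = addr + s;  /* identity "translation" */
            uint64_t space = PAGE_SIZE - (pa & (PAGE_SIZE - 1));

            if (ret == max_segs)
                return -1;  /* range needs more segments than we have */

            segs[ret].page = pa / PAGE_SIZE;        /* pfn_to_page() analogue */
            segs[ret].len = space < len - s ? space : len - s;
            segs[ret].offset = pa & (PAGE_SIZE - 1);
            s += segs[ret].len;
            ret++;
        }
        return ret;  /* number of segments used */
    }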
1117 struct bio_vec iov[16];
1121 len, iov, 16, VHOST_MAP_RO);
1125 iov_iter_bvec(&iter, READ, iov, ret, len);
1136 struct bio_vec iov[16];
1140 len, iov, 16, VHOST_MAP_WO);
1144 iov_iter_bvec(&iter, WRITE, iov, ret, len);
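
copy_from_iotlb() and copy_to_iotlb() (1117-1144) translate into at most 16 bio_vecs on the stack, wrap them in an iov_iter with iov_iter_bvec(), and let copy_from_iter()/copy_to_iter() do the scatter-gather. A userspace analogue of the gather side, reusing split_range() above with a flat buffer standing in for guest pages:

    #include <string.h>

    /* Gather len bytes described by segs[] from a flat backing buffer,
     * the way copy_from_iotlb() drains the bio_vec-backed iov_iter.
     * 'mem' stands in for guest memory; reuses struct seg above. */
    static size_t copy_from_segs(const char *mem, void *dst, size_t len,
                                 const struct seg segs[], int nsegs)
    {
        size_t done = 0;

        for (int i = 0; i < nsegs && done < len; i++) {
            size_t n = segs[i].len;

            if (n > len - done)
                n = len - done;
            memcpy((char *)dst + done,
                   mem + segs[i].page * PAGE_SIZE + segs[i].offset, n);
            done += n;
        }
        return done;  /* like copy_from_iter(): bytes actually copied */
    }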
1152 struct bio_vec iov;
1158 &iov, 1, VHOST_MAP_RO);
1162 kaddr = kmap_atomic(iov.bv_page);
1163 from = kaddr + iov.bv_offset;
1173 struct bio_vec iov;
1179 &iov, 1, VHOST_MAP_WO);
1183 kaddr = kmap_atomic(iov.bv_page);
1184 to = kaddr + iov.bv_offset;
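
getu16_iotlb() and putu16_iotlb() (1152-1184) handle the two-byte ring fields: a single-element bio_vec suffices because a naturally aligned u16 cannot straddle a page boundary (the kernel's own comment notes the translation covers a single page, which is why kmap_atomic() is safe here), and the mapped page plus bv_offset gives the address to read or write. A last sketch on the same model, with memcpy() in place of the kmap_atomic() access:

    #include <stdint.h>
    #include <string.h>

    /* Read one u16 through the translation, mirroring getu16_iotlb():
     * translate into a single segment, then access page + offset.
     * Reuses split_range()/struct seg above; 'mem' is the flat
     * guest-memory stand-in again. */
    static int get_u16(const char *mem, uint16_t *val, uint64_t addr)
    {
        struct seg seg;

        if (split_range(addr, sizeof(*val), &seg, 1) != 1)
            return -1;

        memcpy(val, mem + seg.page * PAGE_SIZE + seg.offset, sizeof(*val));
        return 0;
    }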