Lines matching defs:iter (identifier search for "iter"; the hits below all appear to come from the Linux kernel's iomap direct I/O code, fs/iomap/direct-io.c)
44 struct iov_iter *iter;
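The hit at line 44 is the submission-side iterator pointer inside struct iomap_dio. Only the field itself matches the search; a rough sketch of the surrounding structure, with every member other than submit.iter assumed rather than shown in the listing:

    struct iomap_dio {
        struct kiocb *iocb;
        /* ... flags, reference count, error state (assumed) ... */
        union {
            /* used only while submitting bios: */
            struct {
                struct iov_iter *iter;      /* the caller's iterator */
                struct task_struct *waiter; /* assumed: sync completion */
            } submit;
            /* assumed: used only for async completion */
            struct {
                struct work_struct work;
            } aio;
        };
    };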
55 static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
59 return bio_alloc_bioset(iter->iomap.bdev, nr_vecs, opf,
61 return bio_alloc(iter->iomap.bdev, nr_vecs, opf, GFP_KERNEL);
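Lines 55 to 61 together are the bio allocation helper. Re-assembled, the likely shape is the following; the dio->dops->bio_set test between lines 55 and 59 is an inference from bio_alloc_bioset() taking a bio_set as its last argument:

    static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
            struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
    {
        /* filesystems may supply a private bio_set via their dio ops */
        if (dio->dops && dio->dops->bio_set)
            return bio_alloc_bioset(iter->iomap.bdev, nr_vecs, opf,
                                    GFP_KERNEL, dio->dops->bio_set);
        return bio_alloc(iter->iomap.bdev, nr_vecs, opf, GFP_KERNEL);
    }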
64 static void iomap_dio_submit_bio(const struct iomap_iter *iter,
78 dio->dops->submit_io(iter, bio, pos);
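Line 78 calls the filesystem's submission hook. A condensed sketch of the dispatch around it; the reference count bump and the submit_bio() fallback are assumptions based on the standard pattern:

    static void iomap_dio_submit_bio(const struct iomap_iter *iter,
            struct iomap_dio *dio, struct bio *bio, loff_t pos)
    {
        atomic_inc(&dio->ref);          /* one reference per in-flight bio */
        if (dio->dops && dio->dops->submit_io)
            dio->dops->submit_io(iter, bio, pos);  /* filesystem override */
        else
            submit_bio(bio);                       /* plain block submission */
    }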
235 static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
242 bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
245 bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
250 iomap_dio_submit_bio(iter, dio, bio, pos);
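Lines 235 to 250 belong to the helper that zeroes a sub-block range on disk. A sketch assuming it writes out of the shared zero page; the completion hookup is elided:

    static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
            loff_t pos, unsigned len)
    {
        struct bio *bio;

        bio = iomap_dio_alloc_bio(iter, dio, 1,
                                  REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
        bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
        /* assumed zero source; bi_end_io/bi_private setup elided */
        __bio_add_page(bio, ZERO_PAGE(0), len, 0);
        iomap_dio_submit_bio(iter, dio, bio, pos);
    }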
275 static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
278 const struct iomap *iomap = &iter->iomap;
279 struct inode *inode = iter->inode;
281 loff_t length = iomap_length(iter);
282 loff_t pos = iter->pos;
292 !bdev_iter_is_aligned(iomap->bdev, dio->submit.iter))
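Line 292 is half of the alignment gate at the top of iomap_dio_bio_iter(). The other half presumably checks the position and length against the device's logical block size, along the lines of:

    if ((pos | length) & (bdev_logical_block_size(iomap->bdev) - 1) ||
        !bdev_iter_is_aligned(iomap->bdev, dio->submit.iter))
        return -EINVAL;     /* misaligned direct I/O is rejected outright */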
326 * Save the original count and trim the iter to just the extent we
327 * are operating on right now. The iter will be re-expanded once
330 orig_count = iov_iter_count(dio->submit.iter);
331 iov_iter_truncate(dio->submit.iter, length);
333 if (!iov_iter_count(dio->submit.iter))
360 iomap_dio_zero(iter, dio, pos - pad, pad);
370 nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
374 iov_iter_revert(dio->submit.iter, copied);
379 bio = iomap_dio_alloc_bio(iter, dio, nr_pages, bio_opf);
387 ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
410 nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
417 iomap_dio_submit_bio(iter, dio, bio, pos);
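Lines 370 to 417 are the core of iomap_dio_bio_iter(): a loop that pins the iterator's pages directly into bios and submits them. The revert at line 374 rolls the iterator back by the bytes already counted when the dio has gone into an error state mid-loop. A condensed sketch with the error paths simplified:

    nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
    do {
        bio = iomap_dio_alloc_bio(iter, dio, nr_pages, bio_opf);
        bio->bi_iter.bi_sector = iomap_sector(iomap, pos);

        ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
        if (ret) {
            bio_put(bio);
            break;          /* the real code also zeroes the tail pad */
        }

        n = bio->bi_iter.bi_size;   /* bytes this bio will carry */
        pos += n;
        copied += n;

        /* size the next bio before this one is submitted */
        nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
        iomap_dio_submit_bio(iter, dio, bio, pos);
    } while (nr_pages);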
433 iomap_dio_zero(iter, dio, pos, fs_block_size - pad);
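Lines 360 and 433 are the head and tail zeroing for writes that do not cover whole filesystem blocks: the partial block in front of the data and the one behind it are padded with zeroes so no stale data is exposed. The pad arithmetic below is inferred from the two call sites:

    /* head: zero from the fs-block boundary up to pos */
    pad = pos & (fs_block_size - 1);
    if (pad)
        iomap_dio_zero(iter, dio, pos - pad, pad);

    /* ... data bios are issued, advancing pos ... */

    /* tail: zero from the end of the data to the next boundary */
    pad = pos & (fs_block_size - 1);
    if (pad)
        iomap_dio_zero(iter, dio, pos, fs_block_size - pad);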
436 /* Undo iter limitation to current extent */
437 iov_iter_reexpand(dio->submit.iter, orig_count - copied);
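Lines 326 to 333 and 436 to 437 bracket the whole function: the caller's iov_iter spans the entire request, so it is clipped to the current extent on entry and re-expanded on the way out. The pattern, with copied counting the bytes consumed in between:

    orig_count = iov_iter_count(dio->submit.iter);
    iov_iter_truncate(dio->submit.iter, length);    /* clip to this extent */

    if (!iov_iter_count(dio->submit.iter))
        goto out;                                   /* nothing left to do */

    /* ... build and submit bios, consuming up to 'length' bytes ... */

    out:
        /* make the bytes beyond this extent visible again */
        iov_iter_reexpand(dio->submit.iter, orig_count - copied);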
443 static loff_t iomap_dio_hole_iter(const struct iomap_iter *iter,
446 loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);
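Line 446 is essentially the whole hole case on the read side: nothing exists on disk, so the user buffer is zero-filled straight through the iterator. The remainder of the helper is assumed to look like:

    static loff_t iomap_dio_hole_iter(const struct iomap_iter *iter,
            struct iomap_dio *dio)
    {
        loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);

        dio->size += length;
        if (!length)
            return -EFAULT;     /* could not access the user buffer */
        return length;
    }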
458 struct iov_iter *iter = dio->submit.iter;
472 copied = copy_from_iter(inline_data, length, iter);
479 copied = copy_to_iter(inline_data, length, iter);
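Lines 458 to 479 handle inline data, i.e. file data stored inside the inode itself, so no bio is needed. Writes copy from the iterator into the inline area and reads copy out of it; a trimmed sketch of the two directions, with the write-side bookkeeping elided:

    if (dio->flags & IOMAP_DIO_WRITE) {
        /* user data lands directly in the inline area */
        copied = copy_from_iter(inline_data, length, iter);
        /* real code also zero-fills the tail and may bump i_size */
    } else {
        /* hand the inline bytes back to the caller */
        copied = copy_to_iter(inline_data, length, iter);
    }
    dio->size += copied;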
487 static loff_t iomap_dio_iter(const struct iomap_iter *iter,
490 switch (iter->iomap.type) {
494 return iomap_dio_hole_iter(iter, dio);
497 return iomap_dio_hole_iter(iter, dio);
498 return iomap_dio_bio_iter(iter, dio);
500 return iomap_dio_bio_iter(iter, dio);
502 return iomap_dio_inline_iter(iter, dio);
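Lines 487 to 502 form the per-extent dispatcher. The case labels do not contain "iter" and therefore do not show up in the listing; reconstructing them from the standard iomap mapping types gives a switch along these lines:

    switch (iter->iomap.type) {
    case IOMAP_HOLE:
        /* a direct write must never land in a hole */
        if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
            return -EIO;
        return iomap_dio_hole_iter(iter, dio);
    case IOMAP_UNWRITTEN:
        if (!(dio->flags & IOMAP_DIO_WRITE))
            return iomap_dio_hole_iter(iter, dio);  /* reads see zeroes */
        return iomap_dio_bio_iter(iter, dio);
    case IOMAP_MAPPED:
        return iomap_dio_bio_iter(iter, dio);
    case IOMAP_INLINE:
        return iomap_dio_inline_iter(iter, dio);
    default:
        WARN_ON_ONCE(1);
        return -EIO;
    }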
530 * page in @iter after preparing a transfer. In that case, the non-resident
540 __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
548 .len = iov_iter_count(iter),
558 trace_iomap_dio_rw_begin(iocb, iter, dio_flags, done_before);
576 dio->submit.iter = iter;
582 if (iov_iter_rw(iter) == READ) {
589 if (user_backed_iter(iter))
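Lines 540 to 589 are the setup in __iomap_dio_rw(): the iomap_iter's length is seeded from the iterator's byte count, the iterator itself is stashed in dio->submit.iter, and reads over user-backed memory are flagged so the pinned pages can be dirtied at completion. A sketch of the read branch (the out_free_dio label is hypothetical):

    dio->submit.iter = iter;

    if (iov_iter_rw(iter) == READ) {
        if (iomi.pos >= dio->i_size)
            goto out_free_dio;      /* nothing to read past EOF */
        if (user_backed_iter(iter))
            dio->flags |= IOMAP_DIO_DIRTY;
    }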
671 * Revert iter to a state corresponding to that as some callers (such
674 if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
675 iov_iter_revert(iter, iomi.pos - dio->i_size);
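Lines 671 to 675 fix up short reads that crossed EOF: iteration may have advanced iomi.pos past i_size (the overshoot was zero-filled, never read from disk), and some callers rely on the iterator reflecting only bytes actually transferred, so the excess is rolled back:

    /* reads must not appear to consume iterator space past EOF */
    if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
        iov_iter_revert(iter, iomi.pos - dio->i_size);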
742 iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
748 dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, private,
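Lines 742 to 748 are the synchronous wrapper: it runs __iomap_dio_rw() and, presumably, turns the returned dio handle into a byte count by waiting for completion. A sketch of the likely remainder:

    ssize_t
    iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
            const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
            unsigned int dio_flags, void *private, size_t done_before)
    {
        struct iomap_dio *dio;

        dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, private,
                             done_before);
        if (IS_ERR_OR_NULL(dio))
            return PTR_ERR_OR_ZERO(dio);
        return iomap_dio_complete(dio);     /* waits, then returns bytes */
    }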