Lines Matching refs:dio

62 static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
65 atomic_inc(&dio->ref);
67 if (dio->iocb->ki_flags & IOCB_HIPRI)
68 bio_set_polled(bio, dio->iocb);
70 dio->submit.last_queue = bdev_get_queue(iomap->bdev);
71 if (dio->dops && dio->dops->submit_io)
72 dio->submit.cookie = dio->dops->submit_io(
73 file_inode(dio->iocb->ki_filp),
76 dio->submit.cookie = submit_bio(bio);
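
The lines above are iomap_dio_submit_bio(), and the listing as a whole appears to be fs/iomap/direct-io.c from roughly the Linux v5.10 era (bio_set_polled() and a blk_qc_t cookie returned by submit_bio() date it). The detail worth noticing is the ordering: dio->ref is incremented before the bio is submitted, because the completion handler may run, and drop its reference, before submit_bio() even returns. A minimal userspace analogy of that rule, using C11 atomics and a thread as the "device"; fake_dio, fake_submit_bio and fake_bio_end_io are invented names, not kernel API:

/*
 * Userspace sketch of the inc-before-submit rule in iomap_dio_submit_bio():
 * take the reference before handing the work to the "device", because
 * completion can race with (and finish before) the submit call.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_dio {
        atomic_int ref;                 /* plays the role of dio->ref */
};

static void *fake_bio_end_io(void *arg)
{
        struct fake_dio *dio = arg;

        /* the completion may run before the submitter regains control */
        if (atomic_fetch_sub(&dio->ref, 1) == 1)
                printf("completion side dropped the last reference\n");
        return NULL;
}

static void fake_submit_bio(struct fake_dio *dio, pthread_t *thread)
{
        /* grab the per-bio reference BEFORE submission, as the kernel does */
        atomic_fetch_add(&dio->ref, 1);
        pthread_create(thread, NULL, fake_bio_end_io, dio);
}

int main(void)
{
        struct fake_dio dio = { .ref = 1 };     /* submitter's own reference */
        pthread_t thread;

        fake_submit_bio(&dio, &thread);
        pthread_join(thread, NULL);

        if (atomic_fetch_sub(&dio.ref, 1) == 1)
                printf("submitter dropped the last reference\n");
        return 0;
}
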
79 ssize_t iomap_dio_complete(struct iomap_dio *dio)
81 const struct iomap_dio_ops *dops = dio->dops;
82 struct kiocb *iocb = dio->iocb;
85 ssize_t ret = dio->error;
88 ret = dops->end_io(iocb, dio->size, ret, dio->flags);
91 ret = dio->size;
93 if (offset + ret > dio->i_size &&
94 !(dio->flags & IOMAP_DIO_WRITE))
95 ret = dio->i_size - offset;
111 if (!dio->error && dio->size &&
112 (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
116 (offset + dio->size - 1) >> PAGE_SHIFT);
126 if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
129 kfree(dio);
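
iomap_dio_complete() first gives the filesystem's optional ->end_io hook a chance to override the result, then clamps a read that was issued across EOF (the mapping may extend past i_size) so the caller sees a short read. It also invalidates any cached pages over a successfully written range, so later buffered reads cannot see stale data, and runs the sync when IOMAP_DIO_NEED_SYNC survived. The clamp itself is plain arithmetic; a standalone sketch with invented sizes:

/*
 * Sketch of the short-read clamp in iomap_dio_complete(): a read issued
 * across EOF must report only the bytes that exist in the file.  All
 * values below are invented for illustration.
 */
#include <stdio.h>

int main(void)
{
        long long i_size = 10000;       /* dio->i_size: file size at setup */
        long long offset = 8192;        /* iocb->ki_pos at submission */
        long long ret = 4096;           /* bytes the dio actually moved */
        int is_write = 0;

        if (offset + ret > i_size && !is_write)
                ret = i_size - offset;  /* 10000 - 8192 = 1808 */

        printf("completed: %lld bytes\n", ret);
        return 0;
}
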
137 struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
138 struct kiocb *iocb = dio->iocb;
140 iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
144 * Set an error in the dio if none is set yet. We have to use cmpxchg
148 static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
150 cmpxchg(&dio->error, 0, ret);
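
With many bios in flight, several completions can race to record an errno. iomap_dio_set_error() uses cmpxchg() so only the first error sticks; later failures cannot overwrite it, and the common no-error path stays a single compare. The same first-error-wins idiom in portable C11, with atomic_compare_exchange_strong() standing in for the kernel's cmpxchg():

/*
 * First-error-wins, as in iomap_dio_set_error(): store ret only while
 * the recorded value is still 0.  C11 sketch, not the kernel primitive.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int dio_error;

static void dio_set_error(int ret)
{
        int expected = 0;

        /* succeeds only if dio_error was still 0; a previously recorded
         * errno is left untouched */
        atomic_compare_exchange_strong(&dio_error, &expected, ret);
}

int main(void)
{
        dio_set_error(-5);      /* first failing bio reports -EIO */
        dio_set_error(-22);     /* a later -EINVAL loses the race */
        printf("recorded error: %d\n", atomic_load(&dio_error)); /* -5 */
        return 0;
}
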
155 struct iomap_dio *dio = bio->bi_private;
156 bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
159 iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
161 if (atomic_dec_and_test(&dio->ref)) {
162 if (dio->wait_for_completion) {
163 struct task_struct *waiter = dio->submit.waiter;
164 WRITE_ONCE(dio->submit.waiter, NULL);
166 } else if (dio->flags & IOMAP_DIO_WRITE) {
167 struct inode *inode = file_inode(dio->iocb->ki_filp);
169 INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
170 queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
172 iomap_dio_complete_work(&dio->aio.work);
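
iomap_dio_bio_end_io() drops one reference per completed bio, and whichever context drops the last one finishes the dio by one of three routes: wake the parked synchronous submitter, punt an async write to the superblock's s_dio_done_wq workqueue (the end_io work may block, which a bio completion context must not), or complete an async read inline. A userspace analogy of the "last dropper decides" shape, with a condition variable in place of the waiter wakeup; all names are invented:

/*
 * Sketch of the last-reference decision in iomap_dio_bio_end_io():
 * every completion drops a ref; whoever hits zero either wakes the
 * synchronous waiter or completes the I/O itself.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_dio {
        atomic_int ref;
        bool wait_for_completion;       /* a synchronous caller is parked */
        bool completed;
        pthread_mutex_t lock;
        pthread_cond_t done;
};

static struct fake_dio dio = {
        .ref = 2,                       /* submitter + one in-flight bio */
        .wait_for_completion = true,
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .done = PTHREAD_COND_INITIALIZER,
};

static void drop_ref(struct fake_dio *d)
{
        if (atomic_fetch_sub(&d->ref, 1) != 1)
                return;                 /* not the last reference */
        if (d->wait_for_completion) {
                /* wake the parked synchronous submitter */
                pthread_mutex_lock(&d->lock);
                d->completed = true;
                pthread_cond_signal(&d->done);
                pthread_mutex_unlock(&d->lock);
        } else {
                /* async: complete inline, or defer writes to a worker */
                printf("async completion would run here\n");
        }
}

static void *fake_bio_end_io(void *arg)
{
        drop_ref(arg);                  /* one bio finished */
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, fake_bio_end_io, &dio);
        drop_ref(&dio);                 /* submitter drops its own ref */

        pthread_mutex_lock(&dio.lock);
        while (!dio.completed)
                pthread_cond_wait(&dio.done, &dio.lock);
        pthread_mutex_unlock(&dio.lock);

        pthread_join(t, NULL);
        printf("dio complete\n");
        return 0;
}
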
185 iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
195 bio->bi_private = dio;
201 iomap_dio_submit_bio(dio, iomap, bio, pos);
206 struct iomap_dio *dio, struct iomap *iomap)
210 unsigned int align = iov_iter_alignment(dio->submit.iter);
222 dio->flags |= IOMAP_DIO_UNWRITTEN;
227 dio->flags |= IOMAP_DIO_COW;
240 (dio->flags & IOMAP_DIO_WRITE_FUA) &&
250 orig_count = iov_iter_count(dio->submit.iter);
251 iov_iter_truncate(dio->submit.iter, length);
253 nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES);
263 iomap_dio_zero(dio, iomap, pos - pad, pad);
268 if (dio->error) {
269 iov_iter_revert(dio->submit.iter, copied);
277 bio->bi_write_hint = dio->iocb->ki_hint;
278 bio->bi_ioprio = dio->iocb->ki_ioprio;
279 bio->bi_private = dio;
282 ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
295 if (dio->flags & IOMAP_DIO_WRITE) {
300 dio->flags &= ~IOMAP_DIO_WRITE_FUA;
304 if (dio->flags & IOMAP_DIO_DIRTY)
308 dio->size += n;
311 nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES);
312 iomap_dio_submit_bio(dio, iomap, bio, pos);
324 ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
328 iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
332 iov_iter_reexpand(dio->submit.iter, orig_count - copied);
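
iomap_dio_bio_actor() is the real I/O path: it rejects buffers and offsets not aligned to the device, temporarily truncates the iter to the current mapping (re-expanding it afterwards), and clears IOMAP_DIO_WRITE_FUA the moment any bio cannot go out as FUA. Writes into unwritten or freshly allocated extents must also zero the sub-block regions around the user data via iomap_dio_zero(); that head/tail pad arithmetic, extracted into a runnable sketch with invented values:

/*
 * Head/tail zeroing arithmetic used around iomap_dio_zero(): a write
 * into an unwritten or newly allocated extent zeroes the sub-block span
 * before and after the user's data.  Values invented for illustration.
 */
#include <stdio.h>

int main(void)
{
        unsigned int fs_block_size = 4096;
        unsigned int pad;
        long long pos = 6144;           /* write start, not block aligned */

        /* zero from the start of the block up to pos */
        pad = pos & (fs_block_size - 1);
        if (pad)
                printf("zero head: [%lld, %lld)\n", pos - pad, pos);

        pos = 10340;                    /* end of the write */

        /* zero from the end of the write to the end of its block */
        pad = pos & (fs_block_size - 1);
        if (pad)
                printf("zero tail: [%lld, %lld)\n",
                       pos, pos + fs_block_size - pad);
        return 0;
}
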
339 iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
341 length = iov_iter_zero(length, dio->submit.iter);
342 dio->size += length;
348 struct iomap_dio *dio, struct iomap *iomap)
350 struct iov_iter *iter = dio->submit.iter;
355 if (dio->flags & IOMAP_DIO_WRITE) {
369 dio->size += copied;
377 struct iomap_dio *dio = data;
381 if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
383 return iomap_dio_hole_actor(length, dio);
385 if (!(dio->flags & IOMAP_DIO_WRITE))
386 return iomap_dio_hole_actor(length, dio);
387 return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
389 return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
391 return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
400 dio->iocb->ki_filp, current->comm);
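
iomap_dio_actor() is just a dispatcher on the extent type the filesystem returned: holes are an error for writes and a zero-fill for reads, unwritten extents read as zeroes but write as real I/O, mapped extents always issue bios, and inline data is copied directly. The ki_filp/current->comm arguments on the last matched line look like a ratelimited warning for a delalloc extent seen during direct I/O (a collision with buffered writes); the sketch includes that case on that assumption. Simplified enum-and-switch rendering, not kernel code:

/*
 * Shape of the iomap_dio_actor() dispatch: the extent type returned by
 * the filesystem picks the strategy.  The delalloc case is an assumption
 * based on the warning's arguments in the listing.
 */
#include <stdio.h>

enum extent_type { EXT_HOLE, EXT_DELALLOC, EXT_UNWRITTEN, EXT_MAPPED,
                   EXT_INLINE };

static const char *dio_strategy(enum extent_type type, int is_write)
{
        switch (type) {
        case EXT_HOLE:
                /* a write mapped to a hole is a filesystem bug */
                return is_write ? "error (-EIO)" : "zero-fill the iter";
        case EXT_DELALLOC:
                /* collision with buffered writes: warn and fail */
                return "error (stale delalloc mapping)";
        case EXT_UNWRITTEN:
                /* unwritten extents read as zeroes; writes do real I/O */
                return is_write ? "issue bios" : "zero-fill the iter";
        case EXT_MAPPED:
                return "issue bios";
        case EXT_INLINE:
                return "copy to/from the inode's inline data";
        }
        return "error (-EIO)";
}

int main(void)
{
        printf("read of unwritten extent: %s\n",
               dio_strategy(EXT_UNWRITTEN, 0));
        return 0;
}
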
432 struct iomap_dio *dio;
440 dio = kmalloc(sizeof(*dio), GFP_KERNEL);
441 if (!dio)
444 dio->iocb = iocb;
445 atomic_set(&dio->ref, 1);
446 dio->size = 0;
447 dio->i_size = i_size_read(inode);
448 dio->dops = dops;
449 dio->error = 0;
450 dio->flags = 0;
452 dio->submit.iter = iter;
453 dio->submit.waiter = current;
454 dio->submit.cookie = BLK_QC_T_NONE;
455 dio->submit.last_queue = NULL;
458 if (pos >= dio->i_size)
462 dio->flags |= IOMAP_DIO_DIRTY;
465 dio->flags |= IOMAP_DIO_WRITE;
469 dio->flags |= IOMAP_DIO_NEED_SYNC;
478 dio->flags |= IOMAP_DIO_WRITE_FUA;
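
__iomap_dio_rw() allocates the dio, takes the submitter's initial reference, and derives dio->flags from the iocb and iter: a read starting at or past i_size returns immediately, reads into user-backed iters need IOMAP_DIO_DIRTY so their pages are dirtied on completion, O_(D)SYNC writes set IOMAP_DIO_NEED_SYNC, and FUA is attempted only for datasync-only writes (as I read the surrounding source, the test is IOCB_DSYNC without IOCB_SYNC, since a full O_SYNC write needs metadata writeback anyway). A sketch of that derivation; the flag names mirror IOMAP_DIO_* but are redefined locally:

/*
 * Sketch of the dio->flags derivation in __iomap_dio_rw().  Local flag
 * definitions; the exact FUA condition is my reading of the source.
 */
#include <stdbool.h>
#include <stdio.h>

#define DIO_DIRTY       (1u << 0)  /* read to user pages: dirty them */
#define DIO_WRITE       (1u << 1)
#define DIO_NEED_SYNC   (1u << 2)  /* O_(D)SYNC: sync on completion */
#define DIO_WRITE_FUA   (1u << 3)  /* try FUA to elide the cache flush */

static unsigned int dio_setup_flags(bool is_write, bool user_backed,
                                    bool is_dsync, bool is_sync)
{
        unsigned int flags = 0;

        if (!is_write) {
                if (user_backed)
                        flags |= DIO_DIRTY;
                return flags;
        }
        flags |= DIO_WRITE;
        if (is_dsync)
                flags |= DIO_NEED_SYNC;
        /* FUA only helps datasync-only writes; full O_SYNC needs
         * metadata writeback regardless, so the flush stays */
        if (is_dsync && !is_sync)
                flags |= DIO_WRITE_FUA;
        return flags;
}

int main(void)
{
        printf("O_DSYNC write flags: %#x\n",
               dio_setup_flags(true, true, true, false));
        return 0;
}
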
517 ret = iomap_apply(inode, pos, count, flags, ops, dio,
529 if (iov_iter_rw(iter) == READ && pos >= dio->i_size) {
535 iov_iter_revert(iter, pos - dio->i_size);
542 iomap_dio_set_error(dio, ret);
548 if (dio->flags & IOMAP_DIO_WRITE_FUA)
549 dio->flags &= ~IOMAP_DIO_NEED_SYNC;
551 WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie);
552 WRITE_ONCE(iocb->private, dio->submit.last_queue);
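
The submission loop hands the remaining range to iomap_apply(), which maps one extent at a time and feeds it to iomap_dio_actor(); any error is recorded first-error-wins via iomap_dio_set_error(). If the loop walked a read past EOF, the excess is reverted from the iter; if every write really went out as FUA, IOMAP_DIO_NEED_SYNC is dropped because no cache flush is owed; and the last cookie/queue pair is stashed in the iocb for later polling. The loop's shape, with fake_map() and fake_actor() as invented stand-ins for the filesystem's mapping callback and the actor:

/*
 * Simplified shape of the submission loop in __iomap_dio_rw().  Extent
 * geometry is invented; real mappings come from ops->iomap_begin.
 */
#include <stdio.h>

/* bytes the filesystem can map contiguously at pos (fake 8K extents) */
static long long fake_map(long long pos, long long count)
{
        long long extent = 8192 - (pos % 8192);

        return count < extent ? count : extent;
}

static long long fake_actor(long long pos, long long length)
{
        printf("actor: pos=%lld len=%lld\n", pos, length);
        return length;          /* bytes consumed, or a negative errno */
}

int main(void)
{
        long long pos = 4096, count = 20000, ret;

        do {
                ret = fake_actor(pos, fake_map(pos, count));
                if (ret <= 0)
                        break;  /* record the error and stop submitting */
                pos += ret;
                count -= ret;
        } while (count > 0);
        return 0;
}
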
556 * might be the last reference to the dio. There are three different
560 * the dio ourselves.
562 * iocb, we must never touch the dio after the decrement, the
569 dio->wait_for_completion = wait_for_completion;
570 if (!atomic_dec_and_test(&dio->ref)) {
576 if (!READ_ONCE(dio->submit.waiter))
580 !dio->submit.last_queue ||
581 !blk_poll(dio->submit.last_queue,
582 dio->submit.cookie, true))
588 return dio;
591 kfree(dio);
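
The comment fragments above belong to the three-outcome analysis in __iomap_dio_rw(): (a) the submitter's drop is the last reference and it completes the dio itself, (b) an async iocb means the dio must never be touched after the drop, (c) a synchronous iocb parks until the completion side clears submit.waiter. With IOCB_HIPRI the parked task busy-polls the block queue via blk_poll() instead of sleeping. A loose userspace analogy of that poll-or-sleep wait; poll_once() stands in for blk_poll() and an atomic flag for dio->submit.waiter:

/*
 * Poll-or-sleep wait at the end of __iomap_dio_rw(), userspace analogy.
 * The completion side clears waiter_present, as the end_io handler
 * clears dio->submit.waiter.
 */
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool waiter_present = true;

/* stand-in for blk_poll(): returns true if it reaped a completion */
static bool poll_once(void)
{
        /* pretend the device finished our request on this poll pass */
        atomic_store(&waiter_present, false);
        return true;
}

int main(void)
{
        bool hipri = true;      /* IOCB_HIPRI: caller asked to poll */

        for (;;) {
                if (!atomic_load(&waiter_present))
                        break;  /* completion side cleared the waiter */
                if (!hipri || !poll_once())
                        sched_yield();  /* ~ io_schedule() */
        }
        printf("dio complete, safe to touch again\n");
        return 0;
}
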
603 struct iomap_dio *dio;
605 dio = __iomap_dio_rw(iocb, iter, ops, dops, wait_for_completion);
606 if (IS_ERR_OR_NULL(dio))
607 return PTR_ERR_OR_ZERO(dio);
608 return iomap_dio_complete(dio);
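
__iomap_dio_rw() returns either a dio pointer (for callers that want to finish it themselves), NULL when there was nothing to do, or an ERR_PTR-encoded errno; the iomap_dio_rw() wrapper collapses all three into a plain ssize_t, with IS_ERR_OR_NULL() catching the non-dio cases and PTR_ERR_OR_ZERO() mapping NULL to 0 and an error pointer to its errno. A minimal userspace re-creation of that pointer-encoding convention (these helpers mirror the kernel's, re-implemented here to be self-contained):

/*
 * The kernel's error-pointer convention: small negative errnos live at
 * the top of the address space, so one return value can carry a valid
 * pointer, NULL, or an error code.
 */
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }

static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static inline int IS_ERR_OR_NULL(const void *ptr)
{
        return !ptr || IS_ERR(ptr);
}

static inline long PTR_ERR_OR_ZERO(const void *ptr)
{
        return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}

int main(void)
{
        void *dio = ERR_PTR(-22);       /* __iomap_dio_rw failed: -EINVAL */

        if (IS_ERR_OR_NULL(dio))
                printf("ret = %ld\n", PTR_ERR_OR_ZERO(dio)); /* -22 */
        return 0;
}
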