Lines Matching refs: dio (fs/direct-io.c)

61  * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
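A minimal sketch of the conversion this comment describes, under the assumption that blkfactor is the log2 ratio between the fs block size and the dio block size (the two helper names are hypothetical):

    /* fs-blocksize quantities are converted up, dio-block quantities down */
    static inline sector_t fs_to_dio_blocks(sector_t fs_blocks, unsigned int blkfactor)
    {
            return fs_blocks << blkfactor;          /* up by dio->blkfactor */
    }

    static inline sector_t dio_to_fs_blocks(sector_t dio_blocks, unsigned int blkfactor)
    {
            return dio_blocks >> blkfactor;         /* down by dio->blkfactor */
    }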
96 * Deferred addition of a page to the dio. These variables are
117 struct dio {
167 static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
169 struct page **pages = dio->pages;
170 const enum req_op dio_op = dio->opf & REQ_OP_MASK;
182 if (dio->page_errors == 0)
183 dio->page_errors = ret;
184 dio->pages[0] = ZERO_PAGE(0);
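Lines 182-184 are the memory-fault fallback in dio_refill_pages(): if page extraction fails on a write whose blocks the filesystem has already mapped, the first error is saved for completion time and the shared zero page is substituted, so the mapped blocks are zeroed rather than left holding stale data. A condensed sketch of that branch (the guard condition is paraphrased, not quoted):

    if (ret < 0 && sdio->blocks_available && dio_op == REQ_OP_WRITE) {
            if (dio->page_errors == 0)
                    dio->page_errors = ret; /* reported by dio_complete() */
            dio->pages[0] = ZERO_PAGE(0);   /* never fails, needs no unpin */
            sdio->head = 0;
            sdio->tail = 1;
            return 0;
    }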
204 * buffered inside the dio so that we can call iov_iter_extract_pages()
208 static inline struct page *dio_get_page(struct dio *dio,
214 ret = dio_refill_pages(dio, sdio);
219 return dio->pages[sdio->head];
222 static void dio_pin_page(struct dio *dio, struct page *page)
224 if (dio->is_pinned)
228 static void dio_unpin_page(struct dio *dio, struct page *page)
230 if (dio->is_pinned)
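The bodies hidden by the search are one-liners guarded by dio->is_pinned, which records whether the iterator's pages were extracted with a pin. A hedged reconstruction (folio_add_pin() and unpin_user_page() are assumed from recent kernels and may differ by version):

    static void dio_pin_page(struct dio *dio, struct page *page)
    {
            if (dio->is_pinned)             /* only user-backed iterators pin */
                    folio_add_pin(page_folio(page));
    }

    static void dio_unpin_page(struct dio *dio, struct page *page)
    {
            if (dio->is_pinned)
                    unpin_user_page(page);
    }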
245 static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
247 const enum req_op dio_op = dio->opf & REQ_OP_MASK;
248 loff_t offset = dio->iocb->ki_pos;
261 if (dio->result) {
262 transferred = dio->result;
266 ((offset + transferred) > dio->i_size))
267 transferred = dio->i_size - offset;
274 ret = dio->page_errors;
276 ret = dio->io_error;
280 if (dio->end_io) {
282 err = dio->end_io(dio->iocb, offset, ret, dio->private);
294 * And this page cache invalidation has to be after dio->end_io(), as
301 kiocb_invalidate_post_direct_write(dio->iocb, ret);
303 inode_dio_end(dio->inode);
311 dio->iocb->ki_pos += transferred;
314 ret = generic_write_sync(dio->iocb, ret);
315 dio->iocb->ki_complete(dio->iocb, ret);
318 kmem_cache_free(dio_cache, dio);
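Pulling the dio_complete() fragments together: the error precedence at lines 274-276 means page-extraction faults win over per-bio I/O errors, which win over the byte count. A condensed sketch:

    if (ret == 0)
            ret = dio->page_errors;         /* faults while extracting pages */
    if (ret == 0)
            ret = dio->io_error;            /* first error from dio_bio_complete() */
    if (ret == 0)
            ret = transferred;              /* success: bytes moved */

Only then does ->end_io run, and the invalidation at line 301 is deliberately ordered after it (see line 294) because filesystems may convert unwritten extents to real allocations in their end_io hook.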
324 struct dio *dio = container_of(work, struct dio, complete_work);
326 dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
329 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
336 struct dio *dio = bio->bi_private;
337 const enum req_op dio_op = dio->opf & REQ_OP_MASK;
343 dio_bio_complete(dio, bio);
345 spin_lock_irqsave(&dio->bio_lock, flags);
346 remaining = --dio->refcount;
347 if (remaining == 1 && dio->waiter)
348 wake_up_process(dio->waiter);
349 spin_unlock_irqrestore(&dio->bio_lock, flags);
360 if (dio->result)
361 defer_completion = dio->defer_completion ||
363 dio->inode->i_mapping->nrpages);
365 INIT_WORK(&dio->complete_work, dio_aio_complete_work);
366 queue_work(dio->inode->i_sb->s_dio_done_wq,
367 &dio->complete_work);
369 dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
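These fragments are the interrupt-side tail of async completion: once the refcount drop shows no bios remain, the dio either completes inline or, when invalidation or filesystem work may be needed, is punted to the superblock's s_dio_done_wq. A condensed sketch of that tail:

    if (remaining == 0) {
            if (defer_completion) {
                    INIT_WORK(&dio->complete_work, dio_aio_complete_work);
                    queue_work(dio->inode->i_sb->s_dio_done_wq,
                               &dio->complete_work);  /* finish in process context */
            } else {
                    dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
            }
    }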
378 * During I/O bi_private points at the dio. After I/O, bi_private is used to
379 * implement a singly-linked list of completed BIOs, at dio->bio_list.
383 struct dio *dio = bio->bi_private;
386 spin_lock_irqsave(&dio->bio_lock, flags);
387 bio->bi_private = dio->bio_list;
388 dio->bio_list = bio;
389 if (--dio->refcount == 1 && dio->waiter)
390 wake_up_process(dio->waiter);
391 spin_unlock_irqrestore(&dio->bio_lock, flags);
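Both halves of the bi_private trick described at lines 378-379 appear in the fragments; side by side, as a sketch (both run under dio->bio_lock):

    /* producer, bio ->end_io: push the completed bio */
    bio->bi_private = dio->bio_list;
    dio->bio_list = bio;

    /* consumer, dio_await_one(): pop one completed bio */
    bio = dio->bio_list;
    dio->bio_list = bio->bi_private;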
395 dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
405 bio = bio_alloc(bdev, nr_vecs, dio->opf, GFP_KERNEL);
407 if (dio->is_async)
411 if (dio->is_pinned)
422 * bios hold a dio reference between submit_bio and ->end_io.
424 static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
426 const enum req_op dio_op = dio->opf & REQ_OP_MASK;
430 bio->bi_private = dio;
432 spin_lock_irqsave(&dio->bio_lock, flags);
433 dio->refcount++;
434 spin_unlock_irqrestore(&dio->bio_lock, flags);
436 if (dio->is_async && dio_op == REQ_OP_READ && dio->should_dirty)
439 dio->bio_disk = bio->bi_bdev->bd_disk;
451 static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
453 if (dio->is_pinned)
454 unpin_user_pages(dio->pages + sdio->head,
462 * all bios have been issued so that dio->refcount can only decrease. This
463 * requires that the caller hold a reference on the dio.
465 static struct bio *dio_await_one(struct dio *dio)
470 spin_lock_irqsave(&dio->bio_lock, flags);
478 while (dio->refcount > 1 && dio->bio_list == NULL) {
480 dio->waiter = current;
481 spin_unlock_irqrestore(&dio->bio_lock, flags);
484 spin_lock_irqsave(&dio->bio_lock, flags);
485 dio->waiter = NULL;
487 if (dio->bio_list) {
488 bio = dio->bio_list;
489 dio->bio_list = bio->bi_private;
491 spin_unlock_irqrestore(&dio->bio_lock, flags);
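The wait loop's elided middle parks the task before releasing the lock, then resleeps until either a completed bio arrives or only the submitter's reference remains. A hedged reconstruction of lines 478-485 (blk_io_schedule() assumed for recent kernels; older ones use io_schedule()):

    while (dio->refcount > 1 && dio->bio_list == NULL) {
            __set_current_state(TASK_UNINTERRUPTIBLE);
            dio->waiter = current;
            spin_unlock_irqrestore(&dio->bio_lock, flags);
            blk_io_schedule();              /* woken by the end_io handlers */
            spin_lock_irqsave(&dio->bio_lock, flags);
            dio->waiter = NULL;
    }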
498 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
501 const enum req_op dio_op = dio->opf & REQ_OP_MASK;
502 bool should_dirty = dio_op == REQ_OP_READ && dio->should_dirty;
506 dio->io_error = -EAGAIN;
508 dio->io_error = -EIO;
511 if (dio->is_async && should_dirty) {
524 * errors are propagated through dio->io_error and should be checked via
527 static void dio_await_completion(struct dio *dio)
531 bio = dio_await_one(dio);
533 dio_bio_complete(dio, bio);
544 static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
549 while (dio->bio_list) {
554 spin_lock_irqsave(&dio->bio_lock, flags);
555 bio = dio->bio_list;
556 dio->bio_list = bio->bi_private;
557 spin_unlock_irqrestore(&dio->bio_lock, flags);
558 ret2 = blk_status_to_errno(dio_bio_complete(dio, bio));
567 static int dio_set_defer_completion(struct dio *dio)
569 struct super_block *sb = dio->inode->i_sb;
571 if (dio->defer_completion)
573 dio->defer_completion = true;
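The rest of dio_set_defer_completion() only ensures the per-superblock completion workqueue exists before the flag takes effect; a hedged reconstruction:

    static int dio_set_defer_completion(struct dio *dio)
    {
            struct super_block *sb = dio->inode->i_sb;

            if (dio->defer_completion)
                    return 0;                       /* already armed */
            dio->defer_completion = true;
            if (!sb->s_dio_done_wq)                 /* allocated on first use */
                    return sb_init_dio_done_wq(sb);
            return 0;
    }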
602 static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
605 const enum req_op dio_op = dio->opf & REQ_OP_MASK;
618 ret = dio->page_errors;
641 if (dio->flags & DIO_SKIP_HOLES) {
642 i_size = i_size_read(dio->inode);
647 ret = (*sdio->get_block)(dio->inode, fs_startblk,
651 dio->private = map_bh->b_private;
654 ret = dio_set_defer_completion(dio);
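The call at line 647 is the one place this code asks the filesystem anything; the contract, sketched around the fragment (semantics paraphrased from the DIO_SKIP_HOLES lines above):

    /* map_bh is in/out: b_size requests up to fs_count blocks starting at
     * fs_startblk; on return, buffer_mapped() says whether they exist and
     * b_bdev/b_blocknr/b_size describe the extent. 'create' is nonzero
     * only for writes, and DIO_SKIP_HOLES clears it again for blocks
     * inside i_size so such writes fall back to buffered I/O rather than
     * allocate over a hole. */
    ret = (*sdio->get_block)(dio->inode, fs_startblk, map_bh, create);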
662 static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
668 ret = dio_bio_reap(dio, sdio);
674 dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
687 static inline int dio_bio_add_page(struct dio *dio, struct dio_submit *sdio)
699 dio_pin_page(dio, sdio->cur_page);
717 * dio, and for dropping the refcount which came from that presence.
719 static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
745 dio_bio_submit(dio, sdio);
749 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
754 if (dio_bio_add_page(dio, sdio) != 0) {
755 dio_bio_submit(dio, sdio);
756 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
758 ret = dio_bio_add_page(dio, sdio);
777 * private part of the dio structure. If possible, we just expand the IO
781 * page to the dio instead.
784 submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
788 const enum req_op dio_op = dio->opf & REQ_OP_MASK;
800 * Can we just grow the current page's presence in the dio?
814 ret = dio_send_cur_page(dio, sdio, map_bh);
815 dio_unpin_page(dio, sdio->cur_page);
821 dio_pin_page(dio, page); /* It is in dio */
833 ret = dio_send_cur_page(dio, sdio, map_bh);
835 dio_bio_submit(dio, sdio);
836 dio_unpin_page(dio, sdio->cur_page);
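The question at line 800 ("can we just grow the current page's presence?") is a three-way contiguity test: same page, byte-contiguous, and block-contiguous. A condensed sketch using the dio_submit fields seen above:

    if (sdio->cur_page == page &&
        sdio->cur_page_offset + sdio->cur_page_len == offset &&
        sdio->cur_page_block +
            (sdio->cur_page_len >> sdio->blkbits) == blocknr) {
            sdio->cur_page_len += len;      /* extend in place, no bio churn */
            goto out;
    }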
851 static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
879 if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
902 static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
905 const enum req_op dio_op = dio->opf & REQ_OP_MASK;
914 page = dio_get_page(dio, sdio);
935 ret = get_more_blocks(dio, sdio, map_bh);
937 dio_unpin_page(dio, page);
982 dio_unpin_page(dio, page);
990 i_size_aligned = ALIGN(i_size_read(dio->inode),
995 dio_unpin_page(dio, page);
1001 dio->result += 1 << blkbits;
1011 dio_zero_block(dio, sdio, 0, map_bh);
1029 ret = submit_page_section(dio, sdio, page,
1035 dio_unpin_page(dio, page);
1042 dio->result += this_chunk_bytes;
1051 dio_unpin_page(dio, page);
1057 static inline int drop_refcount(struct dio *dio)
1073 spin_lock_irqsave(&dio->bio_lock, flags);
1074 ret2 = --dio->refcount;
1075 spin_unlock_irqrestore(&dio->bio_lock, flags);
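A design note on the drop_refcount() fragments: refcount is a plain int under bio_lock rather than an atomic_t, so each path can decrement and act on the remaining count in one critical section, the end_io handlers waking the waiter when it reaches 1 and the last dropper (here or in interrupt context) running dio_complete() at 0.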
1116 struct dio *dio;
1131 dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
1132 if (!dio)
1139 memset(dio, 0, offsetof(struct dio, pages));
1141 dio->flags = flags;
1142 if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
1146 dio->is_pinned = iov_iter_extract_will_pin(iter);
1149 dio->i_size = i_size_read(inode);
1150 if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
1163 if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
1178 dio->is_async = false;
1180 dio->is_async = false;
1182 dio->is_async = true;
1184 dio->inode = inode;
1186 dio->opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
1188 dio->opf |= REQ_NOWAIT;
1190 dio->opf = REQ_OP_READ;
1197 if (dio->is_async && iov_iter_rw(iter) == WRITE) {
1200 retval = dio_set_defer_completion(dio);
1201 else if (!dio->inode->i_sb->s_dio_done_wq) {
1207 retval = sb_init_dio_done_wq(dio->inode->i_sb);
1224 dio->end_io = end_io;
1228 dio->iocb = iocb;
1230 spin_lock_init(&dio->bio_lock);
1231 dio->refcount = 1;
1233 dio->should_dirty = user_backed_iter(iter) && iov_iter_rw(iter) == READ;
1248 retval = do_direct_IO(dio, &sdio, &map_bh);
1250 dio_cleanup(dio, &sdio);
1263 dio_zero_block(dio, &sdio, 1, &map_bh);
1268 ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
1271 dio_unpin_page(dio, sdio.cur_page);
1275 dio_bio_submit(dio, &sdio);
1283 dio_cleanup(dio, &sdio);
1290 if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
1291 inode_unlock(dio->inode);
1301 if (dio->is_async && retval == 0 && dio->result &&
1302 (iov_iter_rw(iter) == READ || dio->result == count))
1305 dio_await_completion(dio);
1307 if (drop_refcount(dio) == 0) {
1308 retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE);
1315 if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ)
1318 kmem_cache_free(dio_cache, dio);
1325 dio_cache = KMEM_CACHE(dio, SLAB_PANIC);
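For orientation, filesystems normally reach this machinery through the blockdev_direct_IO() wrapper from their ->direct_IO address_space operation. A hypothetical caller (myfs and myfs_get_block are invented names; ext2 is the classic in-tree user of this pattern):

    static ssize_t myfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
    {
            struct inode *inode = file_inode(iocb->ki_filp);

            /* wraps __blockdev_direct_IO with the inode's block device
             * and the default DIO_LOCKING | DIO_SKIP_HOLES flags */
            return blockdev_direct_IO(iocb, inode, iter, myfs_get_block);
    }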