Lines matching refs: dio (fs/direct-io.c)

61  * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
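
The comment at line 61 refers to dio->blkfactor, the shift between the filesystem block size and the device block size; block-relative quantities are scaled up or down by shifting by that factor. Below is a minimal userspace sketch of that conversion only; fs_blkbits, dev_blkbits and blkfactor here are illustrative names, not the kernel's fields.

    #include <stdio.h>

    int main(void)
    {
        unsigned fs_blkbits  = 12;                    /* 4096-byte fs block */
        unsigned dev_blkbits = 9;                     /* 512-byte sector    */
        unsigned blkfactor   = fs_blkbits - dev_blkbits;

        unsigned long long fs_block  = 100;           /* fs-relative block  */
        unsigned long long dev_block = fs_block << blkfactor;

        printf("fs block %llu == device block %llu (blkfactor %u)\n",
               fs_block, dev_block, blkfactor);
        /* device-block quantities scale back down by the same shift */
        printf("device block %llu >> %u == fs block %llu\n",
               dev_block, blkfactor, dev_block >> blkfactor);
        return 0;
    }
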
97 * Deferred addition of a page to the dio. These variables are
118 struct dio {
169 static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
173 ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
176 if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) {
183 if (dio->page_errors == 0)
184 dio->page_errors = ret;
186 dio->pages[0] = page;
207 * buffered inside the dio so that we can call get_user_pages() against a
211 static inline struct page *dio_get_page(struct dio *dio,
217 ret = dio_refill_pages(dio, sdio);
222 return dio->pages[sdio->head];
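
As the comment at line 207 and the calls at lines 173 and 222 suggest, dio_refill_pages() and dio_get_page() form a small pipeline: page pointers are pulled from the iov_iter in batches of up to DIO_PAGES so the relatively expensive iov_iter_get_pages() call is amortised, and pages are then handed out one at a time from dio->pages[sdio->head]. A minimal userspace sketch of that batch-and-drain pattern follows; fetch_batch() stands in for iov_iter_get_pages() and every name is illustrative.

    #include <stdio.h>

    #define DIO_PAGES 64   /* same batching idea as the kernel constant */

    /* Stand-in for iov_iter_get_pages(): fill up to 'max' slots. */
    static int fetch_batch(int *pages, int max, int *next_page, int total)
    {
        int n = 0;
        while (n < max && *next_page < total)
            pages[n++] = (*next_page)++;
        return n;                       /* number of pages obtained */
    }

    int main(void)
    {
        int pages[DIO_PAGES];
        int head = 0, tail = 0;         /* like sdio->head / sdio->tail */
        int next_page = 0, total = 150, served = 0;

        while (served < total) {
            if (head == tail) {         /* batch exhausted: refill */
                tail = fetch_batch(pages, DIO_PAGES, &next_page, total);
                head = 0;
            }
            served++;                   /* "use" pages[head] */
            head++;
        }
        printf("served %d pages in batches of up to %d\n", served, DIO_PAGES);
        return 0;
    }
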
236 static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
238 loff_t offset = dio->iocb->ki_pos;
251 if (dio->result) {
252 transferred = dio->result;
255 if ((dio->op == REQ_OP_READ) &&
256 ((offset + transferred) > dio->i_size))
257 transferred = dio->i_size - offset;
264 ret = dio->page_errors;
266 ret = dio->io_error;
270 if (dio->end_io) {
272 err = dio->end_io(dio->iocb, offset, ret, dio->private);
284 * And this page cache invalidation has to be after dio->end_io(), as
290 ret > 0 && dio->op == REQ_OP_WRITE &&
291 dio->inode->i_mapping->nrpages) {
292 err = invalidate_inode_pages2_range(dio->inode->i_mapping,
296 dio_warn_stale_pagecache(dio->iocb->ki_filp);
299 inode_dio_end(dio->inode);
307 dio->iocb->ki_pos += transferred;
309 if (ret > 0 && dio->op == REQ_OP_WRITE)
310 ret = generic_write_sync(dio->iocb, ret);
311 dio->iocb->ki_complete(dio->iocb, ret, 0);
314 kmem_cache_free(dio_cache, dio);
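
Lines 251-257 show dio_complete() trimming the byte count reported for a READ that was submitted past end-of-file: the transfer is clamped to dio->i_size minus the starting offset before ->end_io and the page-cache invalidation ordering noted at line 284. A small sketch of just that arithmetic, with made-up values:

    #include <stdio.h>

    int main(void)
    {
        long long offset      = 8192;   /* iocb->ki_pos at submission  */
        long long i_size      = 10000;  /* file size sampled at submit */
        long long transferred = 4096;   /* dio->result: bytes mapped   */

        if (offset + transferred > i_size)
            transferred = i_size - offset;   /* report only what exists */

        printf("reported transfer: %lld bytes\n", transferred);  /* 1808 */
        return 0;
    }
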
320 struct dio *dio = container_of(work, struct dio, complete_work);
322 dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
325 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
332 struct dio *dio = bio->bi_private;
338 dio_bio_complete(dio, bio);
340 spin_lock_irqsave(&dio->bio_lock, flags);
341 remaining = --dio->refcount;
342 if (remaining == 1 && dio->waiter)
343 wake_up_process(dio->waiter);
344 spin_unlock_irqrestore(&dio->bio_lock, flags);
355 if (dio->result)
356 defer_completion = dio->defer_completion ||
357 (dio->op == REQ_OP_WRITE &&
358 dio->inode->i_mapping->nrpages);
360 INIT_WORK(&dio->complete_work, dio_aio_complete_work);
361 queue_work(dio->inode->i_sb->s_dio_done_wq,
362 &dio->complete_work);
364 dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
373 * During I/O bi_private points at the dio. After I/O, bi_private is used to
374 * implement a singly-linked list of completed BIOs, at dio->bio_list.
378 struct dio *dio = bio->bi_private;
381 spin_lock_irqsave(&dio->bio_lock, flags);
382 bio->bi_private = dio->bio_list;
383 dio->bio_list = bio;
384 if (--dio->refcount == 1 && dio->waiter)
385 wake_up_process(dio->waiter);
386 spin_unlock_irqrestore(&dio->bio_lock, flags);
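
The comment at lines 373-374 describes a pointer-reuse trick: while a bio is in flight its bi_private points at the dio, and once it completes the same field becomes the "next" link of a singly-linked list of completed bios headed at dio->bio_list (lines 382-383). A minimal userspace model of that reuse; struct fake_bio and struct fake_dio are invented stand-ins.

    #include <stdio.h>

    struct fake_bio {
        void *bi_private;   /* dio while in flight, list link afterwards */
        int   id;
    };

    struct fake_dio {
        struct fake_bio *bio_list;   /* completed, not yet reaped */
    };

    static void bio_completed(struct fake_dio *dio, struct fake_bio *bio)
    {
        bio->bi_private = dio->bio_list;   /* push onto completion list */
        dio->bio_list = bio;
    }

    int main(void)
    {
        struct fake_dio dio = { .bio_list = NULL };
        struct fake_bio a = { .bi_private = &dio, .id = 1 };
        struct fake_bio b = { .bi_private = &dio, .id = 2 };

        bio_completed(&dio, &a);
        bio_completed(&dio, &b);

        /* Reap in LIFO order, the way dio_await_one() pops the list. */
        for (struct fake_bio *bio = dio.bio_list; bio; bio = bio->bi_private)
            printf("reaped bio %d\n", bio->id);
        return 0;
    }
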
390 dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
404 bio_set_op_attrs(bio, dio->op, dio->op_flags);
405 if (dio->is_async)
410 bio->bi_write_hint = dio->iocb->ki_hint;
421 * bios hold a dio reference between submit_bio and ->end_io.
423 static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
428 bio->bi_private = dio;
430 spin_lock_irqsave(&dio->bio_lock, flags);
431 dio->refcount++;
432 spin_unlock_irqrestore(&dio->bio_lock, flags);
434 if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
437 dio->bio_disk = bio->bi_disk;
440 sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
441 dio->bio_cookie = BLK_QC_T_NONE;
443 dio->bio_cookie = submit_bio(bio);
453 static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
456 put_page(dio->pages[sdio->head++]);
462 * all bios have been issued so that dio->refcount can only decrease. This
463 requires that the caller hold a reference on the dio.
465 static struct bio *dio_await_one(struct dio *dio)
470 spin_lock_irqsave(&dio->bio_lock, flags);
478 while (dio->refcount > 1 && dio->bio_list == NULL) {
480 dio->waiter = current;
481 spin_unlock_irqrestore(&dio->bio_lock, flags);
482 if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
483 !blk_poll(dio->bio_disk->queue, dio->bio_cookie, true))
486 spin_lock_irqsave(&dio->bio_lock, flags);
487 dio->waiter = NULL;
489 if (dio->bio_list) {
490 bio = dio->bio_list;
491 dio->bio_list = bio->bi_private;
493 spin_unlock_irqrestore(&dio->bio_lock, flags);
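
dio_await_one() (lines 465-493) is the consumer side of that list: per the comment at lines 462-463 it sleeps only while work is still outstanding (dio->refcount > 1, the caller itself holding one reference) and nothing has completed yet, then pops one completed bio. Below is a userspace sketch of the same wait/wake protocol, assuming a pthread mutex and condition variable in place of bio_lock and wake_up_process()/io_schedule(); all names are illustrative.

    #include <pthread.h>
    #include <stdio.h>

    struct item { struct item *next; int id; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;  /* ~bio_lock */
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static struct item *completed_list;                       /* ~bio_list */
    static int refcount = 1 + 3;       /* waiter + three in-flight items */

    static void *complete_one(void *arg)      /* ~dio_bio_end_io() */
    {
        struct item *it = arg;

        pthread_mutex_lock(&lock);
        it->next = completed_list;            /* push onto the list */
        completed_list = it;
        refcount--;
        pthread_cond_signal(&cond);           /* ~wake_up_process(waiter) */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    static struct item *await_one(void)       /* ~dio_await_one() */
    {
        struct item *it = NULL;

        pthread_mutex_lock(&lock);
        while (refcount > 1 && completed_list == NULL)
            pthread_cond_wait(&cond, &lock);
        if (completed_list) {
            it = completed_list;
            completed_list = it->next;
        }
        pthread_mutex_unlock(&lock);
        return it;
    }

    int main(void)
    {
        struct item items[3] = { { NULL, 1 }, { NULL, 2 }, { NULL, 3 } };
        pthread_t t[3];
        struct item *it;

        for (int i = 0; i < 3; i++)
            pthread_create(&t[i], NULL, complete_one, &items[i]);

        while ((it = await_one()) != NULL)    /* reap until all are done */
            printf("reaped item %d\n", it->id);

        for (int i = 0; i < 3; i++)
            pthread_join(t[i], NULL);
        return 0;
    }
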
500 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
503 bool should_dirty = dio->op == REQ_OP_READ && dio->should_dirty;
507 dio->io_error = -EAGAIN;
509 dio->io_error = -EIO;
512 if (dio->is_async && should_dirty) {
525 * errors are propagated through dio->io_error and should be propagated via
528 static void dio_await_completion(struct dio *dio)
532 bio = dio_await_one(dio);
534 dio_bio_complete(dio, bio);
545 static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
550 while (dio->bio_list) {
555 spin_lock_irqsave(&dio->bio_lock, flags);
556 bio = dio->bio_list;
557 dio->bio_list = bio->bi_private;
558 spin_unlock_irqrestore(&dio->bio_lock, flags);
559 ret2 = blk_status_to_errno(dio_bio_complete(dio, bio));
577 struct workqueue_struct *wq = alloc_workqueue("dio/%s",
592 static int dio_set_defer_completion(struct dio *dio)
594 struct super_block *sb = dio->inode->i_sb;
596 if (dio->defer_completion)
598 dio->defer_completion = true;
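
Lines 577 and 592-598 cover deferred completion: when the final processing of an async dio may need to sleep (for example to run ->end_io or invalidate the page cache), dio->defer_completion is set and dio_bio_end_aio() queues dio_aio_complete_work() on the per-superblock "dio/%s" workqueue (lines 360-362) instead of finishing in the bio completion path. A minimal userspace sketch of handing completion work to a helper thread, with pthread_create standing in for queue_work and an invented struct:

    #include <pthread.h>
    #include <stdio.h>

    struct completion_work {
        long long result;
        long long offset;
    };

    /* Plays the role of dio_aio_complete_work(): runs in a context where
     * it is safe to sleep, unlike the bio completion path. */
    static void *complete_work_fn(void *arg)
    {
        struct completion_work *w = arg;
        printf("deferred completion: %lld bytes at offset %lld\n",
               w->result, w->offset);
        return NULL;
    }

    int main(void)
    {
        struct completion_work w = { .result = 4096, .offset = 0 };
        pthread_t worker;

        /* queue_work(sb->s_dio_done_wq, ...) is modelled by spawning a
         * helper thread; the submitting context returns immediately. */
        pthread_create(&worker, NULL, complete_work_fn, &w);
        pthread_join(worker, NULL);
        return 0;
    }
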
627 static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
642 ret = dio->page_errors;
664 create = dio->op == REQ_OP_WRITE;
665 if (dio->flags & DIO_SKIP_HOLES) {
666 i_size = i_size_read(dio->inode);
671 ret = (*sdio->get_block)(dio->inode, fs_startblk,
675 dio->private = map_bh->b_private;
678 ret = dio_set_defer_completion(dio);
686 static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
692 ret = dio_bio_reap(dio, sdio);
698 dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
741 * dio, and for dropping the refcount which came from that presence.
743 static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
769 dio_bio_submit(dio, sdio);
773 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
779 dio_bio_submit(dio, sdio);
780 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
801 * private part of the dio structure. If possible, we just expand the IO
805 * page to the dio instead.
808 submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
815 if (dio->op == REQ_OP_WRITE) {
823 * Can we just grow the current page's presence in the dio?
837 ret = dio_send_cur_page(dio, sdio, map_bh);
844 get_page(page); /* It is in dio */
856 ret = dio_send_cur_page(dio, sdio, map_bh);
858 dio_bio_submit(dio, sdio);
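
submit_page_section() (lines 808-858) asks, per the comment at line 823, whether a new chunk can simply grow the page run already pending in the dio's cur_page state; if not, the pending run is sent with dio_send_cur_page() and a new run is started. A simplified userspace sketch of that merge-or-flush decision follows; the on-disk block contiguity check is omitted and every name is illustrative.

    #include <stdio.h>

    struct cur_page {
        int  page;       /* which page the pending run lives in    */
        int  offset;     /* byte offset of the run inside the page */
        int  len;        /* bytes accumulated so far               */
        int  valid;
    };

    static void send_cur_page(struct cur_page *cur)
    {
        if (cur->valid)
            printf("send page %d, offset %d, len %d\n",
                   cur->page, cur->offset, cur->len);
        cur->valid = 0;
    }

    static void submit_chunk(struct cur_page *cur, int page, int offset, int len)
    {
        if (cur->valid && page == cur->page &&
            offset == cur->offset + cur->len) {
            cur->len += len;                  /* grow the pending run   */
            return;
        }
        send_cur_page(cur);                   /* flush, then start anew */
        cur->page = page;
        cur->offset = offset;
        cur->len = len;
        cur->valid = 1;
    }

    int main(void)
    {
        struct cur_page cur = { 0 };

        submit_chunk(&cur, 1, 0,    512);     /* starts a run            */
        submit_chunk(&cur, 1, 512,  512);     /* contiguous: merged      */
        submit_chunk(&cur, 1, 1024, 512);     /* contiguous: merged      */
        submit_chunk(&cur, 2, 0,    512);     /* new page: flush + start */
        send_cur_page(&cur);                  /* flush the tail          */
        return 0;
    }
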
874 static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
902 if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
925 static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
936 page = dio_get_page(dio, sdio);
957 ret = get_more_blocks(dio, sdio, map_bh);
1003 if (dio->op == REQ_OP_WRITE) {
1012 i_size_aligned = ALIGN(i_size_read(dio->inode),
1023 dio->result += 1 << blkbits;
1033 dio_zero_block(dio, sdio, 0, map_bh);
1051 ret = submit_page_section(dio, sdio, page,
1064 dio->result += this_chunk_bytes;
1079 static inline int drop_refcount(struct dio *dio)
1095 spin_lock_irqsave(&dio->bio_lock, flags);
1096 ret2 = --dio->refcount;
1097 spin_unlock_irqrestore(&dio->bio_lock, flags);
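
drop_refcount() (lines 1079-1097) decrements dio->refcount under bio_lock; whichever of the submitting task and the final bio completion (which took its reference in dio_bio_submit(), lines 430-432) drops the last reference goes on to run dio_complete() and free the dio. A compact sketch of that last-one-out pattern, assuming a C11 atomic in place of the lock-protected counter:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int refcount = 2;        /* submitter + one in-flight bio */

    static void dio_complete_stub(const char *who)
    {
        printf("%s runs the final completion\n", who);
    }

    static void put_ref(const char *who)
    {
        /* fetch_sub returns the old value: 1 means we dropped the last ref */
        if (atomic_fetch_sub(&refcount, 1) == 1)
            dio_complete_stub(who);
        else
            printf("%s: references remain\n", who);
    }

    int main(void)
    {
        put_ref("bio completion");   /* 2 -> 1: nothing left to do here */
        put_ref("submitting task");  /* 1 -> 0: this caller completes   */
        return 0;
    }
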
1139 struct dio *dio;
1154 dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
1155 if (!dio)
1162 memset(dio, 0, offsetof(struct dio, pages));
1164 dio->flags = flags;
1165 if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
1171 dio->i_size = i_size_read(inode);
1172 if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
1185 if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
1200 dio->is_async = false;
1202 dio->is_async = false;
1204 dio->is_async = true;
1206 dio->inode = inode;
1208 dio->op = REQ_OP_WRITE;
1209 dio->op_flags = REQ_SYNC | REQ_IDLE;
1211 dio->op_flags |= REQ_NOWAIT;
1213 dio->op = REQ_OP_READ;
1216 dio->op_flags |= REQ_HIPRI;
1222 if (dio->is_async && iov_iter_rw(iter) == WRITE) {
1225 retval = dio_set_defer_completion(dio);
1226 else if (!dio->inode->i_sb->s_dio_done_wq) {
1232 retval = sb_init_dio_done_wq(dio->inode->i_sb);
1249 dio->end_io = end_io;
1254 dio->iocb = iocb;
1256 spin_lock_init(&dio->bio_lock);
1257 dio->refcount = 1;
1259 dio->should_dirty = iter_is_iovec(iter) && iov_iter_rw(iter) == READ;
1274 retval = do_direct_IO(dio, &sdio, &map_bh);
1276 dio_cleanup(dio, &sdio);
1289 dio_zero_block(dio, &sdio, 1, &map_bh);
1294 ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
1301 dio_bio_submit(dio, &sdio);
1309 dio_cleanup(dio, &sdio);
1316 if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
1317 inode_unlock(dio->inode);
1327 if (dio->is_async && retval == 0 && dio->result &&
1328 (iov_iter_rw(iter) == READ || dio->result == count))
1331 dio_await_completion(dio);
1333 if (drop_refcount(dio) == 0) {
1334 retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE);
1341 if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ)
1344 kmem_cache_free(dio_cache, dio);
1374 dio_cache = KMEM_CACHE(dio, SLAB_PANIC);