/kernel/linux/linux-6.6/fs/btrfs/
bio.h
  69: btrfs_bio_end_io_t end_io;  [member]
  96: btrfs_bio_end_io_t end_io, void *private);
  99: btrfs_bio_end_io_t end_io, void *private);
bio.c
  46: btrfs_bio_end_io_t end_io, void *private)  [in btrfs_bio_init()]
  50: bbio->end_io = end_io;  [in btrfs_bio_init()]
  64: btrfs_bio_end_io_t end_io, void *private)  [in btrfs_bio_alloc()]
  71: btrfs_bio_init(bbio, fs_info, end_io, private);  [in btrfs_bio_alloc()]
  117: bbio->end_io(bbio);  [in __btrfs_bio_end_io()]
  120: bbio->end_io(bbio);  [in __btrfs_bio_end_io()]
  678: * Save the iter for the end_io handler and preload the checksums for  [in btrfs_submit_chunk()]
  45: btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info, btrfs_bio_end_io_t end_io, void *private)  [argument]
  62: btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf, struct btrfs_fs_info *fs_info, btrfs_bio_end_io_t end_io, void *private)  [argument]
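
Taken together, these bio.c hits show the btrfs completion contract in 6.6: btrfs_bio_alloc() takes an owner-supplied end_io callback plus an opaque private cookie, btrfs_bio_init() stores both on the bbio, and __btrfs_bio_end_io() later invokes the callback with the bbio itself. A minimal caller sketch against the signatures quoted above; my_ctx, my_read_end_io, and my_read are hypothetical names, and page setup and submission are elided:

    /* Sketch only; relies on the 6.6 fs/btrfs/bio.h API quoted above. */
    #include <linux/completion.h>
    #include "bio.h"		/* in-tree fs/btrfs header */

    struct my_ctx {			/* hypothetical caller state */
    	struct completion done;
    };

    static void my_read_end_io(struct btrfs_bio *bbio)
    {
    	struct my_ctx *ctx = bbio->private;	/* cookie from alloc */

    	/* bbio->bio.bi_status carries the result of the I/O. */
    	complete(&ctx->done);
    	bio_put(&bbio->bio);
    }

    static void my_read(struct btrfs_fs_info *fs_info, struct my_ctx *ctx)
    {
    	struct btrfs_bio *bbio;

    	init_completion(&ctx->done);
    	bbio = btrfs_bio_alloc(1, REQ_OP_READ, fs_info,
    			       my_read_end_io, ctx);
    	/* ... add pages, set bbio->bio.bi_iter.bi_sector, submit ... */
    }
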
/kernel/linux/linux-5.10/block/
blk-flush.c
  131: rq->end_io = rq->flush.saved_end_io;  [in blk_flush_restore_request()]
  234: * because its .end_io() is called from timeout code path too for  [in flush_end_io()]
  269: return rq->end_io == flush_end_io;  [in is_flush_rq()]
  338: flush_rq->end_io = flush_end_io;  [in blk_kick_flush()]
  340: * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one  [in blk_kick_flush()]
  343: * and READ flush_rq->end_io  [in blk_kick_flush()]
  437: rq->flush.saved_end_io = rq->end_io; /* Usually NULL */  [in blk_insert_flush()]
  439: rq->end_io = mq_flush_data_end_io;  [in blk_insert_flush()]
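
In both trees the flush machinery temporarily hijacks the request's completion callback: blk_insert_flush() (blk_rq_init_flush() in 6.6) parks the owner's rq->end_io in rq->flush.saved_end_io and substitutes mq_flush_data_end_io, and blk_flush_restore_request() swaps the original back once the flush sequence finishes. A condensed sketch of the idiom, not a drop-in copy of either file (mq_flush_data_end_io is static to blk-flush.c):

    /* Condensed save/substitute/restore idiom from blk-flush.c. */
    static void init_flush_sketch(struct request *rq)
    {
    	rq->flush.saved_end_io = rq->end_io;	/* usually NULL */
    	rq->end_io = mq_flush_data_end_io;	/* flush state-machine hook */
    }

    static void restore_sketch(struct request *rq)
    {
    	/* Sequence complete: hand completion back to the request owner. */
    	rq->end_io = rq->flush.saved_end_io;
    }
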
blk-exec.c
  56: rq->end_io = done;  [in blk_execute_rq_nowait()]
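
blk_execute_rq_nowait() is the asynchronous passthrough entry point: the caller's done callback is stored in rq->end_io (line 56) and fires when the request completes. A sketch against the 5.10 signature, which still takes the queue and an optional gendisk (later kernels changed this); my_done and my_issue are hypothetical:

    #include <linux/blkdev.h>

    static void my_done(struct request *rq, blk_status_t status)
    {
    	/* Completion context: the request is finished, reclaim it. */
    	blk_put_request(rq);
    }

    static int my_issue(struct request_queue *q)
    {
    	struct request *rq = blk_get_request(q, REQ_OP_DRV_IN, 0);

    	if (IS_ERR(rq))
    		return PTR_ERR(rq);

    	/* Stores my_done in rq->end_io and queues the request. */
    	blk_execute_rq_nowait(q, NULL, rq, 0, my_done);
    	return 0;
    }
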
blk-core.c
  823: goto end_io;  [in submit_bio_checks()]
  827: goto end_io;  [in submit_bio_checks()]
  830: goto end_io;  [in submit_bio_checks()]
  832: goto end_io;  [in submit_bio_checks()]
  844: goto end_io;  [in submit_bio_checks()]
  867: goto end_io;  [in submit_bio_checks()]
  914: end_io:  [in submit_bio_checks()]
/kernel/linux/linux-6.6/block/
blk-flush.c
  136: rq->end_io = rq->flush.saved_end_io;  [in blk_flush_restore_request()]
  237: * because its .end_io() is called from timeout code path too for  [in flush_end_io()]
  273: return rq->end_io == flush_end_io;  [in is_flush_rq()]
  341: flush_rq->end_io = flush_end_io;  [in blk_kick_flush()]
  343: * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one  [in blk_kick_flush()]
  346: * and READ flush_rq->end_io  [in blk_kick_flush()]
  394: rq->flush.saved_end_io = rq->end_io; /* Usually NULL */  [in blk_rq_init_flush()]
  395: rq->end_io = mq_flush_data_end_io;  [in blk_rq_init_flush()]
blk-core.c
  745: goto end_io;  [in submit_bio_noacct()]
  749: goto end_io;  [in submit_bio_noacct()]
  751: goto end_io;  [in submit_bio_noacct()]
  761: goto end_io;  [in submit_bio_noacct()]
  766: goto end_io;  [in submit_bio_noacct()]
  786: goto end_io;  [in submit_bio_noacct()]
  814: end_io:  [in submit_bio_noacct()]
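
Both the 5.10 submit_bio_checks() and 6.6 submit_bio_noacct() listings show the same single-exit validation shape: every early rejection jumps to one end_io: label, where the bio is completed in place with an error status rather than leaked. Condensed to its skeleton (the real functions perform many more checks; valid_op and live_queue stand in for them):

    #include <linux/bio.h>

    /* Skeleton of the shared-exit validation pattern in blk-core.c. */
    static void submit_checks_sketch(struct bio *bio, bool valid_op,
    				 bool live_queue)
    {
    	blk_status_t status = BLK_STS_IOERR;

    	if (!live_queue)		/* e.g. device gone */
    		goto end_io;
    	if (!valid_op) {		/* e.g. op unsupported by the queue */
    		status = BLK_STS_NOTSUPP;
    		goto end_io;
    	}

    	/* ... checks passed: hand the bio down the stack ... */
    	return;

    end_io:
    	bio->bi_status = status;
    	bio_endio(bio);		/* complete the rejected bio immediately */
    }
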
/kernel/linux/linux-5.10/fs/
direct-io.c
  117: /* dio_state communicated between submission path and end_io */
  126: dio_iodone_t *end_io; /* IO completion function */  [member]
  270: if (dio->end_io) {  [in dio_complete()]
  272: err = dio->end_io(dio->iocb, offset, ret, dio->private);  [in dio_complete()]
  284: * And this page cache invalidation has to be after dio->end_io(), as  [in dio_complete()]
  286: * end_io() when necessary, otherwise a racing buffer read would cache  [in dio_complete()]
  421: * bios hold a dio reference between submit_bio and ->end_io.
  1129: get_block_t get_block, dio_iodone_t end_io,  [in do_blockdev_direct_IO()]
  1249: dio->end_io = end_io;  [in do_blockdev_direct_IO()]
  1127: do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, struct block_device *bdev, struct iov_iter *iter, get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io, int flags)  [argument]
  1348: __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, struct block_device *bdev, struct iov_iter *iter, get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io, int flags)  [argument]
  [all...]
/kernel/linux/linux-6.6/fs/
direct-io.c
  116: /* dio_state communicated between submission path and end_io */
  123: dio_iodone_t *end_io; /* IO completion function */  [member]
  280: if (dio->end_io) {  [in dio_complete()]
  282: err = dio->end_io(dio->iocb, offset, ret, dio->private);  [in dio_complete()]
  294: * And this page cache invalidation has to be after dio->end_io(), as  [in dio_complete()]
  296: * end_io() when necessary, otherwise a racing buffer read would cache  [in dio_complete()]
  422: * bios hold a dio reference between submit_bio and ->end_io.
  1106: get_block_t get_block, dio_iodone_t end_io,  [in __blockdev_direct_IO()]
  1224: dio->end_io = end_io;  [in __blockdev_direct_IO()]
  1104: __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, struct block_device *bdev, struct iov_iter *iter, get_block_t get_block, dio_iodone_t end_io, int flags)  [argument]
  [all...]
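
In both trees, dio_complete() invokes the filesystem's optional end_io hook after the bios finish but before the page cache range is invalidated, the ordering the comments at lines 284-286 / 294-296 insist on: the hook may convert unwritten extents, and a racing buffered read must not cache stale data from before that conversion. A sketch of such a hook, assuming the dio_iodone_t typedef from <linux/fs.h>; the myfs_* name is hypothetical, and the function would be passed as the end_io argument to __blockdev_direct_IO():

    #include <linux/fs.h>

    /* Hypothetical filesystem hook matching dio_iodone_t; dio_complete()
     * calls it with the final byte count (or a negative error) in 'bytes'. */
    static int myfs_dio_end_io(struct kiocb *iocb, loff_t offset,
    			   ssize_t bytes, void *private)
    {
    	if (bytes <= 0)
    		return bytes;	/* nothing completed; propagate */

    	/* e.g. convert unwritten extents covering [offset, offset+bytes)
    	 * before dio_complete() invalidates the page cache range. */
    	return 0;
    }
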
/kernel/linux/linux-5.10/drivers/md/
dm-path-selector.h
  76: int (*end_io) (struct path_selector *ps, struct dm_path *path,  [member]
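
This is the hook a path selector exports so dm-mpath can feed completion statistics back to it; the dm-mpath hits below show the guarded indirect call (if (ps->type->end_io) ...), and the queue-length selector wires it through its path_selector_type. A registration sketch; the my_* names are hypothetical, and the signature assumes the four-argument form taking nr_bytes plus the I/O start time, matching the truncated prototypes above:

    #include <linux/module.h>
    #include "dm-path-selector.h"	/* in-tree drivers/md header */

    /* Hypothetical selector; only the completion hook is fleshed out. */
    static int my_ps_end_io(struct path_selector *ps, struct dm_path *path,
    			size_t nr_bytes, u64 start_time)
    {
    	/* Credit back the in-flight bytes counted at submission time. */
    	return 0;
    }

    static struct path_selector_type my_ps = {
    	.name	= "my-selector",
    	.module	= THIS_MODULE,
    	.end_io	= my_ps_end_io,
    	/* .create, .destroy, .select_path, .start_io etc. omitted */
    };

    /* Registered with dm_register_path_selector(&my_ps) at module init. */
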
dm-bufio.c
  156: void (*end_io)(struct dm_buffer *, blk_status_t);  [member]
  577: b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);  [in dmio_complete()]
  607: b->end_io(b, errno_to_blk_status(r));  [in use_dmio()]
  615: b->end_io(b, status);  [in bio_complete()]
  673: static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))  [in submit_io()]
  679: b->end_io = end_io;  [in submit_io()]
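
dm-bufio funnels two different completion sources into one callback type: the dm-io path reports an error bitmask, the inline-bio path already has a blk_status_t, and both are normalized before b->end_io fires. A sketch of that convergence idiom; my_buf and its field are hypothetical stand-ins for struct dm_buffer:

    #include <linux/blk_types.h>
    #include <linux/blkdev.h>

    struct my_buf {		/* hypothetical stand-in for dm_buffer */
    	void (*end_io)(struct my_buf *b, blk_status_t status);
    };

    static void from_dmio(struct my_buf *b, unsigned long error)
    {
    	/* dm-io reports a bitmask; any set bit means failure (line 577). */
    	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : BLK_STS_OK);
    }

    static void from_errno(struct my_buf *b, int r)
    {
    	/* A plain -errno is translated losslessly (line 607). */
    	b->end_io(b, errno_to_blk_status(r));
    }
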
dm-mpath.c
  575: if (pgpath && pgpath->pg->ps.type->end_io)  [in multipath_release_clone()]
  576: pgpath->pg->ps.type->end_io(&pgpath->pg->ps,  [in multipath_release_clone()]
  1675: if (ps->type->end_io)  [in multipath_end_io()]
  1676: ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,  [in multipath_end_io()]
  1719: if (ps->type->end_io)  [in multipath_end_io_bio()]
  1720: ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,  [in multipath_end_io_bio()]
  2147: .end_io = multipath_end_io_bio,
dm-queue-length.c
  252: .end_io = ql_end_io,
/kernel/linux/linux-6.6/drivers/md/
dm-path-selector.h
  91: int (*end_io)(struct path_selector *ps, struct dm_path *path,  [member]
dm-mpath.c
  581: if (pgpath && pgpath->pg->ps.type->end_io)  [in multipath_release_clone()]
  582: pgpath->pg->ps.type->end_io(&pgpath->pg->ps,  [in multipath_release_clone()]
  1683: if (ps->type->end_io)  [in multipath_end_io()]
  1684: ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,  [in multipath_end_io()]
  1727: if (ps->type->end_io)  [in multipath_end_io_bio()]
  1728: ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,  [in multipath_end_io_bio()]
  2197: .end_io = multipath_end_io_bio,
dm-ps-queue-length.c
  256: .end_io = ql_end_io,
/kernel/linux/linux-5.10/mm/
page_io.c
  30: struct page *page, bio_end_io_t end_io)  [in get_swap_bio()]
  41: bio->bi_end_io = end_io;  [in get_swap_bio()]
  29: get_swap_bio(gfp_t gfp_flags, struct page *page, bio_end_io_t end_io)  [argument]
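
get_swap_bio() is one instance of the generic bio completion contract that the gfs2, NFS block-layout, and pblk entries below follow as well: allocate a bio, point bi_end_io at a completion routine, stash per-I/O state in bi_private, and submit. A self-contained sketch against the 5.10 bio API (6.6 moved the bdev and op flags into bio_alloc()); the my_* names are hypothetical:

    #include <linux/atomic.h>
    #include <linux/bio.h>

    struct my_state {
    	atomic_t error;		/* hypothetical per-I/O bookkeeping */
    };

    static void my_end_io(struct bio *bio)
    {
    	struct my_state *st = bio->bi_private;

    	/* Completion (often interrupt) context: record, then drop ref. */
    	if (bio->bi_status)
    		atomic_set(&st->error, 1);
    	bio_put(bio);
    }

    static struct bio *my_get_bio(struct page *page, struct my_state *st)
    {
    	struct bio *bio = bio_alloc(GFP_KERNEL, 1);	/* 5.10 allocator */

    	if (bio) {
    		/* Cannot fail: fresh bio with room for one vec. */
    		bio_add_page(bio, page, PAGE_SIZE, 0);
    		bio->bi_end_io = my_end_io;
    		bio->bi_private = st;
    	}
    	return bio;
    }
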
/kernel/linux/linux-5.10/fs/gfs2/
lops.c
  254: * @end_io: The bi_end_io callback
  262: bio_end_io_t *end_io)  [in gfs2_log_alloc_bio()]
  269: bio->bi_end_io = end_io;  [in gfs2_log_alloc_bio()]
  281: * @end_io: The bi_end_io callback
  294: bio_end_io_t *end_io, bool flush)  [in gfs2_log_get_bio()]
  308: *biop = gfs2_log_alloc_bio(sdp, blkno, end_io);  [in gfs2_log_get_bio()]
  261: gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno, bio_end_io_t *end_io)  [argument]
  292: gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno, struct bio **biop, int op, bio_end_io_t *end_io, bool flush)  [argument]
/kernel/linux/linux-5.10/fs/nfs/blocklayout/
blocklayout.c
  120: bio_end_io_t end_io, struct parallel_io *par)  [in bl_alloc_init_bio()]
  134: bio->bi_end_io = end_io;  [in bl_alloc_init_bio()]
  148: struct pnfs_block_extent *be, bio_end_io_t end_io,  [in do_add_page_to_bio()]
  180: disk_addr >> SECTOR_SHIFT, end_io, par);  [in do_add_page_to_bio()]
  119: bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector, bio_end_io_t end_io, struct parallel_io *par)  [argument]
  146: do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect, struct page *page, struct pnfs_block_dev_map *map, struct pnfs_block_extent *be, bio_end_io_t end_io, struct parallel_io *par, unsigned int offset, int *len)  [argument]
/kernel/linux/linux-5.10/drivers/lightnvm/
pblk-write.c
  303: unsigned int nr_secs, nvm_end_io_fn(*end_io))  [in pblk_alloc_w_rq()]
  310: rqd->end_io = end_io;  [in pblk_alloc_w_rq()]
/kernel/linux/linux-5.10/include/linux/
iomap.h
  214: * submitting the bio and/or override the bio end_io handler for complex
  247: * Flags for direct I/O ->end_io:
  253: int (*end_io)(struct kiocb *iocb, ssize_t size, int error,  [member]
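
struct iomap_dio_ops is how a filesystem hooks direct I/O completion in the iomap path; iomap_dio_complete() (see the fs/iomap entries below) calls it as dops->end_io(iocb, dio->size, ret, dio->flags). A sketch of a filesystem supplying the hook; the myfs_* names are hypothetical, and the ops table would be handed to iomap_dio_rw():

    #include <linux/iomap.h>

    /* Hypothetical hook; 'size' is the completed byte count, 'error' a
     * negative errno or 0, 'flags' the IOMAP_DIO_* completion flags. */
    static int myfs_dio_end_io(struct kiocb *iocb, ssize_t size, int error,
    			   unsigned int flags)
    {
    	if (error)
    		return error;
    	if (flags & IOMAP_DIO_UNWRITTEN) {
    		/* convert unwritten extents before the page cache
    		 * range is invalidated, per the ordering comments below */
    	}
    	return 0;
    }

    static const struct iomap_dio_ops myfs_dio_ops = {
    	.end_io = myfs_dio_end_io,
    };
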
/kernel/linux/linux-6.6/fs/gfs2/
lops.c
  257: * @end_io: The bi_end_io callback
  265: bio_end_io_t *end_io)  [in gfs2_log_alloc_bio()]
  271: bio->bi_end_io = end_io;  [in gfs2_log_alloc_bio()]
  283: * @end_io: The bi_end_io callback
  296: bio_end_io_t *end_io, bool flush)  [in gfs2_log_get_bio()]
  310: *biop = gfs2_log_alloc_bio(sdp, blkno, end_io);  [in gfs2_log_get_bio()]
  264: gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno, bio_end_io_t *end_io)  [argument]
  294: gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno, struct bio **biop, enum req_op op, bio_end_io_t *end_io, bool flush)  [argument]
/kernel/linux/linux-5.10/fs/iomap/
direct-io.c
  87: if (dops && dops->end_io)  [in iomap_dio_complete()]
  88: ret = dops->end_io(iocb, dio->size, ret, dio->flags);  [in iomap_dio_complete()]
  106: * And this page cache invalidation has to be after ->end_io(), as some  [in iomap_dio_complete()]
  108: * ->end_io() when necessary, otherwise a racing buffer read would cache  [in iomap_dio_complete()]
/kernel/linux/linux-6.6/fs/iomap/
direct-io.c
  90: if (dops && dops->end_io)  [in iomap_dio_complete()]
  91: ret = dops->end_io(iocb, dio->size, ret, dio->flags);  [in iomap_dio_complete()]
  108: * And this page cache invalidation has to be after ->end_io(), as some  [in iomap_dio_complete()]
  110: * ->end_io() when necessary, otherwise a racing buffer read would cache  [in iomap_dio_complete()]
/kernel/linux/linux-6.6/include/linux/
blk-mq.h
  189: rq_end_io_fn *end_io;  [member]
  845: * ->end_io handler.
  856: (req->end_io && !blk_rq_is_passthrough(req))  [in blk_mq_add_to_batch()]
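
In 6.6, rq->end_io is an rq_end_io_fn, which returns an enum rq_end_io_ret so the handler can tell blk-mq whether it already freed the request; the blk_mq_add_to_batch() hit shows why requests carrying a non-passthrough end_io are kept out of completion batching, so the callback is never bypassed. A handler sketch under those 6.6 definitions; my_end_io is hypothetical and assumes the submitter pointed end_io_data at a completion:

    #include <linux/blk-mq.h>
    #include <linux/completion.h>

    static enum rq_end_io_ret my_end_io(struct request *rq,
    				    blk_status_t status)
    {
    	struct completion *done = rq->end_io_data;	/* set by submitter */

    	complete(done);
    	return RQ_END_IO_NONE;	/* submitter still owns and frees rq */
    }
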