Lines Matching refs:io_kiocb

303 struct io_kiocb *head;
304 struct io_kiocb *last;
312 * io_kiocb alloc cache
322 struct io_kiocb *compl_reqs[IO_COMPL_BATCH];
509 struct io_kiocb *req;
544 struct io_kiocb *head;
546 struct io_kiocb *prev;
810 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
831 struct io_kiocb {
878 struct io_kiocb *link;
905 struct io_kiocb *req;
1079 static bool io_disarm_next(struct io_kiocb *req);
1086 static void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags);
1088 static void io_put_req(struct io_kiocb *req);
1089 static void io_put_req_deferred(struct io_kiocb *req);
1090 static void io_dismantle_req(struct io_kiocb *req);
1091 static void io_queue_linked_timeout(struct io_kiocb *req);
1095 static void io_clean_op(struct io_kiocb *req);
1097 struct io_kiocb *req, int fd, bool fixed,
1099 static void __io_queue_sqe(struct io_kiocb *req);
1102 static void io_req_task_queue(struct io_kiocb *req);
1104 static int io_req_prep_async(struct io_kiocb *req);
1106 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
1108 static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
1147 static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
1153 static inline bool req_ref_put_and_test(struct io_kiocb *req)
1162 static inline void req_ref_get(struct io_kiocb *req)
1169 static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
1177 static inline void io_req_set_refcount(struct io_kiocb *req)
1182 static inline void io_req_set_rsrc_node(struct io_kiocb *req)
1204 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
1208 struct io_kiocb *req;
1222 static bool io_match_linked(struct io_kiocb *head)
1224 struct io_kiocb *req;
1237 static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
1260 static inline void req_set_fail(struct io_kiocb *req)
1265 static inline void req_fail_link_node(struct io_kiocb *req, int res)
1278 static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1288 struct io_kiocb *req, *tmp;
1378 static bool req_need_defer(struct io_kiocb *req, u32 seq)
1398 static inline bool io_req_ffs_set(struct io_kiocb *req)
1403 static void io_req_track_inflight(struct io_kiocb *req)
1411 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
1425 static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
1432 static void io_prep_async_work(struct io_kiocb *req)
1456 static void io_prep_async_link(struct io_kiocb *req)
1458 struct io_kiocb *cur;
1473 static void io_queue_async_work(struct io_kiocb *req, bool *locked)
1476 struct io_kiocb *link = io_prep_linked_timeout(req);
1505 static void io_kill_timeout(struct io_kiocb *req, int status)
1542 struct io_kiocb *req, *tmp;
1830 static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
1842 static void io_req_complete_post(struct io_kiocb *req, s32 res,
1879 static inline bool io_req_needs_clean(struct io_kiocb *req)
1884 static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
1894 static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
1903 static inline void io_req_complete(struct io_kiocb *req, s32 res)
1908 static void io_req_complete_failed(struct io_kiocb *req, s32 res)
1914 static void io_req_complete_fail_submit(struct io_kiocb *req)
1929 static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
1963 struct io_kiocb *req = list_first_entry(&state->free_list,
1964 struct io_kiocb, inflight_entry);
1982 static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
2022 static void io_dismantle_req(struct io_kiocb *req)
2038 static void __io_free_req(struct io_kiocb *req)
2053 static inline void io_remove_next_linked(struct io_kiocb *req)
2055 struct io_kiocb *nxt = req->link;
2061 static bool io_kill_linked_timeout(struct io_kiocb *req)
2065 struct io_kiocb *link = req->link;
2082 static void io_fail_links(struct io_kiocb *req)
2085 struct io_kiocb *nxt, *link = req->link;
2104 static bool io_disarm_next(struct io_kiocb *req)
2110 struct io_kiocb *link = req->link;
2134 static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
2136 struct io_kiocb *nxt;
2161 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
2205 struct io_kiocb *req = container_of(node, struct io_kiocb,
2232 static void io_req_task_work_add(struct io_kiocb *req)
2273 req = container_of(node, struct io_kiocb, io_task_work.node);
2281 static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
2290 static void io_req_task_submit(struct io_kiocb *req, bool *locked)
2302 static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
2309 static void io_req_task_queue(struct io_kiocb *req)
2315 static void io_req_task_queue_reissue(struct io_kiocb *req)
2321 static inline void io_queue_next(struct io_kiocb *req)
2323 struct io_kiocb *nxt = io_req_find_next(req);
2329 static void io_free_req(struct io_kiocb *req)
2335 static void io_free_req_work(struct io_kiocb *req, bool *locked)
2362 static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2392 struct io_kiocb *req = state->compl_reqs[i];
2403 struct io_kiocb *req = state->compl_reqs[i];
2417 static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
2419 struct io_kiocb *nxt = NULL;
2428 static inline void io_put_req(struct io_kiocb *req)
2434 static inline void io_put_req_deferred(struct io_kiocb *req)
2457 static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
2468 static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2505 struct io_kiocb *req;
2515 req = list_first_entry(done, struct io_kiocb, inflight_entry);
2549 struct io_kiocb *req, *tmp;
2679 static void kiocb_end_write(struct io_kiocb *req)
2694 static bool io_resubmit_prep(struct io_kiocb *req)
2704 static bool io_rw_should_reissue(struct io_kiocb *req)
2730 static bool io_resubmit_prep(struct io_kiocb *req)
2734 static bool io_rw_should_reissue(struct io_kiocb *req)
2744 static void io_req_io_end(struct io_kiocb *req)
2756 static bool __io_complete_rw_common(struct io_kiocb *req, long res)
2775 static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
2789 static void io_req_task_complete(struct io_kiocb *req, bool *locked)
2807 static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
2815 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2826 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2849 static void io_iopoll_req_issued(struct io_kiocb *req)
2866 struct io_kiocb *list_req;
2869 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
2949 static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
2959 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
3053 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
3073 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
3113 static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
3171 static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
3196 static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
3230 static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
3247 static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
3273 static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3294 static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3316 static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
3367 static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
3426 static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3453 static inline int io_alloc_async_data(struct io_kiocb *req)
3460 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3482 static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
3503 static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3524 struct io_kiocb *req = wait->private;
3550 static bool io_rw_should_retry(struct io_kiocb *req)
3581 static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3591 static bool need_read_all(struct io_kiocb *req)
3597 static int io_read(struct io_kiocb *req, unsigned int issue_flags)
3729 static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3736 static int io_write(struct io_kiocb *req, unsigned int issue_flags)
3841 static int io_renameat_prep(struct io_kiocb *req,
3874 static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
3892 static int io_unlinkat_prep(struct io_kiocb *req,
3921 static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
3941 static int io_shutdown_prep(struct io_kiocb *req,
3958 static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
3981 static int __io_splice_prep(struct io_kiocb *req,
3998 static int io_tee_prep(struct io_kiocb *req,
4006 static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
4036 static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4045 static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
4082 static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
4093 static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4112 static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
4130 static int io_fallocate_prep(struct io_kiocb *req,
4145 static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
4162 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4196 static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4205 static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4224 static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
4292 static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
4297 static int io_remove_buffers_prep(struct io_kiocb *req,
4344 static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
4369 static int io_provide_buffers_prep(struct io_kiocb *req,
4432 static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
4461 static int io_epoll_ctl_prep(struct io_kiocb *req,
4488 static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
4508 static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4525 static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
4544 static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4557 static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
4580 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4598 static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
4615 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4633 static int io_close(struct io_kiocb *req, unsigned int issue_flags)
4684 static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4700 static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
4724 static int io_setup_async_msg(struct io_kiocb *req,
4749 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4764 static int io_sendmsg_prep_async(struct io_kiocb *req)
4774 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4799 static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
4854 static int io_send(struct io_kiocb *req, unsigned int issue_flags)
4907 static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4940 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
4981 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4994 static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
5009 static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
5014 static int io_recvmsg_prep_async(struct io_kiocb *req)
5024 static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5050 static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
5122 static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
5191 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5215 static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
5254 static int io_connect_prep_async(struct io_kiocb *req)
5262 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5277 static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
5319 static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
5326 static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
5333 static int io_##op##_prep_async(struct io_kiocb *req) \
5348 struct io_kiocb *req;
5363 static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
5384 static inline bool io_poll_get_ownership(struct io_kiocb *req)
5391 static void io_poll_mark_cancelled(struct io_kiocb *req)
5396 static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
5404 static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
5411 static void io_poll_req_insert(struct io_kiocb *req)
5443 static void io_poll_remove_entries(struct io_kiocb *req)
5478 static int io_poll_check_events(struct io_kiocb *req)
5550 static void io_poll_task_func(struct io_kiocb *req, bool *locked)
5573 static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
5594 static void __io_poll_execute(struct io_kiocb *req, int mask)
5606 static inline void io_poll_execute(struct io_kiocb *req, int res)
5612 static void io_poll_cancel_req(struct io_kiocb *req)
5622 struct io_kiocb *req = wait->private;
5673 struct io_kiocb *req = pt->req;
5722 static int __io_arm_poll_handler(struct io_kiocb *req,
5803 static int io_arm_poll_handler(struct io_kiocb *req)
5862 struct io_kiocb *req;
5883 static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
5888 struct io_kiocb *req;
5901 static bool io_poll_disarm(struct io_kiocb *req)
5915 struct io_kiocb *req = io_poll_find(ctx, sqe_addr, poll_only);
5937 static int io_poll_update_prep(struct io_kiocb *req,
5970 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5988 static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
6005 static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
6008 struct io_kiocb *preq;
6048 static void io_req_task_timeout(struct io_kiocb *req, bool *locked)
6058 struct io_kiocb *req = data->req;
6073 static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
6078 struct io_kiocb *req;
6100 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
6132 struct io_kiocb *req;
6156 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
6171 static int io_timeout_remove_prep(struct io_kiocb *req,
6212 static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
6241 static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
6294 static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
6327 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
6351 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6383 static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
6406 static int io_async_cancel_prep(struct io_kiocb *req,
6421 static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
6450 static int io_rsrc_update_prep(struct io_kiocb *req,
6466 static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
6490 static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
6568 static int io_req_prep_async(struct io_kiocb *req)
6594 static u32 io_get_sequence(struct io_kiocb *req)
6604 static bool io_drain_req(struct io_kiocb *req)
6606 struct io_kiocb *pos;
6679 static void io_clean_op(struct io_kiocb *req)
6745 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
6873 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6881 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6882 struct io_kiocb *timeout;
6954 struct io_kiocb *req, int fd,
6977 struct io_kiocb *req, int fd)
6990 struct io_kiocb *req, int fd, bool fixed,
6999 static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
7001 struct io_kiocb *prev = req->timeout.prev;
7018 struct io_kiocb *prev, *req = data->req;
7044 static void io_queue_linked_timeout(struct io_kiocb *req)
7066 static void __io_queue_sqe(struct io_kiocb *req)
7069 struct io_kiocb *linked_timeout;
7117 static inline void io_queue_sqe(struct io_kiocb *req)
7143 struct io_kiocb *req,
7163 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
7226 static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
7277 struct io_kiocb *head = link->head;
7392 struct io_kiocb *req;
8474 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
8524 static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
9336 struct io_kiocb *req, *nxt;
9494 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
9587 struct io_kiocb *req, *tmp;
9656 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
10242 struct io_kiocb *req;
11154 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
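
The helpers listed around lines 1147-1177 (req_ref_inc_not_zero, req_ref_put_and_test, req_ref_get, io_req_set_refcount) suggest an opt-in atomic reference count carried by each io_kiocb. As a rough illustration only -- this is not the io_uring.c implementation; the struct layout, the REQ_F_REFCOUNT flag bit, and the helper bodies below are assumptions inferred from the prototypes above -- the pattern looks roughly like this, sketched with C11 atomics:

/* Illustrative sketch only; simplified stand-ins, not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>

#define REQ_F_REFCOUNT (1u << 0)    /* hypothetical "refcount enabled" flag */

struct io_kiocb_sketch {
	unsigned int flags;
	atomic_int   refs;
};

/* Take a reference only if the count has not already dropped to zero. */
static bool req_ref_inc_not_zero(struct io_kiocb_sketch *req)
{
	int old = atomic_load(&req->refs);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&req->refs, &old, old + 1))
			return true;
	}
	return false;
}

/* Drop a reference; returns true when the caller should free the request.
 * Requests that never opted in to refcounting are treated as single-owner. */
static bool req_ref_put_and_test(struct io_kiocb_sketch *req)
{
	if (!(req->flags & REQ_F_REFCOUNT))
		return true;
	return atomic_fetch_sub(&req->refs, 1) == 1;
}

/* Lazily switch a request to refcounted mode with an initial count of one. */
static void io_req_set_refcount(struct io_kiocb_sketch *req)
{
	if (!(req->flags & REQ_F_REFCOUNT)) {
		req->flags |= REQ_F_REFCOUNT;
		atomic_store(&req->refs, 1);
	}
}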