Lines matching refs: io_kiocb
136 struct io_kiocb *req;
148 static void io_queue_sqe(struct io_kiocb *req);
194 static bool io_match_linked(struct io_kiocb *head)
196 struct io_kiocb *req;
209 bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
232 static inline void req_fail_link_node(struct io_kiocb *req, int res)
238 static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
255 struct io_kiocb *req, *tmp;
360 static bool req_need_defer(struct io_kiocb *req, u32 seq)
371 static void io_clean_op(struct io_kiocb *req)
404 static inline void io_req_track_inflight(struct io_kiocb *req)
412 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
426 static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
433 static noinline void __io_arm_ltimeout(struct io_kiocb *req)
438 static inline void io_arm_ltimeout(struct io_kiocb *req)
444 static void io_prep_async_work(struct io_kiocb *req)
478 static void io_prep_async_link(struct io_kiocb *req)
480 struct io_kiocb *cur;
495 void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use)
497 struct io_kiocb *link = io_prep_linked_timeout(req);
816 void io_req_cqe_overflow(struct io_kiocb *req)
937 bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags)
969 static void __io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
1017 void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
1034 void io_req_defer_failed(struct io_kiocb *req, s32 res)
1052 static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
1110 struct io_kiocb *req = reqs[i];
1118 __cold void io_free_req(struct io_kiocb *req)
1128 static void __io_req_find_next_prep(struct io_kiocb *req)
1137 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
1139 struct io_kiocb *nxt;
1176 struct io_kiocb *req = container_of(node, struct io_kiocb,
1179 prefetch(container_of(next, struct io_kiocb, io_task_work.node));
1221 struct io_kiocb *req;
1224 req = container_of(node, struct io_kiocb, io_task_work.node);
1272 static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
1285 struct io_kiocb *first_req = container_of(first,
1286 struct io_kiocb,
1323 static void io_req_normal_work_add(struct io_kiocb *req)
1341 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
1358 struct io_kiocb *req = container_of(node, struct io_kiocb,
1397 struct io_kiocb *req = container_of(node, struct io_kiocb,
1399 prefetch(container_of(next, struct io_kiocb, io_task_work.node));
1449 static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
1455 void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
1467 void io_req_task_queue_fail(struct io_kiocb *req, int ret)
1474 void io_req_task_queue(struct io_kiocb *req)
1480 void io_queue_next(struct io_kiocb *req)
1482 struct io_kiocb *nxt = io_req_find_next(req);
1493 struct io_kiocb *req = container_of(node, struct io_kiocb,
1537 struct io_kiocb *req = container_of(node, struct io_kiocb,
1666 void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts)
1680 static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
1697 struct io_kiocb *list_req;
1699 list_req = container_of(ctx->iopoll_list.first, struct io_kiocb,
1740 bool io_alloc_async_data(struct io_kiocb *req)
1751 int io_req_prep_async(struct io_kiocb *req)
1770 static u32 io_get_sequence(struct io_kiocb *req)
1773 struct io_kiocb *cur;
1781 static __cold void io_drain_req(struct io_kiocb *req)
1822 static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
1836 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
1878 int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts)
1887 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1888 struct io_kiocb *nxt = NULL;
1900 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1973 inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
1994 struct file *io_file_get_normal(struct io_kiocb *req, int fd)
2006 static void io_queue_async(struct io_kiocb *req, int ret)
2009 struct io_kiocb *linked_timeout;
2035 static inline void io_queue_sqe(struct io_kiocb *req)
2052 static void io_queue_sqe_fallback(struct io_kiocb *req)
2084 struct io_kiocb *req,
2101 static void io_init_req_drain(struct io_kiocb *req)
2104 struct io_kiocb *head = ctx->submit_state.link.head;
2120 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
2215 struct io_kiocb *req, int ret)
2219 struct io_kiocb *head = link->head;
2251 static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2402 struct io_kiocb *req;
2870 struct io_kiocb *req;
3055 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
3203 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
4656 * file in io_kiocb and until the opcode field. The openat2 handling
4657 * requires copying in user memory into the io_kiocb object in that
4661 req_cachep = kmem_cache_create_usercopy("io_kiocb",
4662 sizeof(struct io_kiocb), 0,
4665 offsetof(struct io_kiocb, cmd.data),
4666 sizeof_field(struct io_kiocb, cmd.data), NULL);
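
The comment at lines 4656-4657 and the kmem_cache_create_usercopy() call below it whitelist only the cmd.data sub-range of struct io_kiocb for user copies, so HARDENED_USERCOPY will reject a copy_to/from_user() that touches any other part of the object. As a minimal sketch of that same pattern, here is a hypothetical kernel module; struct my_obj, my_cachep, the field names, and the slab flags are illustrative, not io_uring code (the listing elides io_uring's actual flag lines, which do not contain "io_kiocb"):

    #include <linux/module.h>
    #include <linux/slab.h>

    struct my_obj {
            u64 internal_state;     /* must never be copied to/from user space */
            char user_data[64];     /* the only region user copies may touch */
    };

    static struct kmem_cache *my_cachep;

    static int __init my_cache_init(void)
    {
            /*
             * useroffset/usersize confine hardened-usercopy checks to
             * user_data; a copy overlapping internal_state is rejected.
             */
            my_cachep = kmem_cache_create_usercopy("my_obj",
                                    sizeof(struct my_obj), 0,
                                    SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
                                    offsetof(struct my_obj, user_data),
                                    sizeof_field(struct my_obj, user_data),
                                    NULL);
            return my_cachep ? 0 : -ENOMEM;
    }

    static void __exit my_cache_exit(void)
    {
            kmem_cache_destroy(my_cachep);
    }

    module_init(my_cache_init);
    module_exit(my_cache_exit);
    MODULE_LICENSE("GPL");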
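
Many of the matches above (e.g. at 1176, 1224, 1358, and 1397) recover a struct io_kiocb from an embedded io_task_work.node pointer via container_of(). The following stand-alone user-space sketch shows that pattern in isolation; struct demo_req, struct list_node, and their fields are hypothetical stand-ins, not the real io_kiocb layout:

    #include <stddef.h>
    #include <stdio.h>

    /* Same idea as the kernel macro: subtract the member's offset. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct list_node {
            struct list_node *next;
    };

    struct demo_req {
            int id;
            struct list_node node;  /* embedded, like io_task_work.node */
    };

    int main(void)
    {
            struct demo_req req = { .id = 42 };
            struct list_node *n = &req.node; /* all a list walk hands us */
            struct demo_req *back = container_of(n, struct demo_req, node);

            printf("recovered id = %d\n", back->id); /* prints 42 */
            return 0;
    }

Because the member offset is a compile-time constant, walking an intrusive list of embedded nodes recovers the containing request with a single pointer subtraction and no per-element back-pointer.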