Lines matching refs:req — references to the identifier req in io_uring/io_uring.h (Linux kernel)

43 	 * removed and the result is set on req->cqe.res.   (tail of the comment on IOU_STOP_MULTISHOT)
49 void io_req_cqe_overflow(struct io_kiocb *req);
51 void io_req_defer_failed(struct io_kiocb *req, s32 res);
52 void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
54 bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags);
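A minimal sketch of how a multishot handler might post an extra completion through io_fill_cqe_req_aux(); the defer condition, res and cflags here are illustrative assumptions, not the one true call site:

	/* sketch: post an auxiliary CQE and keep the multishot request armed */
	if (!io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
				 res, cflags | IORING_CQE_F_MORE))
		return IOU_STOP_MULTISHOT;	/* CQ overflow; see the comment at 43 */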
59 struct file *io_file_get_normal(struct io_kiocb *req, int fd);
60 struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
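The two getters pair up on the REQ_F_FIXED_FILE flag, as in io_assign_file() in io_uring.c (simplified sketch):

	if (req->flags & REQ_F_FIXED_FILE)
		req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
	else
		req->file = io_file_get_normal(req, req->cqe.fd);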
63 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
64 bool io_alloc_async_data(struct io_kiocb *req);
65 void io_req_task_queue(struct io_kiocb *req);
66 void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
67 void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
68 void io_req_task_queue_fail(struct io_kiocb *req, int ret);
69 void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
78 int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
82 int io_req_prep_async(struct io_kiocb *req);
87 void io_free_req(struct io_kiocb *req);
88 void io_queue_next(struct io_kiocb *req);
126 static inline void io_req_task_work_add(struct io_kiocb *req)
128 __io_req_task_work_add(req, 0);
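A request schedules task_work by filling in io_task_work.func and then queueing itself; the wrapper above passes no flags. A caller that tolerates batched wakeups can use the flagged variant directly (IOU_F_TWQ_LAZY_WAKE in recent kernels; a sketch, version-dependent):

	req->io_task_work.func = io_req_task_complete;
	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);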
158 struct io_kiocb *req)
171 trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
172 req->cqe.res, req->cqe.flags,
173 req->big_cqe.extra1, req->big_cqe.extra2);
175 memcpy(cqe, &req->cqe, sizeof(*cqe));
177 memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
178 memset(&req->big_cqe, 0, sizeof(req->big_cqe));
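On IORING_SETUP_CQE32 rings the second memcpy (at 177) moves the 16 extra payload bytes, and the memset clears them for the request's reuse. sizeof(*cqe) looks like the wrong size there, but a plain CQE and the big-CQE payload are both 16 bytes; a compile-time note of that assumption could read:

	/* sketch: the size identity the big_cqe memcpy relies on */
	BUILD_BUG_ON(sizeof(struct io_uring_cqe) != sizeof(req->big_cqe));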
183 static inline void req_set_fail(struct io_kiocb *req)
185 req->flags |= REQ_F_FAIL;
186 if (req->flags & REQ_F_CQE_SKIP) {
187 req->flags &= ~REQ_F_CQE_SKIP;
188 req->flags |= REQ_F_SKIP_LINK_CQES;
192 static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
194 req->cqe.res = res;
195 req->cqe.flags = cflags;
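Together with req_set_fail() above, this forms the standard epilogue of an opcode handler; the surrounding handler is hypothetical, but the pattern recurs throughout io_uring's issue functions:

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;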
198 static inline bool req_has_async_data(struct io_kiocb *req)
200 return req->flags & REQ_F_ASYNC_DATA;
203 static inline void io_put_file(struct io_kiocb *req)
205 if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
206 fput(req->file);
323 static inline void io_req_complete_defer(struct io_kiocb *req)
324 __must_hold(&req->ctx->uring_lock)
326 struct io_submit_state *state = &req->ctx->submit_state;
328 lockdep_assert_held(&req->ctx->uring_lock);
330 wq_list_add_tail(&req->comp_list, &state->compl_reqs);
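Requests parked on compl_reqs are reaped in batch by __io_submit_flush_completions() under the same uring_lock; a simplified sketch of that drain (overflow handling elided):

	__io_cq_lock(ctx);
	__wq_list_for_each(node, &state->compl_reqs) {
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    comp_list);

		if (!(req->flags & REQ_F_CQE_SKIP))
			io_fill_cqe_req(ctx, req);
	}
	__io_cq_unlock_post(ctx);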
358 struct io_kiocb *req;
360 req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
362 return req;
365 static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
371 *req = io_extract_req(ctx);
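io_alloc_req() refills the cache when free_list runs dry, so callers only test the boolean; the submission loop uses it roughly like this (simplified from io_submit_sqes()):

	struct io_kiocb *req;

	if (unlikely(!io_alloc_req(ctx, &req)))
		break;	/* out of request memory: stop consuming SQEs */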
386 static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
388 io_req_set_res(req, res, 0);
389 req->io_task_work.func = io_req_task_complete;
390 io_req_task_work_add(req);
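This helper is the usual way to fail a request from a context that cannot complete it inline; cancellation paths, for example, call:

	io_req_queue_tw_complete(req, -ECANCELED);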