Lines matching refs:req — every line in this io_uring timeout source that references the identifier req, each prefixed with its line number in the file.

38 static inline bool io_is_timeout_noseq(struct io_kiocb *req)
40 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
41 struct io_timeout_data *data = req->async_data;
46 static inline void io_put_req(struct io_kiocb *req)
48 if (req_ref_put_and_test(req)) {
49 io_queue_next(req);
50 io_free_req(req);
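
Note: lines 46-50 are, give or take braces, the whole of io_put_req(): drop a reference, and if it was the last one, queue the next linked request and free this one. A rough userspace sketch of the same put-and-test pattern (names here are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct request {
        atomic_int refs;
        struct request *link;              /* next request in the chain */
    };

    /* mirrors req_ref_put_and_test(): true when this put dropped the last ref */
    static bool ref_put_and_test(struct request *req)
    {
        return atomic_fetch_sub(&req->refs, 1) == 1;
    }

    static void put_request(struct request *req)
    {
        if (ref_put_and_test(req)) {
            struct request *next = req->link;  /* io_queue_next() analogue */
            free(req);
            (void)next;                        /* a real engine would now run `next` */
        }
    }
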
68 static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
70 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
71 struct io_timeout_data *data = req->async_data;
72 struct io_ring_ctx *ctx = req->ctx;
76 filled = io_fill_cqe_req_aux(req, ts->locked, -ETIME,
89 io_req_task_complete(req, ts);
92 static bool io_kill_timeout(struct io_kiocb *req, int status)
93 __must_hold(&req->ctx->timeout_lock)
95 struct io_timeout_data *io = req->async_data;
98 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
101 req_set_fail(req);
102 atomic_set(&req->ctx->cq_timeouts,
103 atomic_read(&req->ctx->cq_timeouts) + 1);
105 io_req_queue_tw_complete(req, status);
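
Note: io_kill_timeout() (lines 92-105) completes the timeout with `status` only when hrtimer_try_to_cancel() reports the callback is not already running; otherwise the firing path owns completion. A compile-only userspace analogue of that cancel-vs-fire race, using an atomic state word (hypothetical names):

    #include <stdatomic.h>
    #include <stdbool.h>

    enum tstate { ARMED, FIRING, CANCELLED };

    struct timeout {
        _Atomic enum tstate state;
        int result;
    };

    /* analogue of the hrtimer_try_to_cancel() != -1 check: cancellation wins
     * only if the timer callback has not already claimed this timeout */
    static bool kill_timeout(struct timeout *t, int status)
    {
        enum tstate expected = ARMED;

        if (atomic_compare_exchange_strong(&t->state, &expected, CANCELLED)) {
            t->result = status;    /* we complete it with the caller's status */
            return true;
        }
        return false;              /* the firing path owns completion */
    }
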
120 struct io_kiocb *req = cmd_to_io_kiocb(timeout);
123 if (io_is_timeout_noseq(req))
138 io_kill_timeout(req, 0);
160 static void io_fail_links(struct io_kiocb *req)
161 __must_hold(&req->ctx->completion_lock)
163 struct io_kiocb *link = req->link;
164 bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;
174 trace_io_uring_fail_link(req, link);
178 link = req->link;
181 req->link = NULL;
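
Note: io_fail_links() (lines 160-181) walks the chain hanging off req->link, gives each link its recorded failure result or -ECANCELED, honours REQ_F_SKIP_LINK_CQES, and finally clears req->link. A simplified single-pass analogue of the detach-and-fail walk (the CQE-skip and tracing details are omitted):

    #include <errno.h>
    #include <stddef.h>

    #define F_FAIL 0x1

    struct request {
        unsigned flags;
        int res;
        struct request *link;
    };

    /* stand-in for posting the CQE / queueing the task-work completion */
    static void complete_req(struct request *r, int res) { r->res = res; }

    /* cancel everything linked behind req, detaching as we go */
    static void fail_links(struct request *req)
    {
        struct request *link = req->link;

        req->link = NULL;
        while (link) {
            struct request *next = link->link;
            /* a link that already failed keeps its recorded result */
            int res = (link->flags & F_FAIL) ? link->res : -ECANCELED;

            link->link = NULL;
            complete_req(link, res);
            link = next;
        }
    }
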
184 static inline void io_remove_next_linked(struct io_kiocb *req)
186 struct io_kiocb *nxt = req->link;
188 req->link = nxt->link;
192 void io_disarm_next(struct io_kiocb *req)
193 __must_hold(&req->ctx->completion_lock)
197 if (req->flags & REQ_F_ARM_LTIMEOUT) {
198 link = req->link;
199 req->flags &= ~REQ_F_ARM_LTIMEOUT;
201 io_remove_next_linked(req);
204 } else if (req->flags & REQ_F_LINK_TIMEOUT) {
205 struct io_ring_ctx *ctx = req->ctx;
208 link = io_disarm_linked_timeout(req);
213 if (unlikely((req->flags & REQ_F_FAIL) &&
214 !(req->flags & REQ_F_HARDLINK)))
215 io_fail_links(req);
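
Note: io_disarm_next() (lines 192-215) picks a teardown path from the flags: REQ_F_ARM_LTIMEOUT means a linked timeout was prepared but its timer never armed, so it is spliced out via io_remove_next_linked() and cancelled; REQ_F_LINK_TIMEOUT means the timer is live, so io_disarm_linked_timeout() detaches it under ctx->timeout_lock. Afterwards a failed request that is not a hardlink fails its whole chain. A condensed control-flow sketch with stub helpers (my names, not the kernel's):

    #include <stddef.h>

    enum {
        F_ARM_LTIMEOUT = 1 << 0,   /* linked timeout prepared, timer not armed */
        F_LINK_TIMEOUT = 1 << 1,   /* linked timeout armed and ticking */
        F_FAIL         = 1 << 2,
        F_HARDLINK     = 1 << 3,
    };

    struct request { unsigned flags; struct request *link; };

    static void cancel_one(struct request *r)   { (void)r; }        /* stub */
    static void cancel_chain(struct request *r) { r->link = NULL; } /* stub */

    /* io_remove_next_linked() analogue: splice the next node out of the chain */
    static struct request *detach_next(struct request *req)
    {
        struct request *nxt = req->link;

        if (nxt) {
            req->link = nxt->link;
            nxt->link = NULL;
        }
        return nxt;
    }

    static void disarm_next(struct request *req)
    {
        struct request *timeout = NULL;

        if (req->flags & F_ARM_LTIMEOUT) {
            req->flags &= ~F_ARM_LTIMEOUT;
            timeout = detach_next(req);   /* never armed: just unlink it */
        } else if (req->flags & F_LINK_TIMEOUT) {
            timeout = detach_next(req);   /* kernel holds timeout_lock here */
        }
        if (timeout)
            cancel_one(timeout);

        /* a failed request that is not a hardlink takes its chain with it */
        if ((req->flags & F_FAIL) && !(req->flags & F_HARDLINK))
            cancel_chain(req);
    }
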
218 struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
220 __must_hold(&req->ctx->completion_lock)
221 __must_hold(&req->ctx->timeout_lock)
226 io_remove_next_linked(req);
240 struct io_kiocb *req = data->req;
241 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
242 struct io_ring_ctx *ctx = req->ctx;
247 atomic_set(&req->ctx->cq_timeouts,
248 atomic_read(&req->ctx->cq_timeouts) + 1);
252 req_set_fail(req);
254 io_req_set_res(req, -ETIME, 0);
255 req->io_task_work.func = io_timeout_complete;
256 io_req_task_work_add(req);
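
Note: io_timeout_fn() (lines 240-256) runs in timer context, so it never posts the completion itself: it records -ETIME, points io_task_work.func at io_timeout_complete, and queues the request as task work. A single-threaded userspace analogue of that record-now, complete-later handoff (the kernel's task-work list is lock-free and wakes the owning task):

    #include <errno.h>
    #include <stddef.h>

    struct request;
    typedef void (*tw_func)(struct request *);

    struct request {
        int res;
        tw_func func;
        struct request *tw_next;
    };

    static struct request *tw_head;    /* single-threaded sketch only */

    static void task_work_add(struct request *req)
    {
        req->tw_next = tw_head;
        tw_head = req;
    }

    /* timer-context side: record the result and defer the completion */
    static void timeout_fired(struct request *req, tw_func complete)
    {
        req->res = -ETIME;
        req->func = complete;
        task_work_add(req);
    }

    /* task-context side: drain the list and run each callback */
    static void task_work_run(void)
    {
        while (tw_head) {
            struct request *req = tw_head;

            tw_head = req->tw_next;
            req->func(req);
        }
    }
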
266 struct io_kiocb *req = NULL;
272 req = tmp;
276 if (!req)
279 io = req->async_data;
282 timeout = io_kiocb_to_cmd(req, struct io_timeout);
284 return req;
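
Note: both cancellation paths above go through io_timeout_extract(), which scans the timeout list for the entry matching the cancel data, unlinks it, and hands it back; the kernel version returns ERR_PTR() codes (e.g. -ENOENT on a miss) rather than NULL. The find-and-unlink in miniature:

    #include <stddef.h>

    struct timeout {
        unsigned long long user_data;   /* the match key from the cancel data */
        struct timeout *next;
    };

    /* find, unlink and return the matching entry; NULL when absent */
    static struct timeout *timeout_extract(struct timeout **head,
                                           unsigned long long key)
    {
        for (struct timeout **pp = head; *pp; pp = &(*pp)->next) {
            if ((*pp)->user_data == key) {
                struct timeout *found = *pp;

                *pp = found->next;
                found->next = NULL;
                return found;
            }
        }
        return NULL;
    }
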
290 struct io_kiocb *req;
293 req = io_timeout_extract(ctx, cd);
296 if (IS_ERR(req))
297 return PTR_ERR(req);
298 io_req_task_queue_fail(req, -ECANCELED);
302 static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *ts)
305 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
310 if (!(req->task->flags & PF_EXITING)) {
312 .ctx = req->ctx,
316 ret = io_try_cancel(req->task->io_uring, &cd, issue_flags);
318 io_req_set_res(req, ret ?: -ETIME, 0);
319 io_req_task_complete(req, ts);
322 io_req_set_res(req, -ETIME, 0);
323 io_req_task_complete(req, ts);
331 struct io_kiocb *prev, *req = data->req;
332 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
333 struct io_ring_ctx *ctx = req->ctx;
353 req->io_task_work.func = io_req_task_link_timeout;
354 io_req_task_work_add(req);
380 struct io_kiocb *req = NULL;
386 req = tmp;
390 if (!req)
393 io = req->async_data;
407 struct io_kiocb *req = io_timeout_extract(ctx, &cd);
408 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
411 if (IS_ERR(req))
412 return PTR_ERR(req);
415 data = req->async_data;
423 int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
425 struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
427 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
463 int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
465 struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
466 struct io_ring_ctx *ctx = req->ctx;
487 req_set_fail(req);
488 io_req_set_res(req, ret, 0);
492 static int __io_timeout_prep(struct io_kiocb *req,
496 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
519 if (unlikely(off && !req->ctx->off_timeout_used))
520 req->ctx->off_timeout_used = true;
529 if (WARN_ON_ONCE(req_has_async_data(req)))
531 if (io_alloc_async_data(req))
534 data = req->async_data;
535 data->req = req;
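
Note: __io_timeout_prep() allocates the request's async data at most once (the WARN_ON_ONCE at line 529 guards against a double prep) and stores the data->req back-pointer so the timer callback can get from its io_timeout_data to the owning request. The same once-only allocation plus back-pointer, sketched:

    #include <assert.h>
    #include <stdlib.h>

    struct request;

    struct timeout_data {
        struct request *req;   /* back-pointer: the timer callback starts here */
        /* timer mode, expiry etc. would live here */
    };

    struct request {
        void *async_data;
    };

    static int timeout_prep(struct request *req)
    {
        struct timeout_data *data;

        assert(req->async_data == NULL);   /* WARN_ON_ONCE analogue */
        data = calloc(1, sizeof(*data));
        if (!data)
            return -1;                     /* kernel: -ENOMEM */
        data->req = req;
        req->async_data = data;
        return 0;
    }
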
549 struct io_submit_link *link = &req->ctx->submit_state.link;
561 int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
563 return __io_timeout_prep(req, sqe, false);
566 int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
568 return __io_timeout_prep(req, sqe, true);
571 int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
573 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
574 struct io_ring_ctx *ctx = req->ctx;
575 struct io_timeout_data *data = req->async_data;
586 if (io_is_timeout_noseq(req)) {
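
Note: io_timeout() (lines 571 onward) inserts a zero-offset (noseq) timeout as purely clock-driven; an offset timeout instead gets a target completion sequence computed from the CQ tail minus the timeout CQEs already posted, which is what the cq_timeouts counter above feeds. A sketch of that arithmetic (field names are mine):

    /* sequence-based timeouts: fire after `off` more non-timeout completions */
    struct ring_state {
        unsigned cq_tail;       /* completions posted so far */
        unsigned cq_timeouts;   /* how many of those were timeout CQEs */
    };

    static unsigned timeout_target_seq(const struct ring_state *r, unsigned off)
    {
        /* timeout completions are excluded, matching the cq_timeouts
         * bookkeeping visible at lines 102-103 and 247-248 above */
        return (r->cq_tail - r->cq_timeouts) + off;
    }
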
622 void io_queue_linked_timeout(struct io_kiocb *req)
624 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
625 struct io_ring_ctx *ctx = req->ctx;
633 struct io_timeout_data *data = req->async_data;
642 io_put_req(req);
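
Note: io_queue_linked_timeout() (lines 622-642) arms the timer only while the request it guards is still in flight, then unconditionally drops its own reference (the io_put_req() at line 642), so whichever of completion and expiry comes second frees the pair. A mutex-based sketch of the conditional arming (the kernel checks under ctx->timeout_lock):

    #include <pthread.h>
    #include <stdbool.h>

    struct pair {
        pthread_mutex_t lock;
        bool target_done;    /* set by the guarded request's completion path */
        bool timer_armed;
    };

    /* arm the linked timeout only if its target is still in flight */
    static void queue_linked_timeout(struct pair *p)
    {
        pthread_mutex_lock(&p->lock);
        if (!p->target_done)
            p->timer_armed = true;   /* hrtimer_start() in the kernel */
        pthread_mutex_unlock(&p->lock);
        /* then drop the extra submission reference (io_put_req above) */
    }
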
647 __must_hold(&req->ctx->timeout_lock)
649 struct io_kiocb *req;
656 io_for_each_link(req, head) {
657 if (req->flags & REQ_F_INFLIGHT)
677 struct io_kiocb *req = cmd_to_io_kiocb(timeout);
679 if (io_match_task(req, tsk, cancel_all) &&
680 io_kill_timeout(req, -ECANCELED))
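
Note: the final fragment is the exit-time sweep: for every pending timeout owned by the dying task (io_match_task), io_kill_timeout(req, -ECANCELED) cancels it, and the kernel counts how many it killed. The same filter-and-kill walk, in miniature:

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct timeout {
        int owner;              /* stand-in for the owning task */
        int result;
        bool dead;
        struct timeout *next;
    };

    static bool kill_one(struct timeout *t, int status)
    {
        if (t->dead)
            return false;       /* already fired or cancelled */
        t->dead = true;
        t->result = status;
        return true;
    }

    /* sweep the list, cancelling entries that match the dying owner */
    static int kill_timeouts(struct timeout *list, int owner, bool cancel_all)
    {
        int canceled = 0;

        for (struct timeout *t = list; t; t = t->next)
            if ((cancel_all || t->owner == owner) && kill_one(t, -ECANCELED))
                canceled++;
        return canceled;
    }
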