Lines Matching refs:req
34 struct io_kiocb *req;
71 static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
80 v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
83 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
92 static inline bool io_poll_get_ownership(struct io_kiocb *req)
94 if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
95 return io_poll_get_ownership_slowpath(req);
96 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
99 static void io_poll_mark_cancelled(struct io_kiocb *req)
101 atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
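The matches from io_poll_get_ownership_slowpath() through io_poll_mark_cancelled() above all manipulate req->poll_refs: the low IO_POLL_REF_MASK bits count queued wakeups/references, and the caller whose increment starts from a masked count of zero becomes the owner; cancel and retry state lives in the bits above the mask. Below is a minimal userspace sketch of that ownership pattern using C11 atomics. The constants and names (POLL_REF_MASK, POLL_CANCEL_FLAG, fake_req) are illustrative stand-ins, not the kernel's definitions.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define POLL_REF_MASK    ((1u << 20) - 1)  /* low bits: outstanding references */
#define POLL_CANCEL_FLAG (1u << 31)        /* high bit: cancellation requested */

struct fake_req {
	atomic_uint poll_refs;
};

/* Only the caller whose increment finds the masked count at zero wins
 * ownership; everyone else merely queues a reference for the owner. */
static bool get_ownership(struct fake_req *req)
{
	return !(atomic_fetch_add(&req->poll_refs, 1) & POLL_REF_MASK);
}

static void mark_cancelled(struct fake_req *req)
{
	atomic_fetch_or(&req->poll_refs, POLL_CANCEL_FLAG);
}

int main(void)
{
	struct fake_req req = { .poll_refs = 0 };

	printf("first caller owns:  %d\n", get_ownership(&req));   /* 1 */
	printf("second caller owns: %d\n", get_ownership(&req));   /* 0 */
	mark_cancelled(&req);
	printf("poll_refs now:      0x%x\n", atomic_load(&req.poll_refs));
	return 0;
}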
104 static struct io_poll *io_poll_get_double(struct io_kiocb *req)
107 if (req->opcode == IORING_OP_POLL_ADD)
108 return req->async_data;
109 return req->apoll->double_poll;
112 static struct io_poll *io_poll_get_single(struct io_kiocb *req)
114 if (req->opcode == IORING_OP_POLL_ADD)
115 return io_kiocb_to_cmd(req, struct io_poll);
116 return &req->apoll->poll;
119 static void io_poll_req_insert(struct io_kiocb *req)
121 struct io_hash_table *table = &req->ctx->cancel_table;
122 u32 index = hash_long(req->cqe.user_data, table->hash_bits);
126 hlist_add_head(&req->hash_node, &hb->list);
130 static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
132 struct io_hash_table *table = &req->ctx->cancel_table;
133 u32 index = hash_long(req->cqe.user_data, table->hash_bits);
137 hash_del(&req->hash_node);
141 static void io_poll_req_insert_locked(struct io_kiocb *req)
143 struct io_hash_table *table = &req->ctx->cancel_table_locked;
144 u32 index = hash_long(req->cqe.user_data, table->hash_bits);
146 lockdep_assert_held(&req->ctx->uring_lock);
148 hlist_add_head(&req->hash_node, &table->hbs[index].list);
151 static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts)
153 struct io_ring_ctx *ctx = req->ctx;
155 if (req->flags & REQ_F_HASH_LOCKED) {
163 hash_del(&req->hash_node);
164 req->flags &= ~REQ_F_HASH_LOCKED;
166 io_poll_req_delete(req, ctx);
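io_poll_req_insert(), io_poll_req_delete() and the locked variant above bucket requests by hashing cqe.user_data, so a later cancel-by-user_data only has to walk a single bucket. A rough userspace sketch of that table shape follows, assuming a fixed 4-bit table and a multiplicative hash in the spirit of hash_long(); the structure, constants and helper names are illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

#define HASH_BITS  4
#define NR_BUCKETS (1u << HASH_BITS)

struct fake_req {
	uint64_t user_data;
	struct fake_req *next;          /* bucket chain */
};

static struct fake_req *buckets[NR_BUCKETS];

/* 64-bit multiplicative hash, similar in spirit to hash_long(). */
static unsigned int hash_user_data(uint64_t user_data)
{
	return (unsigned int)((user_data * 0x61c8864680b583ebull) >> (64 - HASH_BITS));
}

static void table_insert(struct fake_req *req)
{
	unsigned int idx = hash_user_data(req->user_data);

	req->next = buckets[idx];
	buckets[idx] = req;
}

static struct fake_req *table_find(uint64_t user_data)
{
	struct fake_req *req = buckets[hash_user_data(user_data)];

	for (; req; req = req->next)
		if (req->user_data == user_data)
			return req;
	return NULL;
}

int main(void)
{
	struct fake_req a = { .user_data = 0xdeadbeef };

	table_insert(&a);
	printf("found by user_data: %d\n", table_find(0xdeadbeef) == &a);  /* 1 */
	return 0;
}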
192 static void io_poll_remove_entries(struct io_kiocb *req)
198 if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
217 if (req->flags & REQ_F_SINGLE_POLL)
218 io_poll_remove_entry(io_poll_get_single(req));
219 if (req->flags & REQ_F_DOUBLE_POLL)
220 io_poll_remove_entry(io_poll_get_double(req));
232 static void __io_poll_execute(struct io_kiocb *req, int mask)
234 io_req_set_res(req, mask, 0);
235 req->io_task_work.func = io_poll_task_func;
237 trace_io_uring_task_add(req, mask);
238 io_req_task_work_add(req);
241 static inline void io_poll_execute(struct io_kiocb *req, int res)
243 if (io_poll_get_ownership(req))
244 __io_poll_execute(req, res);
254 * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot
255 * poll and that the result is stored in req->cqe.
257 static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
261 /* req->task == current here, checking PF_EXITING is safe */
262 if (unlikely(req->task->flags & PF_EXITING))
266 v = atomic_read(&req->poll_refs);
280 req->cqe.res = 0;
283 req->cqe.res = 0;
289 atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
295 if (!req->cqe.res) {
296 struct poll_table_struct pt = { ._key = req->apoll_events };
297 req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
304 if (unlikely(!req->cqe.res)) {
306 if (!(req->apoll_events & EPOLLONESHOT))
311 if (req->apoll_events & EPOLLONESHOT)
315 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
316 __poll_t mask = mangle_poll(req->cqe.res &
317 req->apoll_events);
319 if (!io_fill_cqe_req_aux(req, ts->locked, mask,
321 io_req_set_res(req, mask, 0);
325 int ret = io_poll_issue(req, ts);
335 req->cqe.res = 0;
341 } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
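io_poll_check_events() above (lines 257-341) runs only in the owning task: it snapshots poll_refs, handles the current result, then subtracts the references it consumed and loops if the masked remainder is non-zero, meaning further wakeups arrived while it was working. Below is a hedged userspace sketch of that consume-and-retry loop; C11 has no atomic_sub_return, so it is emulated with atomic_fetch_sub, and all names are illustrative.

#include <stdatomic.h>
#include <stdio.h>

#define POLL_REF_MASK ((1u << 20) - 1)

static atomic_uint poll_refs = 1;   /* the caller already owns one reference */

static void handle_pending_events(void)
{
	/* stand-in for posting a CQE or re-issuing the request */
}

/* C11 lacks an atomic sub_return; fetch_sub returns the old value. */
static unsigned int sub_return(atomic_uint *refs, unsigned int n)
{
	return atomic_fetch_sub(refs, n) - n;
}

static void check_events(void)
{
	unsigned int v;

	do {
		v = atomic_load(&poll_refs);
		handle_pending_events();
		/* drop the references we consumed; a non-zero masked remainder
		 * means new wakeups raced in, so go around again */
	} while (sub_return(&poll_refs, v & POLL_REF_MASK) & POLL_REF_MASK);
}

int main(void)
{
	check_events();
	printf("poll_refs after the loop: %u\n", atomic_load(&poll_refs));  /* 0 */
	return 0;
}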
347 void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
351 ret = io_poll_check_events(req, ts);
355 __io_poll_execute(req, 0);
358 io_poll_remove_entries(req);
359 io_poll_tw_hash_eject(req, ts);
361 if (req->opcode == IORING_OP_POLL_ADD) {
365 poll = io_kiocb_to_cmd(req, struct io_poll);
366 req->cqe.res = mangle_poll(req->cqe.res & poll->events);
368 io_req_task_submit(req, ts);
371 req->cqe.res = ret;
372 req_set_fail(req);
375 io_req_set_res(req, req->cqe.res, 0);
376 io_req_task_complete(req, ts);
378 io_tw_lock(req->ctx, ts);
381 io_req_task_complete(req, ts);
383 io_req_task_submit(req, ts);
385 io_req_defer_failed(req, ret);
389 static void io_poll_cancel_req(struct io_kiocb *req)
391 io_poll_mark_cancelled(req);
393 io_poll_execute(req, 0);
398 static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
400 io_poll_mark_cancelled(req);
402 io_poll_execute(req, 0);
415 * as req->head is NULL'ed out, the request can be
426 struct io_kiocb *req = wqe_to_req(wait);
431 return io_pollfree_wake(req, poll);
437 if (io_poll_get_ownership(req)) {
451 req->flags &= ~REQ_F_DOUBLE_POLL;
453 req->flags &= ~REQ_F_SINGLE_POLL;
455 __io_poll_execute(req, mask);
461 static bool io_poll_double_prepare(struct io_kiocb *req)
464 struct io_poll *poll = io_poll_get_single(req);
470 * poll arm might not hold ownership and so race for req->flags with
477 req->flags |= REQ_F_DOUBLE_POLL;
478 if (req->opcode == IORING_OP_POLL_ADD)
479 req->flags |= REQ_F_ASYNC_DATA;
490 struct io_kiocb *req = pt->req;
491 unsigned long wqe_private = (unsigned long) req;
521 if (!io_poll_double_prepare(req)) {
529 req->flags |= REQ_F_SINGLE_POLL;
546 struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);
549 (struct io_poll **) &pt->req->async_data);
552 static bool io_poll_can_finish_inline(struct io_kiocb *req,
555 return pt->owning || io_poll_get_ownership(req);
558 static void io_poll_add_hash(struct io_kiocb *req)
560 if (req->flags & REQ_F_HASH_LOCKED)
561 io_poll_req_insert_locked(req);
563 io_poll_req_insert(req);
572 static int __io_arm_poll_handler(struct io_kiocb *req,
577 struct io_ring_ctx *ctx = req->ctx;
579 INIT_HLIST_NODE(&req->hash_node);
580 req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
582 poll->file = req->file;
583 req->apoll_events = poll->events;
586 ipt->req = req;
601 atomic_set(&req->poll_refs, (int)ipt->owning);
605 req->flags &= ~REQ_F_HASH_LOCKED;
607 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
610 io_poll_remove_entries(req);
612 if (!io_poll_can_finish_inline(req, ipt)) {
613 io_poll_mark_cancelled(req);
624 if (!io_poll_can_finish_inline(req, ipt)) {
625 io_poll_add_hash(req);
628 io_poll_remove_entries(req);
630 /* no one else has access to the req, forget about the ref */
634 io_poll_add_hash(req);
637 io_poll_can_finish_inline(req, ipt)) {
638 __io_poll_execute(req, mask);
647 if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
648 __io_poll_execute(req, 0);
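__io_arm_poll_handler()'s matches end with the line-647 pattern: the arming path holds the only reference and tries to drop it with a cmpxchg from 1 to 0; if the exchange fails, a wakeup raced in and stacked extra references, so completion must be punted to task work rather than the events being dropped. A small sketch of that handoff, again with illustrative names and a stub in place of the real task-work machinery:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint poll_refs = 1;   /* arm time: we hold the only reference */

static void punt_to_task_work(void)
{
	puts("racing wakeup detected: punt completion to task work");
}

static void finish_arming(void)
{
	unsigned int expected = 1;

	/* succeeds only if nothing took a reference since the poll was armed */
	if (!atomic_compare_exchange_strong(&poll_refs, &expected, 0))
		punt_to_task_work();
}

int main(void)
{
	finish_arming();   /* single-threaded demo: drops the reference cleanly */
	printf("poll_refs: %u\n", atomic_load(&poll_refs));
	return 0;
}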
657 struct async_poll *apoll = pt->req->apoll;
670 static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
673 struct io_ring_ctx *ctx = req->ctx;
677 if (req->flags & REQ_F_POLLED) {
678 apoll = req->apoll;
694 req->apoll = apoll;
700 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
702 const struct io_issue_def *def = &io_issue_defs[req->opcode];
712 req->flags |= REQ_F_HASH_LOCKED;
716 if (!file_can_poll(req->file))
718 if (!(req->flags & REQ_F_APOLL_MULTISHOT))
725 if (req->flags & REQ_F_CLEAR_POLLIN)
733 apoll = io_req_alloc_apoll(req, issue_flags);
736 req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
737 req->flags |= REQ_F_POLLED;
740 io_kbuf_recycle(req, issue_flags);
742 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
745 trace_io_uring_poll_arm(req, mask, apoll->poll.events);
755 struct io_kiocb *req;
763 hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
764 if (io_match_task_safe(req, tsk, cancel_all)) {
765 hlist_del_init(&req->hash_node);
766 io_poll_cancel_req(req);
794 struct io_kiocb *req;
801 hlist_for_each_entry(req, &hb->list, hash_node) {
802 if (cd->data != req->cqe.user_data)
804 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
807 if (cd->seq == req->work.cancel_seq)
809 req->work.cancel_seq = cd->seq;
812 return req;
824 struct io_kiocb *req;
833 hlist_for_each_entry(req, &hb->list, hash_node) {
834 if (io_cancel_req_match(req, cd)) {
836 return req;
844 static int io_poll_disarm(struct io_kiocb *req)
846 if (!req)
848 if (!io_poll_get_ownership(req))
850 io_poll_remove_entries(req);
851 hash_del(&req->hash_node);
859 struct io_kiocb *req;
863 req = io_poll_file_find(ctx, cd, table, &bucket);
865 req = io_poll_find(ctx, false, cd, table, &bucket);
867 if (req)
868 io_poll_cancel_req(req);
871 return req ? 0 : -ENOENT;
906 int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
908 struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
936 int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
938 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
946 if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
953 int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
955 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
965 if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
966 req->flags |= REQ_F_HASH_LOCKED;
968 ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
970 io_req_set_res(req, ipt.result_mask, 0);
976 int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
978 struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
979 struct io_ring_ctx *ctx = req->ctx;
1037 req_set_fail(req);
1041 io_req_set_res(req, ret, 0);