Lines matching refs: ctx (io_uring/io_uring.h)
48 bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
50 int io_run_task_work_sig(struct io_ring_ctx *ctx);
53 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
55 void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
73 struct io_ring_ctx *ctx);
79 int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
80 int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
81 void __io_submit_flush_completions(struct io_ring_ctx *ctx);
90 bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
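io_post_aux_cqe() in the block above posts a CQE that is not the single terminal completion of a request; depending on kernel version it backs features such as msg_ring and multishot completions. The userspace-visible counterpart is a request that keeps producing CQEs flagged IORING_CQE_F_MORE. A minimal liburing sketch, with arm_multishot_accept() a hypothetical helper and listen_fd assumed to be a listening socket:

#include <errno.h>
#include <liburing.h>

/* Hypothetical helper: arm a multishot accept so one CQE is posted per
 * incoming connection, each carrying IORING_CQE_F_MORE while the request
 * stays armed. */
static int arm_multishot_accept(struct io_uring *ring, int listen_fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;	/* SQ ring currently full */
	io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
	sqe->user_data = 1;
	return io_uring_submit(ring);
}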
99 static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
103 if (ctx->flags & IORING_SETUP_IOPOLL) {
104 lockdep_assert_held(&ctx->uring_lock);
105 } else if (!ctx->task_complete) {
106 lockdep_assert_held(&ctx->completion_lock);
107 } else if (ctx->submitter_task) {
114 if (ctx->submitter_task->flags & PF_EXITING)
117 lockdep_assert(current == ctx->submitter_task);
121 static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
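The locking rules in io_lockdep_assert_cq_locked() split by completion mode: IOPOLL rings complete under uring_lock, task_complete rings only from the submitter task, and everything else under completion_lock. The first case corresponds to a ring created with IORING_SETUP_IOPOLL; a minimal sketch, with setup_iopoll_ring() a hypothetical helper (I/O on such a ring must target files opened with O_DIRECT):

#include <liburing.h>

/* Hypothetical helper: create a polled-completion ring; the kernel then
 * reaps completions under ctx->uring_lock, matching the first branch of
 * io_lockdep_assert_cq_locked(). */
static int setup_iopoll_ring(struct io_uring *ring)
{
	return io_uring_queue_init(64, ring, IORING_SETUP_IOPOLL);
}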
134 static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
138 io_lockdep_assert_cq_locked(ctx);
140 if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
141 if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
144 *ret = ctx->cqe_cached;
145 ctx->cached_cq_tail++;
146 ctx->cqe_cached++;
147 if (ctx->flags & IORING_SETUP_CQE32)
148 ctx->cqe_cached++;
152 static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
154 return io_get_cqe_overflow(ctx, ret, false);
157 static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
167 if (unlikely(!io_get_cqe(ctx, &cqe)))
171 trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
176 if (ctx->flags & IORING_SETUP_CQE32) {
209 static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
212 lockdep_assert_held(&ctx->uring_lock);
214 mutex_unlock(&ctx->uring_lock);
217 static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
227 mutex_lock(&ctx->uring_lock);
228 lockdep_assert_held(&ctx->uring_lock);
231 static inline void io_commit_cqring(struct io_ring_ctx *ctx)
234 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
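io_commit_cqring() publishes newly filled CQEs with a store-release on cq.tail; the consumer pairs that with a load-acquire of the tail before reading entries. liburing hides the barriers; a minimal sketch, with drain_cq() a hypothetical helper:

#include <liburing.h>

/* Drain whatever is currently visible in the CQ ring. io_uring_for_each_cqe()
 * load-acquires cq.tail (pairing with the kernel's smp_store_release() above)
 * and io_uring_cq_advance() store-releases the new cq.head back to the kernel. */
static void drain_cq(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	unsigned head, seen = 0;

	io_uring_for_each_cqe(ring, head, cqe) {
		/* consume cqe->user_data / cqe->res here */
		seen++;
	}
	io_uring_cq_advance(ring, seen);
}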
237 static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
239 if (wq_has_sleeper(&ctx->poll_wq))
240 __wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
244 static inline void io_cqring_wake(struct io_ring_ctx *ctx)
256 if (wq_has_sleeper(&ctx->cq_wait))
257 __wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
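io_cqring_wake() wakes sleepers on ctx->cq_wait, which is where a task blocked waiting for completions ends up. From userspace that is simply a blocking reap; a minimal sketch, with wait_one() a hypothetical helper:

#include <liburing.h>

/* Hypothetical helper: block until one completion arrives, copy out its
 * fields, then mark the CQE consumed. io_uring_wait_cqe() sleeps until a
 * wakeup (e.g. via io_cqring_wake()) or a signal interrupts it. */
static int wait_one(struct io_uring *ring, __u64 *user_data)
{
	struct io_uring_cqe *cqe;
	int ret = io_uring_wait_cqe(ring, &cqe);
	int res;

	if (ret < 0)
		return ret;
	res = cqe->res;
	*user_data = cqe->user_data;
	io_uring_cqe_seen(ring, cqe);
	return res;
}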
261 static inline bool io_sqring_full(struct io_ring_ctx *ctx)
263 struct io_rings *r = ctx->rings;
265 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
268 static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
270 struct io_rings *rings = ctx->rings;
274 entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
275 return min(entries, ctx->sq_entries);
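io_sqring_entries() load-acquires sq.tail, pairing with the store-release userspace performs when it publishes new SQEs; io_sqring_full() is the matching full check. With liburing that pairing happens inside io_uring_submit(); a minimal sketch, with submit_nop() a hypothetical helper:

#include <errno.h>
#include <liburing.h>

/* Hypothetical helper: queue a single NOP. io_uring_submit() publishes the
 * SQE with a store-release of sq.tail, which the kernel's load-acquire in
 * io_sqring_entries() observes. */
static int submit_nop(struct io_uring *ring)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;	/* SQ ring full, cf. io_sqring_full() */
	io_uring_prep_nop(sqe);
	sqe->user_data = 0xcafe;
	return io_uring_submit(ring);
}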
305 static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
307 return task_work_pending(current) || !wq_list_empty(&ctx->work_llist);
310 static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
313 mutex_lock(&ctx->uring_lock);
324 __must_hold(&req->ctx->uring_lock)
326 struct io_submit_state *state = &req->ctx->submit_state;
328 lockdep_assert_held(&req->ctx->uring_lock);
333 static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
335 if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
336 ctx->has_evfd || ctx->poll_activated))
337 __io_commit_cqring_flush(ctx);
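One condition that forces the slow-path flush here is ctx->has_evfd, set when an eventfd has been registered for completion notifications. A minimal sketch of doing that from userspace, with attach_eventfd() a hypothetical helper:

#include <errno.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <liburing.h>

/* Hypothetical helper: create an eventfd and register it so completions also
 * signal it (this is what sets ctx->has_evfd). Returns the eventfd on
 * success, a negative error otherwise. */
static int attach_eventfd(struct io_uring *ring)
{
	int efd = eventfd(0, EFD_CLOEXEC);
	int ret;

	if (efd < 0)
		return -errno;
	ret = io_uring_register_eventfd(ring, efd);
	if (ret < 0) {
		close(efd);
		return ret;
	}
	return efd;
}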
349 static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
351 return !ctx->submit_state.free_list.next;
356 static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
360 req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
361 wq_stack_extract(&ctx->submit_state.free_list);
365 static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
367 if (unlikely(io_req_cache_empty(ctx))) {
368 if (!__io_alloc_req_refill(ctx))
371 *req = io_extract_req(ctx);
375 static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
377 return likely(ctx->submitter_task == current);
380 static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
382 return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
383 ctx->submitter_task == current);
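io_allowed_run_tw() encodes the IORING_SETUP_DEFER_TASKRUN contract: deferred task work may only be run by ctx->submitter_task, so completions must be reaped by the task that owns the ring. That mode (which also requires IORING_SETUP_SINGLE_ISSUER) is requested at setup; a minimal sketch, with setup_defer_taskrun() a hypothetical helper:

#include <liburing.h>

/* Hypothetical helper: create a ring whose completion-side task work is
 * deferred and only run by the submitter task, matching
 * io_allowed_defer_tw_run(). */
static int setup_defer_taskrun(struct io_uring *ring)
{
	return io_uring_queue_init(64, ring,
				   IORING_SETUP_SINGLE_ISSUER |
				   IORING_SETUP_DEFER_TASKRUN);
}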
397 static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
399 if (ctx->flags & IORING_SETUP_SQE128)
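uring_sqe_size() doubles the SQE stride when IORING_SETUP_SQE128 is set, just as the IORING_SETUP_CQE32 checks above double the CQE stride. Rings that need the big variants (e.g. for passthrough commands) request them at setup; a minimal sketch, with setup_big_sqe_cqe_ring() a hypothetical helper:

#include <liburing.h>

/* Hypothetical helper: create a ring with 128-byte SQEs and 32-byte CQEs,
 * the layout uring_sqe_size() and the IORING_SETUP_CQE32 branches above
 * account for. */
static int setup_big_sqe_cqe_ring(struct io_uring *ring)
{
	return io_uring_queue_init(64, ring,
				   IORING_SETUP_SQE128 | IORING_SETUP_CQE32);
}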