Lines matching refs: io_ring_ctx (a container_of sketch follows the listing)

144 static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
177 static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
184 static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
189 static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
220 struct io_ring_ctx *ctx = head->ctx;
238 static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
245 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
252 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
283 static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
285 struct io_ring_ctx *ctx;
352 static void io_account_cq_overflow(struct io_ring_ctx *ctx)
363 struct io_ring_ctx *ctx = req->ctx;
447 struct io_ring_ctx *ctx = req->ctx;
483 struct io_ring_ctx *ctx = req->ctx;
522 static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
555 static void io_eventfd_signal(struct io_ring_ctx *ctx)
592 static void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
615 void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
630 static inline void __io_cq_lock(struct io_ring_ctx *ctx)
636 static inline void io_cq_lock(struct io_ring_ctx *ctx)
642 static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
655 static void io_cq_unlock_post(struct io_ring_ctx *ctx)
665 static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
682 static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
713 static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
723 static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
776 static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
829 bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow)
861 static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
889 static void __io_flush_post_cqes(struct io_ring_ctx *ctx)
914 static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
928 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
939 struct io_ring_ctx *ctx = req->ctx;
971 struct io_ring_ctx *ctx = req->ctx;
1026 struct io_ring_ctx *ctx = req->ctx;
1052 static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
1062 static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
1077 __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
1130 struct io_ring_ctx *ctx = req->ctx;
1154 static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
1169 struct io_ring_ctx **ctx,
1220 struct io_ring_ctx *last_ctx = NULL;
1248 struct io_ring_ctx *ctx = NULL;
1274 struct io_ring_ctx *ctx = req->ctx;
1326 struct io_ring_ctx *ctx = req->ctx;
1352 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
1366 static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
1378 static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
1420 static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
1436 static int io_run_local_work(struct io_ring_ctx *ctx, int min_events)
1488 static void io_free_batch_list(struct io_ring_ctx *ctx,
1526 void __io_submit_flush_completions(struct io_ring_ctx *ctx)
1559 static unsigned io_cqring_events(struct io_ring_ctx *ctx)
1570 static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
1594 static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
1682 struct io_ring_ctx *ctx = req->ctx;
1784 struct io_ring_ctx *ctx = req->ctx;
1976 struct io_ring_ctx *ctx = req->ctx;
2083 static inline bool io_check_restriction(struct io_ring_ctx *ctx,
2103 struct io_ring_ctx *ctx = req->ctx;
2120 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
2217 struct io_ring_ctx *ctx = req->ctx;
2251 static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2307 static void io_submit_state_end(struct io_ring_ctx *ctx)
2332 static void io_commit_sqring(struct io_ring_ctx *ctx)
2352 static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe)
2386 int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
2438 struct io_ring_ctx *ctx;
2444 static inline bool io_has_work(struct io_ring_ctx *ctx)
2452 struct io_ring_ctx *ctx = iowq->ctx;
2477 int io_run_task_work_sig(struct io_ring_ctx *ctx)
2501 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
2537 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
2734 static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr,
2741 static void *io_sqes_map(struct io_ring_ctx *ctx, unsigned long uaddr,
2748 static void io_rings_free(struct io_ring_ctx *ctx)
2774 static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries,
2813 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
2851 static int io_eventfd_unregister(struct io_ring_ctx *ctx)
2868 static void io_req_caches_free(struct io_ring_ctx *ctx)
2891 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
2943 struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx,
2958 static __cold void io_activate_pollwq(struct io_ring_ctx *ctx)
2982 struct io_ring_ctx *ctx = file->private_data;
3017 static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
3033 struct io_ring_ctx *ctx;
3062 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
3155 static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
3189 struct io_ring_ctx *ctx = file->private_data;
3209 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
3236 static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
3260 static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
3328 struct io_ring_ctx *ctx;
3412 struct io_ring_ctx *ctx = file->private_data;
3588 struct io_ring_ctx *ctx;
3738 static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
3808 static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
3817 struct io_ring_ctx *ctx;
4078 static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
4118 static int io_register_personality(struct io_ring_ctx *ctx)
4135 static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
4205 static int io_register_enable_rings(struct io_ring_ctx *ctx)
4229 static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
4245 static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
4276 static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
4281 static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
4364 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
4533 struct io_ring_ctx *ctx;
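
Several of the matches above (e.g. lines 245, 252, 2943, 3062) recover the enclosing io_ring_ctx from a pointer to one of its embedded members via container_of. Below is a minimal, self-contained user-space sketch of that idiom; the struct layouts and the callback name are simplified stand-ins for illustration, not the kernel definitions, and the macro omits the type-checking the kernel version adds.

/*
 * Build: cc -o ctx_sketch ctx_sketch.c
 * Demonstrates recovering a struct io_ring_ctx * from a pointer to an
 * embedded member, as the exit_work and refs callbacks in the listing do.
 */
#include <stddef.h>
#include <stdio.h>

/* Core of the kernel's container_of(), without the static type checks. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };	/* stand-in for the kernel type */
struct percpu_ref { int count; };	/* stand-in for the kernel type */

struct io_ring_ctx {			/* heavily reduced stand-in */
	unsigned int flags;
	struct percpu_ref refs;
	struct work_struct exit_work;
};

/* Hypothetical callback: mirrors ctx = container_of(work, struct io_ring_ctx, exit_work). */
static void exit_work_handler(struct work_struct *work)
{
	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);

	printf("recovered ctx, flags=%u\n", ctx->flags);
}

int main(void)
{
	struct io_ring_ctx ctx = { .flags = 42 };

	/* The callback only receives the embedded member; container_of walks back to ctx. */
	exit_work_handler(&ctx.exit_work);
	return 0;
}

The same pattern explains why those callbacks take a work_struct or percpu_ref argument rather than the context itself: the generic infrastructure hands back only the embedded member, and the callee reconstructs the io_ring_ctx from it.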