Lines matching defs:int — int definitions in io_uring/io_uring.c (each entry is prefixed with its source line number in that file)
152 static int __read_mostly sysctl_io_uring_disabled;
153 static int __read_mostly sysctl_io_uring_group = -1;
184 static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
189 static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
232 static inline void req_fail_link_node(struct io_kiocb *req, int res)
269 static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
286 int hash_bits;
540 int ops = atomic_xchg(&ev_fd->ops, 0);
757 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
767 unsigned int refs = tctx->cached_refs;
832 unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
833 unsigned int free, queued, len;
893 unsigned int i;
1082 int ret, i;
1168 static unsigned int handle_tw_list(struct llist_node *node,
1172 unsigned int count = 0;
1252 unsigned int count = 0;
1366 static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
1367 int min_events)
1378 static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
1379 int min_events)
1382 unsigned int loops = 0;
1383 int ret = 0;
1420 static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
1421 int min_events)
1424 int ret;
1436 static int io_run_local_work(struct io_ring_ctx *ctx, int min_events)
1439 int ret;
1467 void io_req_task_queue_fail(struct io_kiocb *req, int ret)
1594 static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
1596 unsigned int nr_events = 0;
1622 int ret = 0;
1680 static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
1729 unsigned int io_file_get_flags(struct file *file)
1731 unsigned int res = 0;
1751 int io_req_prep_async(struct io_kiocb *req)
1786 int ret;
1823 unsigned int issue_flags)
1836 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
1840 int ret;
1878 int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts)
1902 unsigned int issue_flags = IO_URING_F_UNLOCKED | IO_URING_F_IOWQ;
1904 int ret = 0, err = -ECANCELED;
1973 inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
1974 unsigned int issue_flags)
1982 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
1994 struct file *io_file_get_normal(struct io_kiocb *req, int fd)
2006 static void io_queue_async(struct io_kiocb *req, int ret)
2038 int ret;
2064 int ret = io_req_prep_async(req);
2085 unsigned int sqe_flags)
2120 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
2125 unsigned int sqe_flags;
2126 int personality;
2197 int ret;
2214 static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
2215 struct io_kiocb *req, int ret)
2251 static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2256 int ret;
2323 unsigned int max_ios)
2386 int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
2389 unsigned int entries = io_sqring_entries(ctx);
2390 unsigned int left;
2391 int ret;
2453 int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
2463 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
2464 int wake_flags, void *key)
2477 int io_run_task_work_sig(struct io_ring_ctx *ctx)
2501 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
2504 int ret;
2537 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
2543 int ret;
2586 int nr_wait = (int) iowq.cq_tail - READ_ONCE(ctx->rings->cq.tail);
2655 static void io_pages_free(struct page ***pages, int npages)
2658 int i;
2677 unsigned int nr_pages;
2679 int ret, i, pinned;
2774 static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries,
2775 unsigned int cq_entries, size_t *sq_offset)
2813 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
2814 unsigned int eventfd_async)
2818 int fd;
2834 int ret = PTR_ERR(ev_fd->cq_ev_fd);
2851 static int io_eventfd_unregister(struct io_ring_ctx *ctx)
2871 int nr = 0;
3017 static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
3067 int ret;
3187 static int io_uring_release(struct inode *inode, struct file *file)
3432 unsigned int bgid;
3455 static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3514 static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3519 static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
3539 static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz)
3552 static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
3584 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3668 int ret2;
3738 static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
3792 static int io_uring_install_fd(struct file *file)
3794 int fd;
3814 static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
3820 int ret;
4028 int i;
4053 int disabled = READ_ONCE(sysctl_io_uring_disabled);
4078 static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
4083 int i, ret;
4118 static int io_register_personality(struct io_ring_ctx *ctx)
4122 int ret;
4135 static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
4136 void __user *arg, unsigned int nr_args)
4140 int i, ret;
4205 static int io_register_enable_rings(struct io_ring_ctx *ctx)
4229 static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
4232 int ret;
4245 static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
4249 int ret;
4276 static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
4281 static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
4289 int i, ret;
4364 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
4369 int ret;
4530 SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
4531 void __user *, arg, unsigned int, nr_args)
4578 static int __init io_uring_init(void)
4602 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
4648 BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));
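
The sysctl pair at source lines 152-153 (sysctl_io_uring_disabled, sysctl_io_uring_group) gates ring creation; the value read via READ_ONCE() near line 4053 decides whether io_uring_setup() may proceed. As a hedged illustration of the policy these two ints encode, the standalone userspace sketch below reads the corresponding /proc/sys/kernel files (present since Linux 6.6, per Documentation/admin-guide/sysctl/kernel.rst) and reports the effective rule. The helper read_sysctl_int() is invented for this example, not part of the kernel source above.

#include <stdio.h>

/* Hypothetical helper: parse a single int from a procfs sysctl file. */
static int read_sysctl_int(const char *path, int *val)
{
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (fscanf(f, "%d", val) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);
	return 0;
}

int main(void)
{
	int disabled = 0, group = -1;

	if (read_sysctl_int("/proc/sys/kernel/io_uring_disabled", &disabled)) {
		puts("sysctl absent: kernel predates 6.6, ring creation is not gated");
		return 0;
	}
	read_sysctl_int("/proc/sys/kernel/io_uring_group", &group);

	switch (disabled) {
	case 0:
		puts("io_uring_setup() allowed for all processes");
		break;
	case 1:
		printf("restricted: CAP_SYS_ADMIN or membership in gid %d required\n",
		       group);
		break;
	default:	/* 2 */
		puts("creation of new io_uring instances disabled (EPERM)");
	}
	return 0;
}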
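
The two syscall entry points in the listing, SYSCALL_DEFINE6(io_uring_enter, ...) at line 3584 and SYSCALL_DEFINE4(io_uring_register, ...) at line 4530, together with the creation path through io_uring_create() (line 3814) and io_uring_install_fd() (line 3792), can be exercised directly from userspace. A minimal sketch, assuming a kernel with io_uring and the uapi <linux/io_uring.h> header, with error handling reduced to perror():

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct io_uring_params p;
	long fd, ret;

	memset(&p, 0, sizeof(p));
	/* io_uring_setup() -> io_uring_create() (line 3814) installs the ring fd */
	fd = syscall(__NR_io_uring_setup, 8, &p);
	if (fd < 0) {
		perror("io_uring_setup");	/* EPERM if io_uring_disabled forbids it */
		return 1;
	}
	/* io_uring_enter() (line 3584) with nothing to submit or wait for */
	ret = syscall(__NR_io_uring_enter, fd, 0, 0, 0, NULL, 0);
	if (ret < 0)
		perror("io_uring_enter");
	close(fd);
	return 0;
}

io_uring_register() (line 4530) takes the same ring fd and follows the same raw-syscall pattern; it is omitted here because every register opcode needs opcode-specific arguments.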