/kernel/linux/linux-6.6/io_uring/
net.h
    35   int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
    40   int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);
    42   int io_send(struct io_kiocb *req, unsigned int issue_flags);
    47   int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags);
    48   int io_recv(struct io_kiocb *req, unsigned int issue_flags);
    53   int io_accept(struct io_kiocb *req, unsigned int issue_flags);
    56   int io_socket(struct io_kiocb *req, unsigned int issue_flags);
    60   int io_connect(struct io_kiocb *req, unsigned int issue_flags);
    62   int io_send_zc(struct io_kiocb *req, unsigned int issue_flags);
    63   int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags);
    [all...]
net.c
    82   unsigned int issue_flags) in io_check_multishot()
    89   return !(issue_flags & IO_URING_F_IOWQ) || in io_check_multishot()
    107  int io_shutdown(struct io_kiocb *req, unsigned int issue_flags) in io_shutdown() argument
    113  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_shutdown()
    131  static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags) in io_netmsg_recycle() argument
    135  if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED) in io_netmsg_recycle()
    146  unsigned int issue_flags) in io_msg_alloc_async()
    152  if (!(issue_flags & IO_URING_F_UNLOCKED)) { in io_msg_alloc_async()
    179  unsigned int issue_flags) in io_setup_async_msg()
    185  async_msg = io_msg_alloc_async(req, issue_flags); in io_setup_async_msg()
    81   io_check_multishot(struct io_kiocb *req, unsigned int issue_flags) io_check_multishot() argument
    145  io_msg_alloc_async(struct io_kiocb *req, unsigned int issue_flags) io_msg_alloc_async() argument
    177  io_setup_async_msg(struct io_kiocb *req, struct io_async_msghdr *kmsg, unsigned int issue_flags) io_setup_async_msg() argument
    336  io_setup_async_addr(struct io_kiocb *req, struct sockaddr_storage *addr_storage, unsigned int issue_flags) io_setup_async_addr() argument
    401  io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) io_sendmsg() argument
    463  io_send(struct io_kiocb *req, unsigned int issue_flags) io_send() argument
    675  io_recv_finish(struct io_kiocb *req, int *ret, struct msghdr *msg, bool mshot_finished, unsigned issue_flags) io_recv_finish() argument
    813  io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) io_recvmsg() argument
    924  io_recv(struct io_kiocb *req, unsigned int issue_flags) io_recv() argument
    1160 io_send_zc(struct io_kiocb *req, unsigned int issue_flags) io_send_zc() argument
    1259 io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags) io_sendmsg_zc() argument
    1380 io_accept(struct io_kiocb *req, unsigned int issue_flags) io_accept() argument
    1460 io_socket(struct io_kiocb *req, unsigned int issue_flags) io_socket() argument
    1514 io_connect(struct io_kiocb *req, unsigned int issue_flags) io_connect() argument
    [all...]
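The net.c hits above illustrate the flag most handlers consult first: IO_URING_F_NONBLOCK, set when a request is issued inline from the submission path. A handler that cannot make progress without blocking returns -EAGAIN so the core can re-issue it from an io-wq worker. The snippet below is a minimal userspace model of that contract, not the kernel's code; the flag value and the handle_send() helper are invented for illustration.

/* Minimal userspace model of the io_uring "try nonblocking, else punt"
 * contract that the net.c handlers above follow. All names and flag
 * values here are illustrative, not the kernel's. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define ISSUE_F_NONBLOCK (1u << 0)   /* inline submission: must not sleep */

/* Stand-in for one opcode handler such as io_send(). */
static int handle_send(bool socket_has_space, unsigned int issue_flags)
{
	if (!socket_has_space) {
		/* Inline path: report -EAGAIN so the core re-issues the
		 * request from a worker thread where blocking is fine. */
		if (issue_flags & ISSUE_F_NONBLOCK)
			return -EAGAIN;
		/* Worker path: waiting here would be allowed. */
	}
	return 0; /* bytes sent would be reported via the CQE in the kernel */
}

int main(void)
{
	/* First attempt is inline and nonblocking... */
	int ret = handle_send(false, ISSUE_F_NONBLOCK);

	if (ret == -EAGAIN) {
		/* ...so the core would punt and retry without NONBLOCK set. */
		ret = handle_send(false, 0);
	}
	printf("final ret = %d\n", ret);
	return 0;
}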
kbuf.h
    45   unsigned int issue_flags);
    49   int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);
    52   int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);
    59   unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
    61   void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
    99   static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags) in io_kbuf_recycle() argument
    102  io_kbuf_recycle_legacy(req, issue_flags); in io_kbuf_recycle()
    137  unsigned issue_flags) in io_put_kbuf()
    142  return __io_put_kbuf(req, issue_flags); in io_put_kbuf()
    136  io_put_kbuf(struct io_kiocb *req, unsigned issue_flags) io_put_kbuf() argument
openclose.c
    109  int io_openat2(struct io_kiocb *req, unsigned int issue_flags) in io_openat2() argument
    123  if (issue_flags & IO_URING_F_NONBLOCK) { in io_openat2()
    148  (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK))) in io_openat2()
    153  if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set) in io_openat2()
    159  ret = io_fixed_fd_install(req, issue_flags, file, in io_openat2()
    170  int io_openat(struct io_kiocb *req, unsigned int issue_flags) in io_openat() argument
    172  return io_openat2(req, issue_flags); in io_openat()
    183  int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags, in __io_close_fixed() argument
    188  io_ring_submit_lock(ctx, issue_flags); in __io_close_fixed()
    190  io_ring_submit_unlock(ctx, issue_flags); in __io_close_fixed()
    195  io_close_fixed(struct io_kiocb *req, unsigned int issue_flags) io_close_fixed() argument
    219  io_close(struct io_kiocb *req, unsigned int issue_flags) io_close() argument
    [all...]
msg_ring.c
    42   unsigned int issue_flags) in io_double_lock_ctx()
    49   if (!(issue_flags & IO_URING_F_UNLOCKED)) { in io_double_lock_ctx()
    126  static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags) in io_msg_ring_data() argument
    148  if (unlikely(io_double_lock_ctx(target_ctx, issue_flags))) in io_msg_ring_data()
    160  static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags) in io_msg_grab_file() argument
    167  io_ring_submit_lock(ctx, issue_flags); in io_msg_grab_file()
    174  io_ring_submit_unlock(ctx, issue_flags); in io_msg_grab_file()
    178  static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags) in io_msg_install_complete() argument
    185  if (unlikely(io_double_lock_ctx(target_ctx, issue_flags))) in io_msg_install_complete()
    223  static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags) in io_msg_send_fd() argument
    41   io_double_lock_ctx(struct io_ring_ctx *octx, unsigned int issue_flags) io_double_lock_ctx() argument
    269  io_msg_ring(struct io_kiocb *req, unsigned int issue_flags) io_msg_ring() argument
    [all...]
uring_cmd.c
    19   unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED; in io_uring_cmd_work() local
    21   ioucmd->task_work_cb(ioucmd, issue_flags); in io_uring_cmd_work()
    55   unsigned issue_flags) in io_uring_cmd_done()
    70   .locked = !(issue_flags & IO_URING_F_UNLOCKED), in io_uring_cmd_done()
    113  int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags) in io_uring_cmd() argument
    128  issue_flags |= IO_URING_F_SQE128; in io_uring_cmd()
    130  issue_flags |= IO_URING_F_CQE32; in io_uring_cmd()
    134  issue_flags |= IO_URING_F_IOPOLL; in io_uring_cmd()
    139  ret = file->f_op->uring_cmd(ioucmd, issue_flags); in io_uring_cmd()
    168  int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags) in io_uring_cmd_sock() argument
    54   io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2, unsigned issue_flags) io_uring_cmd_done() argument
    [all...]
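The uring_cmd.c hits suggest how issue_flags also carries ring-wide properties down to drivers: io_uring_cmd() ORs in SQE128/CQE32/IOPOLL bits before calling file->f_op->uring_cmd(), so the driver sees both "how may I block" and "what ring layout am I on" in one word. Below is a hedged userspace sketch of that widening step; the flag values and function names are made up for the example.

/* Userspace model of how uring_cmd.c widens issue_flags with ring
 * properties before handing off to the file's ->uring_cmd() hook.
 * Flag values and the ring_flags parameter are illustrative only. */
#include <stdio.h>

#define RING_SETUP_SQE128  (1u << 0)
#define RING_SETUP_CQE32   (1u << 1)
#define RING_SETUP_IOPOLL  (1u << 2)

#define ISSUE_F_SQE128     (1u << 8)
#define ISSUE_F_CQE32      (1u << 9)
#define ISSUE_F_IOPOLL     (1u << 10)

static int driver_uring_cmd(unsigned int issue_flags)
{
	/* A driver can key off the same flags the core passed down. */
	if (issue_flags & ISSUE_F_IOPOLL)
		printf("driver: completion will be polled\n");
	return 0;
}

static int issue_uring_cmd(unsigned int ring_flags, unsigned int issue_flags)
{
	/* Mirror of the "issue_flags |= IO_URING_F_*" hits above. */
	if (ring_flags & RING_SETUP_SQE128)
		issue_flags |= ISSUE_F_SQE128;
	if (ring_flags & RING_SETUP_CQE32)
		issue_flags |= ISSUE_F_CQE32;
	if (ring_flags & RING_SETUP_IOPOLL)
		issue_flags |= ISSUE_F_IOPOLL;
	return driver_uring_cmd(issue_flags);
}

int main(void)
{
	return issue_uring_cmd(RING_SETUP_SQE128 | RING_SETUP_IOPOLL, 0);
}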
poll.h
    28  int io_poll_add(struct io_kiocb *req, unsigned int issue_flags);
    31  int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags);
    35  unsigned issue_flags);
    36  int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags);
xattr.c
    108  int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags) in io_fgetxattr() argument
    113  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_fgetxattr()
    123  int io_getxattr(struct io_kiocb *req, unsigned int issue_flags) in io_getxattr() argument
    130  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_getxattr()
    206  static int __io_setxattr(struct io_kiocb *req, unsigned int issue_flags, in __io_setxattr() argument
    221  int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags) in io_fsetxattr() argument
    225  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_fsetxattr()
    227  ret = __io_setxattr(req, issue_flags, &req->file->f_path); in io_fsetxattr()
    232  int io_setxattr(struct io_kiocb *req, unsigned int issue_flags) in io_setxattr() argument
    239  WARN_ON_ONCE(issue_flags in io_setxattr()
    [all...]
sync.c
    40   int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags) in io_sync_file_range() argument
    46   WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_sync_file_range()
    70   int io_fsync(struct io_kiocb *req, unsigned int issue_flags) in io_fsync() argument
    77   WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_fsync()
    99   int io_fallocate(struct io_kiocb *req, unsigned int issue_flags) in io_fallocate() argument
    105  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_fallocate()
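The WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK) hits in sync.c (and in the xattr.c entry above and the fs.c, splice.c and advise.c entries below) mark operations that have no nonblocking variant: their prep is expected to force async execution, so the handler should only ever run from a worker and the WARN is a sanity check. A small self-contained model of that invariant, assuming a hypothetical force_async field in place of the kernel's request flags:

/* Model of the "no nonblocking variant" ops seen in sync.c: prep forces
 * the request to a worker, so the handler must never see the NONBLOCK
 * flag; the WARN_ON_ONCE() hits above assert exactly that. Names and
 * the force_async field are illustrative, not the kernel's. */
#include <assert.h>
#include <stdio.h>

#define ISSUE_F_NONBLOCK (1u << 0)

struct request { int force_async; };

static void prep_fsync(struct request *req)
{
	req->force_async = 1;		/* analogous to forcing async in prep */
}

static int handle_fsync(struct request *req, unsigned int issue_flags)
{
	(void)req;
	/* counterpart of WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK) */
	assert(!(issue_flags & ISSUE_F_NONBLOCK));
	puts("fsync executed from a worker, blocking is fine");
	return 0;
}

int main(void)
{
	struct request req = { 0 };

	prep_fsync(&req);
	/* because force_async is set, the core issues without NONBLOCK */
	return handle_fsync(&req, 0);
}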
fs.c
    81   int io_renameat(struct io_kiocb *req, unsigned int issue_flags) in io_renameat() argument
    86   WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_renameat()
    130  int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags) in io_unlinkat() argument
    135  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_unlinkat()
    177  int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags) in io_mkdirat() argument
    182  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_mkdirat()
    227  int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags) in io_symlinkat() argument
    232  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_symlinkat()
    272  int io_linkat(struct io_kiocb *req, unsigned int issue_flags) in io_linkat() argument
    277  WARN_ON_ONCE(issue_flags in io_linkat()
    [all...]
kbuf.c
    76   void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags) in io_kbuf_recycle_legacy() argument
    91   io_ring_submit_lock(ctx, issue_flags); in io_kbuf_recycle_legacy()
    99   io_ring_submit_unlock(ctx, issue_flags); in io_kbuf_recycle_legacy()
    103  unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags) in __io_put_kbuf() argument
    122  } else if (issue_flags & IO_URING_F_UNLOCKED) { in __io_put_kbuf()
    156  unsigned int issue_flags) in io_ring_buffer_select()
    181  if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) { in io_ring_buffer_select()
    199  unsigned int issue_flags) in io_buffer_select()
    205  io_ring_submit_lock(req->ctx, issue_flags); in io_buffer_select()
    210  ret = io_ring_buffer_select(req, len, bl, issue_flags); in io_buffer_select()
    154  io_ring_buffer_select(struct io_kiocb *req, size_t *len, struct io_buffer_list *bl, unsigned int issue_flags) io_ring_buffer_select() argument
    198  io_buffer_select(struct io_kiocb *req, size_t *len, unsigned int issue_flags) io_buffer_select() argument
    350  io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags) io_remove_buffers() argument
    479  io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) io_provide_buffers() argument
    [all...]
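The kbuf.c hits show the provided-buffer helpers bracketing list access with io_ring_submit_lock()/io_ring_submit_unlock(), both gated on IO_URING_F_UNLOCKED, and recycling a buffer when the request has to be retried (io_kbuf_recycle() is also visible from poll.c below). A rough userspace model of that select/recycle lifecycle, using a pthread mutex as a stand-in for the ring lock and an integer counter for the buffer list; nothing here is the kernel's implementation.

/* Model of the kbuf.c pattern: select a provided buffer under the lock
 * only when the caller runs unlocked, and give it back on retry. */
#include <pthread.h>
#include <stdio.h>

#define ISSUE_F_UNLOCKED (1u << 1)

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static int free_buffers = 4;	/* stand-in for the provided-buffer list */

static int buffer_select(unsigned int issue_flags)
{
	int id = -1;

	if (issue_flags & ISSUE_F_UNLOCKED)
		pthread_mutex_lock(&ring_lock);
	if (free_buffers > 0)
		id = --free_buffers;	/* "pick" a buffer */
	if (issue_flags & ISSUE_F_UNLOCKED)
		pthread_mutex_unlock(&ring_lock);
	return id;
}

static void kbuf_recycle(int id, unsigned int issue_flags)
{
	(void)id;
	if (issue_flags & ISSUE_F_UNLOCKED)
		pthread_mutex_lock(&ring_lock);
	free_buffers++;			/* give the buffer back */
	if (issue_flags & ISSUE_F_UNLOCKED)
		pthread_mutex_unlock(&ring_lock);
}

int main(void)
{
	int id = buffer_select(ISSUE_F_UNLOCKED);

	printf("selected buffer %d\n", id);
	kbuf_recycle(id, ISSUE_F_UNLOCKED);	/* e.g. after -EAGAIN */
	return 0;
}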
splice.c
    48   int io_tee(struct io_kiocb *req, unsigned int issue_flags) in io_tee() argument
    56   WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_tee()
    59   in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags); in io_tee()
    88   int io_splice(struct io_kiocb *req, unsigned int issue_flags) in io_splice() argument
    97   WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_splice()
    100  in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags); in io_splice()
cancel.c
    103  unsigned issue_flags) in io_try_cancel()
    118  ret = io_poll_cancel(ctx, cd, issue_flags); in io_try_cancel()
    158  unsigned int issue_flags) in __io_async_cancel()
    166  ret = io_try_cancel(tctx, cd, issue_flags); in __io_async_cancel()
    175  io_ring_submit_lock(ctx, issue_flags); in __io_async_cancel()
    187  io_ring_submit_unlock(ctx, issue_flags); in __io_async_cancel()
    191  int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) in io_async_cancel() argument
    209  issue_flags); in io_async_cancel()
    220  ret = __io_async_cancel(&cd, tctx, issue_flags); in io_async_cancel()
    102  io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd, unsigned issue_flags) io_try_cancel() argument
    156  __io_async_cancel(struct io_cancel_data *cd, struct io_uring_task *tctx, unsigned int issue_flags) __io_async_cancel() argument
poll.c
    575  unsigned issue_flags) in __io_arm_poll_handler()
    600  ipt->owning = issue_flags & IO_URING_F_UNLOCKED; in __io_arm_poll_handler()
    604  if (issue_flags & IO_URING_F_UNLOCKED) in __io_arm_poll_handler()
    671  unsigned issue_flags) in io_req_alloc_apoll()
    680  } else if (!(issue_flags & IO_URING_F_UNLOCKED)) { in io_req_alloc_apoll()
    700  int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags) in io_arm_poll_handler() argument
    733  apoll = io_req_alloc_apoll(req, issue_flags); in io_arm_poll_handler()
    740  io_kbuf_recycle(req, issue_flags); in io_arm_poll_handler()
    742  ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags); in io_arm_poll_handler()
    875  unsigned issue_flags) in io_poll_cancel()
    572  __io_arm_poll_handler(struct io_kiocb *req, struct io_poll *poll, struct io_poll_table *ipt, __poll_t mask, unsigned issue_flags) __io_arm_poll_handler() argument
    670  io_req_alloc_apoll(struct io_kiocb *req, unsigned issue_flags) io_req_alloc_apoll() argument
    874  io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd, unsigned issue_flags) io_poll_cancel() argument
    953  io_poll_add(struct io_kiocb *req, unsigned int issue_flags) io_poll_add() argument
    976  io_poll_remove(struct io_kiocb *req, unsigned int issue_flags) io_poll_remove() argument
    [all...]
advise.c
    49  int io_madvise(struct io_kiocb *req, unsigned int issue_flags) in io_madvise() argument
    55  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_madvise()
    92  int io_fadvise(struct io_kiocb *req, unsigned int issue_flags) in io_fadvise() argument
    97  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK && io_fadvise_force_async(fa)); in io_fadvise()
rsrc.h
    107  unsigned int issue_flags) in io_req_set_rsrc_node()
    110  io_ring_submit_lock(ctx, issue_flags); in io_req_set_rsrc_node()
    116  io_ring_submit_unlock(ctx, issue_flags); in io_req_set_rsrc_node()
    134  int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
    105  io_req_set_rsrc_node(struct io_kiocb *req, struct io_ring_ctx *ctx, unsigned int issue_flags) io_req_set_rsrc_node() argument
io_uring.h
    52   void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
    61   unsigned issue_flags);
    210  unsigned issue_flags) in io_ring_submit_unlock()
    213  if (issue_flags & IO_URING_F_UNLOCKED) in io_ring_submit_unlock()
    218  unsigned issue_flags) in io_ring_submit_lock()
    226  if (issue_flags & IO_URING_F_UNLOCKED) in io_ring_submit_lock()
    209  io_ring_submit_unlock(struct io_ring_ctx *ctx, unsigned issue_flags) io_ring_submit_unlock() argument
    217  io_ring_submit_lock(struct io_ring_ctx *ctx, unsigned issue_flags) io_ring_submit_lock() argument
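The io_uring.h hits are the locking helpers most of the files above rely on: inline submission already holds the ring mutex, while io-wq and task-work callers pass IO_URING_F_UNLOCKED so the helpers take and drop it for them. The compilable stand-in below models only that gate; the flag value, struct layout and function names are invented for the example.

/* Userspace model of the conditional submit-lock pattern suggested by
 * the io_ring_submit_lock()/io_ring_submit_unlock() hits above. */
#include <pthread.h>
#include <stdio.h>

#define ISSUE_F_UNLOCKED (1u << 1)	/* caller does NOT hold the lock */

struct ring_ctx { pthread_mutex_t uring_lock; };

static void ring_submit_lock(struct ring_ctx *ctx, unsigned int issue_flags)
{
	if (issue_flags & ISSUE_F_UNLOCKED)
		pthread_mutex_lock(&ctx->uring_lock);
	/* either way, the lock is held from here on */
}

static void ring_submit_unlock(struct ring_ctx *ctx, unsigned int issue_flags)
{
	if (issue_flags & ISSUE_F_UNLOCKED)
		pthread_mutex_unlock(&ctx->uring_lock);
}

int main(void)
{
	struct ring_ctx ctx = { .uring_lock = PTHREAD_MUTEX_INITIALIZER };

	/* Worker-style caller: flag set, helpers take and drop the mutex. */
	ring_submit_lock(&ctx, ISSUE_F_UNLOCKED);
	puts("registered resource touched under the lock");
	ring_submit_unlock(&ctx, ISSUE_F_UNLOCKED);
	return 0;
}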
cancel.h
    17  int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags);
    20  unsigned int issue_flags);
rw.h
    19  int io_read(struct io_kiocb *req, unsigned int issue_flags);
    21  int io_write(struct io_kiocb *req, unsigned int issue_flags);
rw.c
    296  unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED; in io_req_rw_complete() local
    298  req->cqe.flags |= io_put_kbuf(req, issue_flags); in io_req_rw_complete()
    337  unsigned int issue_flags) in kiocb_done()
    352  io_put_kbuf(req, issue_flags)); in kiocb_done()
    371  unsigned int issue_flags) in __io_import_iovec()
    394  buf = io_buffer_select(req, &sqe_len, issue_flags); in __io_import_iovec()
    417  unsigned int issue_flags) in io_import_iovec()
    419  *iovec = __io_import_iovec(rw, req, s, issue_flags); in io_import_iovec()
    715  int io_read(struct io_kiocb *req, unsigned int issue_flags) in io_read() argument
    721  bool force_nonblock = issue_flags in io_read()
    336  kiocb_done(struct io_kiocb *req, ssize_t ret, unsigned int issue_flags) kiocb_done() argument
    369  __io_import_iovec(int ddir, struct io_kiocb *req, struct io_rw_state *s, unsigned int issue_flags) __io_import_iovec() argument
    415  io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, struct io_rw_state *s, unsigned int issue_flags) io_import_iovec() argument
    863  io_write(struct io_kiocb *req, unsigned int issue_flags) io_write() argument
    [all...]
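Two rw.c patterns are visible above: io_read() collapses the NONBLOCK bit into a local force_nonblock bool, and io_req_rw_complete() reconstructs issue_flags from whether the task-work ran with the ring lock held (ts->locked) before handing them to io_put_kbuf(). The sketch below models both in userspace; flag values and helper names are illustrative, not the kernel's.

/* Model of deriving a local nonblock bool on the issue side and
 * rebuilding issue_flags on the completion side. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define ISSUE_F_NONBLOCK (1u << 0)
#define ISSUE_F_UNLOCKED (1u << 1)

static int do_read(unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & ISSUE_F_NONBLOCK;

	printf("read issued, force_nonblock=%d\n", force_nonblock);
	return force_nonblock ? -EAGAIN : 0;
}

static void rw_complete(bool ring_lock_held)
{
	/* mirrors: issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED */
	unsigned int issue_flags = ring_lock_held ? 0 : ISSUE_F_UNLOCKED;

	printf("completing with issue_flags=%#x\n", issue_flags);
	/* ...the buffer put (io_put_kbuf in the kernel) would use them here */
}

int main(void)
{
	if (do_read(ISSUE_F_NONBLOCK) == -EAGAIN)
		do_read(0);		/* retried from a worker */
	rw_complete(false);
	return 0;
}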
timeout.h
    34  int io_timeout(struct io_kiocb *req, unsigned int issue_flags);
    36  int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags);
filetable.c
    120  int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags, in io_fixed_fd_install() argument
    126  io_ring_submit_lock(ctx, issue_flags); in io_fixed_fd_install()
    128  io_ring_submit_unlock(ctx, issue_flags); in io_fixed_fd_install()
/kernel/linux/linux-6.6/include/linux/
io_uring.h
    48   unsigned issue_flags);
    83   int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
    92   ssize_t ret2, unsigned issue_flags) in io_uring_cmd_done()
    117  unsigned int issue_flags) in io_uring_cmd_sock()
    91   io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t ret2, unsigned issue_flags) io_uring_cmd_done() argument
    116  io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags) io_uring_cmd_sock() argument
/kernel/linux/linux-6.6/drivers/nvme/host/
ioctl.c
    472  unsigned issue_flags) in nvme_uring_task_meta_cb()
    493  io_uring_cmd_done(ioucmd, status, result, issue_flags); in nvme_uring_task_meta_cb()
    497  unsigned issue_flags) in nvme_uring_task_cb()
    504  io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags); in nvme_uring_task_cb()
    561  struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec) in nvme_uring_cmd_io()
    604  if (issue_flags & IO_URING_F_NONBLOCK) { in nvme_uring_cmd_io()
    608  if (issue_flags & IO_URING_F_IOPOLL) in nvme_uring_cmd_io()
    742  static int nvme_uring_cmd_checks(unsigned int issue_flags) in nvme_uring_cmd_checks() argument
    746  if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) != in nvme_uring_cmd_checks()
    753  unsigned int issue_flags) in nvme_ns_uring_cmd()
    471  nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd, unsigned issue_flags) nvme_uring_task_meta_cb() argument
    496  nvme_uring_task_cb(struct io_uring_cmd *ioucmd, unsigned issue_flags) nvme_uring_task_cb() argument
    560  nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns, struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec) nvme_uring_cmd_io() argument
    752  nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd, unsigned int issue_flags) nvme_ns_uring_cmd() argument
    778  nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags) nvme_ns_chr_uring_cmd() argument
    877  nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags) nvme_ns_head_chr_uring_cmd() argument
    893  nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags) nvme_dev_uring_cmd() argument
    [all...]
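On the driver side, the nvme hits show a ->uring_cmd() implementation validating the ring layout (SQE128/CQE32) and branching on NONBLOCK and IOPOLL before building its own request. The sketch below models that shape in userspace under stated assumptions: the DRV_* flags, the check helper and the flag values are placeholders, not the nvme driver's API.

/* Model of a driver-side uring_cmd consumer: validate layout, then map
 * the core's issue_flags onto driver-local request flags. */
#include <errno.h>
#include <stdio.h>

#define ISSUE_F_NONBLOCK (1u << 0)
#define ISSUE_F_SQE128   (1u << 8)
#define ISSUE_F_CQE32    (1u << 9)
#define ISSUE_F_IOPOLL   (1u << 10)

#define DRV_REQ_NOWAIT   (1u << 0)
#define DRV_REQ_POLLED   (1u << 1)

static int drv_uring_cmd_checks(unsigned int issue_flags)
{
	/* passthrough commands need the big SQE / big CQE layout */
	if ((issue_flags & (ISSUE_F_SQE128 | ISSUE_F_CQE32)) !=
	    (ISSUE_F_SQE128 | ISSUE_F_CQE32))
		return -EOPNOTSUPP;
	return 0;
}

static int drv_uring_cmd(unsigned int issue_flags)
{
	unsigned int rq_flags = 0;
	int ret = drv_uring_cmd_checks(issue_flags);

	if (ret)
		return ret;
	if (issue_flags & ISSUE_F_NONBLOCK)
		rq_flags |= DRV_REQ_NOWAIT;	/* don't sleep for resources */
	if (issue_flags & ISSUE_F_IOPOLL)
		rq_flags |= DRV_REQ_POLLED;	/* complete via polling */
	printf("dispatching with rq_flags=%#x\n", rq_flags);
	return 0;
}

int main(void)
{
	return drv_uring_cmd(ISSUE_F_SQE128 | ISSUE_F_CQE32 | ISSUE_F_IOPOLL);
}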
/kernel/linux/linux-5.10/io_uring/
io_uring.c
    1098  unsigned int issue_flags);
    1107  unsigned int issue_flags, u32 slot_index);
    1108  static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
    1894  static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags, in __io_req_complete() argument
    1897  if (issue_flags & IO_URING_F_COMPLETE_DEFER) in __io_req_complete()
    3071  unsigned int issue_flags) in kiocb_done()
    3084  __io_req_complete(req, issue_flags, in kiocb_done()
    3102  if (!(issue_flags & IO_URING_F_NONBLOCK)) { in kiocb_done()
    3104  __io_req_complete(req, issue_flags, ret, cflags); in kiocb_done()
    3107  __io_req_complete(req, issue_flags, re in kiocb_done()
    3070  kiocb_done(struct kiocb *kiocb, ssize_t ret, unsigned int issue_flags) kiocb_done() argument
    3597  io_read(struct io_kiocb *req, unsigned int issue_flags) io_read() argument
    3736  io_write(struct io_kiocb *req, unsigned int issue_flags) io_write() argument
    3874  io_renameat(struct io_kiocb *req, unsigned int issue_flags) io_renameat() argument
    3921  io_unlinkat(struct io_kiocb *req, unsigned int issue_flags) io_unlinkat() argument
    3958  io_shutdown(struct io_kiocb *req, unsigned int issue_flags) io_shutdown() argument
    4006  io_tee(struct io_kiocb *req, unsigned int issue_flags) io_tee() argument
    4045  io_splice(struct io_kiocb *req, unsigned int issue_flags) io_splice() argument
    4082  io_nop(struct io_kiocb *req, unsigned int issue_flags) io_nop() argument
    4112  io_fsync(struct io_kiocb *req, unsigned int issue_flags) io_fsync() argument
    4145  io_fallocate(struct io_kiocb *req, unsigned int issue_flags) io_fallocate() argument
    4224  io_openat2(struct io_kiocb *req, unsigned int issue_flags) io_openat2() argument
    4292  io_openat(struct io_kiocb *req, unsigned int issue_flags) io_openat() argument
    4344  io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags) io_remove_buffers() argument
    4432  io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) io_provide_buffers() argument
    4488  io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags) io_epoll_ctl() argument
    4525  io_madvise(struct io_kiocb *req, unsigned int issue_flags) io_madvise() argument
    4557  io_fadvise(struct io_kiocb *req, unsigned int issue_flags) io_fadvise() argument
    4598  io_statx(struct io_kiocb *req, unsigned int issue_flags) io_statx() argument
    4633  io_close(struct io_kiocb *req, unsigned int issue_flags) io_close() argument
    4700  io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags) io_sync_file_range() argument
    4799  io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) io_sendmsg() argument
    4854  io_send(struct io_kiocb *req, unsigned int issue_flags) io_send() argument
    5050  io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) io_recvmsg() argument
    5122  io_recv(struct io_kiocb *req, unsigned int issue_flags) io_recv() argument
    5215  io_accept(struct io_kiocb *req, unsigned int issue_flags) io_accept() argument
    5277  io_connect(struct io_kiocb *req, unsigned int issue_flags) io_connect() argument
    5988  io_poll_add(struct io_kiocb *req, unsigned int issue_flags) io_poll_add() argument
    6005  io_poll_update(struct io_kiocb *req, unsigned int issue_flags) io_poll_update() argument
    6212  io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags) io_timeout_remove() argument
    6294  io_timeout(struct io_kiocb *req, unsigned int issue_flags) io_timeout() argument
    6421  io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) io_async_cancel() argument
    6466  io_files_update(struct io_kiocb *req, unsigned int issue_flags) io_files_update() argument
    6745  io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) io_issue_sqe() argument
    6953  io_file_get_fixed(struct io_ring_ctx *ctx, struct io_kiocb *req, int fd, unsigned int issue_flags) io_file_get_fixed() argument
    6989  io_file_get(struct io_ring_ctx *ctx, struct io_kiocb *req, int fd, bool fixed, unsigned int issue_flags) io_file_get() argument
    8474  io_install_fixed_file(struct io_kiocb *req, struct file *file, unsigned int issue_flags, u32 slot_index) io_install_fixed_file() argument
    8524  io_close_fixed(struct io_kiocb *req, unsigned int issue_flags) io_close_fixed() argument
    [all...]
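The 5.10-era tree keeps all of this in a single io_uring.c, and its __io_req_complete() hits show the other common use of issue_flags visible here: IO_URING_F_COMPLETE_DEFER selects between posting a CQE immediately and batching it for a later flush at the end of a submit cycle. The self-contained model below sketches that split with invented names, flag values and a fixed-size batch; it is not the kernel's completion code.

/* Model of deferred vs. immediate completion keyed off issue_flags. */
#include <stdio.h>

#define ISSUE_F_COMPLETE_DEFER (1u << 2)

struct cqe { long res; };
static struct cqe deferred[16];
static int ndeferred;

static void post_cqe(long res)
{
	printf("CQE posted: res=%ld\n", res);
}

static void req_complete(long res, unsigned int issue_flags)
{
	if (issue_flags & ISSUE_F_COMPLETE_DEFER)
		deferred[ndeferred++] = (struct cqe){ .res = res };
	else
		post_cqe(res);
}

static void flush_completions(void)
{
	for (int i = 0; i < ndeferred; i++)
		post_cqe(deferred[i].res);
	ndeferred = 0;
}

int main(void)
{
	req_complete(4096, ISSUE_F_COMPLETE_DEFER);
	req_complete(512, ISSUE_F_COMPLETE_DEFER);
	flush_completions();	/* end of the submit batch */
	req_complete(-11, 0);	/* immediate path */
	return 0;
}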