Lines matching refs: req
509 struct io_kiocb *req;
810 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
905 struct io_kiocb *req;
910 /* needs req->file assigned */
1079 static bool io_disarm_next(struct io_kiocb *req);
1086 static void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags);
1088 static void io_put_req(struct io_kiocb *req);
1089 static void io_put_req_deferred(struct io_kiocb *req);
1090 static void io_dismantle_req(struct io_kiocb *req);
1091 static void io_queue_linked_timeout(struct io_kiocb *req);
1095 static void io_clean_op(struct io_kiocb *req);
1097 struct io_kiocb *req, int fd, bool fixed,
1099 static void __io_queue_sqe(struct io_kiocb *req);
1102 static void io_req_task_queue(struct io_kiocb *req);
1104 static int io_req_prep_async(struct io_kiocb *req);
1106 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
1108 static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
1144 #define req_ref_zero_or_close_to_overflow(req) \
1145 ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
1147 static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
1149 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
1150 return atomic_inc_not_zero(&req->refs);
1153 static inline bool req_ref_put_and_test(struct io_kiocb *req)
1155 if (likely(!(req->flags & REQ_F_REFCOUNT)))
1158 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1159 return atomic_dec_and_test(&req->refs);
1162 static inline void req_ref_get(struct io_kiocb *req)
1164 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
1165 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1166 atomic_inc(&req->refs);
1169 static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
1171 if (!(req->flags & REQ_F_REFCOUNT)) {
1172 req->flags |= REQ_F_REFCOUNT;
1173 atomic_set(&req->refs, nr);
1177 static inline void io_req_set_refcount(struct io_kiocb *req)
1179 __io_req_set_refcount(req, 1);
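
The block above (1144-1179) is io_uring's lazy reference counting: a request only pays for atomics once REQ_F_REFCOUNT is set, and the +127u check flags a counter that has hit zero or gone negative. Below is a stand-alone user-space model of that pattern using C11 atomics; the struct and constant names are invented for illustration and are not the kernel's.

    /* Stand-alone model of the lazy refcount pattern listed above (not
     * kernel code): the atomic counter is only honoured once REQ_F_REFCOUNT
     * is set, so requests that never need a refcount skip atomics entirely. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define REQ_F_REFCOUNT (1u << 0)

    struct fake_req {
        unsigned int flags;
        atomic_int refs;
    };

    static void req_set_refcount(struct fake_req *req, int nr)
    {
        if (!(req->flags & REQ_F_REFCOUNT)) {
            req->flags |= REQ_F_REFCOUNT;
            atomic_store(&req->refs, nr);
        }
    }

    static bool req_put_and_test(struct fake_req *req)
    {
        if (!(req->flags & REQ_F_REFCOUNT))
            return true;                 /* no refcount: caller owns the req */
        return atomic_fetch_sub(&req->refs, 1) == 1;
    }

    int main(void)
    {
        struct fake_req req = { .flags = 0 };

        req_set_refcount(&req, 2);       /* e.g. an armed linked timeout holds one ref */
        printf("first put frees: %d\n", req_put_and_test(&req));   /* 0 */
        printf("second put frees: %d\n", req_put_and_test(&req));  /* 1 */
        return 0;
    }
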
1182 static inline void io_req_set_rsrc_node(struct io_kiocb *req)
1184 struct io_ring_ctx *ctx = req->ctx;
1186 if (!req->fixed_rsrc_refs) {
1187 req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
1188 percpu_ref_get(req->fixed_rsrc_refs);
1206 __must_hold(&req->ctx->timeout_lock)
1208 struct io_kiocb *req;
1215 io_for_each_link(req, head) {
1216 if (req->flags & REQ_F_INFLIGHT)
1224 struct io_kiocb *req;
1226 io_for_each_link(req, head) {
1227 if (req->flags & REQ_F_INFLIGHT)
1260 static inline void req_set_fail(struct io_kiocb *req)
1262 req->flags |= REQ_F_FAIL;
1265 static inline void req_fail_link_node(struct io_kiocb *req, int res)
1267 req_set_fail(req);
1268 req->result = res;
1278 static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1280 return !req->timeout.off;
1288 struct io_kiocb *req, *tmp;
1292 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
1293 req->io_task_work.func(req, &locked);
1378 static bool req_need_defer(struct io_kiocb *req, u32 seq)
1380 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1381 struct io_ring_ctx *ctx = req->ctx;
1398 static inline bool io_req_ffs_set(struct io_kiocb *req)
1400 return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE);
1403 static void io_req_track_inflight(struct io_kiocb *req)
1405 if (!(req->flags & REQ_F_INFLIGHT)) {
1406 req->flags |= REQ_F_INFLIGHT;
1407 atomic_inc(&req->task->io_uring->inflight_tracked);
1411 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
1413 if (WARN_ON_ONCE(!req->link))
1416 req->flags &= ~REQ_F_ARM_LTIMEOUT;
1417 req->flags |= REQ_F_LINK_TIMEOUT;
1420 io_req_set_refcount(req);
1421 __io_req_set_refcount(req->link, 2);
1422 return req->link;
1425 static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
1427 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
1429 return __io_prep_linked_timeout(req);
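
__io_prep_linked_timeout() (1411-1429) arms REQ_F_LINK_TIMEOUT and pins the linked timeout with a refcount of 2, so the timer firing and the parent's completion path can each drop a reference safely. From user space this is the IOSQE_IO_LINK + IORING_OP_LINK_TIMEOUT pairing; a minimal sketch assuming liburing is available, not the kernel path itself.

    /* Minimal liburing sketch of the linked-timeout arming seen above:
     * a read on stdin is linked to IORING_OP_LINK_TIMEOUT so it is
     * cancelled after roughly one second if no input arrives. */
    #include <liburing.h>
    #include <stdio.h>

    int main(void)
    {
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
        char buf[64];

        if (io_uring_queue_init(4, &ring, 0) < 0)
            return 1;

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_read(sqe, 0, buf, sizeof(buf), 0);
        sqe->flags |= IOSQE_IO_LINK;          /* next SQE is the linked timeout */

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_link_timeout(sqe, &ts, 0);

        io_uring_submit(&ring);
        io_uring_wait_cqe(&ring, &cqe);       /* read completes, or -ECANCELED */
        printf("read cqe res=%d\n", cqe->res);
        io_uring_cqe_seen(&ring, cqe);
        io_uring_queue_exit(&ring);
        return 0;
    }
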
1432 static void io_prep_async_work(struct io_kiocb *req)
1434 const struct io_op_def *def = &io_op_defs[req->opcode];
1435 struct io_ring_ctx *ctx = req->ctx;
1437 if (!(req->flags & REQ_F_CREDS)) {
1438 req->flags |= REQ_F_CREDS;
1439 req->creds = get_current_cred();
1442 req->work.list.next = NULL;
1443 req->work.flags = 0;
1444 if (req->flags & REQ_F_FORCE_ASYNC)
1445 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1447 if (req->flags & REQ_F_ISREG) {
1449 io_wq_hash_work(&req->work, file_inode(req->file));
1450 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
1452 req->work.flags |= IO_WQ_WORK_UNBOUND;
1456 static void io_prep_async_link(struct io_kiocb *req)
1460 if (req->flags & REQ_F_LINK_TIMEOUT) {
1461 struct io_ring_ctx *ctx = req->ctx;
1464 io_for_each_link(cur, req)
1468 io_for_each_link(cur, req)
1473 static void io_queue_async_work(struct io_kiocb *req, bool *locked)
1475 struct io_ring_ctx *ctx = req->ctx;
1476 struct io_kiocb *link = io_prep_linked_timeout(req);
1477 struct io_uring_task *tctx = req->task->io_uring;
1486 io_prep_async_link(req);
1495 if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
1496 req->work.flags |= IO_WQ_WORK_CANCEL;
1498 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1499 &req->work, req->flags);
1500 io_wq_enqueue(tctx->io_wq, &req->work);
1505 static void io_kill_timeout(struct io_kiocb *req, int status)
1506 __must_hold(&req->ctx->completion_lock)
1507 __must_hold(&req->ctx->timeout_lock)
1509 struct io_timeout_data *io = req->async_data;
1513 req_set_fail(req);
1514 atomic_set(&req->ctx->cq_timeouts,
1515 atomic_read(&req->ctx->cq_timeouts) + 1);
1516 list_del_init(&req->timeout.list);
1517 io_fill_cqe_req(req, status, 0);
1518 io_put_req_deferred(req);
1530 if (req_need_defer(de->req, de->seq))
1533 io_req_task_queue(de->req);
1542 struct io_kiocb *req, *tmp;
1545 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
1548 if (io_is_timeout_noseq(req))
1558 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1563 io_kill_timeout(req, 0);
1830 static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
1832 __io_fill_cqe(req->ctx, req->user_data, res, cflags);
1842 static void io_req_complete_post(struct io_kiocb *req, s32 res,
1845 struct io_ring_ctx *ctx = req->ctx;
1848 __io_fill_cqe(ctx, req->user_data, res, cflags);
1853 if (req_ref_put_and_test(req)) {
1854 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
1855 if (req->flags & IO_DISARM_MASK)
1856 io_disarm_next(req);
1857 if (req->link) {
1858 io_req_task_queue(req->link);
1859 req->link = NULL;
1862 io_dismantle_req(req);
1863 io_put_task(req->task, 1);
1864 list_add(&req->inflight_entry, &ctx->locked_free_list);
1868 req = NULL;
1873 if (req) {
1879 static inline bool io_req_needs_clean(struct io_kiocb *req)
1881 return req->flags & IO_REQ_CLEAN_FLAGS;
1884 static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
1887 if (io_req_needs_clean(req))
1888 io_clean_op(req);
1889 req->result = res;
1890 req->compl.cflags = cflags;
1891 req->flags |= REQ_F_COMPLETE_INLINE;
1894 static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
1898 io_req_complete_state(req, res, cflags);
1900 io_req_complete_post(req, res, cflags);
1903 static inline void io_req_complete(struct io_kiocb *req, s32 res)
1905 __io_req_complete(req, 0, res, 0);
1908 static void io_req_complete_failed(struct io_kiocb *req, s32 res)
1910 req_set_fail(req);
1911 io_req_complete_post(req, res, 0);
1914 static void io_req_complete_fail_submit(struct io_kiocb *req)
1920 req->flags &= ~REQ_F_HARDLINK;
1921 req->flags |= REQ_F_LINK;
1922 io_req_complete_failed(req, req->result);
1929 static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
1931 req->ctx = ctx;
1932 req->link = NULL;
1933 req->async_data = NULL;
1935 req->result = 0;
1963 struct io_kiocb *req = list_first_entry(&state->free_list,
1966 list_del(&req->inflight_entry);
1967 state->reqs[nr++] = req;
2022 static void io_dismantle_req(struct io_kiocb *req)
2024 unsigned int flags = req->flags;
2026 if (io_req_needs_clean(req))
2027 io_clean_op(req);
2029 io_put_file(req->file);
2030 if (req->fixed_rsrc_refs)
2031 percpu_ref_put(req->fixed_rsrc_refs);
2032 if (req->async_data) {
2033 kfree(req->async_data);
2034 req->async_data = NULL;
2038 static void __io_free_req(struct io_kiocb *req)
2040 struct io_ring_ctx *ctx = req->ctx;
2042 io_dismantle_req(req);
2043 io_put_task(req->task, 1);
2046 list_add(&req->inflight_entry, &ctx->locked_free_list);
2053 static inline void io_remove_next_linked(struct io_kiocb *req)
2055 struct io_kiocb *nxt = req->link;
2057 req->link = nxt->link;
2061 static bool io_kill_linked_timeout(struct io_kiocb *req)
2062 __must_hold(&req->ctx->completion_lock)
2063 __must_hold(&req->ctx->timeout_lock)
2065 struct io_kiocb *link = req->link;
2070 io_remove_next_linked(req);
2082 static void io_fail_links(struct io_kiocb *req)
2083 __must_hold(&req->ctx->completion_lock)
2085 struct io_kiocb *nxt, *link = req->link;
2087 req->link = NULL;
2097 trace_io_uring_fail_link(req, link);
2104 static bool io_disarm_next(struct io_kiocb *req)
2105 __must_hold(&req->ctx->completion_lock)
2109 if (req->flags & REQ_F_ARM_LTIMEOUT) {
2110 struct io_kiocb *link = req->link;
2112 req->flags &= ~REQ_F_ARM_LTIMEOUT;
2114 io_remove_next_linked(req);
2119 } else if (req->flags & REQ_F_LINK_TIMEOUT) {
2120 struct io_ring_ctx *ctx = req->ctx;
2123 posted = io_kill_linked_timeout(req);
2126 if (unlikely((req->flags & REQ_F_FAIL) &&
2127 !(req->flags & REQ_F_HARDLINK))) {
2128 posted |= (req->link != NULL);
2129 io_fail_links(req);
2134 static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
2144 if (req->flags & IO_DISARM_MASK) {
2145 struct io_ring_ctx *ctx = req->ctx;
2149 posted = io_disarm_next(req);
2151 io_commit_cqring(req->ctx);
2156 nxt = req->link;
2157 req->link = NULL;
2161 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
2163 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
2165 return __io_req_find_next(req);
2205 struct io_kiocb *req = container_of(node, struct io_kiocb,
2208 if (req->ctx != ctx) {
2210 ctx = req->ctx;
2215 req->io_task_work.func(req, &locked);
2232 static void io_req_task_work_add(struct io_kiocb *req)
2234 struct task_struct *tsk = req->task;
2244 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
2260 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
2273 req = container_of(node, struct io_kiocb, io_task_work.node);
2275 if (llist_add(&req->io_task_work.fallback_node,
2276 &req->ctx->fallback_llist))
2277 schedule_delayed_work(&req->ctx->fallback_work, 1);
2281 static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
2283 struct io_ring_ctx *ctx = req->ctx;
2287 io_req_complete_failed(req, req->result);
2290 static void io_req_task_submit(struct io_kiocb *req, bool *locked)
2292 struct io_ring_ctx *ctx = req->ctx;
2295 /* req->task == current here, checking PF_EXITING is safe */
2296 if (likely(!(req->task->flags & PF_EXITING)))
2297 __io_queue_sqe(req);
2299 io_req_complete_failed(req, -EFAULT);
2302 static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
2304 req->result = ret;
2305 req->io_task_work.func = io_req_task_cancel;
2306 io_req_task_work_add(req);
2309 static void io_req_task_queue(struct io_kiocb *req)
2311 req->io_task_work.func = io_req_task_submit;
2312 io_req_task_work_add(req);
2315 static void io_req_task_queue_reissue(struct io_kiocb *req)
2317 req->io_task_work.func = io_queue_async_work;
2318 io_req_task_work_add(req);
2321 static inline void io_queue_next(struct io_kiocb *req)
2323 struct io_kiocb *nxt = io_req_find_next(req);
2329 static void io_free_req(struct io_kiocb *req)
2331 io_queue_next(req);
2332 __io_free_req(req);
2335 static void io_free_req_work(struct io_kiocb *req, bool *locked)
2337 io_free_req(req);
2362 static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2365 io_queue_next(req);
2366 io_dismantle_req(req);
2368 if (req->task != rb->task) {
2371 rb->task = req->task;
2378 state->reqs[state->free_reqs++] = req;
2380 list_add(&req->inflight_entry, &state->free_list);
2392 struct io_kiocb *req = state->compl_reqs[i];
2394 __io_fill_cqe(ctx, req->user_data, req->result,
2395 req->compl.cflags);
2403 struct io_kiocb *req = state->compl_reqs[i];
2405 if (req_ref_put_and_test(req))
2406 io_req_free_batch(&rb, req, &ctx->submit_state);
2417 static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
2421 if (req_ref_put_and_test(req)) {
2422 nxt = io_req_find_next(req);
2423 __io_free_req(req);
2428 static inline void io_put_req(struct io_kiocb *req)
2430 if (req_ref_put_and_test(req))
2431 io_free_req(req);
2434 static inline void io_put_req_deferred(struct io_kiocb *req)
2436 if (req_ref_put_and_test(req)) {
2437 req->io_task_work.func = io_free_req_work;
2438 io_req_task_work_add(req);
2457 static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
2463 req->flags &= ~REQ_F_BUFFER_SELECTED;
2468 static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2472 if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
2474 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2475 return io_put_kbuf(req, kbuf);
2505 struct io_kiocb *req;
2515 req = list_first_entry(done, struct io_kiocb, inflight_entry);
2516 list_del(&req->inflight_entry);
2517 cflags = io_put_rw_kbuf(req);
2522 WRITE_ONCE(cqe->user_data, req->user_data);
2523 WRITE_ONCE(cqe->res, req->result);
2527 io_cqring_event_overflow(ctx, req->user_data,
2528 req->result, cflags);
2532 if (req_ref_put_and_test(req))
2533 io_req_free_batch(&rb, req, &ctx->submit_state);
2549 struct io_kiocb *req, *tmp;
2559 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
2560 struct kiocb *kiocb = &req->rw.kiocb;
2568 if (READ_ONCE(req->iopoll_completed)) {
2569 list_move_tail(&req->inflight_entry, &done);
2581 /* iopoll may have completed current req */
2582 if (READ_ONCE(req->iopoll_completed))
2583 list_move_tail(&req->inflight_entry, &done);
2679 static void kiocb_end_write(struct io_kiocb *req)
2685 if (req->flags & REQ_F_ISREG) {
2686 struct super_block *sb = file_inode(req->file)->i_sb;
2694 static bool io_resubmit_prep(struct io_kiocb *req)
2696 struct io_async_rw *rw = req->async_data;
2699 return !io_req_prep_async(req);
2704 static bool io_rw_should_reissue(struct io_kiocb *req)
2706 umode_t mode = file_inode(req->file)->i_mode;
2707 struct io_ring_ctx *ctx = req->ctx;
2711 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
2725 if (!same_thread_group(req->task, current) || !in_task())
2730 static bool io_resubmit_prep(struct io_kiocb *req)
2734 static bool io_rw_should_reissue(struct io_kiocb *req)
2744 static void io_req_io_end(struct io_kiocb *req)
2746 struct io_rw *rw = &req->rw;
2749 kiocb_end_write(req);
2750 fsnotify_modify(req->file);
2752 fsnotify_access(req->file);
2756 static bool __io_complete_rw_common(struct io_kiocb *req, long res)
2758 if (res != req->result) {
2760 io_rw_should_reissue(req)) {
2765 io_req_io_end(req);
2766 req->flags |= REQ_F_REISSUE;
2769 req_set_fail(req);
2770 req->result = res;
2775 static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
2777 struct io_async_rw *io = req->async_data;
2789 static void io_req_task_complete(struct io_kiocb *req, bool *locked)
2791 unsigned int cflags = io_put_rw_kbuf(req);
2792 int res = req->result;
2795 struct io_ring_ctx *ctx = req->ctx;
2798 io_req_complete_state(req, res, cflags);
2799 state->compl_reqs[state->compl_nr++] = req;
2803 io_req_complete_post(req, res, cflags);
2807 static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
2809 io_req_io_end(req);
2810 io_req_task_complete(req, locked);
2815 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2817 if (__io_complete_rw_common(req, res))
2819 req->result = io_fixup_rw_res(req, res);
2820 req->io_task_work.func = io_req_rw_complete;
2821 io_req_task_work_add(req);
2826 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2829 kiocb_end_write(req);
2830 if (unlikely(res != req->result)) {
2831 if (res == -EAGAIN && io_rw_should_reissue(req)) {
2832 req->flags |= REQ_F_REISSUE;
2837 WRITE_ONCE(req->result, res);
2840 WRITE_ONCE(req->iopoll_completed, 1);
2849 static void io_iopoll_req_issued(struct io_kiocb *req)
2851 struct io_ring_ctx *ctx = req->ctx;
2872 if (list_req->file != req->file) {
2876 queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie);
2886 if (READ_ONCE(req->iopoll_completed))
2887 list_add(&req->inflight_entry, &ctx->iopoll_list);
2889 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
2949 static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
2951 if (rw == READ && (req->flags & REQ_F_NOWAIT_READ))
2953 else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE))
2956 return __io_file_supports_nowait(req->file, rw);
2959 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2962 struct io_ring_ctx *ctx = req->ctx;
2963 struct kiocb *kiocb = &req->rw.kiocb;
2964 struct file *file = req->file;
2968 if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode))
2969 req->flags |= REQ_F_ISREG;
2984 ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw)))
2985 req->flags |= REQ_F_NOWAIT;
3004 req->iopoll_completed = 0;
3012 req->buf_index = READ_ONCE(sqe->buf_index);
3013 req->imu = NULL;
3015 if (req->opcode == IORING_OP_READ_FIXED ||
3016 req->opcode == IORING_OP_WRITE_FIXED) {
3017 struct io_ring_ctx *ctx = req->ctx;
3020 if (unlikely(req->buf_index >= ctx->nr_user_bufs))
3022 index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
3023 req->imu = ctx->user_bufs[index];
3024 io_req_set_rsrc_node(req);
3027 req->rw.addr = READ_ONCE(sqe->addr);
3028 req->rw.len = READ_ONCE(sqe->len);
3053 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
3055 struct kiocb *kiocb = &req->rw.kiocb;
3060 if (!(req->file->f_mode & FMODE_STREAM)) {
3061 req->flags |= REQ_F_CUR_POS;
3062 kiocb->ki_pos = req->file->f_pos;
3073 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
3075 if (req->flags & REQ_F_CUR_POS)
3076 req->file->f_pos = kiocb->ki_pos;
3078 if (!__io_complete_rw_common(req, ret)) {
3083 io_req_io_end(req);
3084 __io_req_complete(req, issue_flags,
3085 io_fixup_rw_res(req, ret),
3086 io_put_rw_kbuf(req));
3092 if (req->flags & REQ_F_REISSUE) {
3093 req->flags &= ~REQ_F_REISSUE;
3094 if (io_resubmit_prep(req)) {
3095 io_req_task_queue_reissue(req);
3097 unsigned int cflags = io_put_rw_kbuf(req);
3098 struct io_ring_ctx *ctx = req->ctx;
3100 ret = io_fixup_rw_res(req, ret);
3101 req_set_fail(req);
3104 __io_req_complete(req, issue_flags, ret, cflags);
3107 __io_req_complete(req, issue_flags, ret, cflags);
3113 static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
3116 size_t len = req->rw.len;
3117 u64 buf_end, buf_addr = req->rw.addr;
3171 static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
3173 if (WARN_ON_ONCE(!req->imu))
3175 return __io_import_fixed(req, rw, iter, req->imu);
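
io_import_fixed()/__io_import_fixed() (3113-3175) resolve req->imu, the buffer registered up front and selected by req->buf_index in io_prep_rw(). The user-space counterpart, sketched with liburing; the file name fixed_buf_demo.tmp is an arbitrary choice for the demo.

    /* Register one buffer, then read into it with IORING_OP_READ_FIXED. */
    #include <fcntl.h>
    #include <liburing.h>
    #include <stdio.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
        static char bufmem[4096];
        const char msg[] = "hello from a registered buffer\n";
        struct iovec iov = { .iov_base = bufmem, .iov_len = sizeof(bufmem) };
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int fd = open("fixed_buf_demo.tmp", O_RDWR | O_CREAT | O_TRUNC, 0600);

        if (fd < 0 || write(fd, msg, sizeof(msg) - 1) < 0)
            return 1;

        io_uring_queue_init(4, &ring, 0);
        io_uring_register_buffers(&ring, &iov, 1);    /* becomes buf_index 0 */

        sqe = io_uring_get_sqe(&ring);
        /* IORING_OP_READ_FIXED: the address must lie inside registered buffer 0. */
        io_uring_prep_read_fixed(sqe, fd, bufmem, sizeof(bufmem), 0, 0);
        io_uring_submit(&ring);

        io_uring_wait_cqe(&ring, &cqe);
        printf("read_fixed res=%d: %.*s", cqe->res,
               cqe->res > 0 ? cqe->res : 0, bufmem);
        io_uring_cqe_seen(&ring, cqe);

        io_uring_unregister_buffers(&ring);
        io_uring_queue_exit(&ring);
        close(fd);
        unlink("fixed_buf_demo.tmp");
        return 0;
    }
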
3196 static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
3202 if (req->flags & REQ_F_BUFFER_SELECTED)
3205 io_ring_submit_lock(req->ctx, needs_lock);
3207 lockdep_assert_held(&req->ctx->uring_lock);
3209 head = xa_load(&req->ctx->io_buffers, bgid);
3217 xa_erase(&req->ctx->io_buffers, bgid);
3225 io_ring_submit_unlock(req->ctx, needs_lock);
3230 static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
3236 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3237 bgid = req->buf_index;
3238 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
3241 req->rw.addr = (u64) (unsigned long) kbuf;
3242 req->flags |= REQ_F_BUFFER_SELECTED;
3247 static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
3255 uiov = u64_to_user_ptr(req->rw.addr);
3264 buf = io_rw_buffer_select(req, &len, needs_lock);
3273 static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3276 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
3286 buf = io_rw_buffer_select(req, &len, needs_lock);
3294 static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3297 if (req->flags & REQ_F_BUFFER_SELECTED) {
3300 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3305 if (req->rw.len != 1)
3309 if (req->ctx->compat)
3310 return io_compat_import(req, iov, needs_lock);
3313 return __io_iov_buffer_select(req, iov, needs_lock);
3316 static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
3319 void __user *buf = u64_to_user_ptr(req->rw.addr);
3320 size_t sqe_len = req->rw.len;
3321 u8 opcode = req->opcode;
3326 return io_import_fixed(req, rw, iter);
3330 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
3334 if (req->flags & REQ_F_BUFFER_SELECT) {
3335 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
3338 req->rw.len = sqe_len;
3346 if (req->flags & REQ_F_BUFFER_SELECT) {
3347 ret = io_iov_buffer_select(req, *iovec, needs_lock);
3355 req->ctx->compat);
3367 static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
3369 struct kiocb *kiocb = &req->rw.kiocb;
3370 struct file *file = req->file;
3393 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3394 iovec.iov_len = req->rw.len;
3414 req->rw.addr += nr;
3415 req->rw.len -= nr;
3416 if (!req->rw.len)
3426 static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3429 struct io_async_rw *rw = req->async_data;
3449 req->flags |= REQ_F_NEED_CLEANUP;
3453 static inline int io_alloc_async_data(struct io_kiocb *req)
3455 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3456 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3457 return req->async_data == NULL;
3460 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3464 if (!force && !io_op_defs[req->opcode].needs_async_setup)
3466 if (!req->async_data) {
3469 if (io_alloc_async_data(req)) {
3474 io_req_map_rw(req, iovec, fast_iov, iter);
3475 iorw = req->async_data;
3482 static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
3484 struct io_async_rw *iorw = req->async_data;
3491 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
3497 req->flags |= REQ_F_NEED_CLEANUP;
3503 static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3505 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3507 return io_prep_rw(req, sqe, READ);
3524 struct io_kiocb *req = wait->private;
3532 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
3534 io_req_task_queue(req);
3550 static bool io_rw_should_retry(struct io_kiocb *req)
3552 struct io_async_rw *rw = req->async_data;
3554 struct kiocb *kiocb = &req->rw.kiocb;
3557 if (req->flags & REQ_F_NOWAIT)
3568 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3572 wait->wait.private = req;
3581 static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3583 if (req->file->f_op->read_iter)
3584 return call_read_iter(req->file, &req->rw.kiocb, iter);
3585 else if (req->file->f_op->read)
3586 return loop_rw_iter(READ, req, iter);
3591 static bool need_read_all(struct io_kiocb *req)
3593 return req->flags & REQ_F_ISREG ||
3594 S_ISBLK(file_inode(req->file)->i_mode);
3597 static int io_read(struct io_kiocb *req, unsigned int issue_flags)
3600 struct kiocb *kiocb = &req->rw.kiocb;
3602 struct io_async_rw *rw = req->async_data;
3619 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3625 req->result = iov_iter_count(iter);
3634 if (force_nonblock && !io_file_supports_nowait(req, READ)) {
3635 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
3639 ppos = io_kiocb_update_pos(req);
3641 ret = rw_verify_area(READ, req->file, ppos, req->result);
3647 ret = io_iter_do_read(req, iter);
3649 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
3650 req->flags &= ~REQ_F_REISSUE;
3652 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
3655 if (req->flags & REQ_F_NOWAIT)
3660 } else if (ret <= 0 || ret == req->result || !force_nonblock ||
3661 (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
3673 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
3678 rw = req->async_data;
3701 if (!io_rw_should_retry(req)) {
3706 req->result = iov_iter_count(iter);
3713 ret = io_iter_do_read(req, iter);
3729 static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3731 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3733 return io_prep_rw(req, sqe, WRITE);
3736 static int io_write(struct io_kiocb *req, unsigned int issue_flags)
3739 struct kiocb *kiocb = &req->rw.kiocb;
3741 struct io_async_rw *rw = req->async_data;
3753 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3759 req->result = iov_iter_count(iter);
3768 if (force_nonblock && !io_file_supports_nowait(req, WRITE))
3773 (req->flags & REQ_F_ISREG))
3776 ppos = io_kiocb_update_pos(req);
3778 ret = rw_verify_area(WRITE, req->file, ppos, req->result);
3789 if (req->flags & REQ_F_ISREG) {
3790 sb_start_write(file_inode(req->file)->i_sb);
3791 __sb_writers_release(file_inode(req->file)->i_sb,
3796 if (req->file->f_op->write_iter)
3797 ret2 = call_write_iter(req->file, kiocb, iter);
3798 else if (req->file->f_op->write)
3799 ret2 = loop_rw_iter(WRITE, req, iter);
3803 if (req->flags & REQ_F_REISSUE) {
3804 req->flags &= ~REQ_F_REISSUE;
3815 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
3819 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3826 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
3829 kiocb_end_write(req);
3841 static int io_renameat_prep(struct io_kiocb *req,
3844 struct io_rename *ren = &req->rename;
3847 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3851 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3870 req->flags |= REQ_F_NEED_CLEANUP;
3874 static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
3876 struct io_rename *ren = &req->rename;
3885 req->flags &= ~REQ_F_NEED_CLEANUP;
3887 req_set_fail(req);
3888 io_req_complete(req, ret);
3892 static int io_unlinkat_prep(struct io_kiocb *req,
3895 struct io_unlink *un = &req->unlink;
3898 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3903 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3917 req->flags |= REQ_F_NEED_CLEANUP;
3921 static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
3923 struct io_unlink *un = &req->unlink;
3934 req->flags &= ~REQ_F_NEED_CLEANUP;
3936 req_set_fail(req);
3937 io_req_complete(req, ret);
3941 static int io_shutdown_prep(struct io_kiocb *req,
3945 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3951 req->shutdown.how = READ_ONCE(sqe->len);
3958 static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
3967 sock = sock_from_file(req->file, &ret);
3971 ret = __sys_shutdown_sock(sock, req->shutdown.how);
3973 req_set_fail(req);
3974 io_req_complete(req, ret);
3981 static int __io_splice_prep(struct io_kiocb *req,
3984 struct io_splice *sp = &req->splice;
3987 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3998 static int io_tee_prep(struct io_kiocb *req,
4003 return __io_splice_prep(req, sqe);
4006 static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
4008 struct io_splice *sp = &req->splice;
4017 in = io_file_get(req->ctx, req, sp->splice_fd_in,
4031 req_set_fail(req);
4032 io_req_complete(req, ret);
4036 static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4038 struct io_splice *sp = &req->splice;
4042 return __io_splice_prep(req, sqe);
4045 static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
4047 struct io_splice *sp = &req->splice;
4057 in = io_file_get(req->ctx, req, sp->splice_fd_in,
4074 req_set_fail(req);
4075 io_req_complete(req, ret);
4082 static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
4084 struct io_ring_ctx *ctx = req->ctx;
4089 __io_req_complete(req, issue_flags, 0, 0);
4093 static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4095 struct io_ring_ctx *ctx = req->ctx;
4103 req->sync.flags = READ_ONCE(sqe->fsync_flags);
4104 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
4107 req->sync.off = READ_ONCE(sqe->off);
4108 req->sync.len = READ_ONCE(sqe->len);
4112 static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
4114 loff_t end = req->sync.off + req->sync.len;
4121 ret = vfs_fsync_range(req->file, req->sync.off,
4123 req->sync.flags & IORING_FSYNC_DATASYNC);
4125 req_set_fail(req);
4126 io_req_complete(req, ret);
4130 static int io_fallocate_prep(struct io_kiocb *req,
4136 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4139 req->sync.off = READ_ONCE(sqe->off);
4140 req->sync.len = READ_ONCE(sqe->addr);
4141 req->sync.mode = READ_ONCE(sqe->len);
4145 static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
4152 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
4153 req->sync.len);
4155 req_set_fail(req);
4157 fsnotify_modify(req->file);
4158 io_req_complete(req, ret);
4162 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4167 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4171 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4175 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
4176 req->open.how.flags |= O_LARGEFILE;
4178 req->open.dfd = READ_ONCE(sqe->fd);
4180 req->open.filename = getname(fname);
4181 if (IS_ERR(req->open.filename)) {
4182 ret = PTR_ERR(req->open.filename);
4183 req->open.filename = NULL;
4187 req->open.file_slot = READ_ONCE(sqe->file_index);
4188 if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC))
4191 req->open.nofile = rlimit(RLIMIT_NOFILE);
4192 req->flags |= REQ_F_NEED_CLEANUP;
4196 static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4201 req->open.how = build_open_how(flags, mode);
4202 return __io_openat_prep(req, sqe);
4205 static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4216 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
4221 return __io_openat_prep(req, sqe);
4224 static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
4229 bool fixed = !!req->open.file_slot;
4232 ret = build_open_flags(&req->open.how, &op);
4236 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
4244 if (req->open.how.flags & (O_TRUNC | O_CREAT | __O_TMPFILE))
4251 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
4256 file = do_filp_open(req->open.dfd, req->open.filename, &op);
4281 ret = io_install_fixed_file(req, file, issue_flags,
4282 req->open.file_slot - 1);
4284 putname(req->open.filename);
4285 req->flags &= ~REQ_F_NEED_CLEANUP;
4287 req_set_fail(req);
4288 __io_req_complete(req, issue_flags, ret, 0);
4292 static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
4294 return io_openat2(req, issue_flags);
4297 static int io_remove_buffers_prep(struct io_kiocb *req,
4300 struct io_provide_buf *p = &req->pbuf;
4344 static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
4346 struct io_provide_buf *p = &req->pbuf;
4347 struct io_ring_ctx *ctx = req->ctx;
4361 req_set_fail(req);
4364 __io_req_complete(req, issue_flags, ret, 0);
4369 static int io_provide_buffers_prep(struct io_kiocb *req,
4373 struct io_provide_buf *p = &req->pbuf;
4432 static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
4434 struct io_provide_buf *p = &req->pbuf;
4435 struct io_ring_ctx *ctx = req->ctx;
4454 req_set_fail(req);
4456 __io_req_complete(req, issue_flags, ret, 0);
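
io_provide_buffers() and io_buffer_select() above implement the provided-buffer groups behind REQ_F_BUFFER_SELECT: the kernel picks a buffer from the group when it executes the request and reports the buffer id in the CQE flags. A hedged liburing sketch; BGID, NBUFS and BUFSZ are arbitrary demo values.

    #include <liburing.h>
    #include <stdio.h>
    #include <unistd.h>

    #define BGID  7
    #define NBUFS 4
    #define BUFSZ 256

    int main(void)
    {
        static char pool[NBUFS][BUFSZ];
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int pfd[2];

        pipe(pfd);
        if (write(pfd[1], "ping", 4) != 4)
            return 1;
        io_uring_queue_init(8, &ring, 0);

        /* IORING_OP_PROVIDE_BUFFERS: hand NBUFS buffers to group BGID. */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_provide_buffers(sqe, pool, BUFSZ, NBUFS, BGID, 0);
        io_uring_submit(&ring);
        io_uring_wait_cqe(&ring, &cqe);
        io_uring_cqe_seen(&ring, cqe);

        /* Read with no buffer of our own; the kernel selects one from BGID. */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_read(sqe, pfd[0], NULL, BUFSZ, 0);
        sqe->flags |= IOSQE_BUFFER_SELECT;
        sqe->buf_group = BGID;
        io_uring_submit(&ring);
        io_uring_wait_cqe(&ring, &cqe);
        if (cqe->res >= 0 && (cqe->flags & IORING_CQE_F_BUFFER))
            printf("%d bytes landed in buffer id %u\n",
                   cqe->res, cqe->flags >> IORING_CQE_BUFFER_SHIFT);
        io_uring_cqe_seen(&ring, cqe);
        io_uring_queue_exit(&ring);
        return 0;
    }
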
4461 static int io_epoll_ctl_prep(struct io_kiocb *req,
4467 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4470 req->epoll.epfd = READ_ONCE(sqe->fd);
4471 req->epoll.op = READ_ONCE(sqe->len);
4472 req->epoll.fd = READ_ONCE(sqe->off);
4474 if (ep_op_has_event(req->epoll.op)) {
4478 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4488 static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
4491 struct io_epoll *ie = &req->epoll;
4500 req_set_fail(req);
4501 __io_req_complete(req, issue_flags, ret, 0);
4508 static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4513 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4516 req->madvise.addr = READ_ONCE(sqe->addr);
4517 req->madvise.len = READ_ONCE(sqe->len);
4518 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4525 static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
4528 struct io_madvise *ma = &req->madvise;
4536 req_set_fail(req);
4537 io_req_complete(req, ret);
4544 static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4548 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4551 req->fadvise.offset = READ_ONCE(sqe->off);
4552 req->fadvise.len = READ_ONCE(sqe->len);
4553 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4557 static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
4559 struct io_fadvise *fa = &req->fadvise;
4573 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4575 req_set_fail(req);
4576 __io_req_complete(req, issue_flags, ret, 0);
4580 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4582 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4586 if (req->flags & REQ_F_FIXED_FILE)
4589 req->statx.dfd = READ_ONCE(sqe->fd);
4590 req->statx.mask = READ_ONCE(sqe->len);
4591 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
4592 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4593 req->statx.flags = READ_ONCE(sqe->statx_flags);
4598 static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
4600 struct io_statx *ctx = &req->statx;
4610 req_set_fail(req);
4611 io_req_complete(req, ret);
4615 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4617 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4622 if (req->flags & REQ_F_FIXED_FILE)
4625 req->close.fd = READ_ONCE(sqe->fd);
4626 req->close.file_slot = READ_ONCE(sqe->file_index);
4627 if (req->close.file_slot && req->close.fd)
4633 static int io_close(struct io_kiocb *req, unsigned int issue_flags)
4636 struct io_close *close = &req->close;
4641 if (req->close.file_slot) {
4642 ret = io_close_fixed(req, issue_flags);
4677 req_set_fail(req);
4680 __io_req_complete(req, issue_flags, ret, 0);
4684 static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4686 struct io_ring_ctx *ctx = req->ctx;
4694 req->sync.off = READ_ONCE(sqe->off);
4695 req->sync.len = READ_ONCE(sqe->len);
4696 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
4700 static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
4708 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
4709 req->sync.flags);
4711 req_set_fail(req);
4712 io_req_complete(req, ret);
4724 static int io_setup_async_msg(struct io_kiocb *req,
4727 struct io_async_msghdr *async_msg = req->async_data;
4731 if (io_alloc_async_data(req)) {
4735 async_msg = req->async_data;
4736 req->flags |= REQ_F_NEED_CLEANUP;
4749 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4752 struct io_sr_msg *sr = &req->sr_msg;
4757 ret = sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
4758 req->sr_msg.msg_flags, &iomsg->free_iov);
4764 static int io_sendmsg_prep_async(struct io_kiocb *req)
4768 ret = io_sendmsg_copy_hdr(req, req->async_data);
4770 req->flags |= REQ_F_NEED_CLEANUP;
4774 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4776 struct io_sr_msg *sr = &req->sr_msg;
4778 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4789 req->flags |= REQ_F_NOWAIT;
4792 if (req->ctx->compat)
4799 static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
4802 struct io_sr_msg *sr = &req->sr_msg;
4808 sock = sock_from_file(req->file, &ret);
4812 kmsg = req->async_data;
4814 ret = io_sendmsg_copy_hdr(req, &iomsg);
4822 flags = req->sr_msg.msg_flags;
4832 return io_setup_async_msg(req, kmsg);
4837 req->flags |= REQ_F_PARTIAL_IO;
4838 return io_setup_async_msg(req, kmsg);
4840 req_set_fail(req);
4845 req->flags &= ~REQ_F_NEED_CLEANUP;
4850 __io_req_complete(req, issue_flags, ret, 0);
4854 static int io_send(struct io_kiocb *req, unsigned int issue_flags)
4856 struct io_sr_msg *sr = &req->sr_msg;
4864 sock = sock_from_file(req->file, &ret);
4877 flags = req->sr_msg.msg_flags;
4894 req->flags |= REQ_F_PARTIAL_IO;
4897 req_set_fail(req);
4903 __io_req_complete(req, issue_flags, ret, 0);
4907 static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4910 struct io_sr_msg *sr = &req->sr_msg;
4920 if (req->flags & REQ_F_BUFFER_SELECT) {
4940 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
4943 struct io_sr_msg *sr = &req->sr_msg;
4955 if (req->flags & REQ_F_BUFFER_SELECT) {
4981 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4987 if (req->ctx->compat)
4988 return __io_compat_recvmsg_copy_hdr(req, iomsg);
4991 return __io_recvmsg_copy_hdr(req, iomsg);
4994 static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
4997 struct io_sr_msg *sr = &req->sr_msg;
5000 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
5005 req->flags |= REQ_F_BUFFER_SELECTED;
5009 static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
5011 return io_put_kbuf(req, req->sr_msg.kbuf);
5014 static int io_recvmsg_prep_async(struct io_kiocb *req)
5018 ret = io_recvmsg_copy_hdr(req, req->async_data);
5020 req->flags |= REQ_F_NEED_CLEANUP;
5024 static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5026 struct io_sr_msg *sr = &req->sr_msg;
5028 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5040 req->flags |= REQ_F_NOWAIT;
5043 if (req->ctx->compat)
5050 static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
5053 struct io_sr_msg *sr = &req->sr_msg;
5061 sock = sock_from_file(req->file, &ret);
5065 kmsg = req->async_data;
5067 ret = io_recvmsg_copy_hdr(req, &iomsg);
5073 if (req->flags & REQ_F_BUFFER_SELECT) {
5074 kbuf = io_recv_buffer_select(req, !force_nonblock);
5078 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
5080 1, req->sr_msg.len);
5083 flags = req->sr_msg.msg_flags;
5089 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
5093 return io_setup_async_msg(req, kmsg);
5100 req->flags |= REQ_F_PARTIAL_IO;
5101 return io_setup_async_msg(req, kmsg);
5103 req_set_fail(req);
5105 req_set_fail(req);
5108 if (req->flags & REQ_F_BUFFER_SELECTED)
5109 cflags = io_put_recv_kbuf(req);
5113 req->flags &= ~REQ_F_NEED_CLEANUP;
5118 __io_req_complete(req, issue_flags, ret, cflags);
5122 static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
5125 struct io_sr_msg *sr = &req->sr_msg;
5135 sock = sock_from_file(req->file, &ret);
5139 if (req->flags & REQ_F_BUFFER_SELECT) {
5140 kbuf = io_recv_buffer_select(req, !force_nonblock);
5157 flags = req->sr_msg.msg_flags;
5173 req->flags |= REQ_F_PARTIAL_IO;
5176 req_set_fail(req);
5179 req_set_fail(req);
5181 if (req->flags & REQ_F_BUFFER_SELECTED)
5182 cflags = io_put_recv_kbuf(req);
5187 __io_req_complete(req, issue_flags, ret, cflags);
5191 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5193 struct io_accept *accept = &req->accept;
5195 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5215 static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
5217 struct io_accept *accept = &req->accept;
5229 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
5237 req->flags |= REQ_F_PARTIAL_IO;
5242 req_set_fail(req);
5247 ret = io_install_fixed_file(req, file, issue_flags,
5250 __io_req_complete(req, issue_flags, ret, 0);
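
io_accept() (5215-5250) backs IORING_OP_ACCEPT, optionally installing the result straight into a fixed-file slot via sqe->file_index. A plain (non-fixed) accept from user space, assuming liburing, accepting a loopback connection the program makes to itself.

    #include <arpa/inet.h>
    #include <liburing.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        struct sockaddr_in addr = { .sin_family = AF_INET,
                                    .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
        socklen_t len = sizeof(addr);
        int lfd = socket(AF_INET, SOCK_STREAM, 0);
        int cfd = socket(AF_INET, SOCK_STREAM, 0);

        bind(lfd, (struct sockaddr *)&addr, sizeof(addr));  /* port 0: ephemeral */
        listen(lfd, 1);
        getsockname(lfd, (struct sockaddr *)&addr, &len);   /* learn the port */

        io_uring_queue_init(4, &ring, 0);
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_accept(sqe, lfd, NULL, NULL, 0);      /* peer addr not needed */
        io_uring_submit(&ring);

        connect(cfd, (struct sockaddr *)&addr, sizeof(addr));
        io_uring_wait_cqe(&ring, &cqe);
        printf("accepted fd=%d\n", cqe->res);
        io_uring_cqe_seen(&ring, cqe);

        close(cfd);
        close(lfd);
        io_uring_queue_exit(&ring);
        return 0;
    }
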
5254 static int io_connect_prep_async(struct io_kiocb *req)
5256 struct io_async_connect *io = req->async_data;
5257 struct io_connect *conn = &req->connect;
5262 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5264 struct io_connect *conn = &req->connect;
5266 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5277 static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
5284 if (req->async_data) {
5285 io = req->async_data;
5287 ret = move_addr_to_kernel(req->connect.addr,
5288 req->connect.addr_len,
5297 ret = __sys_connect_file(req->file, &io->address,
5298 req->connect.addr_len, file_flags);
5300 if (req->async_data)
5302 if (io_alloc_async_data(req)) {
5306 memcpy(req->async_data, &__io, sizeof(__io));
5313 req_set_fail(req);
5314 __io_req_complete(req, issue_flags, ret, 0);
5319 static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
5326 static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
5333 static int io_##op##_prep_async(struct io_kiocb *req) \
5348 struct io_kiocb *req;
5363 static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
5372 v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
5375 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
5384 static inline bool io_poll_get_ownership(struct io_kiocb *req)
5386 if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
5387 return io_poll_get_ownership_slowpath(req);
5388 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
5391 static void io_poll_mark_cancelled(struct io_kiocb *req)
5393 atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
5396 static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
5399 if (req->opcode == IORING_OP_POLL_ADD)
5400 return req->async_data;
5401 return req->apoll->double_poll;
5404 static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
5406 if (req->opcode == IORING_OP_POLL_ADD)
5407 return &req->poll;
5408 return &req->apoll->poll;
5411 static void io_poll_req_insert(struct io_kiocb *req)
5413 struct io_ring_ctx *ctx = req->ctx;
5416 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5417 hlist_add_head(&req->hash_node, list);
5443 static void io_poll_remove_entries(struct io_kiocb *req)
5445 struct io_poll_iocb *poll = io_poll_get_single(req);
5446 struct io_poll_iocb *poll_double = io_poll_get_double(req);
5476 * the request, then the mask is stored in req->result.
5478 static int io_poll_check_events(struct io_kiocb *req)
5480 struct io_ring_ctx *ctx = req->ctx;
5481 struct io_poll_iocb *poll = io_poll_get_single(req);
5484 /* req->task == current here, checking PF_EXITING is safe */
5485 if (unlikely(req->task->flags & PF_EXITING))
5486 io_poll_mark_cancelled(req);
5489 v = atomic_read(&req->poll_refs);
5502 req->result = 0;
5504 req->result = 0;
5510 atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
5514 if (!req->result) {
5517 req->result = vfs_poll(req->file, &pt) & poll->events;
5521 if (req->result && !(poll->events & EPOLLONESHOT)) {
5522 __poll_t mask = mangle_poll(req->result & poll->events);
5526 filled = io_fill_cqe_aux(ctx, req->user_data, mask,
5533 } else if (req->result) {
5538 req->result = 0;
5544 } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
5550 static void io_poll_task_func(struct io_kiocb *req, bool *locked)
5552 struct io_ring_ctx *ctx = req->ctx;
5555 ret = io_poll_check_events(req);
5560 req->result = mangle_poll(req->result & req->poll.events);
5562 req->result = ret;
5563 req_set_fail(req);
5566 io_poll_remove_entries(req);
5568 hash_del(&req->hash_node);
5570 io_req_complete_post(req, req->result, 0);
5573 static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
5575 struct io_ring_ctx *ctx = req->ctx;
5578 ret = io_poll_check_events(req);
5582 io_tw_lock(req->ctx, locked);
5583 io_poll_remove_entries(req);
5585 hash_del(&req->hash_node);
5589 io_req_task_submit(req, locked);
5591 io_req_complete_failed(req, ret);
5594 static void __io_poll_execute(struct io_kiocb *req, int mask)
5596 req->result = mask;
5597 if (req->opcode == IORING_OP_POLL_ADD)
5598 req->io_task_work.func = io_poll_task_func;
5600 req->io_task_work.func = io_apoll_task_func;
5602 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
5603 io_req_task_work_add(req);
5606 static inline void io_poll_execute(struct io_kiocb *req, int res)
5608 if (io_poll_get_ownership(req))
5609 __io_poll_execute(req, res);
5612 static void io_poll_cancel_req(struct io_kiocb *req)
5614 io_poll_mark_cancelled(req);
5616 io_poll_execute(req, 0);
5622 struct io_kiocb *req = wait->private;
5628 io_poll_mark_cancelled(req);
5630 io_poll_execute(req, 0);
5643 * as req->head is NULL'ed out, the request can be
5655 if (io_poll_get_ownership(req)) {
5664 __io_poll_execute(req, mask);
5673 struct io_kiocb *req = pt->req;
5705 poll->wait.private = req;
5718 __io_queue_proc(&pt->req->poll, pt, head,
5719 (struct io_poll_iocb **) &pt->req->async_data);
5722 static int __io_arm_poll_handler(struct io_kiocb *req,
5726 struct io_ring_ctx *ctx = req->ctx;
5728 INIT_HLIST_NODE(&req->hash_node);
5730 poll->file = req->file;
5731 poll->wait.private = req;
5734 ipt->req = req;
5742 atomic_set(&req->poll_refs, 1);
5743 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5746 io_poll_remove_entries(req);
5747 /* no one else has access to the req, forget about the ref */
5751 io_poll_remove_entries(req);
5758 io_poll_req_insert(req);
5767 __io_poll_execute(req, mask);
5775 if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
5776 __io_poll_execute(req, 0);
5784 struct async_poll *apoll = pt->req->apoll;
5803 static int io_arm_poll_handler(struct io_kiocb *req)
5805 const struct io_op_def *def = &io_op_defs[req->opcode];
5806 struct io_ring_ctx *ctx = req->ctx;
5812 if (!req->file || !file_can_poll(req->file))
5821 if ((req->opcode == IORING_OP_RECVMSG) &&
5822 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5828 if (req->flags & REQ_F_POLLED) {
5829 apoll = req->apoll;
5842 req->apoll = apoll;
5843 req->flags |= REQ_F_POLLED;
5846 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
5850 trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
5862 struct io_kiocb *req;
5871 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
5872 if (io_match_task_safe(req, tsk, cancel_all)) {
5873 hlist_del_init(&req->hash_node);
5874 io_poll_cancel_req(req);
5888 struct io_kiocb *req;
5891 hlist_for_each_entry(req, list, hash_node) {
5892 if (sqe_addr != req->user_data)
5894 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
5896 return req;
5901 static bool io_poll_disarm(struct io_kiocb *req)
5904 if (!io_poll_get_ownership(req))
5906 io_poll_remove_entries(req);
5907 hash_del(&req->hash_node);
5915 struct io_kiocb *req = io_poll_find(ctx, sqe_addr, poll_only);
5917 if (!req)
5919 io_poll_cancel_req(req);
5937 static int io_poll_update_prep(struct io_kiocb *req,
5940 struct io_poll_update *upd = &req->poll_update;
5943 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5970 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5972 struct io_poll_iocb *poll = &req->poll;
5975 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5983 io_req_set_refcount(req);
5988 static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
5990 struct io_poll_iocb *poll = &req->poll;
5996 ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events);
5998 req_set_fail(req);
6001 __io_req_complete(req, issue_flags, ret, 0);
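
io_poll_add() and __io_arm_poll_handler() above service IORING_OP_POLL_ADD; the triggered event mask comes back (in poll(2) encoding) as the CQE result. A minimal liburing sketch polling one end of a pipe for readability.

    #include <liburing.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int pfd[2];

        pipe(pfd);
        io_uring_queue_init(4, &ring, 0);

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_poll_add(sqe, pfd[0], POLLIN);   /* one-shot poll */
        io_uring_submit(&ring);

        write(pfd[1], "x", 1);                         /* make the pipe readable */
        io_uring_wait_cqe(&ring, &cqe);
        printf("poll mask=0x%x\n", (unsigned)cqe->res); /* POLLIN expected */
        io_uring_cqe_seen(&ring, cqe);
        io_uring_queue_exit(&ring);
        return 0;
    }
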
6005 static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
6007 struct io_ring_ctx *ctx = req->ctx;
6014 preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
6022 if (req->poll_update.update_events || req->poll_update.update_user_data) {
6024 if (req->poll_update.update_events) {
6026 preq->poll.events |= req->poll_update.events & 0xffff;
6029 if (req->poll_update.update_user_data)
6030 preq->user_data = req->poll_update.new_user_data;
6041 req_set_fail(req);
6043 io_req_complete(req, ret);
6048 static void io_req_task_timeout(struct io_kiocb *req, bool *locked)
6050 req_set_fail(req);
6051 io_req_complete_post(req, -ETIME, 0);
6058 struct io_kiocb *req = data->req;
6059 struct io_ring_ctx *ctx = req->ctx;
6063 list_del_init(&req->timeout.list);
6064 atomic_set(&req->ctx->cq_timeouts,
6065 atomic_read(&req->ctx->cq_timeouts) + 1);
6068 req->io_task_work.func = io_req_task_timeout;
6069 io_req_task_work_add(req);
6078 struct io_kiocb *req;
6081 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
6082 found = user_data == req->user_data;
6089 io = req->async_data;
6092 list_del_init(&req->timeout.list);
6093 return req;
6100 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
6102 if (IS_ERR(req))
6103 return PTR_ERR(req);
6105 req_set_fail(req);
6106 io_fill_cqe_req(req, -ECANCELED, 0);
6107 io_put_req_deferred(req);
6132 struct io_kiocb *req;
6135 list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) {
6136 found = user_data == req->user_data;
6143 io = req->async_data;
6156 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
6159 if (IS_ERR(req))
6160 return PTR_ERR(req);
6162 req->timeout.off = 0; /* noseq */
6163 data = req->async_data;
6164 list_add_tail(&req->timeout.list, &ctx->timeout_list);
6171 static int io_timeout_remove_prep(struct io_kiocb *req,
6174 struct io_timeout_rem *tr = &req->timeout_rem;
6176 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
6178 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6212 static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
6214 struct io_timeout_rem *tr = &req->timeout_rem;
6215 struct io_ring_ctx *ctx = req->ctx;
6218 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
6236 req_set_fail(req);
6237 io_req_complete_post(req, ret, 0);
6241 static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
6248 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
6262 INIT_LIST_HEAD(&req->timeout.list);
6263 req->timeout.off = off;
6264 if (unlikely(off && !req->ctx->off_timeout_used))
6265 req->ctx->off_timeout_used = true;
6267 if (!req->async_data && io_alloc_async_data(req))
6270 data = req->async_data;
6271 data->req = req;
6277 INIT_LIST_HEAD(&req->timeout.list);
6282 struct io_submit_link *link = &req->ctx->submit_state.link;
6288 req->timeout.head = link->last;
6294 static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
6296 struct io_ring_ctx *ctx = req->ctx;
6297 struct io_timeout_data *data = req->async_data;
6299 u32 tail, off = req->timeout.off;
6308 if (io_is_timeout_noseq(req)) {
6314 req->timeout.target_seq = tail + off;
6337 list_add(&req->timeout.list, entry);
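
io_timeout() (6294-6337) inserts the request into ctx->timeout_list keyed on req->timeout.off, the number of completions after which the timeout is satisfied (0 means a pure timer, see io_is_timeout_noseq() at 1278). The user-space view with liburing; the 50 ms value is arbitrary.

    #include <liburing.h>
    #include <stdio.h>

    int main(void)
    {
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000 };

        io_uring_queue_init(4, &ring, 0);
        sqe = io_uring_get_sqe(&ring);
        /* count == 0: pure timer; count > 0 would complete after that many CQEs. */
        io_uring_prep_timeout(sqe, &ts, 0, 0);
        io_uring_submit(&ring);

        io_uring_wait_cqe(&ring, &cqe);
        printf("timeout res=%d (-ETIME expected)\n", cqe->res);
        io_uring_cqe_seen(&ring, cqe);
        io_uring_queue_exit(&ring);
        return 0;
    }
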
6351 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6354 return req->ctx == cd->ctx && req->user_data == cd->user_data;
6383 static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
6385 struct io_ring_ctx *ctx = req->ctx;
6388 WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
6390 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
6406 static int io_async_cancel_prep(struct io_kiocb *req,
6409 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
6411 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6417 req->cancel.addr = READ_ONCE(sqe->addr);
6421 static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
6423 struct io_ring_ctx *ctx = req->ctx;
6424 u64 sqe_addr = req->cancel.addr;
6428 ret = io_try_cancel_userdata(req, sqe_addr);
6438 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
6445 req_set_fail(req);
6446 io_req_complete_post(req, ret, 0);
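
io_async_cancel() (6421-6446) looks the victim up by its user_data, trying io-wq, poll and timeout cancellation in turn. A liburing sketch cancelling a read parked on an empty pipe; it assumes io_uring_prep_cancel() takes a pointer-sized user_data, as it does in the liburing releases I am aware of.

    #include <liburing.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        char buf[16];
        int pfd[2], i;

        pipe(pfd);
        io_uring_queue_init(4, &ring, 0);

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_read(sqe, pfd[0], buf, sizeof(buf), 0);
        io_uring_sqe_set_data(sqe, buf);        /* user_data = buffer address */

        /* Same submit, later SQE: IORING_OP_ASYNC_CANCEL matching that user_data. */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_cancel(sqe, buf, 0);
        io_uring_submit(&ring);

        for (i = 0; i < 2; i++) {               /* read: -ECANCELED, cancel: 0 */
            io_uring_wait_cqe(&ring, &cqe);
            printf("res=%d\n", cqe->res);
            io_uring_cqe_seen(&ring, cqe);
        }
        io_uring_queue_exit(&ring);
        return 0;
    }
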
6450 static int io_rsrc_update_prep(struct io_kiocb *req,
6453 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6458 req->rsrc_update.offset = READ_ONCE(sqe->off);
6459 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
6460 if (!req->rsrc_update.nr_args)
6462 req->rsrc_update.arg = READ_ONCE(sqe->addr);
6466 static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
6468 struct io_ring_ctx *ctx = req->ctx;
6472 up.offset = req->rsrc_update.offset;
6473 up.data = req->rsrc_update.arg;
6481 &up, req->rsrc_update.nr_args);
6485 req_set_fail(req);
6486 __io_req_complete(req, issue_flags, ret, 0);
6490 static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
6492 switch (req->opcode) {
6498 return io_read_prep(req, sqe);
6502 return io_write_prep(req, sqe);
6504 return io_poll_add_prep(req, sqe);
6506 return io_poll_update_prep(req, sqe);
6508 return io_fsync_prep(req, sqe);
6510 return io_sfr_prep(req, sqe);
6513 return io_sendmsg_prep(req, sqe);
6516 return io_recvmsg_prep(req, sqe);
6518 return io_connect_prep(req, sqe);
6520 return io_timeout_prep(req, sqe, false);
6522 return io_timeout_remove_prep(req, sqe);
6524 return io_async_cancel_prep(req, sqe);
6526 return io_timeout_prep(req, sqe, true);
6528 return io_accept_prep(req, sqe);
6530 return io_fallocate_prep(req, sqe);
6532 return io_openat_prep(req, sqe);
6534 return io_close_prep(req, sqe);
6536 return io_rsrc_update_prep(req, sqe);
6538 return io_statx_prep(req, sqe);
6540 return io_fadvise_prep(req, sqe);
6542 return io_madvise_prep(req, sqe);
6544 return io_openat2_prep(req, sqe);
6546 return io_epoll_ctl_prep(req, sqe);
6548 return io_splice_prep(req, sqe);
6550 return io_provide_buffers_prep(req, sqe);
6552 return io_remove_buffers_prep(req, sqe);
6554 return io_tee_prep(req, sqe);
6556 return io_shutdown_prep(req, sqe);
6558 return io_renameat_prep(req, sqe);
6560 return io_unlinkat_prep(req, sqe);
6564 req->opcode);
6568 static int io_req_prep_async(struct io_kiocb *req)
6570 if (!io_op_defs[req->opcode].needs_async_setup)
6572 if (WARN_ON_ONCE(req->async_data))
6574 if (io_alloc_async_data(req))
6577 switch (req->opcode) {
6579 return io_rw_prep_async(req, READ);
6581 return io_rw_prep_async(req, WRITE);
6583 return io_sendmsg_prep_async(req);
6585 return io_recvmsg_prep_async(req);
6587 return io_connect_prep_async(req);
6590 req->opcode);
6594 static u32 io_get_sequence(struct io_kiocb *req)
6596 u32 seq = req->ctx->cached_sq_head;
6598 /* need original cached_sq_head, but it was increased for each req */
6599 io_for_each_link(req, req)
6604 static bool io_drain_req(struct io_kiocb *req)
6607 struct io_ring_ctx *ctx = req->ctx;
6612 if (req->flags & REQ_F_FAIL) {
6613 io_req_complete_fail_submit(req);
6624 req->flags |= REQ_F_IO_DRAIN;
6628 io_for_each_link(pos, req->link) {
6631 req->flags |= REQ_F_IO_DRAIN;
6636 /* Still need defer if there is pending req in defer list. */
6639 !(req->flags & REQ_F_IO_DRAIN))) {
6646 seq = io_get_sequence(req);
6648 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
6651 ret = io_req_prep_async(req);
6654 io_prep_async_link(req);
6659 io_req_complete_failed(req, ret);
6664 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
6667 io_queue_async_work(req, NULL);
6671 trace_io_uring_defer(ctx, req, req->user_data);
6672 de->req = req;
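
io_drain_req() (6604-6672) defers a request flagged IOSQE_IO_DRAIN until every previously submitted request has produced a completion, using the sequence computed by io_get_sequence(). A small liburing sketch: a 50 ms timeout followed by a drained NOP, so the NOP's CQE should always arrive second.

    #include <liburing.h>
    #include <stdio.h>

    int main(void)
    {
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000 };
        int i;

        io_uring_queue_init(8, &ring, 0);

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_timeout(sqe, &ts, 0, 0);   /* completes after ~50 ms */
        io_uring_sqe_set_data(sqe, (void *)1);

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_nop(sqe);
        io_uring_sqe_set_data(sqe, (void *)2);
        sqe->flags |= IOSQE_IO_DRAIN;            /* held back until the timeout CQE */

        io_uring_submit(&ring);
        for (i = 0; i < 2; i++) {
            io_uring_wait_cqe(&ring, &cqe);
            printf("user_data=%llu res=%d\n",
                   (unsigned long long)cqe->user_data, cqe->res);
            io_uring_cqe_seen(&ring, cqe);
        }
        io_uring_queue_exit(&ring);
        return 0;
    }
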
6679 static void io_clean_op(struct io_kiocb *req)
6681 if (req->flags & REQ_F_BUFFER_SELECTED) {
6682 switch (req->opcode) {
6686 kfree((void *)(unsigned long)req->rw.addr);
6690 kfree(req->sr_msg.kbuf);
6695 if (req->flags & REQ_F_NEED_CLEANUP) {
6696 switch (req->opcode) {
6703 struct io_async_rw *io = req->async_data;
6710 struct io_async_msghdr *io = req->async_data;
6717 if (req->open.filename)
6718 putname(req->open.filename);
6721 putname(req->rename.oldpath);
6722 putname(req->rename.newpath);
6725 putname(req->unlink.filename);
6729 if ((req->flags & REQ_F_POLLED) && req->apoll) {
6730 kfree(req->apoll->double_poll);
6731 kfree(req->apoll);
6732 req->apoll = NULL;
6734 if (req->flags & REQ_F_INFLIGHT) {
6735 struct io_uring_task *tctx = req->task->io_uring;
6739 if (req->flags & REQ_F_CREDS)
6740 put_cred(req->creds);
6742 req->flags &= ~IO_REQ_CLEAN_FLAGS;
6745 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
6747 struct io_ring_ctx *ctx = req->ctx;
6751 if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
6752 creds = override_creds(req->creds);
6754 switch (req->opcode) {
6756 ret = io_nop(req, issue_flags);
6761 ret = io_read(req, issue_flags);
6766 ret = io_write(req, issue_flags);
6769 ret = io_fsync(req, issue_flags);
6772 ret = io_poll_add(req, issue_flags);
6775 ret = io_poll_update(req, issue_flags);
6778 ret = io_sync_file_range(req, issue_flags);
6781 ret = io_sendmsg(req, issue_flags);
6784 ret = io_send(req, issue_flags);
6787 ret = io_recvmsg(req, issue_flags);
6790 ret = io_recv(req, issue_flags);
6793 ret = io_timeout(req, issue_flags);
6796 ret = io_timeout_remove(req, issue_flags);
6799 ret = io_accept(req, issue_flags);
6802 ret = io_connect(req, issue_flags);
6805 ret = io_async_cancel(req, issue_flags);
6808 ret = io_fallocate(req, issue_flags);
6811 ret = io_openat(req, issue_flags);
6814 ret = io_close(req, issue_flags);
6817 ret = io_files_update(req, issue_flags);
6820 ret = io_statx(req, issue_flags);
6823 ret = io_fadvise(req, issue_flags);
6826 ret = io_madvise(req, issue_flags);
6829 ret = io_openat2(req, issue_flags);
6832 ret = io_epoll_ctl(req, issue_flags);
6835 ret = io_splice(req, issue_flags);
6838 ret = io_provide_buffers(req, issue_flags);
6841 ret = io_remove_buffers(req, issue_flags);
6844 ret = io_tee(req, issue_flags);
6847 ret = io_shutdown(req, issue_flags);
6850 ret = io_renameat(req, issue_flags);
6853 ret = io_unlinkat(req, issue_flags);
6865 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file)
6866 io_iopoll_req_issued(req);
6873 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6875 req = io_put_req_find_next(req);
6876 return req ? &req->work : NULL;
6881 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6886 if (!(req->flags & REQ_F_REFCOUNT))
6887 __io_req_set_refcount(req, 2);
6889 req_ref_get(req);
6891 timeout = io_prep_linked_timeout(req);
6900 ret = io_issue_sqe(req, 0);
6906 if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL))
6914 if (req->flags & REQ_F_NOWAIT)
6923 io_req_task_queue_fail(req, ret);
6954 struct io_kiocb *req, int fd,
6969 req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT);
6970 io_req_set_rsrc_node(req);
6977 struct io_kiocb *req, int fd)
6985 io_req_track_inflight(req);
6990 struct io_kiocb *req, int fd, bool fixed,
6994 return io_file_get_fixed(ctx, req, fd, issue_flags);
6996 return io_file_get_normal(ctx, req, fd);
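
io_file_get_fixed() (6954-6970) treats the SQE's fd as an index into the registered file table, pinning the ring's rsrc node via io_req_set_rsrc_node() instead of taking a per-request file reference. The user-space side of the fixed variant, sketched with liburing; /proc/version is just an assumed-readable file.

    #include <fcntl.h>
    #include <liburing.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        char buf[64];
        int fd = open("/proc/version", O_RDONLY);

        if (fd < 0)
            return 1;
        io_uring_queue_init(4, &ring, 0);
        io_uring_register_files(&ring, &fd, 1);     /* fd becomes slot 0 */

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_read(sqe, 0 /* slot, not fd */, buf, sizeof(buf), 0);
        sqe->flags |= IOSQE_FIXED_FILE;             /* interpret fd as table index */
        io_uring_submit(&ring);

        io_uring_wait_cqe(&ring, &cqe);
        printf("read %d bytes via fixed file slot 0\n", cqe->res);
        io_uring_cqe_seen(&ring, cqe);

        io_uring_unregister_files(&ring);
        io_uring_queue_exit(&ring);
        close(fd);
        return 0;
    }
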
6999 static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
7001 struct io_kiocb *prev = req->timeout.prev;
7005 if (!(req->task->flags & PF_EXITING))
7006 ret = io_try_cancel_userdata(req, prev->user_data);
7007 io_req_complete_post(req, ret ?: -ETIME, 0);
7010 io_req_complete_post(req, -ETIME, 0);
7018 struct io_kiocb *prev, *req = data->req;
7019 struct io_ring_ctx *ctx = req->ctx;
7023 prev = req->timeout.head;
7024 req->timeout.head = NULL;
7035 list_del(&req->timeout.list);
7036 req->timeout.prev = prev;
7039 req->io_task_work.func = io_req_task_link_timeout;
7040 io_req_task_work_add(req);
7044 static void io_queue_linked_timeout(struct io_kiocb *req)
7046 struct io_ring_ctx *ctx = req->ctx;
7053 if (req->timeout.head) {
7054 struct io_timeout_data *data = req->async_data;
7059 list_add_tail(&req->timeout.list, &ctx->ltimeout_list);
7063 io_put_req(req);
7066 static void __io_queue_sqe(struct io_kiocb *req)
7067 __must_hold(&req->ctx->uring_lock)
7073 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
7080 if (req->flags & REQ_F_COMPLETE_INLINE) {
7081 struct io_ring_ctx *ctx = req->ctx;
7084 state->compl_reqs[state->compl_nr++] = req;
7090 linked_timeout = io_prep_linked_timeout(req);
7093 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
7094 linked_timeout = io_prep_linked_timeout(req);
7096 switch (io_arm_poll_handler(req)) {
7106 io_queue_async_work(req, NULL);
7113 io_req_complete_failed(req, ret);
7117 static inline void io_queue_sqe(struct io_kiocb *req)
7118 __must_hold(&req->ctx->uring_lock)
7120 if (unlikely(req->ctx->drain_active) && io_drain_req(req))
7123 if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) {
7124 __io_queue_sqe(req);
7125 } else if (req->flags & REQ_F_FAIL) {
7126 io_req_complete_fail_submit(req);
7128 int ret = io_req_prep_async(req);
7131 io_req_complete_failed(req, ret);
7133 io_queue_async_work(req, NULL);
7143 struct io_kiocb *req,
7149 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
7163 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
7171 /* req is partially pre-initialised, see io_preinit_req() */
7172 req->opcode = READ_ONCE(sqe->opcode);
7174 req->flags = sqe_flags = READ_ONCE(sqe->flags);
7175 req->user_data = READ_ONCE(sqe->user_data);
7176 req->file = NULL;
7177 req->fixed_rsrc_refs = NULL;
7178 req->task = current;
7183 if (unlikely(req->opcode >= IORING_OP_LAST))
7185 if (!io_check_restriction(ctx, req, sqe_flags))
7189 !io_op_defs[req->opcode].buffer_select)
7196 req->creds = xa_load(&ctx->personalities, personality);
7197 if (!req->creds)
7199 get_cred(req->creds);
7200 req->flags |= REQ_F_CREDS;
7209 io_op_defs[req->opcode].plug) {
7214 if (io_op_defs[req->opcode].needs_file) {
7215 req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd),
7218 if (unlikely(!req->file))
7226 static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
7233 ret = io_init_req(ctx, req, sqe);
7239 * we can judge a link req is failed or cancelled by if
7241 * it may be set REQ_F_FAIL because of other req's failure
7242 * so let's leverage req->result to distinguish if a head
7243 * is set REQ_F_FAIL because of its failure or other req's
7249 } else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
7251 * the current req is a normal req, we should return
7254 io_req_complete_failed(req, ret);
7257 req_fail_link_node(req, ret);
7259 ret = io_req_prep(req, sqe);
7265 trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
7266 req->flags, true,
7279 if (!(req->flags & REQ_F_FAIL)) {
7280 ret = io_req_prep_async(req);
7282 req_fail_link_node(req, ret);
7287 trace_io_uring_link(ctx, req, head);
7288 link->last->link = req;
7289 link->last = req;
7292 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
7297 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
7298 link->head = req;
7299 link->last = req;
7301 io_queue_sqe(req);
7392 struct io_kiocb *req;
7394 req = io_alloc_req(ctx);
7395 if (unlikely(!req)) {
7402 list_add(&req->inflight_entry, &ctx->submit_state.free_list);
7407 if (io_submit_sqe(ctx, req, sqe))
8474 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
8477 struct io_ring_ctx *ctx = req->ctx;
8524 static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
8526 unsigned int offset = req->close.file_slot - 1;
8527 struct io_ring_ctx *ctx = req->ctx;
9336 struct io_kiocb *req, *nxt;
9338 list_for_each_entry_safe(req, nxt, list, inflight_entry) {
9339 list_del(&req->inflight_entry);
9340 kmem_cache_free(req_cachep, req);
9494 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
9496 return req->ctx == data;
9587 struct io_kiocb *req, *tmp;
9592 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
9593 if (io_match_task(req, tsk, cancel_all)) {
9594 io_kill_timeout(req, -ECANCELED);
9656 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
9659 return io_match_task_safe(req, cancel->task, cancel->all);
9670 if (io_match_task_safe(de->req, task, cancel_all)) {
9682 io_req_complete_failed(de->req, -ECANCELED);
10242 struct io_kiocb *req;
10244 hlist_for_each_entry(req, list, hash_node)
10245 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
10246 req->task->task_works != NULL);