Lines matching refs: req

136 struct io_kiocb *req;
148 static void io_queue_sqe(struct io_kiocb *req);
196 struct io_kiocb *req;
198 io_for_each_link(req, head) {
199 if (req->flags & REQ_F_INFLIGHT)
232 static inline void req_fail_link_node(struct io_kiocb *req, int res)
234 req_set_fail(req);
235 io_req_set_res(req, res, 0);
238 static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
240 wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
255 struct io_kiocb *req, *tmp;
260 llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
261 req->io_task_work.func(req, &ts);
360 static bool req_need_defer(struct io_kiocb *req, u32 seq)
362 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
363 struct io_ring_ctx *ctx = req->ctx;
371 static void io_clean_op(struct io_kiocb *req)
373 if (req->flags & REQ_F_BUFFER_SELECTED) {
374 spin_lock(&req->ctx->completion_lock);
375 io_put_kbuf_comp(req);
376 spin_unlock(&req->ctx->completion_lock);
379 if (req->flags & REQ_F_NEED_CLEANUP) {
380 const struct io_cold_def *def = &io_cold_defs[req->opcode];
383 def->cleanup(req);
385 if ((req->flags & REQ_F_POLLED) && req->apoll) {
386 kfree(req->apoll->double_poll);
387 kfree(req->apoll);
388 req->apoll = NULL;
390 if (req->flags & REQ_F_INFLIGHT) {
391 struct io_uring_task *tctx = req->task->io_uring;
395 if (req->flags & REQ_F_CREDS)
396 put_cred(req->creds);
397 if (req->flags & REQ_F_ASYNC_DATA) {
398 kfree(req->async_data);
399 req->async_data = NULL;
401 req->flags &= ~IO_REQ_CLEAN_FLAGS;
404 static inline void io_req_track_inflight(struct io_kiocb *req)
406 if (!(req->flags & REQ_F_INFLIGHT)) {
407 req->flags |= REQ_F_INFLIGHT;
408 atomic_inc(&req->task->io_uring->inflight_tracked);
412 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
414 if (WARN_ON_ONCE(!req->link))
417 req->flags &= ~REQ_F_ARM_LTIMEOUT;
418 req->flags |= REQ_F_LINK_TIMEOUT;
421 io_req_set_refcount(req);
422 __io_req_set_refcount(req->link, 2);
423 return req->link;
426 static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
428 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
430 return __io_prep_linked_timeout(req);
433 static noinline void __io_arm_ltimeout(struct io_kiocb *req)
435 io_queue_linked_timeout(__io_prep_linked_timeout(req));
438 static inline void io_arm_ltimeout(struct io_kiocb *req)
440 if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
441 __io_arm_ltimeout(req);
444 static void io_prep_async_work(struct io_kiocb *req)
446 const struct io_issue_def *def = &io_issue_defs[req->opcode];
447 struct io_ring_ctx *ctx = req->ctx;
449 if (!(req->flags & REQ_F_CREDS)) {
450 req->flags |= REQ_F_CREDS;
451 req->creds = get_current_cred();
454 req->work.list.next = NULL;
455 req->work.flags = 0;
456 req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
457 if (req->flags & REQ_F_FORCE_ASYNC)
458 req->work.flags |= IO_WQ_WORK_CONCURRENT;
460 if (req->file && !(req->flags & REQ_F_FIXED_FILE))
461 req->flags |= io_file_get_flags(req->file);
463 if (req->file && (req->flags & REQ_F_ISREG)) {
467 if (should_hash && (req->file->f_flags & O_DIRECT) &&
468 (req->file->f_mode & FMODE_DIO_PARALLEL_WRITE))
471 io_wq_hash_work(&req->work, file_inode(req->file));
472 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
474 req->work.flags |= IO_WQ_WORK_UNBOUND;
478 static void io_prep_async_link(struct io_kiocb *req)
482 if (req->flags & REQ_F_LINK_TIMEOUT) {
483 struct io_ring_ctx *ctx = req->ctx;
486 io_for_each_link(cur, req)
490 io_for_each_link(cur, req)
495 void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use)
497 struct io_kiocb *link = io_prep_linked_timeout(req);
498 struct io_uring_task *tctx = req->task->io_uring;
504 io_prep_async_link(req);
513 if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
514 req->work.flags |= IO_WQ_WORK_CANCEL;
516 trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
517 io_wq_enqueue(tctx->io_wq, &req->work);
528 if (req_need_defer(de->req, de->seq))
531 io_req_task_queue(de->req);
816 void io_req_cqe_overflow(struct io_kiocb *req)
818 io_cqring_event_overflow(req->ctx, req->cqe.user_data,
819 req->cqe.res, req->cqe.flags,
820 req->big_cqe.extra1, req->big_cqe.extra2);
821 memset(&req->big_cqe, 0, sizeof(req->big_cqe));
937 bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags)
939 struct io_ring_ctx *ctx = req->ctx;
940 u64 user_data = req->cqe.user_data;
969 static void __io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
971 struct io_ring_ctx *ctx = req->ctx;
975 if (!(req->flags & REQ_F_CQE_SKIP)) {
976 if (!io_fill_cqe_req(ctx, req))
977 io_req_cqe_overflow(req);
984 if (req_ref_put_and_test(req)) {
985 if (req->flags & IO_REQ_LINK_FLAGS) {
986 if (req->flags & IO_DISARM_MASK)
987 io_disarm_next(req);
988 if (req->link) {
989 io_req_task_queue(req->link);
990 req->link = NULL;
993 io_put_kbuf_comp(req);
994 if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
995 io_clean_op(req);
996 io_put_file(req);
998 rsrc_node = req->rsrc_node;
1004 io_put_task_remote(req->task);
1005 wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
1017 void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
1019 if (req->ctx->task_complete && req->ctx->submitter_task != current) {
1020 req->io_task_work.func = io_req_task_complete;
1021 io_req_task_work_add(req);
1023 !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
1024 __io_req_complete_post(req, issue_flags);
1026 struct io_ring_ctx *ctx = req->ctx;
1029 __io_req_complete_post(req, issue_flags & ~IO_URING_F_UNLOCKED);
1034 void io_req_defer_failed(struct io_kiocb *req, s32 res)
1037 const struct io_cold_def *def = &io_cold_defs[req->opcode];
1039 lockdep_assert_held(&req->ctx->uring_lock);
1041 req_set_fail(req);
1042 io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
1044 def->fail(req);
1045 io_req_complete_defer(req);
1052 static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
1054 req->ctx = ctx;
1055 req->link = NULL;
1056 req->async_data = NULL;
1058 memset(&req->cqe, 0, sizeof(req->cqe));
1059 memset(&req->big_cqe, 0, sizeof(req->big_cqe));
1110 struct io_kiocb *req = reqs[i];
1112 io_preinit_req(req, ctx);
1113 io_req_add_to_cache(req, ctx);
1118 __cold void io_free_req(struct io_kiocb *req)
1121 req->flags &= ~REQ_F_REFCOUNT;
1123 req->flags |= REQ_F_CQE_SKIP;
1124 req->io_task_work.func = io_req_task_complete;
1125 io_req_task_work_add(req);
1128 static void __io_req_find_next_prep(struct io_kiocb *req)
1130 struct io_ring_ctx *ctx = req->ctx;
1133 io_disarm_next(req);
1137 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
1147 if (unlikely(req->flags & IO_DISARM_MASK))
1148 __io_req_find_next_prep(req);
1149 nxt = req->link;
1150 req->link = NULL;
1176 struct io_kiocb *req = container_of(node, struct io_kiocb,
1181 if (req->ctx != *ctx) {
1183 *ctx = req->ctx;
1188 INDIRECT_CALL_2(req->io_task_work.func,
1190 req, ts);
1221 struct io_kiocb *req;
1224 req = container_of(node, struct io_kiocb, io_task_work.node);
1226 if (sync && last_ctx != req->ctx) {
1231 last_ctx = req->ctx;
1234 if (llist_add(&req->io_task_work.node,
1235 &req->ctx->fallback_llist))
1236 schedule_delayed_work(&req->ctx->fallback_work, 1);
1272 static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
1274 struct io_ring_ctx *ctx = req->ctx;
1278 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
1299 req->nr_tw = nr_tw;
1300 req->io_task_work.node.next = first;
1302 &req->io_task_work.node));
1323 static void io_req_normal_work_add(struct io_kiocb *req)
1325 struct io_uring_task *tctx = req->task->io_uring;
1326 struct io_ring_ctx *ctx = req->ctx;
1329 if (!llist_add(&req->io_task_work.node, &tctx->task_list))
1335 if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
1341 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
1343 if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
1345 io_req_local_work_add(req, flags);
1348 io_req_normal_work_add(req);
1358 struct io_kiocb *req = container_of(node, struct io_kiocb,
1362 io_req_normal_work_add(req);
1397 struct io_kiocb *req = container_of(node, struct io_kiocb,
1400 INDIRECT_CALL_2(req->io_task_work.func,
1402 req, ts);
1449 static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
1451 io_tw_lock(req->ctx, ts);
1452 io_req_defer_failed(req, req->cqe.res);
1455 void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
1457 io_tw_lock(req->ctx, ts);
1458 /* req->task == current here, checking PF_EXITING is safe */
1459 if (unlikely(req->task->flags & PF_EXITING))
1460 io_req_defer_failed(req, -EFAULT);
1461 else if (req->flags & REQ_F_FORCE_ASYNC)
1462 io_queue_iowq(req, ts);
1464 io_queue_sqe(req);
1467 void io_req_task_queue_fail(struct io_kiocb *req, int ret)
1469 io_req_set_res(req, ret, 0);
1470 req->io_task_work.func = io_req_task_cancel;
1471 io_req_task_work_add(req);
1474 void io_req_task_queue(struct io_kiocb *req)
1476 req->io_task_work.func = io_req_task_submit;
1477 io_req_task_work_add(req);
1480 void io_queue_next(struct io_kiocb *req)
1482 struct io_kiocb *nxt = io_req_find_next(req);
1493 struct io_kiocb *req = container_of(node, struct io_kiocb,
1496 if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
1497 if (req->flags & REQ_F_REFCOUNT) {
1498 node = req->comp_list.next;
1499 if (!req_ref_put_and_test(req))
1502 if ((req->flags & REQ_F_POLLED) && req->apoll) {
1503 struct async_poll *apoll = req->apoll;
1509 req->flags &= ~REQ_F_POLLED;
1511 if (req->flags & IO_REQ_LINK_FLAGS)
1512 io_queue_next(req);
1513 if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
1514 io_clean_op(req);
1516 io_put_file(req);
1518 io_req_put_rsrc_locked(req, ctx);
1520 io_put_task(req->task);
1521 node = req->comp_list.next;
1522 io_req_add_to_cache(req, ctx);
1537 struct io_kiocb *req = container_of(node, struct io_kiocb,
1540 if (!(req->flags & REQ_F_CQE_SKIP) &&
1541 unlikely(!io_fill_cqe_req(ctx, req))) {
1544 io_req_cqe_overflow(req);
1547 io_req_cqe_overflow(req);
1666 void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts)
1669 io_req_complete_defer(req);
1671 io_req_complete_post(req, IO_URING_F_UNLOCKED);
1680 static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
1682 struct io_ring_ctx *ctx = req->ctx;
1701 if (list_req->file != req->file)
1709 if (READ_ONCE(req->iopoll_completed))
1710 wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
1712 wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);
1740 bool io_alloc_async_data(struct io_kiocb *req)
1742 WARN_ON_ONCE(!io_cold_defs[req->opcode].async_size);
1743 req->async_data = kmalloc(io_cold_defs[req->opcode].async_size, GFP_KERNEL);
1744 if (req->async_data) {
1745 req->flags |= REQ_F_ASYNC_DATA;
1751 int io_req_prep_async(struct io_kiocb *req)
1753 const struct io_cold_def *cdef = &io_cold_defs[req->opcode];
1754 const struct io_issue_def *def = &io_issue_defs[req->opcode];
1757 if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE) && !req->file)
1758 req->file = io_file_get_normal(req, req->cqe.fd);
1761 if (WARN_ON_ONCE(req_has_async_data(req)))
1764 if (io_alloc_async_data(req))
1767 return cdef->prep_async(req);
1770 static u32 io_get_sequence(struct io_kiocb *req)
1772 u32 seq = req->ctx->cached_sq_head;
1775 /* need original cached_sq_head, but it was increased for each req */
1776 io_for_each_link(cur, req)
1781 static __cold void io_drain_req(struct io_kiocb *req)
1784 struct io_ring_ctx *ctx = req->ctx;
1787 u32 seq = io_get_sequence(req);
1789 /* Still need defer if there is pending req in defer list. */
1791 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
1795 io_req_task_queue(req);
1800 io_prep_async_link(req);
1804 io_req_defer_failed(req, ret);
1809 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
1815 trace_io_uring_defer(req);
1816 de->req = req;
1822 static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
1825 if (req->file || !def->needs_file)
1828 if (req->flags & REQ_F_FIXED_FILE)
1829 req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
1831 req->file = io_file_get_normal(req, req->cqe.fd);
1833 return !!req->file;
1836 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
1838 const struct io_issue_def *def = &io_issue_defs[req->opcode];
1842 if (unlikely(!io_assign_file(req, def, issue_flags)))
1845 if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
1846 creds = override_creds(req->creds);
1849 audit_uring_entry(req->opcode);
1851 ret = def->issue(req, issue_flags);
1861 io_req_complete_defer(req);
1863 io_req_complete_post(req, issue_flags);
1872 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
1873 io_iopoll_req_issued(req, issue_flags);
1878 int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts)
1880 io_tw_lock(req->ctx, ts);
1881 return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT|
1887 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1890 if (req_ref_put_and_test(req)) {
1891 if (req->flags & IO_REQ_LINK_FLAGS)
1892 nxt = io_req_find_next(req);
1893 io_free_req(req);
1900 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1901 const struct io_issue_def *def = &io_issue_defs[req->opcode];
1907 if (!(req->flags & REQ_F_REFCOUNT))
1908 __io_req_set_refcount(req, 2);
1910 req_ref_get(req);
1912 io_arm_ltimeout(req);
1917 io_req_task_queue_fail(req, err);
1920 if (!io_assign_file(req, def, issue_flags)) {
1926 if (req->flags & REQ_F_FORCE_ASYNC) {
1929 if (opcode_poll && file_can_poll(req->file)) {
1936 ret = io_issue_sqe(req, issue_flags);
1944 if (req->flags & REQ_F_NOWAIT)
1953 if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
1961 if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK)
1970 io_req_task_queue_fail(req, ret);
1973 inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
1976 struct io_ring_ctx *ctx = req->ctx;
1987 req->flags |= io_slot_flags(slot);
1988 io_req_set_rsrc_node(req, ctx, 0);
1994 struct file *io_file_get_normal(struct io_kiocb *req, int fd)
1998 trace_io_uring_file_get(req, fd);
2002 io_req_track_inflight(req);
2006 static void io_queue_async(struct io_kiocb *req, int ret)
2007 __must_hold(&req->ctx->uring_lock)
2011 if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
2012 io_req_defer_failed(req, ret);
2016 linked_timeout = io_prep_linked_timeout(req);
2018 switch (io_arm_poll_handler(req, 0)) {
2020 io_kbuf_recycle(req, 0);
2021 io_req_task_queue(req);
2024 io_kbuf_recycle(req, 0);
2025 io_queue_iowq(req, NULL);
2035 static inline void io_queue_sqe(struct io_kiocb *req)
2036 __must_hold(&req->ctx->uring_lock)
2040 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
2047 io_arm_ltimeout(req);
2049 io_queue_async(req, ret);
2052 static void io_queue_sqe_fallback(struct io_kiocb *req)
2053 __must_hold(&req->ctx->uring_lock)
2055 if (unlikely(req->flags & REQ_F_FAIL)) {
2060 req->flags &= ~REQ_F_HARDLINK;
2061 req->flags |= REQ_F_LINK;
2062 io_req_defer_failed(req, req->cqe.res);
2064 int ret = io_req_prep_async(req);
2067 io_req_defer_failed(req, ret);
2071 if (unlikely(req->ctx->drain_active))
2072 io_drain_req(req);
2074 io_queue_iowq(req, NULL);
2084 struct io_kiocb *req,
2087 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
2101 static void io_init_req_drain(struct io_kiocb *req)
2103 struct io_ring_ctx *ctx = req->ctx;
2120 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
2129 /* req is partially pre-initialised, see io_preinit_req() */
2130 req->opcode = opcode = READ_ONCE(sqe->opcode);
2132 req->flags = sqe_flags = READ_ONCE(sqe->flags);
2133 req->cqe.user_data = READ_ONCE(sqe->user_data);
2134 req->file = NULL;
2135 req->rsrc_node = NULL;
2136 req->task = current;
2139 req->opcode = 0;
2150 req->buf_index = READ_ONCE(sqe->buf_group);
2157 io_init_req_drain(req);
2161 if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
2165 req->flags |= REQ_F_FORCE_ASYNC;
2170 req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
2182 req->cqe.fd = READ_ONCE(sqe->fd);
2199 req->creds = xa_load(&ctx->personalities, personality);
2200 if (!req->creds)
2202 get_cred(req->creds);
2203 ret = security_uring_override_creds(req->creds);
2205 put_cred(req->creds);
2208 req->flags |= REQ_F_CREDS;
2211 return def->prep(req, sqe);
2215 struct io_kiocb *req, int ret)
2217 struct io_ring_ctx *ctx = req->ctx;
2221 trace_io_uring_req_failed(sqe, req, ret);
2229 req_fail_link_node(req, ret);
2233 if (!(req->flags & IO_REQ_LINK_FLAGS)) {
2235 link->last->link = req;
2237 req = head;
2239 io_queue_sqe_fallback(req);
2244 link->last->link = req;
2246 link->head = req;
2247 link->last = req;
2251 static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2258 ret = io_init_req(ctx, req, sqe);
2260 return io_submit_fail_init(sqe, req, ret);
2262 trace_io_uring_submit_req(req);
2272 ret = io_req_prep_async(req);
2274 return io_submit_fail_init(sqe, req, ret);
2276 trace_io_uring_link(req, link->head);
2277 link->last->link = req;
2278 link->last = req;
2280 if (req->flags & IO_REQ_LINK_FLAGS)
2283 req = link->head;
2285 if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL))
2288 } else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS |
2290 if (req->flags & IO_REQ_LINK_FLAGS) {
2291 link->head = req;
2292 link->last = req;
2295 io_queue_sqe_fallback(req);
2300 io_queue_sqe(req);
2402 struct io_kiocb *req;
2404 if (unlikely(!io_alloc_req(ctx, &req)))
2407 io_req_add_to_cache(req, ctx);
2415 if (unlikely(io_submit_sqe(ctx, req, sqe)) &&
2424 /* try again if it submitted nothing and can't allocate a req */
2870 struct io_kiocb *req;
2877 req = io_extract_req(ctx);
2878 kmem_cache_free(req_cachep, req);
3055 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
3057 return req->ctx == data;
3203 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
3206 return io_match_task_safe(req, cancel->task, cancel->all);
3218 if (io_match_task_safe(de->req, task, cancel_all)) {
3230 io_req_task_queue_fail(de->req, -ECANCELED);