// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "cancel.h"
#include "timeout.h"

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	u32				repeats;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
	/* for linked completions */
	struct io_kiocb			*prev;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
	bool				ltimeout;
};

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;

	return !timeout->off || data->flags & IORING_TIMEOUT_MULTISHOT;
}

static inline void io_put_req(struct io_kiocb *req)
{
	if (req_ref_put_and_test(req)) {
		io_queue_next(req);
		io_free_req(req);
	}
}

static inline bool io_timeout_finish(struct io_timeout *timeout,
				     struct io_timeout_data *data)
{
	if (!(data->flags & IORING_TIMEOUT_MULTISHOT))
		return true;

	if (!timeout->off || (timeout->repeats && --timeout->repeats))
		return false;

	return true;
}

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer);

static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;

	if (!io_timeout_finish(timeout, data)) {
		bool filled;
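		/*
		 * Multishot timeout with repeats still outstanding: post an
		 * auxiliary CQE carrying IORING_CQE_F_MORE and re-arm the
		 * timer instead of completing the request.
		 */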
		filled = io_fill_cqe_req_aux(req, ts->locked, -ETIME,
					     IORING_CQE_F_MORE);
		if (filled) {
			/* re-arm timer */
			spin_lock_irq(&ctx->timeout_lock);
			list_add(&timeout->list, ctx->timeout_list.prev);
			data->timer.function = io_timeout_fn;
			hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
			spin_unlock_irq(&ctx->timeout_lock);
			return;
		}
	}

	io_req_task_complete(req, ts);
}

static bool io_kill_timeout(struct io_kiocb *req, int status)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = req->async_data;

	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);

		if (status)
			req_set_fail(req);
		atomic_set(&req->ctx->cq_timeouts,
			   atomic_read(&req->ctx->cq_timeouts) + 1);
		list_del_init(&timeout->list);
		io_req_queue_tw_complete(req, status);
		return true;
	}
	return false;
}

__cold void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	u32 seq;
	struct io_timeout *timeout, *tmp;

	spin_lock_irq(&ctx->timeout_lock);
	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
		u32 events_needed, events_got;

		if (io_is_timeout_noseq(req))
			break;

		/*
		 * Since seq can easily wrap around over time, subtract
		 * the last seq at which timeouts were flushed before comparing.
		 * Assuming not more than 2^31-1 events have happened since,
		 * these subtractions won't have wrapped, so we can check if
		 * target is in [last_seq, current_seq] by comparing the two.
		 */
		events_needed = timeout->target_seq - ctx->cq_last_tm_flush;
		events_got = seq - ctx->cq_last_tm_flush;
		if (events_got < events_needed)
			break;

		io_kill_timeout(req, 0);
	}
	ctx->cq_last_tm_flush = seq;
	spin_unlock_irq(&ctx->timeout_lock);
}

static void io_req_tw_fail_links(struct io_kiocb *link, struct io_tw_state *ts)
{
	io_tw_lock(link->ctx, ts);
	while (link) {
		struct io_kiocb *nxt = link->link;
		long res = -ECANCELED;

		if (link->flags & REQ_F_FAIL)
			res = link->cqe.res;
		link->link = NULL;
		io_req_set_res(link, res, 0);
		io_req_task_complete(link, ts);
		link = nxt;
	}
}

static void io_fail_links(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = req->link;
	bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;

	if (!link)
		return;

	while (link) {
		if (ignore_cqes)
			link->flags |= REQ_F_CQE_SKIP;
		else
			link->flags &= ~REQ_F_CQE_SKIP;
		trace_io_uring_fail_link(req, link);
		link = link->link;
	}

	link = req->link;
	link->io_task_work.func = io_req_tw_fail_links;
	io_req_task_work_add(link);
	req->link = NULL;
}

static inline void io_remove_next_linked(struct io_kiocb *req)
{
	struct io_kiocb *nxt = req->link;

	req->link = nxt->link;
	nxt->link = NULL;
}

void io_disarm_next(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = NULL;

	if (req->flags & REQ_F_ARM_LTIMEOUT) {
		link = req->link;
		req->flags &= ~REQ_F_ARM_LTIMEOUT;
		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
			io_remove_next_linked(req);
			io_req_queue_tw_complete(link, -ECANCELED);
		}
	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
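		/*
		 * A linked timeout is armed against this request: disarm it
		 * under timeout_lock so its hrtimer callback can't run
		 * concurrently, and complete it with -ECANCELED if the timer
		 * was successfully cancelled.
		 */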
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock_irq(&ctx->timeout_lock);
		link = io_disarm_linked_timeout(req);
		spin_unlock_irq(&ctx->timeout_lock);
		if (link)
			io_req_queue_tw_complete(link, -ECANCELED);
	}
	if (unlikely((req->flags & REQ_F_FAIL) &&
		     !(req->flags & REQ_F_HARDLINK)))
		io_fail_links(req);
}

struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
					    struct io_kiocb *link)
	__must_hold(&req->ctx->completion_lock)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = link->async_data;
	struct io_timeout *timeout = io_kiocb_to_cmd(link, struct io_timeout);

	io_remove_next_linked(req);
	timeout->head = NULL;
	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		list_del(&timeout->list);
		return link;
	}

	return NULL;
}

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->timeout_lock, flags);
	list_del_init(&timeout->list);
	atomic_set(&req->ctx->cq_timeouts,
		   atomic_read(&req->ctx->cq_timeouts) + 1);
	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
		req_set_fail(req);

	io_req_set_res(req, -ETIME, 0);
	req->io_task_work.func = io_timeout_complete;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}

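/*
 * Find the timeout on ctx->timeout_list matching @cd and stop its hrtimer.
 * Returns the request with its list entry removed, or an ERR_PTR: -ENOENT
 * if nothing matched, -EALREADY if the timer callback is already running.
 */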
static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
					   struct io_cancel_data *cd)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout *timeout;
	struct io_timeout_data *io;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->timeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (io_cancel_req_match(tmp, cd)) {
			req = tmp;
			break;
		}
	}
	if (!req)
		return ERR_PTR(-ENOENT);

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return ERR_PTR(-EALREADY);
	timeout = io_kiocb_to_cmd(req, struct io_timeout);
	list_del_init(&timeout->list);
	return req;
}

int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
	__must_hold(&ctx->completion_lock)
{
	struct io_kiocb *req;

	spin_lock_irq(&ctx->timeout_lock);
	req = io_timeout_extract(ctx, cd);
	spin_unlock_irq(&ctx->timeout_lock);

	if (IS_ERR(req))
		return PTR_ERR(req);
	io_req_task_queue_fail(req, -ECANCELED);
	return 0;
}

static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *ts)
{
	unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_kiocb *prev = timeout->prev;
	int ret = -ENOENT;

	if (prev) {
		if (!(req->task->flags & PF_EXITING)) {
			struct io_cancel_data cd = {
				.ctx		= req->ctx,
				.data		= prev->cqe.user_data,
			};

			ret = io_try_cancel(req->task->io_uring, &cd, issue_flags);
		}
		io_req_set_res(req, ret ?: -ETIME, 0);
		io_req_task_complete(req, ts);
		io_put_req(prev);
	} else {
		io_req_set_res(req, -ETIME, 0);
		io_req_task_complete(req, ts);
	}
}

static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *prev, *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->timeout_lock, flags);
	prev = timeout->head;
	timeout->head = NULL;

	/*
	 * We don't expect the list to be empty, that will only happen if we
	 * race with the completion of the linked work.
	 */
	if (prev) {
		io_remove_next_linked(prev);
		if (!req_ref_inc_not_zero(prev))
			prev = NULL;
	}
	list_del(&timeout->list);
	timeout->prev = prev;
	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	req->io_task_work.func = io_req_task_link_timeout;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}

static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
{
	switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
	case IORING_TIMEOUT_BOOTTIME:
		return CLOCK_BOOTTIME;
	case IORING_TIMEOUT_REALTIME:
		return CLOCK_REALTIME;
	default:
		/* can't happen, vetted at prep time */
		WARN_ON_ONCE(1);
		fallthrough;
	case 0:
		return CLOCK_MONOTONIC;
	}
}

static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
				    struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout_data *io;
	struct io_timeout *timeout;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->ltimeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (user_data == tmp->cqe.user_data) {
			req = tmp;
			break;
		}
	}
	if (!req)
		return -ENOENT;

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return -EALREADY;
	hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
	io->timer.function = io_link_timeout_fn;
	hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
	return 0;
}

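/*
 * Update a regular (non-linked) timeout: extract it from the timeout list,
 * turn it into a pure timer by clearing the sequence offset, and re-arm the
 * hrtimer with the new expiry.
 */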
static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
			     struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_cancel_data cd = { .ctx = ctx, .data = user_data, };
	struct io_kiocb *req = io_timeout_extract(ctx, &cd);
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data;

	if (IS_ERR(req))
		return PTR_ERR(req);

	timeout->off = 0; /* noseq */
	data = req->async_data;
	list_add_tail(&timeout->list, &ctx->timeout_list);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
	return 0;
}

int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
		return -EINVAL;

	tr->ltimeout = false;
	tr->addr = READ_ONCE(sqe->addr);
	tr->flags = READ_ONCE(sqe->timeout_flags);
	if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
		if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
			return -EINVAL;
		if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
			tr->ltimeout = true;
		if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
			return -EINVAL;
		if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
			return -EFAULT;
		if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
			return -EINVAL;
	} else if (tr->flags) {
		/* timeout removal doesn't support flags */
		return -EINVAL;
	}

	return 0;
}

static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
{
	return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
					    : HRTIMER_MODE_REL;
}

/*
 * Remove or update an existing timeout command
 */
int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	if (!(tr->flags & IORING_TIMEOUT_UPDATE)) {
		struct io_cancel_data cd = { .ctx = ctx, .data = tr->addr, };

		spin_lock(&ctx->completion_lock);
		ret = io_timeout_cancel(ctx, &cd);
		spin_unlock(&ctx->completion_lock);
	} else {
		enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);

		spin_lock_irq(&ctx->timeout_lock);
		if (tr->ltimeout)
			ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
		else
			ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
		spin_unlock_irq(&ctx->timeout_lock);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static int __io_timeout_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe,
			     bool is_timeout_link)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data;
	unsigned flags;
	u32 off = READ_ONCE(sqe->off);

	if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
		return -EINVAL;
	if (off && is_timeout_link)
		return -EINVAL;
	flags = READ_ONCE(sqe->timeout_flags);
	if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
		      IORING_TIMEOUT_ETIME_SUCCESS |
		      IORING_TIMEOUT_MULTISHOT))
		return -EINVAL;
	/* more than one clock specified is invalid, obviously */
	if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
		return -EINVAL;
	/* multishot requests only make sense with rel values */
	if (!(~flags & (IORING_TIMEOUT_MULTISHOT | IORING_TIMEOUT_ABS)))
		return -EINVAL;

	INIT_LIST_HEAD(&timeout->list);
	timeout->off = off;
	if (unlikely(off && !req->ctx->off_timeout_used))
		req->ctx->off_timeout_used = true;
	/*
	 * for multishot reqs w/ fixed nr of repeats, repeats tracks the
	 * remaining nr
	 */
	timeout->repeats = 0;
	if ((flags & IORING_TIMEOUT_MULTISHOT) && off > 0)
		timeout->repeats = off;

	if (WARN_ON_ONCE(req_has_async_data(req)))
		return -EFAULT;
	if (io_alloc_async_data(req))
		return -ENOMEM;

	data = req->async_data;
	data->req = req;
	data->flags = flags;

	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
		return -EFAULT;

	if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
		return -EINVAL;

	INIT_LIST_HEAD(&timeout->list);
	data->mode = io_translate_timeout_mode(flags);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);

	if (is_timeout_link) {
		struct io_submit_link *link = &req->ctx->submit_state.link;

		if (!link->head)
			return -EINVAL;
		if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
			return -EINVAL;
		timeout->head = link->last;
		link->last->flags |= REQ_F_ARM_LTIMEOUT;
	}
	return 0;
}

int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, false);
}

int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, true);
}

int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_timeout_data *data = req->async_data;
	struct list_head *entry;
	u32 tail, off = timeout->off;

	spin_lock_irq(&ctx->timeout_lock);

	/*
	 * sqe->off holds how many events need to occur for this
	 * timeout event to be satisfied. If it isn't set, then this is
	 * a pure timeout request, sequence isn't used.
	 */
	if (io_is_timeout_noseq(req)) {
		entry = ctx->timeout_list.prev;
		goto add;
	}

	tail = data_race(ctx->cached_cq_tail) - atomic_read(&ctx->cq_timeouts);
	timeout->target_seq = tail + off;

	/* Update the last seq here in case io_flush_timeouts() hasn't.
	 * This is safe because ->completion_lock is held, and submissions
	 * and completions are never mixed in the same ->completion_lock section.
	 */
	ctx->cq_last_tm_flush = tail;

	/*
	 * Insertion sort, ensuring the first entry in the list is always
	 * the one we need first.
	 */
	list_for_each_prev(entry, &ctx->timeout_list) {
		struct io_timeout *nextt = list_entry(entry, struct io_timeout, list);
		struct io_kiocb *nxt = cmd_to_io_kiocb(nextt);

		if (io_is_timeout_noseq(nxt))
			continue;
		/* nxt.seq is behind @tail, otherwise would've been completed */
		if (off >= nextt->target_seq - tail)
			break;
	}
add:
	list_add(&timeout->list, entry);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
	spin_unlock_irq(&ctx->timeout_lock);
	return IOU_ISSUE_SKIP_COMPLETE;
}

void io_queue_linked_timeout(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock_irq(&ctx->timeout_lock);
	/*
	 * If the back reference is NULL, then our linked request finished
	 * before we got a chance to setup the timer
	 */
	if (timeout->head) {
		struct io_timeout_data *data = req->async_data;

		data->timer.function = io_link_timeout_fn;
		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
			      data->mode);
		list_add_tail(&timeout->list, &ctx->ltimeout_list);
	}
	spin_unlock_irq(&ctx->timeout_lock);
	/* drop submission reference */
	io_put_req(req);
}

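/*
 * Decide whether @head should be cancelled on behalf of @task: the request
 * must belong to @task (a NULL task matches any), and unless @cancel_all is
 * set, some request in its link chain must be marked REQ_F_INFLIGHT.
 */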
static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
			  bool cancel_all)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_kiocb *req;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

/* Returns true if we found and killed one or more timeouts */
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
			     bool cancel_all)
{
	struct io_timeout *timeout, *tmp;
	int canceled = 0;

	/*
	 * completion_lock is needed for io_match_task(). Take it before
	 * timeout_lock first to keep locking ordering.
	 */
	spin_lock(&ctx->completion_lock);
	spin_lock_irq(&ctx->timeout_lock);
	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);

		if (io_match_task(req, tsk, cancel_all) &&
		    io_kill_timeout(req, -ECANCELED))
			canceled++;
	}
	spin_unlock_irq(&ctx->timeout_lock);
	spin_unlock(&ctx->completion_lock);
	return canceled != 0;
}