#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	/*
	 * A hint to not wake right away but delay until there are enough
	 * task_work items queued to match the number of CQEs the task is
	 * waiting for.
	 *
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
	IOU_F_TWQ_LAZY_WAKE			= 1,
};

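/*
 * Return codes passed between opcode issue handlers and the io_uring
 * core to steer how a request completes.
 */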
enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Requeue the task_work to restart operations on this request. The
	 * actual value isn't important, it should just not clash with a real
	 * error code while staying within -MAX_ERRNO, so that it is still
	 * treated as a valid error value internally.
	 */
	IOU_REQUEUE		= -3072,

	/*
	 * Intended for use only when IO_URING_F_MULTISHOT is passed, to
	 * indicate to the poll runner that multishot should be removed and
	 * the result set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};

bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
void io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				     int start, int end);

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

void *io_mem_alloc(size_t size);
void io_mem_free(void *ptr);

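/*
 * CQE posting must be serialised: IOPOLL rings rely on ->uring_lock,
 * rings without ->task_complete on ->completion_lock, and task_complete
 * rings on only posting from the submitter task (or from a work item
 * once that task is exiting).
 */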
#if defined(CONFIG_PROVE_LOCKING)
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
	lockdep_assert(in_task());

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		lockdep_assert_held(&ctx->uring_lock);
	} else if (!ctx->task_complete) {
		lockdep_assert_held(&ctx->completion_lock);
	} else if (ctx->submitter_task) {
		/*
		 * ->submitter_task may be NULL and we can still post a CQE,
		 * if the ring has been set up with IORING_SETUP_R_DISABLED.
		 * Not from an SQE, as those cannot be submitted, but via
		 * updating tagged resources.
		 */
		if (ctx->submitter_task->flags & PF_EXITING)
			lockdep_assert(current_work());
		else
			lockdep_assert(current == ctx->submitter_task);
	}
}
#else
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
}
#endif

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}

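/*
 * Iterate a request and every request linked behind it, e.g. to measure
 * the length of a link chain:
 *
 *	io_for_each_link(pos, head)
 *		nr++;
 */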
#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

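/*
 * Hand out a pointer to the next free CQE slot via @ret, refilling the
 * CQE cache from the ring when it runs dry. CQE32 rings consume two
 * cache slots per completion.
 */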
static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
					struct io_uring_cqe **ret,
					bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
			return false;
	}
	*ret = ctx->cqe_cached;
	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	if (ctx->flags & IORING_SETUP_CQE32)
		ctx->cqe_cached++;
	return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
	return io_get_cqe_overflow(ctx, ret, false);
}

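/*
 * Copy the request's prepared CQE (plus the big CQE payload on CQE32
 * rings) into the next free ring slot. Returns false if no slot could
 * be obtained, leaving the completion to the overflow path.
 */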
static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					    struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (unlikely(!io_get_cqe(ctx, &cqe)))
		return false;

	if (trace_io_uring_complete_enabled())
		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
					req->cqe.res, req->cqe.flags,
					req->big_cqe.extra1, req->big_cqe.extra2);

	memcpy(cqe, &req->cqe, sizeof(*cqe));
	if (ctx->flags & IORING_SETUP_CQE32) {
		memcpy(cqe->big_cqe, &req->big_cqe, sizeof(req->big_cqe));
		memset(&req->big_cqe, 0, sizeof(req->big_cqe));
	}
	return true;
}

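/*
 * Mark a request as failed. A failed request must always post a CQE, so
 * an existing CQE_SKIP is cleared here and handed on to the rest of the
 * link chain via SKIP_LINK_CQES.
 */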
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
		fput(req->file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks, io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency on eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

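/*
 * The SQ is full once the gap between the userspace-written tail and
 * our cached head covers every SQ entry.
 */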
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}

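/*
 * Run any task_work pending for the current task. Returns 1 if work was
 * run, 0 otherwise.
 */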
static inline int io_run_task_work(void)
{
	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER &&
	    test_thread_flag(TIF_NOTIFY_RESUME)) {
		__set_current_state(TASK_RUNNING);
		resume_user_mode_work(NULL);
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || !llist_empty(&ctx->work_llist);
}

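/*
 * Make sure ->uring_lock is held inside a task_work handler, recording
 * the fact in @ts so the task_work runner knows to drop it afterwards.
 */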
static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	if (!ts->locked) {
		mutex_lock(&ctx->uring_lock);
		ts->locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

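/*
 * Take @nr references against the task's cached batch, only falling
 * back to a refill of the cache when it is exhausted.
 */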
static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;

static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

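/*
 * Grab a request from the free cache, refilling it first if needed.
 * Returns false only if the refill allocation failed.
 */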
static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}

static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

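/*
 * Store the result and punt the completion to task_work, for completing
 * a request outside of its issue path.
 */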
static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}
#endif