// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>
#include <linux/security.h>
#include <linux/nospec.h>

#include <uapi/linux/io_uring.h>
#include <asm/ioctls.h>

#include "io_uring.h"
#include "rsrc.h"
#include "uring_cmd.h"

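/*
 * Task-work trampoline: recover the io_uring_cmd from the request and run
 * the callback the driver registered via io_uring_cmd_do_in_task*(). If
 * task work already holds the ring lock, pass 0 as issue_flags; otherwise
 * tell the callback it runs unlocked via IO_URING_F_UNLOCKED.
 */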
static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;

	ioucmd->task_work_cb(ioucmd, issue_flags);
}

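/*
 * Schedule @task_work_cb to run in the context of the task that submitted
 * the request, e.g. so a driver can complete a command from a safe,
 * non-atomic context. @flags is passed through to the task-work queueing
 * machinery (such as IOU_F_TWQ_LAZY_WAKE).
 */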
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);

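/*
 * Lazy-wake variant of the above: the submitter task is only woken once
 * enough task-work items have accumulated, batching wakeups for
 * completion-heavy workloads.
 */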
void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_do_in_task_lazy);

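/* Stash the two extra result words that a CQE32 completion carries. */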
static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else {
		struct io_tw_state ts = {
			.locked = !(issue_flags & IO_URING_F_UNLOCKED),
		};
		io_req_task_complete(req, &ts);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);

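/*
 * Illustrative only (hypothetical driver code, not part of this file): a
 * ->uring_cmd() handler that returned -EIOCBQUEUED might later complete
 * the request from task work like so:
 *
 *	static void mydrv_cmd_tw_cb(struct io_uring_cmd *ioucmd,
 *				    unsigned issue_flags)
 *	{
 *		io_uring_cmd_done(ioucmd, 0, 0, issue_flags);
 *	}
 */

/*
 * Copy the SQE into preallocated async data so the command still has a
 * stable copy once the submission ring entry may be reused, then point
 * ioucmd->sqe at that copy.
 */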
int io_uring_cmd_prep_async(struct io_kiocb *req)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	memcpy(req->async_data, ioucmd->sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = req->async_data;
	return 0;
}

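/*
 * Validate the uring_cmd specific SQE fields. For IORING_URING_CMD_FIXED,
 * resolve the registered buffer index up front (with a speculation
 * barrier) and take a reference on the rsrc node so the buffer stays
 * valid for the life of the request.
 */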
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_FIXED)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		req->buf_index = READ_ONCE(sqe->buf_index);
		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}
	ioucmd->sqe = sqe;
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
	return 0;
}

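/*
 * Issue path: forward the command to the file's ->uring_cmd() handler,
 * after translating ring-level setup flags (SQE128/CQE32/IOPOLL) into
 * issue_flags. -EIOCBQUEUED from the handler means it owns completion and
 * will call io_uring_cmd_done() later; -EAGAIN triggers a retry from a
 * stable async copy of the SQE.
 */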
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
		WRITE_ONCE(ioucmd->cookie, NULL);
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		if (!req_has_async_data(req)) {
			if (io_alloc_async_data(req))
				return -ENOMEM;
			io_uring_cmd_prep_async(req);
		}
		return -EAGAIN;
	}

	if (ret != -EIOCBQUEUED) {
		if (ret < 0)
			req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return ret;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}

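/*
 * Map @ubuf/@len against the registered buffer resolved at prep time into
 * @iter, for handlers operating on IORING_URING_CMD_FIXED buffers.
 */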
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	return io_import_fixed(rw, iter, req->imu, ubuf, len);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);

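/*
 * Generic socket-level commands: translate SOCKET_URING_OP_SIOCINQ and
 * SOCKET_URING_OP_SIOCOUTQ into the protocol's ->ioctl() and return the
 * queued byte count as the command's result.
 */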
int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct socket *sock = cmd->file->private_data;
	struct sock *sk = sock->sk;
	struct proto *prot = READ_ONCE(sk->sk_prot);
	int ret, arg = 0;

	if (!prot || !prot->ioctl)
		return -EOPNOTSUPP;

	switch (cmd->sqe->cmd_op) {
	case SOCKET_URING_OP_SIOCINQ:
		ret = prot->ioctl(sk, SIOCINQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_SIOCOUTQ:
		ret = prot->ioctl(sk, SIOCOUTQ, &arg);
		if (ret)
			return ret;
		return arg;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);