xref: /kernel/linux/linux-5.10/drivers/vhost/vhost.h (revision 8c2ecf20)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
#include <linux/vhost_iotlb.h>
#include <linux/irqbypass.h>

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node	node;
	vhost_work_fn_t		fn;
	unsigned long		flags;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table		table;
	wait_queue_head_t	*wqh;
	wait_queue_entry_t	wait;
	struct vhost_work	work;
	__poll_t		mask;
	struct vhost_dev	*dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);
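
/*
 * Illustrative sketch (not part of this header): a backend can embed a
 * vhost_work in its own state, initialize it with a handler, and queue it
 * on the device's worker thread. The type "foo_backend" and the handler
 * name below are hypothetical.
 *
 *	struct foo_backend {
 *		struct vhost_dev dev;
 *		struct vhost_work work;
 *	};
 *
 *	static void foo_work_fn(struct vhost_work *work)
 *	{
 *		struct foo_backend *f = container_of(work, struct foo_backend, work);
 *
 *		// runs in the vhost worker kthread
 *		(void)f;
 *	}
 *
 *	// setup:
 *	//	vhost_work_init(&f->work, foo_work_fn);
 *	// later, from any context:
 *	//	vhost_work_queue(&f->dev, &f->work);
 */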

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
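
/*
 * Illustrative sketch (not part of this header): polling an eventfd so that
 * handle_kick style work runs when the guest signals it. The handler name
 * and the origin of the file pointer (e.g. an eventfd handed in from
 * userspace) are hypothetical.
 *
 *	vhost_poll_init(&vq->poll, handle_kick, EPOLLIN, &dev);
 *	if (vhost_poll_start(&vq->poll, eventfd_file))
 *		goto err;		// wait queue could not be armed
 *	...
 *	vhost_poll_stop(&vq->poll);	// detach from the file's wait queue
 *	vhost_poll_flush(&vq->poll);	// wait for queued work to finish
 */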

struct vhost_log {
	u64 addr;
	u64 len;
};

enum vhost_uaddr_type {
	VHOST_ADDR_DESC = 0,
	VHOST_ADDR_AVAIL = 1,
	VHOST_ADDR_USED = 2,
	VHOST_NUM_ADDRS = 3,
};

struct vhost_vring_call {
	struct eventfd_ctx *ctx;
	struct irq_bypass_producer producer;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	vring_desc_t __user *desc;
	vring_avail_t __user *avail;
	vring_used_t __user *used;
	const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;
	struct vhost_vring_call call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw.
	 * Values are limited to 0x7fff, and the high bit is used as
	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used.
	 * Values are limited to 0x7fff, and the high bit is used as
	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether signalled_used holds a valid value; when false,
	 * the next used-buffer update forces a signal. */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	void *private_data;
	u64 acked_features;
	u64 acked_backend_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;
	struct iovec log_iov[64];

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};

struct vhost_msg_node {
	union {
		struct vhost_msg msg;
		struct vhost_msg_v2 msg_v2;
	};
	struct vhost_virtqueue *vq;
	struct list_head node;
};

struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct eventfd_ctx *log_ctx;
	struct llist_head work_list;
	struct task_struct *worker;
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
	int iov_limit;
	int weight;
	int byte_weight;
	u64 kcov_handle;
	bool use_worker;
	int (*msg_handler)(struct vhost_dev *dev,
			   struct vhost_iotlb_msg *msg);
};

bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
		    int nvqs, int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev,
				       struct vhost_iotlb_msg *msg));
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_iotlb *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
void vhost_clear_msg(struct vhost_dev *dev);
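
/*
 * Illustrative sketch (not part of this header): a typical backend open()
 * path sets up its virtqueues and registers them with vhost_dev_init().
 * The structure "foo", its queue count and the weight values are
 * hypothetical placeholders for the backend's fairness limits.
 *
 *	struct vhost_virtqueue *vqs[2];
 *
 *	vqs[0] = &foo->vqs[0];
 *	vqs[1] = &foo->vqs[1];
 *	foo->vqs[0].handle_kick = foo_handle_rx_kick;
 *	foo->vqs[1].handle_kick = foo_handle_tx_kick;
 *	vhost_dev_init(&foo->dev, vqs, 2, UIO_MAXIOV, 64, 64 * 1024,
 *		       true, NULL);
 *
 * Ownership of the device (and creation of the worker thread) happens later,
 * from the VHOST_SET_OWNER ioctl path via vhost_dev_set_owner().
 */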

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
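
/*
 * Illustrative sketch (not part of this header): the usual shape of a
 * handle_kick handler. Descriptors are fetched with vhost_get_vq_desc(),
 * completed with vhost_add_used_and_signal(), and guest notifications are
 * re-enabled before exiting so a late kick is not missed. Error handling,
 * weight accounting via vhost_exceeds_weight() and the actual data copy
 * are omitted; "process()" is hypothetical.
 *
 *	mutex_lock(&vq->mutex);
 *	vhost_disable_notify(vq->dev, vq);
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (head < 0)
 *			break;			// access error
 *		if (head == vq->num) {		// ring is empty
 *			if (unlikely(vhost_enable_notify(vq->dev, vq))) {
 *				vhost_disable_notify(vq->dev, vq);
 *				continue;	// more buffers appeared
 *			}
 *			break;
 *		}
 *		len = process(vq->iov, out, in);
 *		vhost_add_used_and_signal(vq->dev, vq, head, len);
 *	}
 *	mutex_unlock(&vq->mutex);
 */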

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len,
		    struct iovec *iov, int count);
int vq_meta_prefetch(struct vhost_virtqueue *vq);

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
void vhost_set_backend_features(struct vhost_dev *dev, u64 features);

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);

void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
			  struct vhost_iotlb_map *map);

#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);              \
		if ((vq)->error_ctx)                               \
			eventfd_signal((vq)->error_ctx, 1);        \
	} while (0)
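
/*
 * Illustrative sketch (not part of this header): vq_err() reports a
 * malformed request and, when userspace registered an error eventfd via
 * VHOST_SET_VRING_ERR, signals it. The message itself is only emitted when
 * dynamic debug is enabled for this file.
 *
 *	if (out) {
 *		vq_err(vq, "Unexpected out descriptors: %u\n", out);
 *		return;
 *	}
 */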

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};
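
/*
 * Illustrative sketch (not part of this header): individual backends extend
 * this mask with their own feature bits before reporting it through
 * VHOST_GET_FEATURES. The name "VHOST_FOO_FEATURES" and the extra bit shown
 * here are only examples of the pattern, not a statement about any
 * particular backend.
 *
 *	enum {
 *		VHOST_FOO_FEATURES = VHOST_FEATURES |
 *				     (1ULL << VIRTIO_F_ACCESS_PLATFORM),
 *	};
 */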

/**
 * vhost_vq_set_backend - Set backend.
 *
 * @vq:            Virtqueue.
 * @private_data:  The private data.
 *
 * Context: Needs to be called with vq->mutex acquired.
 */
static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
					void *private_data)
{
	vq->private_data = private_data;
}

/**
 * vhost_vq_get_backend - Get backend.
 *
 * @vq:            Virtqueue.
 *
 * Context: Needs to be called with vq->mutex acquired.
 * Return: Private data previously set with vhost_vq_set_backend.
 */
static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
{
	return vq->private_data;
}
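
/*
 * Illustrative sketch (not part of this header): the backend pointer is the
 * per-queue "is this queue running, and with what?" handle. A start ioctl
 * typically publishes it under the vq mutex, and the kick handler reads it
 * back the same way; "foo_sock" and "struct foo_socket" are hypothetical.
 *
 *	mutex_lock(&vq->mutex);
 *	vhost_vq_set_backend(vq, foo_sock);
 *	mutex_unlock(&vq->mutex);
 *
 *	// in the kick handler, under vq->mutex:
 *	struct foo_socket *sock = vhost_vq_get_backend(vq);
 *
 *	if (!sock)
 *		return;		// queue not started
 */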

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_backend_features & (1ULL << bit);
}

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
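
/*
 * Illustrative sketch (not part of this header): ring fields are stored in
 * "virtio endianness" (legacy guest-native or little-endian, depending on
 * negotiation), so backends convert on every access instead of assuming
 * host byte order. The variable names below are hypothetical.
 *
 *	// reading the available index fetched from guest memory:
 *	u16 avail = vhost16_to_cpu(vq, avail_idx_le);
 *
 *	// writing a length back into a used element:
 *	used_elem.len = cpu_to_vhost32(vq, len);
 */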
#endif