// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/9p/trans_xen.c
 *
 * Xen transport layer.
 *
 * Copyright (C) 2017 by Stefano Stabellini <stefano@aporeto.com>
 */

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/9pfs.h>

#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

#define XEN_9PFS_NUM_RINGS 2
#define XEN_9PFS_RING_ORDER 9
#define XEN_9PFS_RING_SIZE(ring)  XEN_FLEX_RING_SIZE(ring->intf->ring_order)
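
/*
 * Sizing sketch (informational, not part of the ABI): at the time of
 * writing, XEN_FLEX_RING_SIZE(order) in xen/interface/io/ring.h expands
 * to 1UL << (order + XEN_PAGE_SHIFT - 1), i.e. half of the shared data
 * area, because each direction (in/out) gets one half. Assuming 4 KiB
 * Xen pages (XEN_PAGE_SHIFT == 12) and the default order of 9:
 *
 *   shared data area per ring pair: 1 << (9 + 12)     = 2 MiB
 *   per-direction ring size:        1 << (9 + 12 - 1) = 1 MiB
 *   p9_xen_trans.maxsize:           1 << (9 + 12 - 2) = 512 KiB
 *
 * i.e. the maximum 9P message is capped at half a ring (see the trans
 * module definition and xen_9pfs_front_init() below).
 */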

struct xen_9pfs_header {
	uint32_t size;
	uint8_t id;
	uint16_t tag;

	/* uint8_t sdata[]; */
} __attribute__((packed));

/* One per ring, more than one per 9pfs share */
struct xen_9pfs_dataring {
	struct xen_9pfs_front_priv *priv;

	struct xen_9pfs_data_intf *intf;
	grant_ref_t ref;
	int evtchn;
	int irq;
	/* protect a ring from concurrent accesses */
	spinlock_t lock;

	struct xen_9pfs_data data;
	wait_queue_head_t wq;
	struct work_struct work;
};

/* One per 9pfs share */
struct xen_9pfs_front_priv {
	struct list_head list;
	struct xenbus_device *dev;
	char *tag;
	struct p9_client *client;

	int num_rings;
	struct xen_9pfs_dataring *rings;
};

static LIST_HEAD(xen_9pfs_devs);
static DEFINE_RWLOCK(xen_9pfs_lock);

/* We don't currently allow canceling of requests */
static int p9_xen_cancel(struct p9_client *client, struct p9_req_t *req)
{
	return 1;
}

static int p9_xen_create(struct p9_client *client, const char *addr, char *args)
{
	struct xen_9pfs_front_priv *priv;

	if (addr == NULL)
		return -EINVAL;

	read_lock(&xen_9pfs_lock);
	list_for_each_entry(priv, &xen_9pfs_devs, list) {
		if (!strcmp(priv->tag, addr)) {
			priv->client = client;
			read_unlock(&xen_9pfs_lock);
			return 0;
		}
	}
	read_unlock(&xen_9pfs_lock);
	return -EINVAL;
}

static void p9_xen_close(struct p9_client *client)
{
	struct xen_9pfs_front_priv *priv;

	read_lock(&xen_9pfs_lock);
	list_for_each_entry(priv, &xen_9pfs_devs, list) {
		if (priv->client == client) {
			priv->client = NULL;
			read_unlock(&xen_9pfs_lock);
			return;
		}
	}
	read_unlock(&xen_9pfs_lock);
}

static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
{
	RING_IDX cons, prod;

	cons = ring->intf->out_cons;
	prod = ring->intf->out_prod;
	virt_mb();

	return XEN_9PFS_RING_SIZE(ring) -
		xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) >= size;
}
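
/*
 * Flow-control sketch for the request path below (illustrative values,
 * assuming the flex-ring helpers generated by DEFINE_XEN_FLEX_RING in
 * xen/interface/io/ring.h): the ring indices are free-running, so
 * xen_9pfs_queued() effectively evaluates to prod - cons as long as the
 * producer never runs more than a ring size ahead. On a 1 MiB out ring
 * with prod == 0x100300 and cons == 0x100100, 0x200 bytes are queued
 * and 0x100000 - 0x200 bytes are free; p9_xen_write_todo() returns true
 * for any request that fits in that free space, and p9_xen_request()
 * re-checks the condition under ring->lock before copying.
 */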
static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
{
	struct xen_9pfs_front_priv *priv;
	RING_IDX cons, prod, masked_cons, masked_prod;
	unsigned long flags;
	u32 size = p9_req->tc.size;
	struct xen_9pfs_dataring *ring;
	int num;

	read_lock(&xen_9pfs_lock);
	list_for_each_entry(priv, &xen_9pfs_devs, list) {
		if (priv->client == client)
			break;
	}
	read_unlock(&xen_9pfs_lock);
	if (list_entry_is_head(priv, &xen_9pfs_devs, list))
		return -EINVAL;

	num = p9_req->tc.tag % priv->num_rings;
	ring = &priv->rings[num];

again:
	while (wait_event_killable(ring->wq,
				   p9_xen_write_todo(ring, size)) != 0)
		;

	spin_lock_irqsave(&ring->lock, flags);
	cons = ring->intf->out_cons;
	prod = ring->intf->out_prod;
	virt_mb();

	if (XEN_9PFS_RING_SIZE(ring) -
	    xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) < size) {
		spin_unlock_irqrestore(&ring->lock, flags);
		goto again;
	}

	masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE(ring));
	masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));

	xen_9pfs_write_packet(ring->data.out, p9_req->tc.sdata, size,
			      &masked_prod, masked_cons,
			      XEN_9PFS_RING_SIZE(ring));

	WRITE_ONCE(p9_req->status, REQ_STATUS_SENT);
	virt_wmb();			/* write ring before updating pointer */
	prod += size;
	ring->intf->out_prod = prod;
	spin_unlock_irqrestore(&ring->lock, flags);
	notify_remote_via_irq(ring->irq);
	p9_req_put(client, p9_req);

	return 0;
}
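
/*
 * Masking sketch for the response path below (illustrative, assuming
 * xen_9pfs_mask(idx, size) is idx & (size - 1), with size a power of
 * two as guaranteed by XEN_FLEX_RING_SIZE()): on a 1 MiB in ring, a
 * free-running index of 0x100080 masks to offset 0x80 inside the
 * buffer. p9_xen_response() first reads the 7-byte xen_9pfs_header at
 * that offset, looks the request up by tag, and only then copies the
 * whole packet (header included) into the request's receive buffer.
 */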
static void p9_xen_response(struct work_struct *work)
{
	struct xen_9pfs_front_priv *priv;
	struct xen_9pfs_dataring *ring;
	RING_IDX cons, prod, masked_cons, masked_prod;
	struct xen_9pfs_header h;
	struct p9_req_t *req;
	int status;

	ring = container_of(work, struct xen_9pfs_dataring, work);
	priv = ring->priv;

	while (1) {
		cons = ring->intf->in_cons;
		prod = ring->intf->in_prod;
		virt_rmb();

		if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) <
		    sizeof(h)) {
			notify_remote_via_irq(ring->irq);
			return;
		}

		masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE(ring));
		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));

		/* First, read just the header */
		xen_9pfs_read_packet(&h, ring->data.in, sizeof(h),
				     masked_prod, &masked_cons,
				     XEN_9PFS_RING_SIZE(ring));

		req = p9_tag_lookup(priv->client, h.tag);
		if (!req || req->status != REQ_STATUS_SENT) {
			dev_warn(&priv->dev->dev, "Wrong req tag=%x\n", h.tag);
			cons += h.size;
			virt_mb();
			ring->intf->in_cons = cons;
			continue;
		}

		if (h.size > req->rc.capacity) {
			dev_warn(&priv->dev->dev,
				 "requested packet size too big: %d for tag %d with capacity %zd\n",
				 h.size, h.tag, req->rc.capacity);
			WRITE_ONCE(req->status, REQ_STATUS_ERROR);
			goto recv_error;
		}

		req->rc.size = h.size;
		req->rc.id = h.id;
		req->rc.tag = h.tag;
		req->rc.offset = 0;

		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));
		/* Then, read the whole packet (including the header) */
		xen_9pfs_read_packet(req->rc.sdata, ring->data.in, h.size,
				     masked_prod, &masked_cons,
				     XEN_9PFS_RING_SIZE(ring));

recv_error:
		virt_mb();
		cons += h.size;
		ring->intf->in_cons = cons;

		status = (req->status != REQ_STATUS_ERROR) ?
			REQ_STATUS_RCVD : REQ_STATUS_ERROR;

		p9_client_cb(priv->client, req, status);
	}
}

static irqreturn_t xen_9pfs_front_event_handler(int irq, void *r)
{
	struct xen_9pfs_dataring *ring = r;

	if (!ring || !ring->priv->client) {
		/* ignore spurious interrupt */
		return IRQ_HANDLED;
	}

	wake_up_interruptible(&ring->wq);
	schedule_work(&ring->work);

	return IRQ_HANDLED;
}

static struct p9_trans_module p9_xen_trans = {
	.name = "xen",
	.maxsize = 1 << (XEN_9PFS_RING_ORDER + XEN_PAGE_SHIFT - 2),
	.pooled_rbuffers = false,
	.def = 1,
	.create = p9_xen_create,
	.close = p9_xen_close,
	.request = p9_xen_request,
	.cancel = p9_xen_cancel,
	.owner = THIS_MODULE,
};
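
/*
 * Usage sketch (illustrative, not taken from this file): with a backend
 * exporting a share under the xenstore tag "shared", the transport is
 * selected from the guest with something like
 *
 *   mount -t 9p -o trans=xen shared /mnt
 *
 * p9_xen_create() matches the mount address against priv->tag, which
 * xen_9pfs_front_init() reads from the frontend's "tag" node.
 */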

static const struct xenbus_device_id xen_9pfs_front_ids[] = {
	{ "9pfs" },
	{ "" }
};

static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
{
	int i, j;

	write_lock(&xen_9pfs_lock);
	list_del(&priv->list);
	write_unlock(&xen_9pfs_lock);

	for (i = 0; i < priv->num_rings; i++) {
		struct xen_9pfs_dataring *ring = &priv->rings[i];

		cancel_work_sync(&ring->work);

		if (!priv->rings[i].intf)
			break;
		if (priv->rings[i].irq > 0)
			unbind_from_irqhandler(priv->rings[i].irq, priv->dev);
		if (priv->rings[i].data.in) {
			for (j = 0;
			     j < (1 << priv->rings[i].intf->ring_order);
			     j++) {
				grant_ref_t ref;

				ref = priv->rings[i].intf->ref[j];
				gnttab_end_foreign_access(ref, NULL);
			}
			free_pages_exact(priv->rings[i].data.in,
				   1UL << (priv->rings[i].intf->ring_order +
					   XEN_PAGE_SHIFT));
		}
		gnttab_end_foreign_access(priv->rings[i].ref, NULL);
		free_page((unsigned long)priv->rings[i].intf);
	}
	kfree(priv->rings);
	kfree(priv->tag);
	kfree(priv);
}

static void xen_9pfs_front_remove(struct xenbus_device *dev)
{
	struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);

	dev_set_drvdata(&dev->dev, NULL);
	xen_9pfs_front_free(priv);
}
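
/*
 * Per-ring shared memory layout, as set up below: one zeroed page holds
 * struct xen_9pfs_data_intf and is granted to the backend (ring->ref);
 * 2^order further pages form the data area, each granted individually
 * with its grant reference stored in intf->ref[]. The first half of the
 * data area is the "in" ring (backend to frontend), the second half the
 * "out" ring, which is why data.out starts XEN_FLEX_RING_SIZE(order)
 * bytes into the buffer.
 */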
static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
					 struct xen_9pfs_dataring *ring,
					 unsigned int order)
{
	int i = 0;
	int ret = -ENOMEM;
	void *bytes = NULL;

	init_waitqueue_head(&ring->wq);
	spin_lock_init(&ring->lock);
	INIT_WORK(&ring->work, p9_xen_response);

	ring->intf = (struct xen_9pfs_data_intf *)get_zeroed_page(GFP_KERNEL);
	if (!ring->intf)
		return ret;
	ret = gnttab_grant_foreign_access(dev->otherend_id,
					  virt_to_gfn(ring->intf), 0);
	if (ret < 0)
		goto out;
	ring->ref = ret;
	bytes = alloc_pages_exact(1UL << (order + XEN_PAGE_SHIFT),
				  GFP_KERNEL | __GFP_ZERO);
	if (!bytes) {
		ret = -ENOMEM;
		goto out;
	}
	for (; i < (1 << order); i++) {
		ret = gnttab_grant_foreign_access(
				dev->otherend_id, virt_to_gfn(bytes) + i, 0);
		if (ret < 0)
			goto out;
		ring->intf->ref[i] = ret;
	}
	ring->intf->ring_order = order;
	ring->data.in = bytes;
	ring->data.out = bytes + XEN_FLEX_RING_SIZE(order);

	ret = xenbus_alloc_evtchn(dev, &ring->evtchn);
	if (ret)
		goto out;
	ring->irq = bind_evtchn_to_irqhandler(ring->evtchn,
					      xen_9pfs_front_event_handler,
					      0, "xen_9pfs-frontend", ring);
	if (ring->irq >= 0)
		return 0;

	xenbus_free_evtchn(dev, ring->evtchn);
	ret = ring->irq;
out:
	if (bytes) {
		for (i--; i >= 0; i--)
			gnttab_end_foreign_access(ring->intf->ref[i], NULL);
		free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT));
	}
	gnttab_end_foreign_access(ring->ref, NULL);
	free_page((unsigned long)ring->intf);
	return ret;
}
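
/*
 * Negotiation sketch (node values illustrative): the backend advertises
 * "versions", "max-rings" and "max-ring-page-order" under its xenstore
 * directory. The function below requires version 1 and at least
 * XEN_9PFS_NUM_RINGS rings, clamps the ring order to
 * XEN_9PFS_RING_ORDER, and publishes the frontend side in a single
 * transaction, after which the frontend directory looks roughly like:
 *
 *   version = "1"
 *   num-rings = "2"
 *   ring-ref0 = "<grant ref>"      event-channel-0 = "<event channel>"
 *   ring-ref1 = "<grant ref>"      event-channel-1 = "<event channel>"
 *
 * The "tag" node, written by the toolstack, is read back and later used
 * as the mount address (see p9_xen_create()).
 */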
static int xen_9pfs_front_init(struct xenbus_device *dev)
{
	int ret, i;
	struct xenbus_transaction xbt;
	struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);
	char *versions, *v;
	unsigned int max_rings, max_ring_order, len = 0;

	versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
	if (IS_ERR(versions))
		return PTR_ERR(versions);
	for (v = versions; *v; v++) {
		if (simple_strtoul(v, &v, 10) == 1) {
			v = NULL;
			break;
		}
	}
	if (v) {
		kfree(versions);
		return -EINVAL;
	}
	kfree(versions);
	max_rings = xenbus_read_unsigned(dev->otherend, "max-rings", 0);
	if (max_rings < XEN_9PFS_NUM_RINGS)
		return -EINVAL;
	max_ring_order = xenbus_read_unsigned(dev->otherend,
					      "max-ring-page-order", 0);
	if (max_ring_order > XEN_9PFS_RING_ORDER)
		max_ring_order = XEN_9PFS_RING_ORDER;
	if (p9_xen_trans.maxsize > XEN_FLEX_RING_SIZE(max_ring_order))
		p9_xen_trans.maxsize = XEN_FLEX_RING_SIZE(max_ring_order) / 2;

	priv->num_rings = XEN_9PFS_NUM_RINGS;
	priv->rings = kcalloc(priv->num_rings, sizeof(*priv->rings),
			      GFP_KERNEL);
	if (!priv->rings) {
		kfree(priv);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rings; i++) {
		priv->rings[i].priv = priv;
		ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i],
						    max_ring_order);
		if (ret < 0)
			goto error;
	}

 again:
	ret = xenbus_transaction_start(&xbt);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "starting transaction");
		goto error;
	}
	ret = xenbus_printf(xbt, dev->nodename, "version", "%u", 1);
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "num-rings", "%u",
			    priv->num_rings);
	if (ret)
		goto error_xenbus;
	for (i = 0; i < priv->num_rings; i++) {
		char str[16];

		BUILD_BUG_ON(XEN_9PFS_NUM_RINGS > 9);
		sprintf(str, "ring-ref%d", i);
		ret = xenbus_printf(xbt, dev->nodename, str, "%d",
				    priv->rings[i].ref);
		if (ret)
			goto error_xenbus;

		sprintf(str, "event-channel-%d", i);
		ret = xenbus_printf(xbt, dev->nodename, str, "%u",
				    priv->rings[i].evtchn);
		if (ret)
			goto error_xenbus;
	}
	priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL);
	if (IS_ERR(priv->tag)) {
		ret = PTR_ERR(priv->tag);
		goto error_xenbus;
	}
	ret = xenbus_transaction_end(xbt, 0);
	if (ret) {
		if (ret == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, ret, "completing transaction");
		goto error;
	}

	return 0;

 error_xenbus:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, ret, "writing xenstore");
 error:
	xen_9pfs_front_free(priv);
	return ret;
}

static int xen_9pfs_front_probe(struct xenbus_device *dev,
				const struct xenbus_device_id *id)
{
	struct xen_9pfs_front_priv *priv = NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	dev_set_drvdata(&dev->dev, priv);

	write_lock(&xen_9pfs_lock);
	list_add_tail(&priv->list, &xen_9pfs_devs);
	write_unlock(&xen_9pfs_lock);

	return 0;
}

static int xen_9pfs_front_resume(struct xenbus_device *dev)
{
	dev_warn(&dev->dev, "suspend/resume unsupported\n");
	return 0;
}
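
/*
 * Handshake sketch (assuming the usual xenbus frontend/backend state
 * machine): once the backend reaches InitWait, the frontend allocates
 * its rings in xen_9pfs_front_init() and switches to Initialised; when
 * the backend has mapped the rings and moved to Connected, the frontend
 * mirrors that state below.
 */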
static void xen_9pfs_front_changed(struct xenbus_device *dev,
				   enum xenbus_state backend_state)
{
	switch (backend_state) {
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
		break;

	case XenbusStateInitWait:
		if (!xen_9pfs_front_init(dev))
			xenbus_switch_state(dev, XenbusStateInitialised);
		break;

	case XenbusStateConnected:
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		fallthrough;	/* Missed the backend's CLOSING state */
	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}

static struct xenbus_driver xen_9pfs_front_driver = {
	.ids = xen_9pfs_front_ids,
	.probe = xen_9pfs_front_probe,
	.remove = xen_9pfs_front_remove,
	.resume = xen_9pfs_front_resume,
	.otherend_changed = xen_9pfs_front_changed,
};

static int __init p9_trans_xen_init(void)
{
	int rc;

	if (!xen_domain())
		return -ENODEV;

	pr_info("Initialising Xen transport for 9pfs\n");

	v9fs_register_trans(&p9_xen_trans);
	rc = xenbus_register_frontend(&xen_9pfs_front_driver);
	if (rc)
		v9fs_unregister_trans(&p9_xen_trans);

	return rc;
}
module_init(p9_trans_xen_init);
MODULE_ALIAS_9P("xen");

static void __exit p9_trans_xen_exit(void)
{
	v9fs_unregister_trans(&p9_xen_trans);
	xenbus_unregister_driver(&xen_9pfs_front_driver);
}
module_exit(p9_trans_xen_exit);

MODULE_ALIAS("xen:9pfs");
MODULE_AUTHOR("Stefano Stabellini <stefano@aporeto.com>");
MODULE_DESCRIPTION("Xen Transport for 9P");
MODULE_LICENSE("GPL");