// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "chp.h"
#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

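/*
 * Module-global state: the workqueue used for deferred I/O and CRW
 * processing, the caches backing the regions exposed to userspace,
 * and the s390 debug feature areas.
 */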
struct workqueue_struct *vfio_ccw_work_q;
struct kmem_cache *vfio_ccw_io_region;
struct kmem_cache *vfio_ccw_cmd_region;
struct kmem_cache *vfio_ccw_schib_region;
struct kmem_cache *vfio_ccw_crw_region;

debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;

/*
 * Helpers
 */
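/**
 * vfio_ccw_sch_quiesce - cancel outstanding I/O and disable the subchannel
 * @sch: subchannel to quiesce, with the subchannel lock held
 *
 * Issue cancel/halt/clear until the subchannel is idle, waiting for a
 * completion interrupt where necessary, then disable the subchannel.
 * The subchannel lock is dropped and re-acquired while waiting.
 *
 * Returns 0 on success or a negative error code on failure.
 */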
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
	struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
	struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int iretry, ret = 0;

	/*
	 * Probably an impossible situation when called through the
	 * FSM callbacks. But in the event it does happen, log a
	 * warning and return as if things were fine.
	 */
	if (WARN_ON(!private))
		return 0;

	iretry = 255;
	do {

		ret = cio_cancel_halt_clear(sch, &iretry);

		if (ret == -EIO) {
			pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
			       sch->schid.ssid, sch->schid.sch_no);
			break;
		}

		/*
		 * Flush all I/O and wait for
		 * cancel/halt/clear completion.
		 */
		private->completion = &completion;
		spin_unlock_irq(sch->lock);

		if (ret == -EBUSY)
			wait_for_completion_timeout(&completion, 3*HZ);

		private->completion = NULL;
		flush_workqueue(vfio_ccw_work_q);
		spin_lock_irq(sch->lock);
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);

	return ret;
}

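/*
 * Deferred handling of an I/O interrupt: update the channel program
 * with the new SCSW, mirror the IRB into the I/O region for
 * userspace, and signal the eventfd registered for I/O events.
 */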
void vfio_ccw_sch_io_todo(struct work_struct *work)
{
	struct vfio_ccw_private *private;
	struct irb *irb;
	bool is_final;
	bool cp_is_finished = false;

	private = container_of(work, struct vfio_ccw_private, io_work);
	irb = &private->irb;

	is_final = !(scsw_actl(&irb->scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
	if (scsw_is_solicited(&irb->scsw)) {
		cp_update_scsw(&private->cp, &irb->scsw);
		if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
			cp_free(&private->cp);
			cp_is_finished = true;
		}
	}
	mutex_lock(&private->io_mutex);
	memcpy(private->io_region->irb_area, irb, sizeof(*irb));
	mutex_unlock(&private->io_mutex);

	/*
	 * Reset to IDLE only if processing of a channel program
	 * has finished. Do not overwrite a possible processing
	 * state if the interrupt was unsolicited, or if the final
	 * interrupt was for HSCH or CSCH.
	 */
	if (cp_is_finished)
		private->state = VFIO_CCW_STATE_IDLE;

	if (private->io_trigger)
		eventfd_signal(private->io_trigger, 1);
}

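/*
 * Deferred handling of a channel report word: signal the eventfd
 * registered for CRW events if any CRWs are queued.
 */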
void vfio_ccw_crw_todo(struct work_struct *work)
{
	struct vfio_ccw_private *private;

	private = container_of(work, struct vfio_ccw_private, crw_work);

	if (!list_empty(&private->crw) && private->crw_trigger)
		eventfd_signal(private->crw_trigger, 1);
}

/*
 * Css driver callbacks
 */
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
	struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
	struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);

	/*
	 * The subchannel should still be disabled at this point,
	 * so an interrupt would be quite surprising. As with an
	 * interrupt while the FSM is closed, let's attempt to
	 * disable the subchannel again.
	 */
	if (!private) {
		VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: unexpected interrupt\n",
				   sch->schid.cssid, sch->schid.ssid,
				   sch->schid.sch_no);

		cio_disable_subchannel(sch);
		return;
	}

	inc_irq_stat(IRQIO_CIO);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}

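/* Release callback for the parent device: free the containing structure. */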
static void vfio_ccw_free_parent(struct device *dev)
{
	struct vfio_ccw_parent *parent = container_of(dev, struct vfio_ccw_parent, dev);

	kfree(parent);
}

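/*
 * Bind to an I/O subchannel: reject QDIO subchannels, register the
 * intermediate parent device, and register with the mdev core so a
 * mediated device can be created for the subchannel.
 */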
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
	struct pmcw *pmcw = &sch->schib.pmcw;
	struct vfio_ccw_parent *parent;
	int ret = -ENOMEM;

	if (pmcw->qf) {
		dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
			 dev_name(&sch->dev));
		return -ENODEV;
	}

	parent = kzalloc(struct_size(parent, mdev_types, 1), GFP_KERNEL);
	if (!parent)
		return -ENOMEM;

	dev_set_name(&parent->dev, "parent");
	parent->dev.parent = &sch->dev;
	parent->dev.release = &vfio_ccw_free_parent;
	ret = device_register(&parent->dev);
	if (ret)
		goto out_free;

	dev_set_drvdata(&sch->dev, parent);

	parent->mdev_type.sysfs_name = "io";
	parent->mdev_type.pretty_name = "I/O subchannel (Non-QDIO)";
	parent->mdev_types[0] = &parent->mdev_type;
	ret = mdev_register_parent(&parent->parent, &sch->dev,
				   &vfio_ccw_mdev_driver,
				   parent->mdev_types, 1);
	if (ret)
		goto out_unreg;

	VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
			   sch->schid.cssid, sch->schid.ssid,
			   sch->schid.sch_no);
	return 0;

out_unreg:
	device_del(&parent->dev);
out_free:
	put_device(&parent->dev);
	dev_set_drvdata(&sch->dev, NULL);
	return ret;
}

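/* Unbind from the subchannel, unwinding what probe set up. */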
static void vfio_ccw_sch_remove(struct subchannel *sch)
{
	struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);

	mdev_unregister_parent(&parent->parent);

	device_unregister(&parent->dev);
	dev_set_drvdata(&sch->dev, NULL);

	VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
			   sch->schid.cssid, sch->schid.ssid,
			   sch->schid.sch_no);
}

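/*
 * On shutdown, quiesce the device by driving the FSM through the
 * close and not-operational events, if a mediated device exists.
 */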
static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
	struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
	struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);

	if (!private)
		return;

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}

/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
	struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
	struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
	unsigned long flags;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;

	if (work_pending(&sch->todo_work))
		goto out_unlock;

	rc = 0;

	if (cio_update_schib(sch)) {
		if (private)
			vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
	}

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return rc;
}

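/*
 * Queue a CRW with the given fields for the mediated device and
 * schedule the work that delivers it to userspace.
 */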
static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
			       unsigned int rsc,
			       unsigned int erc,
			       unsigned int rsid)
{
	struct vfio_ccw_crw *crw;

	/*
	 * If unable to allocate a CRW, just drop the event and
	 * carry on.  The guest will either see a later one or
	 * learn when it issues its own store subchannel.
	 */
	crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
	if (!crw)
		return;

	/*
	 * Build the CRW based on the inputs given to us.
	 */
	crw->crw.rsc = rsc;
	crw->crw.erc = erc;
	crw->crw.rsid = rsid;

	list_add_tail(&crw->next, &private->crw);
	queue_work(vfio_ccw_work_q, &private->crw_work);
}

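/*
 * Handle a channel-path event: adjust the subchannel's path masks,
 * terminate I/O still running on a path that became unusable, and
 * queue a CRW so the guest learns of path state changes.
 */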
static int vfio_ccw_chp_event(struct subchannel *sch,
			      struct chp_link *link, int event)
{
	struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
	struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
	int mask = chp_ssd_get_mask(&sch->ssd_info, link);
	int retry = 255;

	if (!private || !mask)
		return 0;

	trace_vfio_ccw_chp_event(sch->schid, mask, event);
	VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: mask=0x%x event=%d\n",
			   sch->schid.cssid,
			   sch->schid.ssid, sch->schid.sch_no,
			   mask, event);

	if (cio_update_schib(sch))
		return -ENODEV;

	switch (event) {
	case CHP_VARY_OFF:
		/* Path logically turned off */
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (sch->schib.pmcw.lpum & mask)
			cio_cancel_halt_clear(sch, &retry);
		break;
	case CHP_OFFLINE:
		/* Path is gone */
		if (sch->schib.pmcw.lpum & mask)
			cio_cancel_halt_clear(sch, &retry);
		vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
				   link->chpid.id);
		break;
	case CHP_VARY_ON:
		/* Path logically turned on */
		sch->opm |= mask;
		sch->lpm |= mask;
		break;
	case CHP_ONLINE:
		/* Path became available */
		sch->lpm |= mask & sch->opm;
		vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
				   link->chpid.id);
		break;
	}

	return 0;
}

static struct css_device_id vfio_ccw_sch_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);

static struct css_driver vfio_ccw_sch_driver = {
	.drv = {
		.name = "vfio_ccw",
		.owner = THIS_MODULE,
	},
	.subchannel_type = vfio_ccw_sch_ids,
	.irq = vfio_ccw_sch_irq,
	.probe = vfio_ccw_sch_probe,
	.remove = vfio_ccw_sch_remove,
	.shutdown = vfio_ccw_sch_shutdown,
	.sch_event = vfio_ccw_sch_event,
	.chp_event = vfio_ccw_chp_event,
};

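/*
 * Set up the two s390 debug feature areas: a sprintf view for
 * messages and a hex/ascii view for trace data, both at debug
 * level 2.
 */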
static int __init vfio_ccw_debug_init(void)
{
	vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
					       11 * sizeof(long));
	if (!vfio_ccw_debug_msg_id)
		goto out_unregister;
	debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
	debug_set_level(vfio_ccw_debug_msg_id, 2);
	vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
	if (!vfio_ccw_debug_trace_id)
		goto out_unregister;
	debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
	debug_set_level(vfio_ccw_debug_trace_id, 2);
	return 0;

out_unregister:
	debug_unregister(vfio_ccw_debug_msg_id);
	debug_unregister(vfio_ccw_debug_trace_id);
	return -1;
}

static void vfio_ccw_debug_exit(void)
{
	debug_unregister(vfio_ccw_debug_msg_id);
	debug_unregister(vfio_ccw_debug_trace_id);
}

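/* Destroy the region caches; kmem_cache_destroy() tolerates NULL. */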
static void vfio_ccw_destroy_regions(void)
{
	kmem_cache_destroy(vfio_ccw_crw_region);
	kmem_cache_destroy(vfio_ccw_schib_region);
	kmem_cache_destroy(vfio_ccw_cmd_region);
	kmem_cache_destroy(vfio_ccw_io_region);
}

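/*
 * Module initialization: set up debugging, the workqueue, and the
 * region caches, then register the mdev driver and the css driver.
 * Everything is unwound in reverse order on failure.
 */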
static int __init vfio_ccw_sch_init(void)
{
	int ret;

	ret = vfio_ccw_debug_init();
	if (ret)
		return ret;

	vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
	if (!vfio_ccw_work_q) {
		ret = -ENOMEM;
		goto out_debug;
	}

	vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
					sizeof(struct ccw_io_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_io_region), NULL);
	if (!vfio_ccw_io_region) {
		ret = -ENOMEM;
		goto out_regions;
	}

	vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
					sizeof(struct ccw_cmd_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_cmd_region), NULL);
	if (!vfio_ccw_cmd_region) {
		ret = -ENOMEM;
		goto out_regions;
	}

	vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
					sizeof(struct ccw_schib_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_schib_region), NULL);
	if (!vfio_ccw_schib_region) {
		ret = -ENOMEM;
		goto out_regions;
	}

	vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
					sizeof(struct ccw_crw_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_crw_region), NULL);
	if (!vfio_ccw_crw_region) {
		ret = -ENOMEM;
		goto out_regions;
	}

	ret = mdev_register_driver(&vfio_ccw_mdev_driver);
	if (ret)
		goto out_regions;

	isc_register(VFIO_CCW_ISC);
	ret = css_driver_register(&vfio_ccw_sch_driver);
	if (ret) {
		isc_unregister(VFIO_CCW_ISC);
		goto out_driver;
	}

	return ret;

out_driver:
	mdev_unregister_driver(&vfio_ccw_mdev_driver);
out_regions:
	vfio_ccw_destroy_regions();
	destroy_workqueue(vfio_ccw_work_q);
out_debug:
	vfio_ccw_debug_exit();
	return ret;
}

static void __exit vfio_ccw_sch_exit(void)
{
	css_driver_unregister(&vfio_ccw_sch_driver);
	mdev_unregister_driver(&vfio_ccw_mdev_driver);
	isc_unregister(VFIO_CCW_ISC);
	vfio_ccw_destroy_regions();
	destroy_workqueue(vfio_ccw_work_q);
	vfio_ccw_debug_exit();
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");