// SPDX-License-Identifier: GPL-1.0+
/*
 *  bus driver for ccw devices
 *
 *    Copyright IBM Corp. 2002, 2008
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>
#include <linux/sched/signal.h>
#include <linux/dma-mapping.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h>		/* HZ */
#include <asm/cmb.h>
#include <asm/isc.h>

#include "chp.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "chsc.h"

static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };

static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
static struct bus_type ccw_bus_type;

/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel system per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
static int
ccw_bus_match (struct device * dev, struct device_driver * drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}
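
/*
 * Example (illustrative sketch, not part of this file's logic): a ccw
 * driver participates in matching by declaring an id table; the
 * type/model values below are made up:
 *
 *	static struct ccw_device_id example_ids[] = {
 *		CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0),
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(ccw, example_ids);
 */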

/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
			 struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}
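
/*
 * Example: for cu_type 0x3990, cu_model 0xe9, dev_type 0x3390 and
 * dev_model 0x0c the resulting alias is "ccw:t3990mE9dt3390dm0C"; with
 * dev_type 0 it is "ccw:t3990mE9dtdm" instead.
 */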

/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS=  */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}
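
/*
 * For the example id above, the resulting uevent environment would
 * contain CU_TYPE=3990, CU_MODEL=E9, DEV_TYPE=3390, DEV_MODEL=0C and
 * MODALIAS=ccw:t3990mE9dt3390dm0C.
 */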

static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(struct timer_list *unused);

static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};

static int io_subchannel_prepare(struct subchannel *sch)
{
	struct ccw_device *cdev;
	/*
	 * Don't allow suspend while a ccw device registration
	 * is still outstanding.
	 */
	cdev = sch_get_cdev(sch);
	if (cdev && !device_is_registered(&cdev->dev))
		return -EAGAIN;
	return 0;
}

static int io_subchannel_settle(void)
{
	int ret;

	ret = wait_event_interruptible(ccw_device_init_wq,
				atomic_read(&ccw_device_init_count) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	return 0;
}

static struct css_driver io_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "io_subchannel",
	},
	.subchannel_type = io_subchannel_ids,
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.prepare = io_subchannel_prepare,
	.settle = io_subchannel_settle,
};

int __init io_subchannel_init(void)
{
	int ret;

	timer_setup(&recovery_timer, recovery_func, 0);
	ret = bus_register(&ccw_bus_type);
	if (ret)
		return ret;
	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		bus_unregister(&ccw_bus_type);

	return ret;
}


/************************ device handling **************************/

static ssize_t
devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sprintf(buf, "%04x/%02x\n",
				id->dev_type, id->dev_model);
	else
		return sprintf(buf, "n/a\n");
}

static ssize_t
cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sprintf(buf, "%04x/%02x\n",
		       id->cu_type, id->cu_model);
}

static ssize_t
modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}

static ssize_t
online_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sprintf(buf, cdev->online ? "1\n" : "0\n");
}

int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}

static void io_subchannel_quiesce(struct subchannel *);

/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	cdev->online = 0;
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warn("%s: The device entered boxed state while being set offline\n",
			dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warn("%s: The device stopped operating while being set offline\n",
			dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}

/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warn("%s: Setting the device online failed because it is boxed\n",
				dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warn("%s: Setting the device online failed because it is not operational\n",
				dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;

	spin_lock_irq(cdev->ccwlock);
	cdev->online = 1;
	spin_unlock_irq(cdev->ccwlock);
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}
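
/*
 * Usage sketch for the two calls above, as a driver might issue them
 * from process context (error handling trimmed, names illustrative):
 *
 *	ret = ccw_device_set_online(cdev);
 *	if (ret)
 *		return ret;
 *	... perform I/O ...
 *	ret = ccw_device_set_offline(cdev);
 */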

static int online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
		spin_unlock_irq(cdev->ccwlock);
		return 0;
	}
	if (cdev->drv && cdev->drv->set_offline)
		return ccw_device_set_offline(cdev);
	return -EINVAL;
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_recognition(cdev);
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		return ccw_device_set_online(cdev);
	return -EINVAL;
}

static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}

static ssize_t online_store (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Prevent conflict between multiple on-/offline processing requests. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* Prevent conflict between internal I/Os and on-/offline processing. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED) {
		ret = -EAGAIN;
		goto out;
	}
	/* Prevent conflict between pending work and on-/offline processing. */
	if (work_pending(&cdev->private->todo_work)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = kstrtoul(buf, 16, &i);
	}
	if (ret)
		goto out;

	device_lock(dev);
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
	device_unlock(dev);

out:
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}
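
/*
 * From user space this attribute accepts "0", "1" and "force", e.g.
 * (the bus id 0.0.1234 is illustrative):
 *
 *	echo 1 > /sys/bus/ccw/devices/0.0.1234/online
 *	echo force > /sys/bus/ccw/devices/0.0.1234/online
 *
 * "force" additionally attempts to unbox a boxed device via
 * ccw_device_stlck() before setting it online.
 */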

static ssize_t
available_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sprintf(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sprintf(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sprintf(buf, "no path\n");
		else
			return sprintf(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sprintf(buf, "good\n");
	}
}

static ssize_t
initiate_logging(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int rc;

	rc = chsc_siosl(sch->schid);
	if (rc < 0) {
		pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
			sch->schid.ssid, sch->schid.sch_no, rc);
		return rc;
	}
	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
		  sch->schid.ssid, sch->schid.sch_no);
	return count;
}

static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%02x\n", sch->vpm);
}

static DEVICE_ATTR_RO(devtype);
static DEVICE_ATTR_RO(cutype);
static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_RW(online);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static DEVICE_ATTR_RO(vpm);

static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_logging.attr,
	&dev_attr_vpm.attr,
	NULL,
};

static const struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static const struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

static int ccw_device_add(struct ccw_device *cdev)
{
	struct device *dev = &cdev->dev;

	dev->bus = &ccw_bus_type;
	return device_add(dev);
}

static int match_dev_id(struct device *dev, const void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *dev_id = (void *)data;

	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

/**
 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 * @dev_id: id of the device to be searched
 *
 * This function searches all devices attached to the ccw bus for a device
 * matching @dev_id.
 * Returns:
 *  If a device is found its reference count is increased and returned;
 *  else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);

	return dev ? to_ccwdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
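
/*
 * Usage sketch; the caller owns the returned reference and must drop it
 * when done (devno 0x1234 is illustrative):
 *
 *	struct ccw_dev_id dev_id = { .ssid = 0, .devno = 0x1234 };
 *	struct ccw_device *cdev = get_ccwdev_by_dev_id(&dev_id);
 *
 *	if (cdev) {
 *		... use cdev ...
 *		put_device(&cdev->dev);
 *	}
 */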

static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
	int ret;

	if (device_is_registered(&cdev->dev)) {
		device_release_driver(&cdev->dev);
		ret = device_attach(&cdev->dev);
		WARN_ON(ret == -ENODEV);
	}
}

static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
			sizeof(*cdev->private->dma_area));
	cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
	/* Release reference of parent subchannel. */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}

static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	struct gen_pool *dma_pool;

	cdev  = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		goto err_cdev;
	cdev->private = kzalloc(sizeof(struct ccw_device_private),
				GFP_KERNEL | GFP_DMA);
	if (!cdev->private)
		goto err_priv;
	cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
	cdev->dev.dma_mask = sch->dev.dma_mask;
	dma_pool = cio_gp_dma_create(&cdev->dev, 1);
	if (!dma_pool)
		goto err_dma_pool;
	cdev->private->dma_pool = dma_pool;
	cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
					sizeof(*cdev->private->dma_area));
	if (!cdev->private->dma_area)
		goto err_dma_area;
	return cdev;
err_dma_area:
	cio_gp_dma_destroy(dma_pool, &cdev->dev);
err_dma_pool:
	kfree(cdev->private);
err_priv:
	kfree(cdev);
err_cdev:
	return ERR_PTR(-ENOMEM);
}

static void ccw_device_todo(struct work_struct *work);

static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	struct ccw_device_private *priv = cdev->private;
	int ret;

	priv->cdev = cdev;
	priv->int_class = IRQIO_CIO;
	priv->state = DEV_STATE_NOT_OPER;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;

	INIT_WORK(&priv->todo_work, ccw_device_todo);
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	timer_setup(&priv->timer, ccw_device_timeout, 0);

	atomic_set(&priv->onoff, 0);
	cdev->ccwlock = sch->lock;
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
			   cdev->private->dev_id.devno);
	if (ret)
		goto out_put;
	if (!get_device(&sch->dev)) {
		ret = -ENODEV;
		goto out_put;
	}
	priv->flags.initialized = 1;
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	return 0;

out_put:
	/* Release reference from device_initialize(). */
	put_device(&cdev->dev);
	return ret;
}

static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = io_subchannel_allocate_dev(sch);
	if (!IS_ERR(cdev)) {
		ret = io_subchannel_initialize_dev(sch, cdev);
		if (ret)
			cdev = ERR_PTR(ret);
	}
	return cdev;
}

static void io_subchannel_recog(struct ccw_device *, struct subchannel *);

static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	/* Start recognition for the new ccw device. */
	io_subchannel_recog(cdev, sch);
}

/*
 * Register recognized device.
 */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, adjust_init_count = 1;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		adjust_init_count = 0;
		goto out;
	}
	/*
	 * Now we know this subchannel will stay, we can throw
	 * our delayed uevent.
	 */
	if (dev_get_uevent_suppress(&sch->dev)) {
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	/* make it known to the system */
	ret = ccw_device_add(cdev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	wake_up(&cdev->private->wait_q);
out_err:
	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}

static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Get subchannel reference for local processing. */
	if (!get_device(cdev->dev.parent))
		return;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Release subchannel reference for local processing. */
	put_device(&sch->dev);
}

/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}

static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
}

static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(old_sch->lock);
		if (rc == -EBUSY) {
			/* Release child reference for new parent. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to reenable the old subchannel. */
			spin_lock_irq(old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
			spin_unlock_irq(old_sch->lock);
		}
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(sch->lock);
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}

static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_subsystem *css = to_css(sch->dev.parent);

	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}

static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(6, "IRQ");
	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
	else
		inc_irq_stat(IRQIO_CIO);
}

void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
}

static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}

/*
 * Note: We always return 0 so that we bind to the device even on error.
 * This is needed so that our remove function is called on unregister.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * The console subchannel already has an associated ccw_device.
		 * Throw the delayed uevent for the subchannel, register
		 * the ccw_device and exit.
		 */
		if (dev_get_uevent_suppress(&sch->dev)) {
			/* should always be the case for the console */
			dev_set_uevent_suppress(&sch->dev, 0);
			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		}
		cdev = sch_get_cdev(sch);
		rc = ccw_device_add(cdev);
		if (rc) {
			/* Release online reference. */
			put_device(&cdev->dev);
			goto out_schedule;
		}
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto out_schedule;

	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
				sizeof(*io_priv->dma_area),
				&io_priv->dma_area_dma, GFP_KERNEL);
	if (!io_priv->dma_area) {
		kfree(io_priv);
		goto out_schedule;
	}

	set_io_private(sch, io_priv);
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
}

static int io_subchannel_remove(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv = to_io_private(sch);
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;

	ccw_device_unregister(cdev);
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, NULL);
	set_io_private(sch, NULL);
	spin_unlock_irq(sch->lock);
out_free:
	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
			  io_priv->dma_area, io_priv->dma_area_dma);
	kfree(io_priv);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return 0;
}

static void io_subchannel_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}

static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		ccw_device_kill_io(cdev);
		goto out;
	}
	if (cio_clear(sch))
		goto err;
out:
	/* Trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}

static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	struct ccw_device *cdev = sch_get_cdev(sch);
	int mask;

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		sch->lpm |= mask & sch->opm;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	}
	return 0;
}

static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		cdev->private->iretry = 255;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(sch->lock);
}

static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}

static int device_is_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_ONLINE:
		sch = to_subchannel(cdev->dev.parent);
		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
			break;
		fallthrough;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}

static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(3, "recovery: end\n");
}

static DECLARE_WORK(recovery_work, recovery_work_func);

static void recovery_func(struct timer_list *unused)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}

void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(3, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}

static int purge_fn(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	spin_lock_irq(cdev->ccwlock);
	if (is_blacklisted(id->ssid, id->devno) &&
	    (cdev->private->state == DEV_STATE_OFFLINE) &&
	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
			      id->devno);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		atomic_set(&cdev->private->onoff, 0);
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}

/**
 * ccw_purge_blacklisted - purge unused, blacklisted devices
 *
 * Unregister all ccw devices that are offline and on the blacklist.
 */
int ccw_purge_blacklisted(void)
{
	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
	return 0;
}

void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}

void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}

enum io_sch_action {
	IO_SCH_UNREG,
	IO_SCH_ORPH_UNREG,
	IO_SCH_UNREG_CDEV,
	IO_SCH_ATTACH,
	IO_SCH_UNREG_ATTACH,
	IO_SCH_ORPH_ATTACH,
	IO_SCH_REPROBE,
	IO_SCH_VERIFY,
	IO_SCH_DISC,
	IO_SCH_NOP,
};

static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cio_update_schib(sch)) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_ORPH_UNREG;
	}
	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
			return IO_SCH_UNREG_CDEV;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online && !cdev->private->flags.resuming)
		return IO_SCH_VERIFY;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return IO_SCH_UNREG_ATTACH;
	return IO_SCH_NOP;
}

/**
 * io_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel and device. Return
 * zero when the event has been handled sufficiently or -EAGAIN when this
 * function should be called again in process context.
 */
static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
	unsigned long flags;
	struct ccw_device *cdev;
	struct ccw_dev_id dev_id;
	enum io_sch_action action;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;
	if (work_pending(&sch->todo_work))
		goto out_unlock;
	cdev = sch_get_cdev(sch);
	if (cdev && work_pending(&cdev->private->todo_work))
		goto out_unlock;
	action = sch_get_action(sch);
	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, process,
		      action);
	/* Perform immediate actions while holding the lock. */
	switch (action) {
	case IO_SCH_REPROBE:
		/* Trigger device recognition. */
		ccw_device_trigger_reprobe(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_VERIFY:
		/* Trigger path verification. */
		io_subchannel_verify(sch);
		rc = 0;
		goto out_unlock;
	case IO_SCH_DISC:
		ccw_device_set_disconnected(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		ccw_device_set_disconnected(cdev);
		break;
	case IO_SCH_UNREG_CDEV:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_UNREG:
		if (!cdev)
			break;
		if (cdev->private->state == DEV_STATE_SENSE_ID) {
			/*
			 * Note: delayed work triggered by this event
			 * and repeated calls to sch_event are synchronized
			 * by the above check for work_pending(cdev).
			 */
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_set_notoper(cdev);
		break;
	case IO_SCH_NOP:
		rc = 0;
		goto out_unlock;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* All other actions require process context. */
	if (!process)
		goto out;
	/* Handle attached ccw device. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		/* Move ccw device to orphanage. */
		rc = ccw_device_move_to_orph(cdev);
		if (rc)
			goto out;
		break;
	case IO_SCH_UNREG_CDEV:
	case IO_SCH_UNREG_ATTACH:
		spin_lock_irqsave(sch->lock, flags);
		if (cdev->private->flags.resuming) {
			/* Device will be handled later. */
			rc = 0;
			goto out_unlock;
		}
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Unregister ccw device. */
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Handle subchannel. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_UNREG:
		if (!cdev || !cdev->private->flags.resuming)
			css_sch_device_unregister(sch);
		break;
	case IO_SCH_ORPH_ATTACH:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_ATTACH:
		dev_id.ssid = sch->schid.ssid;
		dev_id.devno = sch->schib.pmcw.dev;
		cdev = get_ccwdev_by_dev_id(&dev_id);
		if (!cdev) {
			sch_create_and_recog_new_device(sch);
			break;
		}
		rc = ccw_device_move_to_sch(cdev, sch);
		if (rc) {
			/* Release reference from get_ccwdev_by_dev_id() */
			put_device(&cdev->dev);
			goto out;
		}
		spin_lock_irqsave(sch->lock, flags);
		ccw_device_trigger_reprobe(cdev);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release reference from get_ccwdev_by_dev_id() */
		put_device(&cdev->dev);
		break;
	default:
		break;
	}
	return 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);
out:
	return rc;
}

static void ccw_device_set_int_class(struct ccw_device *cdev)
{
	struct ccw_driver *cdrv = cdev->drv;

	/* Note: we interpret class 0 in this context as an uninitialized
	 * field since it translates to a non-I/O interrupt class. */
	if (cdrv->int_class != 0)
		cdev->private->int_class = cdrv->int_class;
	else
		cdev->private->int_class = IRQIO_CIO;
}

#ifdef CONFIG_CCW_CONSOLE
int __init ccw_device_enable_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int rc;

	if (!cdev->drv || !cdev->handler)
		return -EINVAL;

	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	/* Hold on to an extra reference while device is online. */
	get_device(&cdev->dev);
	rc = ccw_device_online(cdev);
	if (rc)
		goto out_unlock;

	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	if (cdev->private->state == DEV_STATE_ONLINE)
		cdev->online = 1;
	else
		rc = -EIO;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	if (rc) /* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
	return rc;
}

struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	sch = cio_probe_console();
	if (IS_ERR(sch))
		return ERR_CAST(sch);

	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto err_priv;
	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
				sizeof(*io_priv->dma_area),
				&io_priv->dma_area_dma, GFP_KERNEL);
	if (!io_priv->dma_area)
		goto err_dma_area;
	set_io_private(sch, io_priv);
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
				  io_priv->dma_area, io_priv->dma_area_dma);
		set_io_private(sch, NULL);
		put_device(&sch->dev);
		kfree(io_priv);
		return cdev;
	}
	cdev->drv = drv;
	ccw_device_set_int_class(cdev);
	return cdev;

err_dma_area:
	kfree(io_priv);
err_priv:
	put_device(&sch->dev);
	return ERR_PTR(-ENOMEM);
}

void __init ccw_device_destroy_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct io_subchannel_private *io_priv = to_io_private(sch);

	set_io_private(sch, NULL);
	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
			  io_priv->dma_area, io_priv->dma_area_dma);
	put_device(&sch->dev);
	put_device(&cdev->dev);
	kfree(io_priv);
}

/**
 * ccw_device_wait_idle() - busy wait for device to become idle
 * @cdev: ccw device
 *
 * Poll until activity control is zero, that is, no function or data
 * transfer is pending/active.
 * Called with device lock being held.
 */
void ccw_device_wait_idle(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	while (1) {
		cio_tsch(sch);
		if (sch->schib.scsw.cmd.actl == 0)
			break;
		udelay_simple(100);
	}
}

static int ccw_device_pm_restore(struct device *dev);

int ccw_device_force_console(struct ccw_device *cdev)
{
	return ccw_device_pm_restore(&cdev->dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
#endif

/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a bus
 * id matching @bus_id.
 * Returns:
 *  If a device is found, its reference count is increased and it is
 *  returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;

	dev = driver_find_device_by_name(&cdrv->driver, bus_id);

	return dev ? to_ccwdev(dev) : NULL;
}
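
/*
 * As with get_ccwdev_by_dev_id(), the caller owns the returned
 * reference, e.g. (sketch, names illustrative):
 *
 *	cdev = get_ccwdev_by_busid(&example_driver, "0.0.1234");
 *	if (cdev) {
 *		... use cdev ...
 *		put_device(&cdev->dev);
 *	}
 */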

/************************** device driver handling ************************/

/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
static int
ccw_device_probe (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
	int ret;

	cdev->drv = cdrv; /* to let the driver call _set_online */
	ccw_device_set_int_class(cdev);
	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
	if (ret) {
		cdev->drv = NULL;
		cdev->private->int_class = IRQIO_CIO;
		return ret;
	}

	return 0;
}

static int ccw_device_remove(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	struct subchannel *sch;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);

	spin_lock_irq(cdev->ccwlock);
	if (cdev->online) {
		cdev->online = 0;
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
		spin_lock_irq(cdev->ccwlock);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	cdev->private->int_class = IRQIO_CIO;
	sch = to_subchannel(cdev->dev.parent);
	spin_unlock_irq(cdev->ccwlock);
	io_subchannel_quiesce(sch);
	__disable_cmf(cdev);

	return 0;
}

static void ccw_device_shutdown(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (cdev->drv && cdev->drv->shutdown)
		cdev->drv->shutdown(cdev);
	__disable_cmf(cdev);
}

static int ccw_device_pm_prepare(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (work_pending(&cdev->private->todo_work))
		return -EAGAIN;
	/* Fail while device is being set online/offline. */
	if (atomic_read(&cdev->private->onoff))
		return -EAGAIN;

	if (cdev->online && cdev->drv && cdev->drv->prepare)
		return cdev->drv->prepare(cdev);

	return 0;
}

static void ccw_device_pm_complete(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (cdev->online && cdev->drv && cdev->drv->complete)
		cdev->drv->complete(cdev);
}

static int ccw_device_pm_freeze(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	/* Fail suspend while device is in transitional state. */
	if (!dev_fsm_final_state(cdev))
		return -EAGAIN;
	if (!cdev->online)
		return 0;
	if (cdev->drv && cdev->drv->freeze) {
		ret = cdev->drv->freeze(cdev);
		if (ret)
			return ret;
	}

	spin_lock_irq(sch->lock);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (cm_enabled) {
		/* Don't have the css write on memory. */
		ret = ccw_set_cmf(cdev, 0);
		if (ret)
			return ret;
	}
	/* From here on, disallow device driver I/O. */
	spin_lock_irq(sch->lock);
	ret = cio_disable_subchannel(sch);
	spin_unlock_irq(sch->lock);

	return ret;
}

static int ccw_device_pm_thaw(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	if (!cdev->online)
		return 0;

	spin_lock_irq(sch->lock);
	/* Allow device driver I/O again. */
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (ret)
		return ret;

	if (cm_enabled) {
		ret = ccw_set_cmf(cdev, 1);
		if (ret)
			return ret;
	}

	if (cdev->drv && cdev->drv->thaw)
		ret = cdev->drv->thaw(cdev);

	return ret;
}

static void __ccw_device_pm_restore(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid)) {
		cio_enable_subchannel(sch, (u32)(addr_t)sch);
		goto out_unlock;
	}
	/*
	 * While we were sleeping, devices may have gone or become
	 * available again. Kick re-detection.
	 */
	cdev->private->flags.resuming = 1;
	cdev->private->path_new_mask = LPM_ANYPATH;
	css_sched_sch_todo(sch, SCH_TODO_EVAL);
	spin_unlock_irq(sch->lock);
	css_wait_for_slow_path();

	/* cdev may have been moved to a different subchannel. */
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_OFFLINE)
		goto out_unlock;

	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED);
	spin_lock_irq(sch->lock);

out_unlock:
	cdev->private->flags.resuming = 0;
	spin_unlock_irq(sch->lock);
}

static int resume_handle_boxed(struct ccw_device *cdev)
{
	cdev->private->state = DEV_STATE_BOXED;
	if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
		return 0;
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	return -ENODEV;
}

static int resume_handle_disc(struct ccw_device *cdev)
{
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
		return 0;
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	return -ENODEV;
}

static int ccw_device_pm_restore(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int ret = 0;

	__ccw_device_pm_restore(cdev);
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid))
		goto out_restore;

	/* check recognition results */
	switch (cdev->private->state) {
	case DEV_STATE_OFFLINE:
	case DEV_STATE_ONLINE:
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_BOXED:
		ret = resume_handle_boxed(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	default:
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	/* check if the device type has changed */
	if (!ccw_device_test_sense_data(cdev)) {
		ccw_device_update_sense_data(cdev);
		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		ret = -ENODEV;
		goto out_unlock;
	}
	if (!cdev->online)
		goto out_unlock;

	if (ccw_device_online(cdev)) {
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	spin_lock_irq(sch->lock);

	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		ret = -ENODEV;
		goto out_unlock;
	}

	/* reenable cmf, if needed */
	if (cdev->private->cmb) {
		spin_unlock_irq(sch->lock);
		ret = ccw_set_cmf(cdev, 1);
		spin_lock_irq(sch->lock);
		if (ret) {
			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
				      "(rc=%d)\n", cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			ret = 0;
		}
	}

out_restore:
	spin_unlock_irq(sch->lock);
	if (cdev->online && cdev->drv && cdev->drv->restore)
		ret = cdev->drv->restore(cdev);
	return ret;

out_unlock:
	spin_unlock_irq(sch->lock);
	return ret;
}

static const struct dev_pm_ops ccw_pm_ops = {
	.prepare = ccw_device_pm_prepare,
	.complete = ccw_device_pm_complete,
	.freeze = ccw_device_pm_freeze,
	.thaw = ccw_device_pm_thaw,
	.restore = ccw_device_pm_restore,
};

static struct bus_type ccw_bus_type = {
	.name   = "ccw",
	.match  = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe  = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
	.pm = &ccw_pm_ops,
};

/**
 * ccw_driver_register() - register a ccw driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int ccw_driver_register(struct ccw_driver *cdriver)
{
	struct device_driver *drv = &cdriver->driver;

	drv->bus = &ccw_bus_type;

	return driver_register(drv);
}

/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}
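
/*
 * Minimal registration sketch (names illustrative; example_ids as in the
 * matching example further up):
 *
 *	static struct ccw_driver example_driver = {
 *		.driver = {
 *			.name = "example",
 *			.owner = THIS_MODULE,
 *		},
 *		.ids = example_ids,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.set_online = example_set_online,
 *		.set_offline = example_set_offline,
 *	};
 *
 * The module init function would then call
 * ccw_driver_register(&example_driver) and the exit function
 * ccw_driver_unregister(&example_driver).
 */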

static void ccw_device_todo(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	enum cdev_todo todo;

	priv = container_of(work, struct ccw_device_private, todo_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	/* Find out todo. */
	spin_lock_irq(cdev->ccwlock);
	todo = priv->todo;
	priv->todo = CDEV_TODO_NOTHING;
	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
		      priv->dev_id.ssid, priv->dev_id.devno, todo);
	spin_unlock_irq(cdev->ccwlock);
	/* Perform todo. */
	switch (todo) {
	case CDEV_TODO_ENABLE_CMF:
		cmf_reenable(cdev);
		break;
	case CDEV_TODO_REBIND:
		ccw_device_do_unbind_bind(cdev);
		break;
	case CDEV_TODO_REGISTER:
		io_subchannel_register(cdev);
		break;
	case CDEV_TODO_UNREG_EVAL:
		if (!sch_is_pseudo_sch(sch))
			css_schedule_eval(sch->schid);
		fallthrough;
	case CDEV_TODO_UNREG:
		if (sch_is_pseudo_sch(sch))
			ccw_device_unregister(cdev);
		else
			ccw_device_call_sch_unregister(cdev);
		break;
	default:
		break;
	}
	/* Release workqueue ref. */
	put_device(&cdev->dev);
}

/**
 * ccw_device_sched_todo - schedule ccw device operation
 * @cdev: ccw device
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with ccwdev lock held.
 */
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      todo);
	if (cdev->private->todo >= todo)
		return;
	cdev->private->todo = todo;
	/* Get workqueue ref. */
	if (!get_device(&cdev->dev))
		return;
	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&cdev->dev);
	}
}

/**
 * ccw_device_siosl() - initiate logging
 * @cdev: ccw device
 *
 * This function is used to invoke model-dependent logging within the channel
 * subsystem.
 */
int ccw_device_siosl(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	return chsc_siosl(sch->schid);
}
EXPORT_SYMBOL_GPL(ccw_device_siosl);
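
/*
 * A driver's error handling might trigger it like this (sketch):
 *
 *	if (ccw_device_siosl(cdev))
 *		dev_warn(&cdev->dev, "logging request failed\n");
 */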

EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);