1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
5 *		    Carsten Otte <Cotte@de.ibm.com>
6 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * Copyright IBM Corp. 1999, 2009
9 */
10
11#define KMSG_COMPONENT "dasd"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
14#include <linux/kmod.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/ctype.h>
18#include <linux/major.h>
19#include <linux/slab.h>
20#include <linux/hdreg.h>
21#include <linux/async.h>
22#include <linux/mutex.h>
23#include <linux/debugfs.h>
24#include <linux/seq_file.h>
25#include <linux/vmalloc.h>
26
27#include <asm/ccwdev.h>
28#include <asm/ebcdic.h>
29#include <asm/idals.h>
30#include <asm/itcw.h>
31#include <asm/diag.h>
32
33/* This is ugly... */
34#define PRINTK_HEADER "dasd:"
35
36#include "dasd_int.h"
37/*
38 * SECTION: Constant definitions to be used within this file
39 */
40#define DASD_CHANQ_MAX_SIZE 4
41
42#define DASD_DIAG_MOD		"dasd_diag_mod"
43
44static unsigned int queue_depth = 32;
45static unsigned int nr_hw_queues = 4;
46
47module_param(queue_depth, uint, 0444);
48MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
49
50module_param(nr_hw_queues, uint, 0444);
51MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
52
53/*
54 * SECTION: exported variables of dasd.c
55 */
56debug_info_t *dasd_debug_area;
57EXPORT_SYMBOL(dasd_debug_area);
58static struct dentry *dasd_debugfs_root_entry;
59struct dasd_discipline *dasd_diag_discipline_pointer;
60EXPORT_SYMBOL(dasd_diag_discipline_pointer);
61void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
62
63MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
64MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
65		   " Copyright IBM Corp. 2000");
66MODULE_SUPPORTED_DEVICE("dasd");
67MODULE_LICENSE("GPL");
68
69/*
70 * SECTION: prototypes for static functions of dasd.c
71 */
72static int  dasd_alloc_queue(struct dasd_block *);
73static void dasd_free_queue(struct dasd_block *);
74static int dasd_flush_block_queue(struct dasd_block *);
75static void dasd_device_tasklet(unsigned long);
76static void dasd_block_tasklet(unsigned long);
77static void do_kick_device(struct work_struct *);
78static void do_restore_device(struct work_struct *);
79static void do_reload_device(struct work_struct *);
80static void do_requeue_requests(struct work_struct *);
81static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
82static void dasd_device_timeout(struct timer_list *);
83static void dasd_block_timeout(struct timer_list *);
84static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
85static void dasd_profile_init(struct dasd_profile *, struct dentry *);
86static void dasd_profile_exit(struct dasd_profile *);
87static void dasd_hosts_init(struct dentry *, struct dasd_device *);
88static void dasd_hosts_exit(struct dasd_device *);
89
90/*
91 * SECTION: Operations on the device structure.
92 */
93static wait_queue_head_t dasd_init_waitq;
94static wait_queue_head_t dasd_flush_wq;
95static wait_queue_head_t generic_waitq;
96static wait_queue_head_t shutdown_waitq;
97
98/*
99 * Allocate memory for a new device structure.
100 */
101struct dasd_device *dasd_alloc_device(void)
102{
103	struct dasd_device *device;
104
105	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
106	if (!device)
107		return ERR_PTR(-ENOMEM);
108
109	/* Get two pages for normal block device operations. */
110	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
111	if (!device->ccw_mem) {
112		kfree(device);
113		return ERR_PTR(-ENOMEM);
114	}
115	/* Get one page for error recovery. */
116	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
117	if (!device->erp_mem) {
118		free_pages((unsigned long) device->ccw_mem, 1);
119		kfree(device);
120		return ERR_PTR(-ENOMEM);
121	}
122	/* Get two pages for ese format. */
123	device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
124	if (!device->ese_mem) {
125		free_page((unsigned long) device->erp_mem);
126		free_pages((unsigned long) device->ccw_mem, 1);
127		kfree(device);
128		return ERR_PTR(-ENOMEM);
129	}
130
131	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
132	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
133	dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
134	spin_lock_init(&device->mem_lock);
135	atomic_set(&device->tasklet_scheduled, 0);
136	tasklet_init(&device->tasklet, dasd_device_tasklet,
137		     (unsigned long) device);
138	INIT_LIST_HEAD(&device->ccw_queue);
139	timer_setup(&device->timer, dasd_device_timeout, 0);
140	INIT_WORK(&device->kick_work, do_kick_device);
141	INIT_WORK(&device->restore_device, do_restore_device);
142	INIT_WORK(&device->reload_device, do_reload_device);
143	INIT_WORK(&device->requeue_requests, do_requeue_requests);
144	device->state = DASD_STATE_NEW;
145	device->target = DASD_STATE_NEW;
146	mutex_init(&device->state_mutex);
147	spin_lock_init(&device->profile.lock);
148	return device;
149}
150
151/*
152 * Free memory of a device structure.
153 */
154void dasd_free_device(struct dasd_device *device)
155{
156	kfree(device->private);
157	free_pages((unsigned long) device->ese_mem, 1);
158	free_page((unsigned long) device->erp_mem);
159	free_pages((unsigned long) device->ccw_mem, 1);
160	kfree(device);
161}
162
163/*
164 * Allocate memory for a new device structure.
165 */
166struct dasd_block *dasd_alloc_block(void)
167{
168	struct dasd_block *block;
169
170	block = kzalloc(sizeof(*block), GFP_ATOMIC);
171	if (!block)
172		return ERR_PTR(-ENOMEM);
173	/* open_count = 0 means device online but not in use */
174	atomic_set(&block->open_count, -1);
175
176	atomic_set(&block->tasklet_scheduled, 0);
177	tasklet_init(&block->tasklet, dasd_block_tasklet,
178		     (unsigned long) block);
179	INIT_LIST_HEAD(&block->ccw_queue);
180	spin_lock_init(&block->queue_lock);
181	INIT_LIST_HEAD(&block->format_list);
182	spin_lock_init(&block->format_lock);
183	timer_setup(&block->timer, dasd_block_timeout, 0);
184	spin_lock_init(&block->profile.lock);
185
186	return block;
187}
188EXPORT_SYMBOL_GPL(dasd_alloc_block);
189
190/*
191 * Free memory of a device structure.
192 */
193void dasd_free_block(struct dasd_block *block)
194{
195	kfree(block);
196}
197EXPORT_SYMBOL_GPL(dasd_free_block);
198
199/*
200 * Make a new device known to the system.
201 */
202static int dasd_state_new_to_known(struct dasd_device *device)
203{
204	int rc;
205
206	/*
207	 * As long as the device is not in state DASD_STATE_NEW we want to
208	 * keep the reference count > 0.
209	 */
210	dasd_get_device(device);
211
212	if (device->block) {
213		rc = dasd_alloc_queue(device->block);
214		if (rc) {
215			dasd_put_device(device);
216			return rc;
217		}
218	}
219	device->state = DASD_STATE_KNOWN;
220	return 0;
221}
222
223/*
224 * Let the system forget about a device.
225 */
226static int dasd_state_known_to_new(struct dasd_device *device)
227{
228	/* Disable extended error reporting for this device. */
229	dasd_eer_disable(device);
230	device->state = DASD_STATE_NEW;
231
232	if (device->block)
233		dasd_free_queue(device->block);
234
235	/* Give up reference we took in dasd_state_new_to_known. */
236	dasd_put_device(device);
237	return 0;
238}
239
240static struct dentry *dasd_debugfs_setup(const char *name,
241					 struct dentry *base_dentry)
242{
243	struct dentry *pde;
244
245	if (!base_dentry)
246		return NULL;
247	pde = debugfs_create_dir(name, base_dentry);
248	if (!pde || IS_ERR(pde))
249		return NULL;
250	return pde;
251}
252
253/*
254 * Request the irq line for the device.
255 */
256static int dasd_state_known_to_basic(struct dasd_device *device)
257{
258	struct dasd_block *block = device->block;
259	int rc = 0;
260
261	/* Allocate and register gendisk structure. */
262	if (block) {
263		rc = dasd_gendisk_alloc(block);
264		if (rc)
265			return rc;
266		block->debugfs_dentry =
267			dasd_debugfs_setup(block->gdp->disk_name,
268					   dasd_debugfs_root_entry);
269		dasd_profile_init(&block->profile, block->debugfs_dentry);
270		if (dasd_global_profile_level == DASD_PROFILE_ON)
271			dasd_profile_on(&device->block->profile);
272	}
273	device->debugfs_dentry =
274		dasd_debugfs_setup(dev_name(&device->cdev->dev),
275				   dasd_debugfs_root_entry);
276	dasd_profile_init(&device->profile, device->debugfs_dentry);
277	dasd_hosts_init(device->debugfs_dentry, device);
278
279	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
280	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
281					    8 * sizeof(long));
282	debug_register_view(device->debug_area, &debug_sprintf_view);
283	debug_set_level(device->debug_area, DBF_WARNING);
284	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
285
286	device->state = DASD_STATE_BASIC;
287
288	return rc;
289}
290
291/*
292 * Release the irq line for the device. Terminate any running i/o.
293 */
294static int dasd_state_basic_to_known(struct dasd_device *device)
295{
296	int rc;
297
298	if (device->discipline->basic_to_known) {
299		rc = device->discipline->basic_to_known(device);
300		if (rc)
301			return rc;
302	}
303
304	if (device->block) {
305		dasd_profile_exit(&device->block->profile);
306		debugfs_remove(device->block->debugfs_dentry);
307		dasd_gendisk_free(device->block);
308		dasd_block_clear_timer(device->block);
309	}
310	rc = dasd_flush_device_queue(device);
311	if (rc)
312		return rc;
313	dasd_device_clear_timer(device);
314	dasd_profile_exit(&device->profile);
315	dasd_hosts_exit(device);
316	debugfs_remove(device->debugfs_dentry);
317	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
318	if (device->debug_area != NULL) {
319		debug_unregister(device->debug_area);
320		device->debug_area = NULL;
321	}
322	device->state = DASD_STATE_KNOWN;
323	return 0;
324}
325
326/*
327 * Do the initial analysis. The do_analysis function may return
328 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
329 * until the discipline decides to continue the startup sequence
330 * by calling the function dasd_change_state. The eckd disciplines
331 * uses this to start a ccw that detects the format. The completion
332 * interrupt for this detection ccw uses the kernel event daemon to
333 * trigger the call to dasd_change_state. All this is done in the
334 * discipline code, see dasd_eckd.c.
335 * After the analysis ccw is done (do_analysis returned 0) the block
336 * device is setup.
337 * In case the analysis returns an error, the device setup is stopped
338 * (a fake disk was already added to allow formatting).
339 */
340static int dasd_state_basic_to_ready(struct dasd_device *device)
341{
342	int rc;
343	struct dasd_block *block;
344	struct gendisk *disk;
345
346	rc = 0;
347	block = device->block;
348	/* make disk known with correct capacity */
349	if (block) {
350		if (block->base->discipline->do_analysis != NULL)
351			rc = block->base->discipline->do_analysis(block);
352		if (rc) {
353			if (rc != -EAGAIN) {
354				device->state = DASD_STATE_UNFMT;
355				disk = device->block->gdp;
356				kobject_uevent(&disk_to_dev(disk)->kobj,
357					       KOBJ_CHANGE);
358				goto out;
359			}
360			return rc;
361		}
362		if (device->discipline->setup_blk_queue)
363			device->discipline->setup_blk_queue(block);
364		set_capacity(block->gdp,
365			     block->blocks << block->s2b_shift);
366		device->state = DASD_STATE_READY;
367		rc = dasd_scan_partitions(block);
368		if (rc) {
369			device->state = DASD_STATE_BASIC;
370			return rc;
371		}
372	} else {
373		device->state = DASD_STATE_READY;
374	}
375out:
376	if (device->discipline->basic_to_ready)
377		rc = device->discipline->basic_to_ready(device);
378	return rc;
379}
380
381static inline
382int _wait_for_empty_queues(struct dasd_device *device)
383{
384	if (device->block)
385		return list_empty(&device->ccw_queue) &&
386			list_empty(&device->block->ccw_queue);
387	else
388		return list_empty(&device->ccw_queue);
389}
390
391/*
392 * Remove device from block device layer. Destroy dirty buffers.
393 * Forget format information. Check if the target level is basic
394 * and if it is create fake disk for formatting.
395 */
396static int dasd_state_ready_to_basic(struct dasd_device *device)
397{
398	int rc;
399
400	device->state = DASD_STATE_BASIC;
401	if (device->block) {
402		struct dasd_block *block = device->block;
403		rc = dasd_flush_block_queue(block);
404		if (rc) {
405			device->state = DASD_STATE_READY;
406			return rc;
407		}
408		dasd_destroy_partitions(block);
409		block->blocks = 0;
410		block->bp_block = 0;
411		block->s2b_shift = 0;
412	}
413	return 0;
414}
415
416/*
417 * Back to basic.
418 */
419static int dasd_state_unfmt_to_basic(struct dasd_device *device)
420{
421	device->state = DASD_STATE_BASIC;
422	return 0;
423}
424
425/*
426 * Make the device online and schedule the bottom half to start
427 * the requeueing of requests from the linux request queue to the
428 * ccw queue.
429 */
430static int
431dasd_state_ready_to_online(struct dasd_device * device)
432{
433	struct gendisk *disk;
434	struct disk_part_iter piter;
435	struct hd_struct *part;
436
437	device->state = DASD_STATE_ONLINE;
438	if (device->block) {
439		dasd_schedule_block_bh(device->block);
440		if ((device->features & DASD_FEATURE_USERAW)) {
441			disk = device->block->gdp;
442			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
443			return 0;
444		}
445		disk = device->block->bdev->bd_disk;
446		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
447		while ((part = disk_part_iter_next(&piter)))
448			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
449		disk_part_iter_exit(&piter);
450	}
451	return 0;
452}
453
454/*
455 * Stop the requeueing of requests again.
456 */
457static int dasd_state_online_to_ready(struct dasd_device *device)
458{
459	int rc;
460	struct gendisk *disk;
461	struct disk_part_iter piter;
462	struct hd_struct *part;
463
464	if (device->discipline->online_to_ready) {
465		rc = device->discipline->online_to_ready(device);
466		if (rc)
467			return rc;
468	}
469
470	device->state = DASD_STATE_READY;
471	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
472		disk = device->block->bdev->bd_disk;
473		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
474		while ((part = disk_part_iter_next(&piter)))
475			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
476		disk_part_iter_exit(&piter);
477	}
478	return 0;
479}
480
481/*
482 * Device startup state changes.
483 */
484static int dasd_increase_state(struct dasd_device *device)
485{
486	int rc;
487
488	rc = 0;
489	if (device->state == DASD_STATE_NEW &&
490	    device->target >= DASD_STATE_KNOWN)
491		rc = dasd_state_new_to_known(device);
492
493	if (!rc &&
494	    device->state == DASD_STATE_KNOWN &&
495	    device->target >= DASD_STATE_BASIC)
496		rc = dasd_state_known_to_basic(device);
497
498	if (!rc &&
499	    device->state == DASD_STATE_BASIC &&
500	    device->target >= DASD_STATE_READY)
501		rc = dasd_state_basic_to_ready(device);
502
503	if (!rc &&
504	    device->state == DASD_STATE_UNFMT &&
505	    device->target > DASD_STATE_UNFMT)
506		rc = -EPERM;
507
508	if (!rc &&
509	    device->state == DASD_STATE_READY &&
510	    device->target >= DASD_STATE_ONLINE)
511		rc = dasd_state_ready_to_online(device);
512
513	return rc;
514}
515
516/*
517 * Device shutdown state changes.
518 */
519static int dasd_decrease_state(struct dasd_device *device)
520{
521	int rc;
522
523	rc = 0;
524	if (device->state == DASD_STATE_ONLINE &&
525	    device->target <= DASD_STATE_READY)
526		rc = dasd_state_online_to_ready(device);
527
528	if (!rc &&
529	    device->state == DASD_STATE_READY &&
530	    device->target <= DASD_STATE_BASIC)
531		rc = dasd_state_ready_to_basic(device);
532
533	if (!rc &&
534	    device->state == DASD_STATE_UNFMT &&
535	    device->target <= DASD_STATE_BASIC)
536		rc = dasd_state_unfmt_to_basic(device);
537
538	if (!rc &&
539	    device->state == DASD_STATE_BASIC &&
540	    device->target <= DASD_STATE_KNOWN)
541		rc = dasd_state_basic_to_known(device);
542
543	if (!rc &&
544	    device->state == DASD_STATE_KNOWN &&
545	    device->target <= DASD_STATE_NEW)
546		rc = dasd_state_known_to_new(device);
547
548	return rc;
549}
550
551/*
552 * This is the main startup/shutdown routine.
553 */
554static void dasd_change_state(struct dasd_device *device)
555{
556	int rc;
557
558	if (device->state == device->target)
559		/* Already where we want to go today... */
560		return;
561	if (device->state < device->target)
562		rc = dasd_increase_state(device);
563	else
564		rc = dasd_decrease_state(device);
565	if (rc == -EAGAIN)
566		return;
567	if (rc)
568		device->target = device->state;
569
570	/* let user-space know that the device status changed */
571	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
572
573	if (device->state == device->target)
574		wake_up(&dasd_init_waitq);
575}
576
577/*
578 * Kick starter for devices that did not complete the startup/shutdown
579 * procedure or were sleeping because of a pending state.
580 * dasd_kick_device will schedule a call do do_kick_device to the kernel
581 * event daemon.
582 */
583static void do_kick_device(struct work_struct *work)
584{
585	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
586	mutex_lock(&device->state_mutex);
587	dasd_change_state(device);
588	mutex_unlock(&device->state_mutex);
589	dasd_schedule_device_bh(device);
590	dasd_put_device(device);
591}
592
593void dasd_kick_device(struct dasd_device *device)
594{
595	dasd_get_device(device);
596	/* queue call to dasd_kick_device to the kernel event daemon. */
597	if (!schedule_work(&device->kick_work))
598		dasd_put_device(device);
599}
600EXPORT_SYMBOL(dasd_kick_device);
601
602/*
603 * dasd_reload_device will schedule a call do do_reload_device to the kernel
604 * event daemon.
605 */
606static void do_reload_device(struct work_struct *work)
607{
608	struct dasd_device *device = container_of(work, struct dasd_device,
609						  reload_device);
610	device->discipline->reload(device);
611	dasd_put_device(device);
612}
613
614void dasd_reload_device(struct dasd_device *device)
615{
616	dasd_get_device(device);
617	/* queue call to dasd_reload_device to the kernel event daemon. */
618	if (!schedule_work(&device->reload_device))
619		dasd_put_device(device);
620}
621EXPORT_SYMBOL(dasd_reload_device);
622
623/*
624 * dasd_restore_device will schedule a call do do_restore_device to the kernel
625 * event daemon.
626 */
627static void do_restore_device(struct work_struct *work)
628{
629	struct dasd_device *device = container_of(work, struct dasd_device,
630						  restore_device);
631	device->cdev->drv->restore(device->cdev);
632	dasd_put_device(device);
633}
634
635void dasd_restore_device(struct dasd_device *device)
636{
637	dasd_get_device(device);
638	/* queue call to dasd_restore_device to the kernel event daemon. */
639	if (!schedule_work(&device->restore_device))
640		dasd_put_device(device);
641}
642
643/*
644 * Set the target state for a device and starts the state change.
645 */
646void dasd_set_target_state(struct dasd_device *device, int target)
647{
648	dasd_get_device(device);
649	mutex_lock(&device->state_mutex);
650	/* If we are in probeonly mode stop at DASD_STATE_READY. */
651	if (dasd_probeonly && target > DASD_STATE_READY)
652		target = DASD_STATE_READY;
653	if (device->target != target) {
654		if (device->state == target)
655			wake_up(&dasd_init_waitq);
656		device->target = target;
657	}
658	if (device->state != device->target)
659		dasd_change_state(device);
660	mutex_unlock(&device->state_mutex);
661	dasd_put_device(device);
662}
663EXPORT_SYMBOL(dasd_set_target_state);
664
665/*
666 * Enable devices with device numbers in [from..to].
667 */
668static inline int _wait_for_device(struct dasd_device *device)
669{
670	return (device->state == device->target);
671}
672
673void dasd_enable_device(struct dasd_device *device)
674{
675	dasd_set_target_state(device, DASD_STATE_ONLINE);
676	if (device->state <= DASD_STATE_KNOWN)
677		/* No discipline for device found. */
678		dasd_set_target_state(device, DASD_STATE_NEW);
679	/* Now wait for the devices to come up. */
680	wait_event(dasd_init_waitq, _wait_for_device(device));
681
682	dasd_reload_device(device);
683	if (device->discipline->kick_validate)
684		device->discipline->kick_validate(device);
685}
686EXPORT_SYMBOL(dasd_enable_device);
687
688/*
689 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
690 */
691
692unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;
693
694#ifdef CONFIG_DASD_PROFILE
695struct dasd_profile dasd_global_profile = {
696	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
697};
698static struct dentry *dasd_debugfs_global_entry;
699
700/*
701 * Add profiling information for cqr before execution.
702 */
703static void dasd_profile_start(struct dasd_block *block,
704			       struct dasd_ccw_req *cqr,
705			       struct request *req)
706{
707	struct list_head *l;
708	unsigned int counter;
709	struct dasd_device *device;
710
711	/* count the length of the chanq for statistics */
712	counter = 0;
713	if (dasd_global_profile_level || block->profile.data)
714		list_for_each(l, &block->ccw_queue)
715			if (++counter >= 31)
716				break;
717
718	spin_lock(&dasd_global_profile.lock);
719	if (dasd_global_profile.data) {
720		dasd_global_profile.data->dasd_io_nr_req[counter]++;
721		if (rq_data_dir(req) == READ)
722			dasd_global_profile.data->dasd_read_nr_req[counter]++;
723	}
724	spin_unlock(&dasd_global_profile.lock);
725
726	spin_lock(&block->profile.lock);
727	if (block->profile.data) {
728		block->profile.data->dasd_io_nr_req[counter]++;
729		if (rq_data_dir(req) == READ)
730			block->profile.data->dasd_read_nr_req[counter]++;
731	}
732	spin_unlock(&block->profile.lock);
733
734	/*
735	 * We count the request for the start device, even though it may run on
736	 * some other device due to error recovery. This way we make sure that
737	 * we count each request only once.
738	 */
739	device = cqr->startdev;
740	if (!device->profile.data)
741		return;
742
743	spin_lock(get_ccwdev_lock(device->cdev));
744	counter = 1; /* request is not yet queued on the start device */
745	list_for_each(l, &device->ccw_queue)
746		if (++counter >= 31)
747			break;
748	spin_unlock(get_ccwdev_lock(device->cdev));
749
750	spin_lock(&device->profile.lock);
751	device->profile.data->dasd_io_nr_req[counter]++;
752	if (rq_data_dir(req) == READ)
753		device->profile.data->dasd_read_nr_req[counter]++;
754	spin_unlock(&device->profile.lock);
755}
756
757/*
758 * Add profiling information for cqr after execution.
759 */
760
761#define dasd_profile_counter(value, index)			   \
762{								   \
763	for (index = 0; index < 31 && value >> (2+index); index++) \
764		;						   \
765}
766
767static void dasd_profile_end_add_data(struct dasd_profile_info *data,
768				      int is_alias,
769				      int is_tpm,
770				      int is_read,
771				      long sectors,
772				      int sectors_ind,
773				      int tottime_ind,
774				      int tottimeps_ind,
775				      int strtime_ind,
776				      int irqtime_ind,
777				      int irqtimeps_ind,
778				      int endtime_ind)
779{
780	/* in case of an overflow, reset the whole profile */
781	if (data->dasd_io_reqs == UINT_MAX) {
782			memset(data, 0, sizeof(*data));
783			ktime_get_real_ts64(&data->starttod);
784	}
785	data->dasd_io_reqs++;
786	data->dasd_io_sects += sectors;
787	if (is_alias)
788		data->dasd_io_alias++;
789	if (is_tpm)
790		data->dasd_io_tpm++;
791
792	data->dasd_io_secs[sectors_ind]++;
793	data->dasd_io_times[tottime_ind]++;
794	data->dasd_io_timps[tottimeps_ind]++;
795	data->dasd_io_time1[strtime_ind]++;
796	data->dasd_io_time2[irqtime_ind]++;
797	data->dasd_io_time2ps[irqtimeps_ind]++;
798	data->dasd_io_time3[endtime_ind]++;
799
800	if (is_read) {
801		data->dasd_read_reqs++;
802		data->dasd_read_sects += sectors;
803		if (is_alias)
804			data->dasd_read_alias++;
805		if (is_tpm)
806			data->dasd_read_tpm++;
807		data->dasd_read_secs[sectors_ind]++;
808		data->dasd_read_times[tottime_ind]++;
809		data->dasd_read_time1[strtime_ind]++;
810		data->dasd_read_time2[irqtime_ind]++;
811		data->dasd_read_time3[endtime_ind]++;
812	}
813}
814
815static void dasd_profile_end(struct dasd_block *block,
816			     struct dasd_ccw_req *cqr,
817			     struct request *req)
818{
819	unsigned long strtime, irqtime, endtime, tottime;
820	unsigned long tottimeps, sectors;
821	struct dasd_device *device;
822	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
823	int irqtime_ind, irqtimeps_ind, endtime_ind;
824	struct dasd_profile_info *data;
825
826	device = cqr->startdev;
827	if (!(dasd_global_profile_level ||
828	      block->profile.data ||
829	      device->profile.data))
830		return;
831
832	sectors = blk_rq_sectors(req);
833	if (!cqr->buildclk || !cqr->startclk ||
834	    !cqr->stopclk || !cqr->endclk ||
835	    !sectors)
836		return;
837
838	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
839	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
840	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
841	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
842	tottimeps = tottime / sectors;
843
844	dasd_profile_counter(sectors, sectors_ind);
845	dasd_profile_counter(tottime, tottime_ind);
846	dasd_profile_counter(tottimeps, tottimeps_ind);
847	dasd_profile_counter(strtime, strtime_ind);
848	dasd_profile_counter(irqtime, irqtime_ind);
849	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
850	dasd_profile_counter(endtime, endtime_ind);
851
852	spin_lock(&dasd_global_profile.lock);
853	if (dasd_global_profile.data) {
854		data = dasd_global_profile.data;
855		data->dasd_sum_times += tottime;
856		data->dasd_sum_time_str += strtime;
857		data->dasd_sum_time_irq += irqtime;
858		data->dasd_sum_time_end += endtime;
859		dasd_profile_end_add_data(dasd_global_profile.data,
860					  cqr->startdev != block->base,
861					  cqr->cpmode == 1,
862					  rq_data_dir(req) == READ,
863					  sectors, sectors_ind, tottime_ind,
864					  tottimeps_ind, strtime_ind,
865					  irqtime_ind, irqtimeps_ind,
866					  endtime_ind);
867	}
868	spin_unlock(&dasd_global_profile.lock);
869
870	spin_lock(&block->profile.lock);
871	if (block->profile.data) {
872		data = block->profile.data;
873		data->dasd_sum_times += tottime;
874		data->dasd_sum_time_str += strtime;
875		data->dasd_sum_time_irq += irqtime;
876		data->dasd_sum_time_end += endtime;
877		dasd_profile_end_add_data(block->profile.data,
878					  cqr->startdev != block->base,
879					  cqr->cpmode == 1,
880					  rq_data_dir(req) == READ,
881					  sectors, sectors_ind, tottime_ind,
882					  tottimeps_ind, strtime_ind,
883					  irqtime_ind, irqtimeps_ind,
884					  endtime_ind);
885	}
886	spin_unlock(&block->profile.lock);
887
888	spin_lock(&device->profile.lock);
889	if (device->profile.data) {
890		data = device->profile.data;
891		data->dasd_sum_times += tottime;
892		data->dasd_sum_time_str += strtime;
893		data->dasd_sum_time_irq += irqtime;
894		data->dasd_sum_time_end += endtime;
895		dasd_profile_end_add_data(device->profile.data,
896					  cqr->startdev != block->base,
897					  cqr->cpmode == 1,
898					  rq_data_dir(req) == READ,
899					  sectors, sectors_ind, tottime_ind,
900					  tottimeps_ind, strtime_ind,
901					  irqtime_ind, irqtimeps_ind,
902					  endtime_ind);
903	}
904	spin_unlock(&device->profile.lock);
905}
906
907void dasd_profile_reset(struct dasd_profile *profile)
908{
909	struct dasd_profile_info *data;
910
911	spin_lock_bh(&profile->lock);
912	data = profile->data;
913	if (!data) {
914		spin_unlock_bh(&profile->lock);
915		return;
916	}
917	memset(data, 0, sizeof(*data));
918	ktime_get_real_ts64(&data->starttod);
919	spin_unlock_bh(&profile->lock);
920}
921
922int dasd_profile_on(struct dasd_profile *profile)
923{
924	struct dasd_profile_info *data;
925
926	data = kzalloc(sizeof(*data), GFP_KERNEL);
927	if (!data)
928		return -ENOMEM;
929	spin_lock_bh(&profile->lock);
930	if (profile->data) {
931		spin_unlock_bh(&profile->lock);
932		kfree(data);
933		return 0;
934	}
935	ktime_get_real_ts64(&data->starttod);
936	profile->data = data;
937	spin_unlock_bh(&profile->lock);
938	return 0;
939}
940
941void dasd_profile_off(struct dasd_profile *profile)
942{
943	spin_lock_bh(&profile->lock);
944	kfree(profile->data);
945	profile->data = NULL;
946	spin_unlock_bh(&profile->lock);
947}
948
949char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
950{
951	char *buffer;
952
953	buffer = vmalloc(user_len + 1);
954	if (buffer == NULL)
955		return ERR_PTR(-ENOMEM);
956	if (copy_from_user(buffer, user_buf, user_len) != 0) {
957		vfree(buffer);
958		return ERR_PTR(-EFAULT);
959	}
960	/* got the string, now strip linefeed. */
961	if (buffer[user_len - 1] == '\n')
962		buffer[user_len - 1] = 0;
963	else
964		buffer[user_len] = 0;
965	return buffer;
966}
967
968static ssize_t dasd_stats_write(struct file *file,
969				const char __user *user_buf,
970				size_t user_len, loff_t *pos)
971{
972	char *buffer, *str;
973	int rc;
974	struct seq_file *m = (struct seq_file *)file->private_data;
975	struct dasd_profile *prof = m->private;
976
977	if (user_len > 65536)
978		user_len = 65536;
979	buffer = dasd_get_user_string(user_buf, user_len);
980	if (IS_ERR(buffer))
981		return PTR_ERR(buffer);
982
983	str = skip_spaces(buffer);
984	rc = user_len;
985	if (strncmp(str, "reset", 5) == 0) {
986		dasd_profile_reset(prof);
987	} else if (strncmp(str, "on", 2) == 0) {
988		rc = dasd_profile_on(prof);
989		if (rc)
990			goto out;
991		rc = user_len;
992		if (prof == &dasd_global_profile) {
993			dasd_profile_reset(prof);
994			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
995		}
996	} else if (strncmp(str, "off", 3) == 0) {
997		if (prof == &dasd_global_profile)
998			dasd_global_profile_level = DASD_PROFILE_OFF;
999		dasd_profile_off(prof);
1000	} else
1001		rc = -EINVAL;
1002out:
1003	vfree(buffer);
1004	return rc;
1005}
1006
1007static void dasd_stats_array(struct seq_file *m, unsigned int *array)
1008{
1009	int i;
1010
1011	for (i = 0; i < 32; i++)
1012		seq_printf(m, "%u ", array[i]);
1013	seq_putc(m, '\n');
1014}
1015
1016static void dasd_stats_seq_print(struct seq_file *m,
1017				 struct dasd_profile_info *data)
1018{
1019	seq_printf(m, "start_time %lld.%09ld\n",
1020		   (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
1021	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
1022	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
1023	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
1024	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
1025	seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
1026		   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
1027	seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
1028		   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
1029	seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
1030		   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
1031	seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
1032		   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
1033	seq_puts(m, "histogram_sectors ");
1034	dasd_stats_array(m, data->dasd_io_secs);
1035	seq_puts(m, "histogram_io_times ");
1036	dasd_stats_array(m, data->dasd_io_times);
1037	seq_puts(m, "histogram_io_times_weighted ");
1038	dasd_stats_array(m, data->dasd_io_timps);
1039	seq_puts(m, "histogram_time_build_to_ssch ");
1040	dasd_stats_array(m, data->dasd_io_time1);
1041	seq_puts(m, "histogram_time_ssch_to_irq ");
1042	dasd_stats_array(m, data->dasd_io_time2);
1043	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
1044	dasd_stats_array(m, data->dasd_io_time2ps);
1045	seq_puts(m, "histogram_time_irq_to_end ");
1046	dasd_stats_array(m, data->dasd_io_time3);
1047	seq_puts(m, "histogram_ccw_queue_length ");
1048	dasd_stats_array(m, data->dasd_io_nr_req);
1049	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
1050	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
1051	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
1052	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
1053	seq_puts(m, "histogram_read_sectors ");
1054	dasd_stats_array(m, data->dasd_read_secs);
1055	seq_puts(m, "histogram_read_times ");
1056	dasd_stats_array(m, data->dasd_read_times);
1057	seq_puts(m, "histogram_read_time_build_to_ssch ");
1058	dasd_stats_array(m, data->dasd_read_time1);
1059	seq_puts(m, "histogram_read_time_ssch_to_irq ");
1060	dasd_stats_array(m, data->dasd_read_time2);
1061	seq_puts(m, "histogram_read_time_irq_to_end ");
1062	dasd_stats_array(m, data->dasd_read_time3);
1063	seq_puts(m, "histogram_read_ccw_queue_length ");
1064	dasd_stats_array(m, data->dasd_read_nr_req);
1065}
1066
1067static int dasd_stats_show(struct seq_file *m, void *v)
1068{
1069	struct dasd_profile *profile;
1070	struct dasd_profile_info *data;
1071
1072	profile = m->private;
1073	spin_lock_bh(&profile->lock);
1074	data = profile->data;
1075	if (!data) {
1076		spin_unlock_bh(&profile->lock);
1077		seq_puts(m, "disabled\n");
1078		return 0;
1079	}
1080	dasd_stats_seq_print(m, data);
1081	spin_unlock_bh(&profile->lock);
1082	return 0;
1083}
1084
1085static int dasd_stats_open(struct inode *inode, struct file *file)
1086{
1087	struct dasd_profile *profile = inode->i_private;
1088	return single_open(file, dasd_stats_show, profile);
1089}
1090
1091static const struct file_operations dasd_stats_raw_fops = {
1092	.owner		= THIS_MODULE,
1093	.open		= dasd_stats_open,
1094	.read		= seq_read,
1095	.llseek		= seq_lseek,
1096	.release	= single_release,
1097	.write		= dasd_stats_write,
1098};
1099
1100static void dasd_profile_init(struct dasd_profile *profile,
1101			      struct dentry *base_dentry)
1102{
1103	umode_t mode;
1104	struct dentry *pde;
1105
1106	if (!base_dentry)
1107		return;
1108	profile->dentry = NULL;
1109	profile->data = NULL;
1110	mode = (S_IRUSR | S_IWUSR | S_IFREG);
1111	pde = debugfs_create_file("statistics", mode, base_dentry,
1112				  profile, &dasd_stats_raw_fops);
1113	if (pde && !IS_ERR(pde))
1114		profile->dentry = pde;
1115	return;
1116}
1117
1118static void dasd_profile_exit(struct dasd_profile *profile)
1119{
1120	dasd_profile_off(profile);
1121	debugfs_remove(profile->dentry);
1122	profile->dentry = NULL;
1123}
1124
1125static void dasd_statistics_removeroot(void)
1126{
1127	dasd_global_profile_level = DASD_PROFILE_OFF;
1128	dasd_profile_exit(&dasd_global_profile);
1129	debugfs_remove(dasd_debugfs_global_entry);
1130	debugfs_remove(dasd_debugfs_root_entry);
1131}
1132
1133static void dasd_statistics_createroot(void)
1134{
1135	struct dentry *pde;
1136
1137	dasd_debugfs_root_entry = NULL;
1138	pde = debugfs_create_dir("dasd", NULL);
1139	if (!pde || IS_ERR(pde))
1140		goto error;
1141	dasd_debugfs_root_entry = pde;
1142	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
1143	if (!pde || IS_ERR(pde))
1144		goto error;
1145	dasd_debugfs_global_entry = pde;
1146	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
1147	return;
1148
1149error:
1150	DBF_EVENT(DBF_ERR, "%s",
1151		  "Creation of the dasd debugfs interface failed");
1152	dasd_statistics_removeroot();
1153	return;
1154}
1155
1156#else
1157#define dasd_profile_start(block, cqr, req) do {} while (0)
1158#define dasd_profile_end(block, cqr, req) do {} while (0)
1159
1160static void dasd_statistics_createroot(void)
1161{
1162	return;
1163}
1164
1165static void dasd_statistics_removeroot(void)
1166{
1167	return;
1168}
1169
1170int dasd_stats_generic_show(struct seq_file *m, void *v)
1171{
1172	seq_puts(m, "Statistics are not activated in this kernel\n");
1173	return 0;
1174}
1175
1176static void dasd_profile_init(struct dasd_profile *profile,
1177			      struct dentry *base_dentry)
1178{
1179	return;
1180}
1181
1182static void dasd_profile_exit(struct dasd_profile *profile)
1183{
1184	return;
1185}
1186
1187int dasd_profile_on(struct dasd_profile *profile)
1188{
1189	return 0;
1190}
1191
1192#endif				/* CONFIG_DASD_PROFILE */
1193
1194static int dasd_hosts_show(struct seq_file *m, void *v)
1195{
1196	struct dasd_device *device;
1197	int rc = -EOPNOTSUPP;
1198
1199	device = m->private;
1200	dasd_get_device(device);
1201
1202	if (device->discipline->hosts_print)
1203		rc = device->discipline->hosts_print(device, m);
1204
1205	dasd_put_device(device);
1206	return rc;
1207}
1208
1209DEFINE_SHOW_ATTRIBUTE(dasd_hosts);
1210
1211static void dasd_hosts_exit(struct dasd_device *device)
1212{
1213	debugfs_remove(device->hosts_dentry);
1214	device->hosts_dentry = NULL;
1215}
1216
1217static void dasd_hosts_init(struct dentry *base_dentry,
1218			    struct dasd_device *device)
1219{
1220	struct dentry *pde;
1221	umode_t mode;
1222
1223	if (!base_dentry)
1224		return;
1225
1226	mode = S_IRUSR | S_IFREG;
1227	pde = debugfs_create_file("host_access_list", mode, base_dentry,
1228				  device, &dasd_hosts_fops);
1229	if (pde && !IS_ERR(pde))
1230		device->hosts_dentry = pde;
1231}
1232
1233struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
1234					  struct dasd_device *device,
1235					  struct dasd_ccw_req *cqr)
1236{
1237	unsigned long flags;
1238	char *data, *chunk;
1239	int size = 0;
1240
1241	if (cplength > 0)
1242		size += cplength * sizeof(struct ccw1);
1243	if (datasize > 0)
1244		size += datasize;
1245	if (!cqr)
1246		size += (sizeof(*cqr) + 7L) & -8L;
1247
1248	spin_lock_irqsave(&device->mem_lock, flags);
1249	data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
1250	spin_unlock_irqrestore(&device->mem_lock, flags);
1251	if (!chunk)
1252		return ERR_PTR(-ENOMEM);
1253	if (!cqr) {
1254		cqr = (void *) data;
1255		data += (sizeof(*cqr) + 7L) & -8L;
1256	}
1257	memset(cqr, 0, sizeof(*cqr));
1258	cqr->mem_chunk = chunk;
1259	if (cplength > 0) {
1260		cqr->cpaddr = data;
1261		data += cplength * sizeof(struct ccw1);
1262		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
1263	}
1264	if (datasize > 0) {
1265		cqr->data = data;
1266 		memset(cqr->data, 0, datasize);
1267	}
1268	cqr->magic = magic;
1269	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1270	dasd_get_device(device);
1271	return cqr;
1272}
1273EXPORT_SYMBOL(dasd_smalloc_request);
1274
1275struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
1276					  int datasize,
1277					  struct dasd_device *device)
1278{
1279	struct dasd_ccw_req *cqr;
1280	unsigned long flags;
1281	int size, cqr_size;
1282	char *data;
1283
1284	cqr_size = (sizeof(*cqr) + 7L) & -8L;
1285	size = cqr_size;
1286	if (cplength > 0)
1287		size += cplength * sizeof(struct ccw1);
1288	if (datasize > 0)
1289		size += datasize;
1290
1291	spin_lock_irqsave(&device->mem_lock, flags);
1292	cqr = dasd_alloc_chunk(&device->ese_chunks, size);
1293	spin_unlock_irqrestore(&device->mem_lock, flags);
1294	if (!cqr)
1295		return ERR_PTR(-ENOMEM);
1296	memset(cqr, 0, sizeof(*cqr));
1297	data = (char *)cqr + cqr_size;
1298	cqr->cpaddr = NULL;
1299	if (cplength > 0) {
1300		cqr->cpaddr = data;
1301		data += cplength * sizeof(struct ccw1);
1302		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
1303	}
1304	cqr->data = NULL;
1305	if (datasize > 0) {
1306		cqr->data = data;
1307		memset(cqr->data, 0, datasize);
1308	}
1309
1310	cqr->magic = magic;
1311	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1312	dasd_get_device(device);
1313
1314	return cqr;
1315}
1316EXPORT_SYMBOL(dasd_fmalloc_request);
1317
1318void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
1319{
1320	unsigned long flags;
1321
1322	spin_lock_irqsave(&device->mem_lock, flags);
1323	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
1324	spin_unlock_irqrestore(&device->mem_lock, flags);
1325	dasd_put_device(device);
1326}
1327EXPORT_SYMBOL(dasd_sfree_request);
1328
1329void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
1330{
1331	unsigned long flags;
1332
1333	spin_lock_irqsave(&device->mem_lock, flags);
1334	dasd_free_chunk(&device->ese_chunks, cqr);
1335	spin_unlock_irqrestore(&device->mem_lock, flags);
1336	dasd_put_device(device);
1337}
1338EXPORT_SYMBOL(dasd_ffree_request);
1339
1340/*
1341 * Check discipline magic in cqr.
1342 */
1343static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
1344{
1345	struct dasd_device *device;
1346
1347	if (cqr == NULL)
1348		return -EINVAL;
1349	device = cqr->startdev;
1350	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
1351		DBF_DEV_EVENT(DBF_WARNING, device,
1352			    " dasd_ccw_req 0x%08x magic doesn't match"
1353			    " discipline 0x%08x",
1354			    cqr->magic,
1355			    *(unsigned int *) device->discipline->name);
1356		return -EINVAL;
1357	}
1358	return 0;
1359}
1360
1361/*
1362 * Terminate the current i/o and set the request to clear_pending.
1363 * Timer keeps device runnig.
1364 * ccw_device_clear can fail if the i/o subsystem
1365 * is in a bad mood.
1366 */
1367int dasd_term_IO(struct dasd_ccw_req *cqr)
1368{
1369	struct dasd_device *device;
1370	int retries, rc;
1371	char errorstring[ERRORLENGTH];
1372
1373	/* Check the cqr */
1374	rc = dasd_check_cqr(cqr);
1375	if (rc)
1376		return rc;
1377	retries = 0;
1378	device = (struct dasd_device *) cqr->startdev;
1379	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
1380		rc = ccw_device_clear(device->cdev, (long) cqr);
1381		switch (rc) {
1382		case 0:	/* termination successful */
1383			cqr->status = DASD_CQR_CLEAR_PENDING;
1384			cqr->stopclk = get_tod_clock();
1385			cqr->starttime = 0;
1386			DBF_DEV_EVENT(DBF_DEBUG, device,
1387				      "terminate cqr %p successful",
1388				      cqr);
1389			break;
1390		case -ENODEV:
1391			DBF_DEV_EVENT(DBF_ERR, device, "%s",
1392				      "device gone, retry");
1393			break;
1394		case -EINVAL:
1395			/*
1396			 * device not valid so no I/O could be running
1397			 * handle CQR as termination successful
1398			 */
1399			cqr->status = DASD_CQR_CLEARED;
1400			cqr->stopclk = get_tod_clock();
1401			cqr->starttime = 0;
1402			/* no retries for invalid devices */
1403			cqr->retries = -1;
1404			DBF_DEV_EVENT(DBF_ERR, device, "%s",
1405				      "EINVAL, handle as terminated");
1406			/* fake rc to success */
1407			rc = 0;
1408			break;
1409		default:
1410			/* internal error 10 - unknown rc*/
1411			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
1412			dev_err(&device->cdev->dev, "An error occurred in the "
1413				"DASD device driver, reason=%s\n", errorstring);
1414			BUG();
1415			break;
1416		}
1417		retries++;
1418	}
1419	dasd_schedule_device_bh(device);
1420	return rc;
1421}
1422EXPORT_SYMBOL(dasd_term_IO);
1423
1424/*
1425 * Start the i/o. This start_IO can fail if the channel is really busy.
1426 * In that case set up a timer to start the request later.
1427 */
1428int dasd_start_IO(struct dasd_ccw_req *cqr)
1429{
1430	struct dasd_device *device;
1431	int rc;
1432	char errorstring[ERRORLENGTH];
1433
1434	/* Check the cqr */
1435	rc = dasd_check_cqr(cqr);
1436	if (rc) {
1437		cqr->intrc = rc;
1438		return rc;
1439	}
1440	device = (struct dasd_device *) cqr->startdev;
1441	if (((cqr->block &&
1442	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
1443	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
1444	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
1445		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
1446			      "because of stolen lock", cqr);
1447		cqr->status = DASD_CQR_ERROR;
1448		cqr->intrc = -EPERM;
1449		return -EPERM;
1450	}
1451	if (cqr->retries < 0) {
1452		/* internal error 14 - start_IO run out of retries */
1453		sprintf(errorstring, "14 %p", cqr);
1454		dev_err(&device->cdev->dev, "An error occurred in the DASD "
1455			"device driver, reason=%s\n", errorstring);
1456		cqr->status = DASD_CQR_ERROR;
1457		return -EIO;
1458	}
1459	cqr->startclk = get_tod_clock();
1460	cqr->starttime = jiffies;
1461	cqr->retries--;
1462	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1463		cqr->lpm &= dasd_path_get_opm(device);
1464		if (!cqr->lpm)
1465			cqr->lpm = dasd_path_get_opm(device);
1466	}
1467	/*
1468	 * remember the amount of formatted tracks to prevent double format on
1469	 * ESE devices
1470	 */
1471	if (cqr->block)
1472		cqr->trkcount = atomic_read(&cqr->block->trkcount);
1473
1474	if (cqr->cpmode == 1) {
1475		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
1476					 (long) cqr, cqr->lpm);
1477	} else {
1478		rc = ccw_device_start(device->cdev, cqr->cpaddr,
1479				      (long) cqr, cqr->lpm, 0);
1480	}
1481	switch (rc) {
1482	case 0:
1483		cqr->status = DASD_CQR_IN_IO;
1484		break;
1485	case -EBUSY:
1486		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1487			      "start_IO: device busy, retry later");
1488		break;
1489	case -EACCES:
1490		/* -EACCES indicates that the request used only a subset of the
1491		 * available paths and all these paths are gone. If the lpm of
1492		 * this request was only a subset of the opm (e.g. the ppm) then
1493		 * we just do a retry with all available paths.
1494		 * If we already use the full opm, something is amiss, and we
1495		 * need a full path verification.
1496		 */
1497		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1498			DBF_DEV_EVENT(DBF_WARNING, device,
1499				      "start_IO: selected paths gone (%x)",
1500				      cqr->lpm);
1501		} else if (cqr->lpm != dasd_path_get_opm(device)) {
1502			cqr->lpm = dasd_path_get_opm(device);
1503			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
1504				      "start_IO: selected paths gone,"
1505				      " retry on all paths");
1506		} else {
1507			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1508				      "start_IO: all paths in opm gone,"
1509				      " do path verification");
1510			dasd_generic_last_path_gone(device);
1511			dasd_path_no_path(device);
1512			dasd_path_set_tbvpm(device,
1513					  ccw_device_get_path_mask(
1514						  device->cdev));
1515		}
1516		break;
1517	case -ENODEV:
1518		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1519			      "start_IO: -ENODEV device gone, retry");
1520		break;
1521	case -EIO:
1522		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1523			      "start_IO: -EIO device gone, retry");
1524		break;
1525	case -EINVAL:
1526		/* most likely caused in power management context */
1527		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1528			      "start_IO: -EINVAL device currently "
1529			      "not accessible");
1530		break;
1531	default:
1532		/* internal error 11 - unknown rc */
1533		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
1534		dev_err(&device->cdev->dev,
1535			"An error occurred in the DASD device driver, "
1536			"reason=%s\n", errorstring);
1537		BUG();
1538		break;
1539	}
1540	cqr->intrc = rc;
1541	return rc;
1542}
1543EXPORT_SYMBOL(dasd_start_IO);
1544
1545/*
1546 * Timeout function for dasd devices. This is used for different purposes
1547 *  1) missing interrupt handler for normal operation
1548 *  2) delayed start of request where start_IO failed with -EBUSY
1549 *  3) timeout for missing state change interrupts
1550 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
1551 * DASD_CQR_QUEUED for 2) and 3).
1552 */
1553static void dasd_device_timeout(struct timer_list *t)
1554{
1555	unsigned long flags;
1556	struct dasd_device *device;
1557
1558	device = from_timer(device, t, timer);
1559	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1560	/* re-activate request queue */
1561	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1562	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1563	dasd_schedule_device_bh(device);
1564}
1565
1566/*
1567 * Setup timeout for a device in jiffies.
1568 */
1569void dasd_device_set_timer(struct dasd_device *device, int expires)
1570{
1571	if (expires == 0)
1572		del_timer(&device->timer);
1573	else
1574		mod_timer(&device->timer, jiffies + expires);
1575}
1576EXPORT_SYMBOL(dasd_device_set_timer);
1577
1578/*
1579 * Clear timeout for a device.
1580 */
1581void dasd_device_clear_timer(struct dasd_device *device)
1582{
1583	del_timer(&device->timer);
1584}
1585EXPORT_SYMBOL(dasd_device_clear_timer);
1586
1587static void dasd_handle_killed_request(struct ccw_device *cdev,
1588				       unsigned long intparm)
1589{
1590	struct dasd_ccw_req *cqr;
1591	struct dasd_device *device;
1592
1593	if (!intparm)
1594		return;
1595	cqr = (struct dasd_ccw_req *) intparm;
1596	if (cqr->status != DASD_CQR_IN_IO) {
1597		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
1598				"invalid status in handle_killed_request: "
1599				"%02x", cqr->status);
1600		return;
1601	}
1602
1603	device = dasd_device_from_cdev_locked(cdev);
1604	if (IS_ERR(device)) {
1605		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1606				"unable to get device from cdev");
1607		return;
1608	}
1609
1610	if (!cqr->startdev ||
1611	    device != cqr->startdev ||
1612	    strncmp(cqr->startdev->discipline->ebcname,
1613		    (char *) &cqr->magic, 4)) {
1614		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1615				"invalid device in request");
1616		dasd_put_device(device);
1617		return;
1618	}
1619
1620	/* Schedule request to be retried. */
1621	cqr->status = DASD_CQR_QUEUED;
1622
1623	dasd_device_clear_timer(device);
1624	dasd_schedule_device_bh(device);
1625	dasd_put_device(device);
1626}
1627
1628void dasd_generic_handle_state_change(struct dasd_device *device)
1629{
1630	/* First of all start sense subsystem status request. */
1631	dasd_eer_snss(device);
1632
1633	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1634	dasd_schedule_device_bh(device);
1635	if (device->block) {
1636		dasd_schedule_block_bh(device->block);
1637		if (device->block->request_queue)
1638			blk_mq_run_hw_queues(device->block->request_queue,
1639					     true);
1640	}
1641}
1642EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
1643
1644static int dasd_check_hpf_error(struct irb *irb)
1645{
1646	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
1647	    (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
1648	     irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
1649}
1650
1651static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
1652{
1653	struct dasd_device *device = NULL;
1654	u8 *sense = NULL;
1655
1656	if (!block)
1657		return 0;
1658	device = block->base;
1659	if (!device || !device->discipline->is_ese)
1660		return 0;
1661	if (!device->discipline->is_ese(device))
1662		return 0;
1663
1664	sense = dasd_get_sense(irb);
1665	if (!sense)
1666		return 0;
1667
1668	return !!(sense[1] & SNS1_NO_REC_FOUND) ||
1669		!!(sense[1] & SNS1_FILE_PROTECTED) ||
1670		scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
1671}
1672
1673static int dasd_ese_oos_cond(u8 *sense)
1674{
1675	return sense[0] & SNS0_EQUIPMENT_CHECK &&
1676		sense[1] & SNS1_PERM_ERR &&
1677		sense[1] & SNS1_WRITE_INHIBITED &&
1678		sense[25] == 0x01;
1679}
1680
1681/*
1682 * Interrupt handler for "normal" ssch-io based dasd devices.
1683 */
1684void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1685		      struct irb *irb)
1686{
1687	struct dasd_ccw_req *cqr, *next, *fcqr;
1688	struct dasd_device *device;
1689	unsigned long now;
1690	int nrf_suppressed = 0;
1691	int fp_suppressed = 0;
1692	struct request *req;
1693	u8 *sense = NULL;
1694	int expires;
1695
1696	cqr = (struct dasd_ccw_req *) intparm;
1697	if (IS_ERR(irb)) {
1698		switch (PTR_ERR(irb)) {
1699		case -EIO:
1700			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
1701				device = cqr->startdev;
1702				cqr->status = DASD_CQR_CLEARED;
1703				dasd_device_clear_timer(device);
1704				wake_up(&dasd_flush_wq);
1705				dasd_schedule_device_bh(device);
1706				return;
1707			}
1708			break;
1709		case -ETIMEDOUT:
1710			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1711					"request timed out\n", __func__);
1712			break;
1713		default:
1714			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1715					"unknown error %ld\n", __func__,
1716					PTR_ERR(irb));
1717		}
1718		dasd_handle_killed_request(cdev, intparm);
1719		return;
1720	}
1721
1722	now = get_tod_clock();
1723	/* check for conditions that should be handled immediately */
1724	if (!cqr ||
1725	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1726	      scsw_cstat(&irb->scsw) == 0)) {
1727		if (cqr)
1728			memcpy(&cqr->irb, irb, sizeof(*irb));
1729		device = dasd_device_from_cdev_locked(cdev);
1730		if (IS_ERR(device))
1731			return;
1732		/* ignore unsolicited interrupts for DIAG discipline */
1733		if (device->discipline == dasd_diag_discipline_pointer) {
1734			dasd_put_device(device);
1735			return;
1736		}
1737
1738		/*
1739		 * In some cases 'File Protected' or 'No Record Found' errors
1740		 * might be expected and debug log messages for the
1741		 * corresponding interrupts shouldn't be written then.
1742		 * Check if either of the according suppress bits is set.
1743		 */
1744		sense = dasd_get_sense(irb);
1745		if (sense) {
1746			fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
1747				test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
1748			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
1749				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
1750
1751			/*
1752			 * Extent pool probably out-of-space.
1753			 * Stop device and check exhaust level.
1754			 */
1755			if (dasd_ese_oos_cond(sense)) {
1756				dasd_generic_space_exhaust(device, cqr);
1757				device->discipline->ext_pool_exhaust(device, cqr);
1758				dasd_put_device(device);
1759				return;
1760			}
1761		}
1762		if (!(fp_suppressed || nrf_suppressed))
1763			device->discipline->dump_sense_dbf(device, irb, "int");
1764
1765		if (device->features & DASD_FEATURE_ERPLOG)
1766			device->discipline->dump_sense(device, cqr, irb);
1767		device->discipline->check_for_device_change(device, cqr, irb);
1768		dasd_put_device(device);
1769	}
1770
1771	/* check for for attention message */
1772	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
1773		device = dasd_device_from_cdev_locked(cdev);
1774		if (!IS_ERR(device)) {
1775			device->discipline->check_attention(device,
1776							    irb->esw.esw1.lpum);
1777			dasd_put_device(device);
1778		}
1779	}
1780
1781	if (!cqr)
1782		return;
1783
1784	device = (struct dasd_device *) cqr->startdev;
1785	if (!device ||
1786	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1787		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1788				"invalid device in request");
1789		return;
1790	}
1791
1792	if (dasd_ese_needs_format(cqr->block, irb)) {
1793		req = dasd_get_callback_data(cqr);
1794		if (!req) {
1795			cqr->status = DASD_CQR_ERROR;
1796			return;
1797		}
1798		if (rq_data_dir(req) == READ) {
1799			device->discipline->ese_read(cqr, irb);
1800			cqr->status = DASD_CQR_SUCCESS;
1801			cqr->stopclk = now;
1802			dasd_device_clear_timer(device);
1803			dasd_schedule_device_bh(device);
1804			return;
1805		}
1806		fcqr = device->discipline->ese_format(device, cqr, irb);
1807		if (IS_ERR(fcqr)) {
1808			if (PTR_ERR(fcqr) == -EINVAL) {
1809				cqr->status = DASD_CQR_ERROR;
1810				return;
1811			}
1812			/*
1813			 * If we can't format now, let the request go
1814			 * one extra round. Maybe we can format later.
1815			 */
1816			cqr->status = DASD_CQR_QUEUED;
1817			dasd_schedule_device_bh(device);
1818			return;
1819		} else {
1820			fcqr->status = DASD_CQR_QUEUED;
1821			cqr->status = DASD_CQR_QUEUED;
1822			list_add(&fcqr->devlist, &device->ccw_queue);
1823			dasd_schedule_device_bh(device);
1824			return;
1825		}
1826	}
1827
1828	/* Check for clear pending */
1829	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1830	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
1831		cqr->status = DASD_CQR_CLEARED;
1832		dasd_device_clear_timer(device);
1833		wake_up(&dasd_flush_wq);
1834		dasd_schedule_device_bh(device);
1835		return;
1836	}
1837
1838	/* check status - the request might have been killed by dyn detach */
1839	if (cqr->status != DASD_CQR_IN_IO) {
1840		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
1841			      "status %02x", dev_name(&cdev->dev), cqr->status);
1842		return;
1843	}
1844
1845	next = NULL;
1846	expires = 0;
1847	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1848	    scsw_cstat(&irb->scsw) == 0) {
1849		/* request was completed successfully */
1850		cqr->status = DASD_CQR_SUCCESS;
1851		cqr->stopclk = now;
1852		/* Start first request on queue if possible -> fast_io. */
1853		if (cqr->devlist.next != &device->ccw_queue) {
1854			next = list_entry(cqr->devlist.next,
1855					  struct dasd_ccw_req, devlist);
1856		}
1857	} else {  /* error */
1858		/* check for HPF error
1859		 * call discipline function to requeue all requests
1860		 * and disable HPF accordingly
1861		 */
1862		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
1863		    device->discipline->handle_hpf_error)
1864			device->discipline->handle_hpf_error(device, irb);
1865		/*
1866		 * If we don't want complex ERP for this request, then just
1867		 * reset this and retry it in the fastpath
1868		 */
1869		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
1870		    cqr->retries > 0) {
1871			if (cqr->lpm == dasd_path_get_opm(device))
1872				DBF_DEV_EVENT(DBF_DEBUG, device,
1873					      "default ERP in fastpath "
1874					      "(%i retries left)",
1875					      cqr->retries);
1876			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
1877				cqr->lpm = dasd_path_get_opm(device);
1878			cqr->status = DASD_CQR_QUEUED;
1879			next = cqr;
1880		} else
1881			cqr->status = DASD_CQR_ERROR;
1882	}
1883	if (next && (next->status == DASD_CQR_QUEUED) &&
1884	    (!device->stopped)) {
1885		if (device->discipline->start_IO(next) == 0)
1886			expires = next->expires;
1887	}
1888	if (expires != 0)
1889		dasd_device_set_timer(device, expires);
1890	else
1891		dasd_device_clear_timer(device);
1892	dasd_schedule_device_bh(device);
1893}
1894EXPORT_SYMBOL(dasd_int_handler);
1895
1896enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
1897{
1898	struct dasd_device *device;
1899
1900	device = dasd_device_from_cdev_locked(cdev);
1901
1902	if (IS_ERR(device))
1903		goto out;
1904	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1905	   device->state != device->target ||
1906	   !device->discipline->check_for_device_change){
1907		dasd_put_device(device);
1908		goto out;
1909	}
1910	if (device->discipline->dump_sense_dbf)
1911		device->discipline->dump_sense_dbf(device, irb, "uc");
1912	device->discipline->check_for_device_change(device, NULL, irb);
1913	dasd_put_device(device);
1914out:
1915	return UC_TODO_RETRY;
1916}
1917EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
1918
1919/*
1920 * If we have an error on a dasd_block layer request then we cancel
1921 * and return all further requests from the same dasd_block as well.
1922 */
1923static void __dasd_device_recovery(struct dasd_device *device,
1924				   struct dasd_ccw_req *ref_cqr)
1925{
1926	struct list_head *l, *n;
1927	struct dasd_ccw_req *cqr;
1928
1929	/*
1930	 * only requeue request that came from the dasd_block layer
1931	 */
1932	if (!ref_cqr->block)
1933		return;
1934
1935	list_for_each_safe(l, n, &device->ccw_queue) {
1936		cqr = list_entry(l, struct dasd_ccw_req, devlist);
1937		if (cqr->status == DASD_CQR_QUEUED &&
1938		    ref_cqr->block == cqr->block) {
1939			cqr->status = DASD_CQR_CLEARED;
1940		}
1941	}
1942};
1943
1944/*
1945 * Remove those ccw requests from the queue that need to be returned
1946 * to the upper layer.
1947 */
1948static void __dasd_device_process_ccw_queue(struct dasd_device *device,
1949					    struct list_head *final_queue)
1950{
1951	struct list_head *l, *n;
1952	struct dasd_ccw_req *cqr;
1953
1954	/* Process request with final status. */
1955	list_for_each_safe(l, n, &device->ccw_queue) {
1956		cqr = list_entry(l, struct dasd_ccw_req, devlist);
1957
1958		/* Skip any non-final request. */
1959		if (cqr->status == DASD_CQR_QUEUED ||
1960		    cqr->status == DASD_CQR_IN_IO ||
1961		    cqr->status == DASD_CQR_CLEAR_PENDING)
1962			continue;
1963		if (cqr->status == DASD_CQR_ERROR) {
1964			__dasd_device_recovery(device, cqr);
1965		}
1966		/* Rechain finished requests to final queue */
1967		list_move_tail(&cqr->devlist, final_queue);
1968	}
1969}
1970
1971static void __dasd_process_cqr(struct dasd_device *device,
1972			       struct dasd_ccw_req *cqr)
1973{
1974	char errorstring[ERRORLENGTH];
1975
1976	switch (cqr->status) {
1977	case DASD_CQR_SUCCESS:
1978		cqr->status = DASD_CQR_DONE;
1979		break;
1980	case DASD_CQR_ERROR:
1981		cqr->status = DASD_CQR_NEED_ERP;
1982		break;
1983	case DASD_CQR_CLEARED:
1984		cqr->status = DASD_CQR_TERMINATED;
1985		break;
1986	default:
1987		/* internal error 12 - wrong cqr status*/
1988		snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
1989		dev_err(&device->cdev->dev,
1990			"An error occurred in the DASD device driver, "
1991			"reason=%s\n", errorstring);
1992		BUG();
1993	}
1994	if (cqr->callback)
1995		cqr->callback(cqr, cqr->callback_data);
1996}
1997
1998/*
1999 * the cqrs from the final queue are returned to the upper layer
2000 * by setting a dasd_block state and calling the callback function
2001 */
2002static void __dasd_device_process_final_queue(struct dasd_device *device,
2003					      struct list_head *final_queue)
2004{
2005	struct list_head *l, *n;
2006	struct dasd_ccw_req *cqr;
2007	struct dasd_block *block;
2008
2009	list_for_each_safe(l, n, final_queue) {
2010		cqr = list_entry(l, struct dasd_ccw_req, devlist);
2011		list_del_init(&cqr->devlist);
2012		block = cqr->block;
2013		if (!block) {
2014			__dasd_process_cqr(device, cqr);
2015		} else {
2016			spin_lock_bh(&block->queue_lock);
2017			__dasd_process_cqr(device, cqr);
2018			spin_unlock_bh(&block->queue_lock);
2019		}
2020	}
2021}
2022
2023/*
2024 * Take a look at the first request on the ccw queue and check
2025 * if it reached its expire time. If so, terminate the IO.
2026 */
2027static void __dasd_device_check_expire(struct dasd_device *device)
2028{
2029	struct dasd_ccw_req *cqr;
2030
2031	if (list_empty(&device->ccw_queue))
2032		return;
2033	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2034	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
2035	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
2036		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
2037			/*
2038			 * IO in safe offline processing should not
2039			 * run out of retries
2040			 */
2041			cqr->retries++;
2042		}
2043		if (device->discipline->term_IO(cqr) != 0) {
2044			/* Hmpf, try again in 5 sec */
2045			dev_err(&device->cdev->dev,
2046				"cqr %p timed out (%lus) but cannot be "
2047				"ended, retrying in 5 s\n",
2048				cqr, (cqr->expires/HZ));
2049			cqr->expires += 5*HZ;
2050			dasd_device_set_timer(device, 5*HZ);
2051		} else {
2052			dev_err(&device->cdev->dev,
2053				"cqr %p timed out (%lus), %i retries "
2054				"remaining\n", cqr, (cqr->expires/HZ),
2055				cqr->retries);
2056		}
2057	}
2058}
2059
2060/*
2061 * return 1 when device is not eligible for IO
2062 */
2063static int __dasd_device_is_unusable(struct dasd_device *device,
2064				     struct dasd_ccw_req *cqr)
2065{
2066	int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM | DASD_STOPPED_NOSPC);
2067
2068	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
2069	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
2070		/*
2071		 * dasd is being set offline
2072		 * but it is no safe offline where we have to allow I/O
2073		 */
2074		return 1;
2075	}
2076	if (device->stopped) {
2077		if (device->stopped & mask) {
2078			/* stopped and CQR will not change that. */
2079			return 1;
2080		}
2081		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
2082			/* CQR is not able to change device to
2083			 * operational. */
2084			return 1;
2085		}
2086		/* CQR required to get device operational. */
2087	}
2088	return 0;
2089}
2090
2091/*
2092 * Take a look at the first request on the ccw queue and check
2093 * if it needs to be started.
2094 */
2095static void __dasd_device_start_head(struct dasd_device *device)
2096{
2097	struct dasd_ccw_req *cqr;
2098	int rc;
2099
2100	if (list_empty(&device->ccw_queue))
2101		return;
2102	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2103	if (cqr->status != DASD_CQR_QUEUED)
2104		return;
2105	/* if device is not usable return request to upper layer */
2106	if (__dasd_device_is_unusable(device, cqr)) {
2107		cqr->intrc = -EAGAIN;
2108		cqr->status = DASD_CQR_CLEARED;
2109		dasd_schedule_device_bh(device);
2110		return;
2111	}
2112
2113	rc = device->discipline->start_IO(cqr);
2114	if (rc == 0)
2115		dasd_device_set_timer(device, cqr->expires);
2116	else if (rc == -EACCES) {
2117		dasd_schedule_device_bh(device);
2118	} else
2119		/* Hmpf, try again in 1/2 sec */
2120		dasd_device_set_timer(device, 50);
2121}
2122
2123static void __dasd_device_check_path_events(struct dasd_device *device)
2124{
2125	int rc;
2126
2127	if (!dasd_path_get_tbvpm(device))
2128		return;
2129
2130	if (device->stopped &
2131	    ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
2132		return;
2133	rc = device->discipline->pe_handler(device,
2134					    dasd_path_get_tbvpm(device));
2135	if (rc)
2136		dasd_device_set_timer(device, 50);
2137	else
2138		dasd_path_clear_all_verify(device);
2139};
2140
2141/*
2142 * Go through all request on the dasd_device request queue,
2143 * terminate them on the cdev if necessary, and return them to the
2144 * submitting layer via callback.
2145 * Note:
2146 * Make sure that all 'submitting layers' still exist when
2147 * this function is called!. In other words, when 'device' is a base
2148 * device then all block layer requests must have been removed before
2149 * via dasd_flush_block_queue.
2150 */
2151int dasd_flush_device_queue(struct dasd_device *device)
2152{
2153	struct dasd_ccw_req *cqr, *n;
2154	int rc;
2155	struct list_head flush_queue;
2156
2157	INIT_LIST_HEAD(&flush_queue);
2158	spin_lock_irq(get_ccwdev_lock(device->cdev));
2159	rc = 0;
2160	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
2161		/* Check status and move request to flush_queue */
2162		switch (cqr->status) {
2163		case DASD_CQR_IN_IO:
2164			rc = device->discipline->term_IO(cqr);
2165			if (rc) {
2166				/* unable to terminate requeust */
2167				dev_err(&device->cdev->dev,
2168					"Flushing the DASD request queue "
2169					"failed for request %p\n", cqr);
2170				/* stop flush processing */
2171				goto finished;
2172			}
2173			break;
2174		case DASD_CQR_QUEUED:
2175			cqr->stopclk = get_tod_clock();
2176			cqr->status = DASD_CQR_CLEARED;
2177			break;
2178		default: /* no need to modify the others */
2179			break;
2180		}
2181		list_move_tail(&cqr->devlist, &flush_queue);
2182	}
2183finished:
2184	spin_unlock_irq(get_ccwdev_lock(device->cdev));
2185	/*
2186	 * After this point all requests must be in state CLEAR_PENDING,
2187	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
2188	 * one of the others.
2189	 */
2190	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
2191		wait_event(dasd_flush_wq,
2192			   (cqr->status != DASD_CQR_CLEAR_PENDING));
2193	/*
2194	 * Now set each request back to TERMINATED, DONE or NEED_ERP
2195	 * and call the callback function of flushed requests
2196	 */
2197	__dasd_device_process_final_queue(device, &flush_queue);
2198	return rc;
2199}
2200EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
2201
2202/*
2203 * Acquire the device lock and process queues for the device.
2204 */
2205static void dasd_device_tasklet(unsigned long data)
2206{
2207	struct dasd_device *device = (struct dasd_device *) data;
2208	struct list_head final_queue;
2209
2210	atomic_set (&device->tasklet_scheduled, 0);
2211	INIT_LIST_HEAD(&final_queue);
2212	spin_lock_irq(get_ccwdev_lock(device->cdev));
2213	/* Check expire time of first request on the ccw queue. */
2214	__dasd_device_check_expire(device);
2215	/* find final requests on ccw queue */
2216	__dasd_device_process_ccw_queue(device, &final_queue);
2217	__dasd_device_check_path_events(device);
2218	spin_unlock_irq(get_ccwdev_lock(device->cdev));
2219	/* Now call the callback function of requests with final status */
2220	__dasd_device_process_final_queue(device, &final_queue);
2221	spin_lock_irq(get_ccwdev_lock(device->cdev));
2222	/* Now check if the head of the ccw queue needs to be started. */
2223	__dasd_device_start_head(device);
2224	spin_unlock_irq(get_ccwdev_lock(device->cdev));
2225	if (waitqueue_active(&shutdown_waitq))
2226		wake_up(&shutdown_waitq);
2227	dasd_put_device(device);
2228}
2229
2230/*
2231 * Schedules a call to dasd_tasklet over the device tasklet.
2232 */
2233void dasd_schedule_device_bh(struct dasd_device *device)
2234{
2235	/* Protect against rescheduling. */
2236	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
2237		return;
2238	dasd_get_device(device);
2239	tasklet_hi_schedule(&device->tasklet);
2240}
2241EXPORT_SYMBOL(dasd_schedule_device_bh);
2242
2243void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
2244{
2245	device->stopped |= bits;
2246}
2247EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
2248
2249void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
2250{
2251	device->stopped &= ~bits;
2252	if (!device->stopped)
2253		wake_up(&generic_waitq);
2254}
2255EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
2256
2257/*
2258 * Queue a request to the head of the device ccw_queue.
2259 * Start the I/O if possible.
2260 */
2261void dasd_add_request_head(struct dasd_ccw_req *cqr)
2262{
2263	struct dasd_device *device;
2264	unsigned long flags;
2265
2266	device = cqr->startdev;
2267	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2268	cqr->status = DASD_CQR_QUEUED;
2269	list_add(&cqr->devlist, &device->ccw_queue);
2270	/* let the bh start the request to keep them in order */
2271	dasd_schedule_device_bh(device);
2272	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2273}
2274EXPORT_SYMBOL(dasd_add_request_head);
2275
2276/*
2277 * Queue a request to the tail of the device ccw_queue.
2278 * Start the I/O if possible.
2279 */
2280void dasd_add_request_tail(struct dasd_ccw_req *cqr)
2281{
2282	struct dasd_device *device;
2283	unsigned long flags;
2284
2285	device = cqr->startdev;
2286	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2287	cqr->status = DASD_CQR_QUEUED;
2288	list_add_tail(&cqr->devlist, &device->ccw_queue);
2289	/* let the bh start the request to keep them in order */
2290	dasd_schedule_device_bh(device);
2291	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2292}
2293EXPORT_SYMBOL(dasd_add_request_tail);
2294
2295/*
2296 * Wakeup helper for the 'sleep_on' functions.
2297 */
2298void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
2299{
2300	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
2301	cqr->callback_data = DASD_SLEEPON_END_TAG;
2302	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
2303	wake_up(&generic_waitq);
2304}
2305EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
2306
2307static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
2308{
2309	struct dasd_device *device;
2310	int rc;
2311
2312	device = cqr->startdev;
2313	spin_lock_irq(get_ccwdev_lock(device->cdev));
2314	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
2315	spin_unlock_irq(get_ccwdev_lock(device->cdev));
2316	return rc;
2317}
2318
2319/*
2320 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
2321 */
2322static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
2323{
2324	struct dasd_device *device;
2325	dasd_erp_fn_t erp_fn;
2326
2327	if (cqr->status == DASD_CQR_FILLED)
2328		return 0;
2329	device = cqr->startdev;
2330	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
2331		if (cqr->status == DASD_CQR_TERMINATED) {
2332			device->discipline->handle_terminated_request(cqr);
2333			return 1;
2334		}
2335		if (cqr->status == DASD_CQR_NEED_ERP) {
2336			erp_fn = device->discipline->erp_action(cqr);
2337			erp_fn(cqr);
2338			return 1;
2339		}
2340		if (cqr->status == DASD_CQR_FAILED)
2341			dasd_log_sense(cqr, &cqr->irb);
2342		if (cqr->refers) {
2343			__dasd_process_erp(device, cqr);
2344			return 1;
2345		}
2346	}
2347	return 0;
2348}
2349
2350static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
2351{
2352	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
2353		if (cqr->refers) /* erp is not done yet */
2354			return 1;
2355		return ((cqr->status != DASD_CQR_DONE) &&
2356			(cqr->status != DASD_CQR_FAILED));
2357	} else
2358		return (cqr->status == DASD_CQR_FILLED);
2359}
2360
2361static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
2362{
2363	struct dasd_device *device;
2364	int rc;
2365	struct list_head ccw_queue;
2366	struct dasd_ccw_req *cqr;
2367
2368	INIT_LIST_HEAD(&ccw_queue);
2369	maincqr->status = DASD_CQR_FILLED;
2370	device = maincqr->startdev;
2371	list_add(&maincqr->blocklist, &ccw_queue);
2372	for (cqr = maincqr;  __dasd_sleep_on_loop_condition(cqr);
2373	     cqr = list_first_entry(&ccw_queue,
2374				    struct dasd_ccw_req, blocklist)) {
2375
2376		if (__dasd_sleep_on_erp(cqr))
2377			continue;
2378		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
2379			continue;
2380		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2381		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2382			cqr->status = DASD_CQR_FAILED;
2383			cqr->intrc = -EPERM;
2384			continue;
2385		}
2386		/* Non-temporary stop condition will trigger fail fast */
2387		if (device->stopped & ~DASD_STOPPED_PENDING &&
2388		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2389		    (!dasd_eer_enabled(device))) {
2390			cqr->status = DASD_CQR_FAILED;
2391			cqr->intrc = -ENOLINK;
2392			continue;
2393		}
2394		/*
2395		 * Don't try to start requests if device is in
2396		 * offline processing, it might wait forever
2397		 */
2398		if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2399			cqr->status = DASD_CQR_FAILED;
2400			cqr->intrc = -ENODEV;
2401			continue;
2402		}
2403		/*
2404		 * Don't try to start requests if device is stopped
2405		 * except path verification requests
2406		 */
2407		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
2408			if (interruptible) {
2409				rc = wait_event_interruptible(
2410					generic_waitq, !(device->stopped));
2411				if (rc == -ERESTARTSYS) {
2412					cqr->status = DASD_CQR_FAILED;
2413					maincqr->intrc = rc;
2414					continue;
2415				}
2416			} else
2417				wait_event(generic_waitq, !(device->stopped));
2418		}
2419		if (!cqr->callback)
2420			cqr->callback = dasd_wakeup_cb;
2421
2422		cqr->callback_data = DASD_SLEEPON_START_TAG;
2423		dasd_add_request_tail(cqr);
2424		if (interruptible) {
2425			rc = wait_event_interruptible(
2426				generic_waitq, _wait_for_wakeup(cqr));
2427			if (rc == -ERESTARTSYS) {
2428				dasd_cancel_req(cqr);
2429				/* wait (non-interruptible) for final status */
2430				wait_event(generic_waitq,
2431					   _wait_for_wakeup(cqr));
2432				cqr->status = DASD_CQR_FAILED;
2433				maincqr->intrc = rc;
2434				continue;
2435			}
2436		} else
2437			wait_event(generic_waitq, _wait_for_wakeup(cqr));
2438	}
2439
2440	maincqr->endclk = get_tod_clock();
2441	if ((maincqr->status != DASD_CQR_DONE) &&
2442	    (maincqr->intrc != -ERESTARTSYS))
2443		dasd_log_sense(maincqr, &maincqr->irb);
2444	if (maincqr->status == DASD_CQR_DONE)
2445		rc = 0;
2446	else if (maincqr->intrc)
2447		rc = maincqr->intrc;
2448	else
2449		rc = -EIO;
2450	return rc;
2451}
2452
2453static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
2454{
2455	struct dasd_ccw_req *cqr;
2456
2457	list_for_each_entry(cqr, ccw_queue, blocklist) {
2458		if (cqr->callback_data != DASD_SLEEPON_END_TAG)
2459			return 0;
2460	}
2461
2462	return 1;
2463}
2464
2465static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
2466{
2467	struct dasd_device *device;
2468	struct dasd_ccw_req *cqr, *n;
2469	u8 *sense = NULL;
2470	int rc;
2471
2472retry:
2473	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
2474		device = cqr->startdev;
2475		if (cqr->status != DASD_CQR_FILLED) /*could be failed*/
2476			continue;
2477
2478		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2479		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2480			cqr->status = DASD_CQR_FAILED;
2481			cqr->intrc = -EPERM;
2482			continue;
2483		}
2484		/*Non-temporary stop condition will trigger fail fast*/
2485		if (device->stopped & ~DASD_STOPPED_PENDING &&
2486		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2487		    !dasd_eer_enabled(device)) {
2488			cqr->status = DASD_CQR_FAILED;
2489			cqr->intrc = -EAGAIN;
2490			continue;
2491		}
2492
2493		/*Don't try to start requests if device is stopped*/
2494		if (interruptible) {
2495			rc = wait_event_interruptible(
2496				generic_waitq, !device->stopped);
2497			if (rc == -ERESTARTSYS) {
2498				cqr->status = DASD_CQR_FAILED;
2499				cqr->intrc = rc;
2500				continue;
2501			}
2502		} else
2503			wait_event(generic_waitq, !(device->stopped));
2504
2505		if (!cqr->callback)
2506			cqr->callback = dasd_wakeup_cb;
2507		cqr->callback_data = DASD_SLEEPON_START_TAG;
2508		dasd_add_request_tail(cqr);
2509	}
2510
2511	wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));
2512
2513	rc = 0;
2514	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
2515		/*
2516		 * In some cases the 'File Protected' or 'Incorrect Length'
2517		 * error might be expected and error recovery would be
2518		 * unnecessary in these cases.	Check if the according suppress
2519		 * bit is set.
2520		 */
2521		sense = dasd_get_sense(&cqr->irb);
2522		if (sense && sense[1] & SNS1_FILE_PROTECTED &&
2523		    test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
2524			continue;
2525		if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
2526		    test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
2527			continue;
2528
2529		/*
2530		 * for alias devices simplify error recovery and
2531		 * return to upper layer
2532		 * do not skip ERP requests
2533		 */
2534		if (cqr->startdev != cqr->basedev && !cqr->refers &&
2535		    (cqr->status == DASD_CQR_TERMINATED ||
2536		     cqr->status == DASD_CQR_NEED_ERP))
2537			return -EAGAIN;
2538
2539		/* normal recovery for basedev IO */
2540		if (__dasd_sleep_on_erp(cqr))
2541			/* handle erp first */
2542			goto retry;
2543	}
2544
2545	return 0;
2546}
2547
2548/*
2549 * Queue a request to the tail of the device ccw_queue and wait for
2550 * it's completion.
2551 */
2552int dasd_sleep_on(struct dasd_ccw_req *cqr)
2553{
2554	return _dasd_sleep_on(cqr, 0);
2555}
2556EXPORT_SYMBOL(dasd_sleep_on);
2557
2558/*
2559 * Start requests from a ccw_queue and wait for their completion.
2560 */
2561int dasd_sleep_on_queue(struct list_head *ccw_queue)
2562{
2563	return _dasd_sleep_on_queue(ccw_queue, 0);
2564}
2565EXPORT_SYMBOL(dasd_sleep_on_queue);
2566
2567/*
2568 * Start requests from a ccw_queue and wait interruptible for their completion.
2569 */
2570int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue)
2571{
2572	return _dasd_sleep_on_queue(ccw_queue, 1);
2573}
2574EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible);
2575
2576/*
2577 * Queue a request to the tail of the device ccw_queue and wait
2578 * interruptible for it's completion.
2579 */
2580int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
2581{
2582	return _dasd_sleep_on(cqr, 1);
2583}
2584EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2585
2586/*
2587 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
2588 * for eckd devices) the currently running request has to be terminated
2589 * and be put back to status queued, before the special request is added
2590 * to the head of the queue. Then the special request is waited on normally.
2591 */
2592static inline int _dasd_term_running_cqr(struct dasd_device *device)
2593{
2594	struct dasd_ccw_req *cqr;
2595	int rc;
2596
2597	if (list_empty(&device->ccw_queue))
2598		return 0;
2599	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2600	rc = device->discipline->term_IO(cqr);
2601	if (!rc)
2602		/*
2603		 * CQR terminated because a more important request is pending.
2604		 * Undo decreasing of retry counter because this is
2605		 * not an error case.
2606		 */
2607		cqr->retries++;
2608	return rc;
2609}
2610
2611int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
2612{
2613	struct dasd_device *device;
2614	int rc;
2615
2616	device = cqr->startdev;
2617	if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2618	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2619		cqr->status = DASD_CQR_FAILED;
2620		cqr->intrc = -EPERM;
2621		return -EIO;
2622	}
2623	spin_lock_irq(get_ccwdev_lock(device->cdev));
2624	rc = _dasd_term_running_cqr(device);
2625	if (rc) {
2626		spin_unlock_irq(get_ccwdev_lock(device->cdev));
2627		return rc;
2628	}
2629	cqr->callback = dasd_wakeup_cb;
2630	cqr->callback_data = DASD_SLEEPON_START_TAG;
2631	cqr->status = DASD_CQR_QUEUED;
2632	/*
2633	 * add new request as second
2634	 * first the terminated cqr needs to be finished
2635	 */
2636	list_add(&cqr->devlist, device->ccw_queue.next);
2637
2638	/* let the bh start the request to keep them in order */
2639	dasd_schedule_device_bh(device);
2640
2641	spin_unlock_irq(get_ccwdev_lock(device->cdev));
2642
2643	wait_event(generic_waitq, _wait_for_wakeup(cqr));
2644
2645	if (cqr->status == DASD_CQR_DONE)
2646		rc = 0;
2647	else if (cqr->intrc)
2648		rc = cqr->intrc;
2649	else
2650		rc = -EIO;
2651
2652	/* kick tasklets */
2653	dasd_schedule_device_bh(device);
2654	if (device->block)
2655		dasd_schedule_block_bh(device->block);
2656
2657	return rc;
2658}
2659EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2660
2661/*
2662 * Cancels a request that was started with dasd_sleep_on_req.
2663 * This is useful to timeout requests. The request will be
2664 * terminated if it is currently in i/o.
2665 * Returns 0 if request termination was successful
2666 *	   negative error code if termination failed
2667 * Cancellation of a request is an asynchronous operation! The calling
2668 * function has to wait until the request is properly returned via callback.
2669 */
2670static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
2671{
2672	struct dasd_device *device = cqr->startdev;
2673	int rc = 0;
2674
2675	switch (cqr->status) {
2676	case DASD_CQR_QUEUED:
2677		/* request was not started - just set to cleared */
2678		cqr->status = DASD_CQR_CLEARED;
2679		break;
2680	case DASD_CQR_IN_IO:
2681		/* request in IO - terminate IO and release again */
2682		rc = device->discipline->term_IO(cqr);
2683		if (rc) {
2684			dev_err(&device->cdev->dev,
2685				"Cancelling request %p failed with rc=%d\n",
2686				cqr, rc);
2687		} else {
2688			cqr->stopclk = get_tod_clock();
2689		}
2690		break;
2691	default: /* already finished or clear pending - do nothing */
2692		break;
2693	}
2694	dasd_schedule_device_bh(device);
2695	return rc;
2696}
2697
2698int dasd_cancel_req(struct dasd_ccw_req *cqr)
2699{
2700	struct dasd_device *device = cqr->startdev;
2701	unsigned long flags;
2702	int rc;
2703
2704	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2705	rc = __dasd_cancel_req(cqr);
2706	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2707	return rc;
2708}
2709
2710/*
2711 * SECTION: Operations of the dasd_block layer.
2712 */
2713
2714/*
2715 * Timeout function for dasd_block. This is used when the block layer
2716 * is waiting for something that may not come reliably, (e.g. a state
2717 * change interrupt)
2718 */
2719static void dasd_block_timeout(struct timer_list *t)
2720{
2721	unsigned long flags;
2722	struct dasd_block *block;
2723
2724	block = from_timer(block, t, timer);
2725	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
2726	/* re-activate request queue */
2727	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
2728	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
2729	dasd_schedule_block_bh(block);
2730	blk_mq_run_hw_queues(block->request_queue, true);
2731}
2732
2733/*
2734 * Setup timeout for a dasd_block in jiffies.
2735 */
2736void dasd_block_set_timer(struct dasd_block *block, int expires)
2737{
2738	if (expires == 0)
2739		del_timer(&block->timer);
2740	else
2741		mod_timer(&block->timer, jiffies + expires);
2742}
2743EXPORT_SYMBOL(dasd_block_set_timer);
2744
2745/*
2746 * Clear timeout for a dasd_block.
2747 */
2748void dasd_block_clear_timer(struct dasd_block *block)
2749{
2750	del_timer(&block->timer);
2751}
2752EXPORT_SYMBOL(dasd_block_clear_timer);
2753
2754/*
2755 * Process finished error recovery ccw.
2756 */
2757static void __dasd_process_erp(struct dasd_device *device,
2758			       struct dasd_ccw_req *cqr)
2759{
2760	dasd_erp_fn_t erp_fn;
2761
2762	if (cqr->status == DASD_CQR_DONE)
2763		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
2764	else
2765		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
2766	erp_fn = device->discipline->erp_postaction(cqr);
2767	erp_fn(cqr);
2768}
2769
2770static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
2771{
2772	struct request *req;
2773	blk_status_t error = BLK_STS_OK;
2774	unsigned int proc_bytes;
2775	int status;
2776
2777	req = (struct request *) cqr->callback_data;
2778	dasd_profile_end(cqr->block, cqr, req);
2779
2780	proc_bytes = cqr->proc_bytes;
2781	status = cqr->block->base->discipline->free_cp(cqr, req);
2782	if (status < 0)
2783		error = errno_to_blk_status(status);
2784	else if (status == 0) {
2785		switch (cqr->intrc) {
2786		case -EPERM:
2787			error = BLK_STS_NEXUS;
2788			break;
2789		case -ENOLINK:
2790			error = BLK_STS_TRANSPORT;
2791			break;
2792		case -ETIMEDOUT:
2793			error = BLK_STS_TIMEOUT;
2794			break;
2795		default:
2796			error = BLK_STS_IOERR;
2797			break;
2798		}
2799	}
2800
2801	/*
2802	 * We need to take care for ETIMEDOUT errors here since the
2803	 * complete callback does not get called in this case.
2804	 * Take care of all errors here and avoid additional code to
2805	 * transfer the error value to the complete callback.
2806	 */
2807	if (error) {
2808		blk_mq_end_request(req, error);
2809		blk_mq_run_hw_queues(req->q, true);
2810	} else {
2811		/*
2812		 * Partial completed requests can happen with ESE devices.
2813		 * During read we might have gotten a NRF error and have to
2814		 * complete a request partially.
2815		 */
2816		if (proc_bytes) {
2817			blk_update_request(req, BLK_STS_OK, proc_bytes);
2818			blk_mq_requeue_request(req, true);
2819		} else if (likely(!blk_should_fake_timeout(req->q))) {
2820			blk_mq_complete_request(req);
2821		}
2822	}
2823}
2824
2825/*
2826 * Process ccw request queue.
2827 */
2828static void __dasd_process_block_ccw_queue(struct dasd_block *block,
2829					   struct list_head *final_queue)
2830{
2831	struct list_head *l, *n;
2832	struct dasd_ccw_req *cqr;
2833	dasd_erp_fn_t erp_fn;
2834	unsigned long flags;
2835	struct dasd_device *base = block->base;
2836
2837restart:
2838	/* Process request with final status. */
2839	list_for_each_safe(l, n, &block->ccw_queue) {
2840		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2841		if (cqr->status != DASD_CQR_DONE &&
2842		    cqr->status != DASD_CQR_FAILED &&
2843		    cqr->status != DASD_CQR_NEED_ERP &&
2844		    cqr->status != DASD_CQR_TERMINATED)
2845			continue;
2846
2847		if (cqr->status == DASD_CQR_TERMINATED) {
2848			base->discipline->handle_terminated_request(cqr);
2849			goto restart;
2850		}
2851
2852		/*  Process requests that may be recovered */
2853		if (cqr->status == DASD_CQR_NEED_ERP) {
2854			erp_fn = base->discipline->erp_action(cqr);
2855			if (IS_ERR(erp_fn(cqr)))
2856				continue;
2857			goto restart;
2858		}
2859
2860		/* log sense for fatal error */
2861		if (cqr->status == DASD_CQR_FAILED) {
2862			dasd_log_sense(cqr, &cqr->irb);
2863		}
2864
2865		/* First of all call extended error reporting. */
2866		if (dasd_eer_enabled(base) &&
2867		    cqr->status == DASD_CQR_FAILED) {
2868			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
2869
2870			/* restart request  */
2871			cqr->status = DASD_CQR_FILLED;
2872			cqr->retries = 255;
2873			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
2874			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
2875			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
2876					       flags);
2877			goto restart;
2878		}
2879
2880		/* Process finished ERP request. */
2881		if (cqr->refers) {
2882			__dasd_process_erp(base, cqr);
2883			goto restart;
2884		}
2885
2886		/* Rechain finished requests to final queue */
2887		cqr->endclk = get_tod_clock();
2888		list_move_tail(&cqr->blocklist, final_queue);
2889	}
2890}
2891
2892static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
2893{
2894	dasd_schedule_block_bh(cqr->block);
2895}
2896
2897static void __dasd_block_start_head(struct dasd_block *block)
2898{
2899	struct dasd_ccw_req *cqr;
2900
2901	if (list_empty(&block->ccw_queue))
2902		return;
2903	/* We allways begin with the first requests on the queue, as some
2904	 * of previously started requests have to be enqueued on a
2905	 * dasd_device again for error recovery.
2906	 */
2907	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
2908		if (cqr->status != DASD_CQR_FILLED)
2909			continue;
2910		if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
2911		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2912			cqr->status = DASD_CQR_FAILED;
2913			cqr->intrc = -EPERM;
2914			dasd_schedule_block_bh(block);
2915			continue;
2916		}
2917		/* Non-temporary stop condition will trigger fail fast */
2918		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
2919		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2920		    (!dasd_eer_enabled(block->base))) {
2921			cqr->status = DASD_CQR_FAILED;
2922			cqr->intrc = -ENOLINK;
2923			dasd_schedule_block_bh(block);
2924			continue;
2925		}
2926		/* Don't try to start requests if device is stopped */
2927		if (block->base->stopped)
2928			return;
2929
2930		/* just a fail safe check, should not happen */
2931		if (!cqr->startdev)
2932			cqr->startdev = block->base;
2933
2934		/* make sure that the requests we submit find their way back */
2935		cqr->callback = dasd_return_cqr_cb;
2936
2937		dasd_add_request_tail(cqr);
2938	}
2939}
2940
2941/*
2942 * Central dasd_block layer routine. Takes requests from the generic
2943 * block layer request queue, creates ccw requests, enqueues them on
2944 * a dasd_device and processes ccw requests that have been returned.
2945 */
2946static void dasd_block_tasklet(unsigned long data)
2947{
2948	struct dasd_block *block = (struct dasd_block *) data;
2949	struct list_head final_queue;
2950	struct list_head *l, *n;
2951	struct dasd_ccw_req *cqr;
2952	struct dasd_queue *dq;
2953
2954	atomic_set(&block->tasklet_scheduled, 0);
2955	INIT_LIST_HEAD(&final_queue);
2956	spin_lock_irq(&block->queue_lock);
2957	/* Finish off requests on ccw queue */
2958	__dasd_process_block_ccw_queue(block, &final_queue);
2959	spin_unlock_irq(&block->queue_lock);
2960
2961	/* Now call the callback function of requests with final status */
2962	list_for_each_safe(l, n, &final_queue) {
2963		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2964		dq = cqr->dq;
2965		spin_lock_irq(&dq->lock);
2966		list_del_init(&cqr->blocklist);
2967		__dasd_cleanup_cqr(cqr);
2968		spin_unlock_irq(&dq->lock);
2969	}
2970
2971	spin_lock_irq(&block->queue_lock);
2972	/* Now check if the head of the ccw queue needs to be started. */
2973	__dasd_block_start_head(block);
2974	spin_unlock_irq(&block->queue_lock);
2975
2976	if (waitqueue_active(&shutdown_waitq))
2977		wake_up(&shutdown_waitq);
2978	dasd_put_device(block->base);
2979}
2980
2981static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
2982{
2983	wake_up(&dasd_flush_wq);
2984}
2985
2986/*
2987 * Requeue a request back to the block request queue
2988 * only works for block requests
2989 */
2990static void _dasd_requeue_request(struct dasd_ccw_req *cqr)
2991{
2992	struct request *req;
2993
2994	/*
2995	 * If the request is an ERP request there is nothing to requeue.
2996	 * This will be done with the remaining original request.
2997	 */
2998	if (cqr->refers)
2999		return;
3000	spin_lock_irq(&cqr->dq->lock);
3001	req = (struct request *) cqr->callback_data;
3002	blk_mq_requeue_request(req, true);
3003	spin_unlock_irq(&cqr->dq->lock);
3004
3005	return;
3006}
3007
3008static int _dasd_requests_to_flushqueue(struct dasd_block *block,
3009					struct list_head *flush_queue)
3010{
3011	struct dasd_ccw_req *cqr, *n;
3012	unsigned long flags;
3013	int rc, i;
3014
3015	spin_lock_irqsave(&block->queue_lock, flags);
3016	rc = 0;
3017restart:
3018	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
3019		/* if this request currently owned by a dasd_device cancel it */
3020		if (cqr->status >= DASD_CQR_QUEUED)
3021			rc = dasd_cancel_req(cqr);
3022		if (rc < 0)
3023			break;
3024		/* Rechain request (including erp chain) so it won't be
3025		 * touched by the dasd_block_tasklet anymore.
3026		 * Replace the callback so we notice when the request
3027		 * is returned from the dasd_device layer.
3028		 */
3029		cqr->callback = _dasd_wake_block_flush_cb;
3030		for (i = 0; cqr; cqr = cqr->refers, i++)
3031			list_move_tail(&cqr->blocklist, flush_queue);
3032		if (i > 1)
3033			/* moved more than one request - need to restart */
3034			goto restart;
3035	}
3036	spin_unlock_irqrestore(&block->queue_lock, flags);
3037
3038	return rc;
3039}
3040
3041/*
3042 * Go through all request on the dasd_block request queue, cancel them
3043 * on the respective dasd_device, and return them to the generic
3044 * block layer.
3045 */
3046static int dasd_flush_block_queue(struct dasd_block *block)
3047{
3048	struct dasd_ccw_req *cqr, *n;
3049	struct list_head flush_queue;
3050	unsigned long flags;
3051	int rc;
3052
3053	INIT_LIST_HEAD(&flush_queue);
3054	rc = _dasd_requests_to_flushqueue(block, &flush_queue);
3055
3056	/* Now call the callback function of flushed requests */
3057restart_cb:
3058	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
3059		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
3060		/* Process finished ERP request. */
3061		if (cqr->refers) {
3062			spin_lock_bh(&block->queue_lock);
3063			__dasd_process_erp(block->base, cqr);
3064			spin_unlock_bh(&block->queue_lock);
3065			/* restart list_for_xx loop since dasd_process_erp
3066			 * might remove multiple elements */
3067			goto restart_cb;
3068		}
3069		/* call the callback function */
3070		spin_lock_irqsave(&cqr->dq->lock, flags);
3071		cqr->endclk = get_tod_clock();
3072		list_del_init(&cqr->blocklist);
3073		__dasd_cleanup_cqr(cqr);
3074		spin_unlock_irqrestore(&cqr->dq->lock, flags);
3075	}
3076	return rc;
3077}
3078
3079/*
3080 * Schedules a call to dasd_tasklet over the device tasklet.
3081 */
3082void dasd_schedule_block_bh(struct dasd_block *block)
3083{
3084	/* Protect against rescheduling. */
3085	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
3086		return;
3087	/* life cycle of block is bound to it's base device */
3088	dasd_get_device(block->base);
3089	tasklet_hi_schedule(&block->tasklet);
3090}
3091EXPORT_SYMBOL(dasd_schedule_block_bh);
3092
3093
3094/*
3095 * SECTION: external block device operations
3096 * (request queue handling, open, release, etc.)
3097 */
3098
3099/*
3100 * Dasd request queue function. Called from ll_rw_blk.c
3101 */
3102static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
3103				    const struct blk_mq_queue_data *qd)
3104{
3105	struct dasd_block *block = hctx->queue->queuedata;
3106	struct dasd_queue *dq = hctx->driver_data;
3107	struct request *req = qd->rq;
3108	struct dasd_device *basedev;
3109	struct dasd_ccw_req *cqr;
3110	blk_status_t rc = BLK_STS_OK;
3111
3112	basedev = block->base;
3113	spin_lock_irq(&dq->lock);
3114	if (basedev->state < DASD_STATE_READY ||
3115	    test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) {
3116		DBF_DEV_EVENT(DBF_ERR, basedev,
3117			      "device not ready for request %p", req);
3118		rc = BLK_STS_IOERR;
3119		goto out;
3120	}
3121
3122	/*
3123	 * if device is stopped do not fetch new requests
3124	 * except failfast is active which will let requests fail
3125	 * immediately in __dasd_block_start_head()
3126	 */
3127	if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) {
3128		DBF_DEV_EVENT(DBF_ERR, basedev,
3129			      "device stopped request %p", req);
3130		rc = BLK_STS_RESOURCE;
3131		goto out;
3132	}
3133
3134	if (basedev->features & DASD_FEATURE_READONLY &&
3135	    rq_data_dir(req) == WRITE) {
3136		DBF_DEV_EVENT(DBF_ERR, basedev,
3137			      "Rejecting write request %p", req);
3138		rc = BLK_STS_IOERR;
3139		goto out;
3140	}
3141
3142	if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
3143	    (basedev->features & DASD_FEATURE_FAILFAST ||
3144	     blk_noretry_request(req))) {
3145		DBF_DEV_EVENT(DBF_ERR, basedev,
3146			      "Rejecting failfast request %p", req);
3147		rc = BLK_STS_IOERR;
3148		goto out;
3149	}
3150
3151	cqr = basedev->discipline->build_cp(basedev, block, req);
3152	if (IS_ERR(cqr)) {
3153		if (PTR_ERR(cqr) == -EBUSY ||
3154		    PTR_ERR(cqr) == -ENOMEM ||
3155		    PTR_ERR(cqr) == -EAGAIN) {
3156			rc = BLK_STS_RESOURCE;
3157			goto out;
3158		}
3159		DBF_DEV_EVENT(DBF_ERR, basedev,
3160			      "CCW creation failed (rc=%ld) on request %p",
3161			      PTR_ERR(cqr), req);
3162		rc = BLK_STS_IOERR;
3163		goto out;
3164	}
3165	/*
3166	 *  Note: callback is set to dasd_return_cqr_cb in
3167	 * __dasd_block_start_head to cover erp requests as well
3168	 */
3169	cqr->callback_data = req;
3170	cqr->status = DASD_CQR_FILLED;
3171	cqr->dq = dq;
3172
3173	blk_mq_start_request(req);
3174	spin_lock(&block->queue_lock);
3175	list_add_tail(&cqr->blocklist, &block->ccw_queue);
3176	INIT_LIST_HEAD(&cqr->devlist);
3177	dasd_profile_start(block, cqr, req);
3178	dasd_schedule_block_bh(block);
3179	spin_unlock(&block->queue_lock);
3180
3181out:
3182	spin_unlock_irq(&dq->lock);
3183	return rc;
3184}
3185
3186/*
3187 * Block timeout callback, called from the block layer
3188 *
3189 * Return values:
3190 * BLK_EH_RESET_TIMER if the request should be left running
3191 * BLK_EH_DONE if the request is handled or terminated
3192 *		      by the driver.
3193 */
3194enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
3195{
3196	struct dasd_block *block = req->q->queuedata;
3197	struct dasd_device *device;
3198	struct dasd_ccw_req *cqr;
3199	unsigned long flags;
3200	int rc = 0;
3201
3202	cqr = blk_mq_rq_to_pdu(req);
3203	if (!cqr)
3204		return BLK_EH_DONE;
3205
3206	spin_lock_irqsave(&cqr->dq->lock, flags);
3207	device = cqr->startdev ? cqr->startdev : block->base;
3208	if (!device->blk_timeout) {
3209		spin_unlock_irqrestore(&cqr->dq->lock, flags);
3210		return BLK_EH_RESET_TIMER;
3211	}
3212	DBF_DEV_EVENT(DBF_WARNING, device,
3213		      " dasd_times_out cqr %p status %x",
3214		      cqr, cqr->status);
3215
3216	spin_lock(&block->queue_lock);
3217	spin_lock(get_ccwdev_lock(device->cdev));
3218	cqr->retries = -1;
3219	cqr->intrc = -ETIMEDOUT;
3220	if (cqr->status >= DASD_CQR_QUEUED) {
3221		rc = __dasd_cancel_req(cqr);
3222	} else if (cqr->status == DASD_CQR_FILLED ||
3223		   cqr->status == DASD_CQR_NEED_ERP) {
3224		cqr->status = DASD_CQR_TERMINATED;
3225	} else if (cqr->status == DASD_CQR_IN_ERP) {
3226		struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
3227
3228		list_for_each_entry_safe(searchcqr, nextcqr,
3229					 &block->ccw_queue, blocklist) {
3230			tmpcqr = searchcqr;
3231			while (tmpcqr->refers)
3232				tmpcqr = tmpcqr->refers;
3233			if (tmpcqr != cqr)
3234				continue;
3235			/* searchcqr is an ERP request for cqr */
3236			searchcqr->retries = -1;
3237			searchcqr->intrc = -ETIMEDOUT;
3238			if (searchcqr->status >= DASD_CQR_QUEUED) {
3239				rc = __dasd_cancel_req(searchcqr);
3240			} else if ((searchcqr->status == DASD_CQR_FILLED) ||
3241				   (searchcqr->status == DASD_CQR_NEED_ERP)) {
3242				searchcqr->status = DASD_CQR_TERMINATED;
3243				rc = 0;
3244			} else if (searchcqr->status == DASD_CQR_IN_ERP) {
3245				/*
3246				 * Shouldn't happen; most recent ERP
3247				 * request is at the front of queue
3248				 */
3249				continue;
3250			}
3251			break;
3252		}
3253	}
3254	spin_unlock(get_ccwdev_lock(device->cdev));
3255	dasd_schedule_block_bh(block);
3256	spin_unlock(&block->queue_lock);
3257	spin_unlock_irqrestore(&cqr->dq->lock, flags);
3258
3259	return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
3260}
3261
3262static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
3263			  unsigned int idx)
3264{
3265	struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL);
3266
3267	if (!dq)
3268		return -ENOMEM;
3269
3270	spin_lock_init(&dq->lock);
3271	hctx->driver_data = dq;
3272
3273	return 0;
3274}
3275
3276static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
3277{
3278	kfree(hctx->driver_data);
3279	hctx->driver_data = NULL;
3280}
3281
3282static void dasd_request_done(struct request *req)
3283{
3284	blk_mq_end_request(req, 0);
3285	blk_mq_run_hw_queues(req->q, true);
3286}
3287
3288static struct blk_mq_ops dasd_mq_ops = {
3289	.queue_rq = do_dasd_request,
3290	.complete = dasd_request_done,
3291	.timeout = dasd_times_out,
3292	.init_hctx = dasd_init_hctx,
3293	.exit_hctx = dasd_exit_hctx,
3294};
3295
3296/*
3297 * Allocate and initialize request queue and default I/O scheduler.
3298 */
3299static int dasd_alloc_queue(struct dasd_block *block)
3300{
3301	int rc;
3302
3303	block->tag_set.ops = &dasd_mq_ops;
3304	block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
3305	block->tag_set.nr_hw_queues = nr_hw_queues;
3306	block->tag_set.queue_depth = queue_depth;
3307	block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
3308	block->tag_set.numa_node = NUMA_NO_NODE;
3309
3310	rc = blk_mq_alloc_tag_set(&block->tag_set);
3311	if (rc)
3312		return rc;
3313
3314	block->request_queue = blk_mq_init_queue(&block->tag_set);
3315	if (IS_ERR(block->request_queue))
3316		return PTR_ERR(block->request_queue);
3317
3318	block->request_queue->queuedata = block;
3319
3320	return 0;
3321}
3322
3323/*
3324 * Deactivate and free request queue.
3325 */
3326static void dasd_free_queue(struct dasd_block *block)
3327{
3328	if (block->request_queue) {
3329		blk_cleanup_queue(block->request_queue);
3330		blk_mq_free_tag_set(&block->tag_set);
3331		block->request_queue = NULL;
3332	}
3333}
3334
3335static int dasd_open(struct block_device *bdev, fmode_t mode)
3336{
3337	struct dasd_device *base;
3338	int rc;
3339
3340	base = dasd_device_from_gendisk(bdev->bd_disk);
3341	if (!base)
3342		return -ENODEV;
3343
3344	atomic_inc(&base->block->open_count);
3345	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
3346		rc = -ENODEV;
3347		goto unlock;
3348	}
3349
3350	if (!try_module_get(base->discipline->owner)) {
3351		rc = -EINVAL;
3352		goto unlock;
3353	}
3354
3355	if (dasd_probeonly) {
3356		dev_info(&base->cdev->dev,
3357			 "Accessing the DASD failed because it is in "
3358			 "probeonly mode\n");
3359		rc = -EPERM;
3360		goto out;
3361	}
3362
3363	if (base->state <= DASD_STATE_BASIC) {
3364		DBF_DEV_EVENT(DBF_ERR, base, " %s",
3365			      " Cannot open unrecognized device");
3366		rc = -ENODEV;
3367		goto out;
3368	}
3369
3370	if ((mode & FMODE_WRITE) &&
3371	    (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
3372	     (base->features & DASD_FEATURE_READONLY))) {
3373		rc = -EROFS;
3374		goto out;
3375	}
3376
3377	dasd_put_device(base);
3378	return 0;
3379
3380out:
3381	module_put(base->discipline->owner);
3382unlock:
3383	atomic_dec(&base->block->open_count);
3384	dasd_put_device(base);
3385	return rc;
3386}
3387
3388static void dasd_release(struct gendisk *disk, fmode_t mode)
3389{
3390	struct dasd_device *base = dasd_device_from_gendisk(disk);
3391	if (base) {
3392		atomic_dec(&base->block->open_count);
3393		module_put(base->discipline->owner);
3394		dasd_put_device(base);
3395	}
3396}
3397
3398/*
3399 * Return disk geometry.
3400 */
3401static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
3402{
3403	struct dasd_device *base;
3404
3405	base = dasd_device_from_gendisk(bdev->bd_disk);
3406	if (!base)
3407		return -ENODEV;
3408
3409	if (!base->discipline ||
3410	    !base->discipline->fill_geometry) {
3411		dasd_put_device(base);
3412		return -EINVAL;
3413	}
3414	base->discipline->fill_geometry(base->block, geo);
3415	geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
3416	dasd_put_device(base);
3417	return 0;
3418}
3419
3420const struct block_device_operations
3421dasd_device_operations = {
3422	.owner		= THIS_MODULE,
3423	.open		= dasd_open,
3424	.release	= dasd_release,
3425	.ioctl		= dasd_ioctl,
3426	.compat_ioctl	= dasd_ioctl,
3427	.getgeo		= dasd_getgeo,
3428};
3429
3430/*******************************************************************************
3431 * end of block device operations
3432 */
3433
3434static void
3435dasd_exit(void)
3436{
3437#ifdef CONFIG_PROC_FS
3438	dasd_proc_exit();
3439#endif
3440	dasd_eer_exit();
3441	kmem_cache_destroy(dasd_page_cache);
3442	dasd_page_cache = NULL;
3443	dasd_gendisk_exit();
3444	dasd_devmap_exit();
3445	if (dasd_debug_area != NULL) {
3446		debug_unregister(dasd_debug_area);
3447		dasd_debug_area = NULL;
3448	}
3449	dasd_statistics_removeroot();
3450}
3451
3452/*
3453 * SECTION: common functions for ccw_driver use
3454 */
3455
3456/*
3457 * Is the device read-only?
3458 * Note that this function does not report the setting of the
3459 * readonly device attribute, but how it is configured in z/VM.
3460 */
3461int dasd_device_is_ro(struct dasd_device *device)
3462{
3463	struct ccw_dev_id dev_id;
3464	struct diag210 diag_data;
3465	int rc;
3466
3467	if (!MACHINE_IS_VM)
3468		return 0;
3469	ccw_device_get_id(device->cdev, &dev_id);
3470	memset(&diag_data, 0, sizeof(diag_data));
3471	diag_data.vrdcdvno = dev_id.devno;
3472	diag_data.vrdclen = sizeof(diag_data);
3473	rc = diag210(&diag_data);
3474	if (rc == 0 || rc == 2) {
3475		return diag_data.vrdcvfla & 0x80;
3476	} else {
3477		DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
3478			  dev_id.devno, rc);
3479		return 0;
3480	}
3481}
3482EXPORT_SYMBOL_GPL(dasd_device_is_ro);
3483
3484static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
3485{
3486	struct ccw_device *cdev = data;
3487	int ret;
3488
3489	ret = ccw_device_set_online(cdev);
3490	if (ret)
3491		pr_warn("%s: Setting the DASD online failed with rc=%d\n",
3492			dev_name(&cdev->dev), ret);
3493}
3494
3495/*
3496 * Initial attempt at a probe function. this can be simplified once
3497 * the other detection code is gone.
3498 */
3499int dasd_generic_probe(struct ccw_device *cdev,
3500		       struct dasd_discipline *discipline)
3501{
3502	int ret;
3503
3504	ret = dasd_add_sysfs_files(cdev);
3505	if (ret) {
3506		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
3507				"dasd_generic_probe: could not add "
3508				"sysfs entries");
3509		return ret;
3510	}
3511	cdev->handler = &dasd_int_handler;
3512
3513	/*
3514	 * Automatically online either all dasd devices (dasd_autodetect)
3515	 * or all devices specified with dasd= parameters during
3516	 * initial probe.
3517	 */
3518	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
3519	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
3520		async_schedule(dasd_generic_auto_online, cdev);
3521	return 0;
3522}
3523EXPORT_SYMBOL_GPL(dasd_generic_probe);
3524
3525void dasd_generic_free_discipline(struct dasd_device *device)
3526{
3527	/* Forget the discipline information. */
3528	if (device->discipline) {
3529		if (device->discipline->uncheck_device)
3530			device->discipline->uncheck_device(device);
3531		module_put(device->discipline->owner);
3532		device->discipline = NULL;
3533	}
3534	if (device->base_discipline) {
3535		module_put(device->base_discipline->owner);
3536		device->base_discipline = NULL;
3537	}
3538}
3539EXPORT_SYMBOL_GPL(dasd_generic_free_discipline);
3540
3541/*
3542 * This will one day be called from a global not_oper handler.
3543 * It is also used by driver_unregister during module unload.
3544 */
3545void dasd_generic_remove(struct ccw_device *cdev)
3546{
3547	struct dasd_device *device;
3548	struct dasd_block *block;
3549
3550	device = dasd_device_from_cdev(cdev);
3551	if (IS_ERR(device)) {
3552		dasd_remove_sysfs_files(cdev);
3553		return;
3554	}
3555	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3556	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3557		/* Already doing offline processing */
3558		dasd_put_device(device);
3559		dasd_remove_sysfs_files(cdev);
3560		return;
3561	}
3562	/*
3563	 * This device is removed unconditionally. Set offline
3564	 * flag to prevent dasd_open from opening it while it is
3565	 * no quite down yet.
3566	 */
3567	dasd_set_target_state(device, DASD_STATE_NEW);
3568	cdev->handler = NULL;
3569	/* dasd_delete_device destroys the device reference. */
3570	block = device->block;
3571	dasd_delete_device(device);
3572	/*
3573	 * life cycle of block is bound to device, so delete it after
3574	 * device was safely removed
3575	 */
3576	if (block)
3577		dasd_free_block(block);
3578
3579	dasd_remove_sysfs_files(cdev);
3580}
3581EXPORT_SYMBOL_GPL(dasd_generic_remove);
3582
3583/*
3584 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
3585 * the device is detected for the first time and is supposed to be used
3586 * or the user has started activation through sysfs.
3587 */
3588int dasd_generic_set_online(struct ccw_device *cdev,
3589			    struct dasd_discipline *base_discipline)
3590{
3591	struct dasd_discipline *discipline;
3592	struct dasd_device *device;
3593	int rc;
3594
3595	/* first online clears initial online feature flag */
3596	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
3597	device = dasd_create_device(cdev);
3598	if (IS_ERR(device))
3599		return PTR_ERR(device);
3600
3601	discipline = base_discipline;
3602	if (device->features & DASD_FEATURE_USEDIAG) {
3603	  	if (!dasd_diag_discipline_pointer) {
3604			/* Try to load the required module. */
3605			rc = request_module(DASD_DIAG_MOD);
3606			if (rc) {
3607				pr_warn("%s Setting the DASD online failed "
3608					"because the required module %s "
3609					"could not be loaded (rc=%d)\n",
3610					dev_name(&cdev->dev), DASD_DIAG_MOD,
3611					rc);
3612				dasd_delete_device(device);
3613				return -ENODEV;
3614			}
3615		}
3616		/* Module init could have failed, so check again here after
3617		 * request_module(). */
3618		if (!dasd_diag_discipline_pointer) {
3619			pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
3620				dev_name(&cdev->dev));
3621			dasd_delete_device(device);
3622			return -ENODEV;
3623		}
3624		discipline = dasd_diag_discipline_pointer;
3625	}
3626	if (!try_module_get(base_discipline->owner)) {
3627		dasd_delete_device(device);
3628		return -EINVAL;
3629	}
3630	if (!try_module_get(discipline->owner)) {
3631		module_put(base_discipline->owner);
3632		dasd_delete_device(device);
3633		return -EINVAL;
3634	}
3635	device->base_discipline = base_discipline;
3636	device->discipline = discipline;
3637
3638	/* check_device will allocate block device if necessary */
3639	rc = discipline->check_device(device);
3640	if (rc) {
3641		pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
3642			dev_name(&cdev->dev), discipline->name, rc);
3643		module_put(discipline->owner);
3644		module_put(base_discipline->owner);
3645		dasd_delete_device(device);
3646		return rc;
3647	}
3648
3649	dasd_set_target_state(device, DASD_STATE_ONLINE);
3650	if (device->state <= DASD_STATE_KNOWN) {
3651		pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
3652			dev_name(&cdev->dev));
3653		rc = -ENODEV;
3654		dasd_set_target_state(device, DASD_STATE_NEW);
3655		if (device->block)
3656			dasd_free_block(device->block);
3657		dasd_delete_device(device);
3658	} else
3659		pr_debug("dasd_generic device %s found\n",
3660				dev_name(&cdev->dev));
3661
3662	wait_event(dasd_init_waitq, _wait_for_device(device));
3663
3664	dasd_put_device(device);
3665	return rc;
3666}
3667EXPORT_SYMBOL_GPL(dasd_generic_set_online);
3668
3669int dasd_generic_set_offline(struct ccw_device *cdev)
3670{
3671	struct dasd_device *device;
3672	struct dasd_block *block;
3673	int max_count, open_count, rc;
3674	unsigned long flags;
3675
3676	rc = 0;
3677	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3678	device = dasd_device_from_cdev_locked(cdev);
3679	if (IS_ERR(device)) {
3680		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3681		return PTR_ERR(device);
3682	}
3683
3684	/*
3685	 * We must make sure that this device is currently not in use.
3686	 * The open_count is increased for every opener, that includes
3687	 * the blkdev_get in dasd_scan_partitions. We are only interested
3688	 * in the other openers.
3689	 */
3690	if (device->block) {
3691		max_count = device->block->bdev ? 0 : -1;
3692		open_count = atomic_read(&device->block->open_count);
3693		if (open_count > max_count) {
3694			if (open_count > 0)
3695				pr_warn("%s: The DASD cannot be set offline with open count %i\n",
3696					dev_name(&cdev->dev), open_count);
3697			else
3698				pr_warn("%s: The DASD cannot be set offline while it is in use\n",
3699					dev_name(&cdev->dev));
3700			rc = -EBUSY;
3701			goto out_err;
3702		}
3703	}
3704
3705	/*
3706	 * Test if the offline processing is already running and exit if so.
3707	 * If a safe offline is being processed this could only be a normal
3708	 * offline that should be able to overtake the safe offline and
3709	 * cancel any I/O we do not want to wait for any longer
3710	 */
3711	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
3712		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3713			clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
3714				  &device->flags);
3715		} else {
3716			rc = -EBUSY;
3717			goto out_err;
3718		}
3719	}
3720	set_bit(DASD_FLAG_OFFLINE, &device->flags);
3721
3722	/*
3723	 * if safe_offline is called set safe_offline_running flag and
3724	 * clear safe_offline so that a call to normal offline
3725	 * can overrun safe_offline processing
3726	 */
3727	if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
3728	    !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3729		/* need to unlock here to wait for outstanding I/O */
3730		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3731		/*
3732		 * If we want to set the device safe offline all IO operations
3733		 * should be finished before continuing the offline process
3734		 * so sync bdev first and then wait for our queues to become
3735		 * empty
3736		 */
3737		if (device->block) {
3738			rc = fsync_bdev(device->block->bdev);
3739			if (rc != 0)
3740				goto interrupted;
3741		}
3742		dasd_schedule_device_bh(device);
3743		rc = wait_event_interruptible(shutdown_waitq,
3744					      _wait_for_empty_queues(device));
3745		if (rc != 0)
3746			goto interrupted;
3747
3748		/*
3749		 * check if a normal offline process overtook the offline
3750		 * processing in this case simply do nothing beside returning
3751		 * that we got interrupted
3752		 * otherwise mark safe offline as not running any longer and
3753		 * continue with normal offline
3754		 */
3755		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3756		if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3757			rc = -ERESTARTSYS;
3758			goto out_err;
3759		}
3760		clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
3761	}
3762	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3763
3764	dasd_set_target_state(device, DASD_STATE_NEW);
3765	/* dasd_delete_device destroys the device reference. */
3766	block = device->block;
3767	dasd_delete_device(device);
3768	/*
3769	 * life cycle of block is bound to device, so delete it after
3770	 * device was safely removed
3771	 */
3772	if (block)
3773		dasd_free_block(block);
3774
3775	return 0;
3776
3777interrupted:
3778	/* interrupted by signal */
3779	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3780	clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
3781	clear_bit(DASD_FLAG_OFFLINE, &device->flags);
3782out_err:
3783	dasd_put_device(device);
3784	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3785	return rc;
3786}
3787EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
3788
3789int dasd_generic_last_path_gone(struct dasd_device *device)
3790{
3791	struct dasd_ccw_req *cqr;
3792
3793	dev_warn(&device->cdev->dev, "No operational channel path is left "
3794		 "for the device\n");
3795	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
3796	/* First of all call extended error reporting. */
3797	dasd_eer_write(device, NULL, DASD_EER_NOPATH);
3798
3799	if (device->state < DASD_STATE_BASIC)
3800		return 0;
3801	/* Device is active. We want to keep it. */
3802	list_for_each_entry(cqr, &device->ccw_queue, devlist)
3803		if ((cqr->status == DASD_CQR_IN_IO) ||
3804		    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
3805			cqr->status = DASD_CQR_QUEUED;
3806			cqr->retries++;
3807		}
3808	dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
3809	dasd_device_clear_timer(device);
3810	dasd_schedule_device_bh(device);
3811	return 1;
3812}
3813EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
3814
3815int dasd_generic_path_operational(struct dasd_device *device)
3816{
3817	dev_info(&device->cdev->dev, "A channel path to the device has become "
3818		 "operational\n");
3819	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
3820	dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
3821	if (device->stopped & DASD_UNRESUMED_PM) {
3822		dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
3823		dasd_restore_device(device);
3824		return 1;
3825	}
3826	dasd_schedule_device_bh(device);
3827	if (device->block) {
3828		dasd_schedule_block_bh(device->block);
3829		if (device->block->request_queue)
3830			blk_mq_run_hw_queues(device->block->request_queue,
3831					     true);
3832		}
3833
3834	if (!device->stopped)
3835		wake_up(&generic_waitq);
3836
3837	return 1;
3838}
3839EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
3840
3841int dasd_generic_notify(struct ccw_device *cdev, int event)
3842{
3843	struct dasd_device *device;
3844	int ret;
3845
3846	device = dasd_device_from_cdev_locked(cdev);
3847	if (IS_ERR(device))
3848		return 0;
3849	ret = 0;
3850	switch (event) {
3851	case CIO_GONE:
3852	case CIO_BOXED:
3853	case CIO_NO_PATH:
3854		dasd_path_no_path(device);
3855		ret = dasd_generic_last_path_gone(device);
3856		break;
3857	case CIO_OPER:
3858		ret = 1;
3859		if (dasd_path_get_opm(device))
3860			ret = dasd_generic_path_operational(device);
3861		break;
3862	}
3863	dasd_put_device(device);
3864	return ret;
3865}
3866EXPORT_SYMBOL_GPL(dasd_generic_notify);
3867
3868void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
3869{
3870	struct dasd_device *device;
3871	int chp, oldopm, hpfpm, ifccpm;
3872
3873	device = dasd_device_from_cdev_locked(cdev);
3874	if (IS_ERR(device))
3875		return;
3876
3877	oldopm = dasd_path_get_opm(device);
3878	for (chp = 0; chp < 8; chp++) {
3879		if (path_event[chp] & PE_PATH_GONE) {
3880			dasd_path_notoper(device, chp);
3881		}
3882		if (path_event[chp] & PE_PATH_AVAILABLE) {
3883			dasd_path_available(device, chp);
3884			dasd_schedule_device_bh(device);
3885		}
3886		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
3887			if (!dasd_path_is_operational(device, chp) &&
3888			    !dasd_path_need_verify(device, chp)) {
3889				/*
3890				 * we can not establish a pathgroup on an
3891				 * unavailable path, so trigger a path
3892				 * verification first
3893				 */
3894			dasd_path_available(device, chp);
3895			dasd_schedule_device_bh(device);
3896			}
3897			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3898				      "Pathgroup re-established\n");
3899			if (device->discipline->kick_validate)
3900				device->discipline->kick_validate(device);
3901		}
3902	}
3903	hpfpm = dasd_path_get_hpfpm(device);
3904	ifccpm = dasd_path_get_ifccpm(device);
3905	if (!dasd_path_get_opm(device) && hpfpm) {
3906		/*
3907		 * device has no operational paths but at least one path is
3908		 * disabled due to HPF errors
3909		 * disable HPF at all and use the path(s) again
3910		 */
3911		if (device->discipline->disable_hpf)
3912			device->discipline->disable_hpf(device);
3913		dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
3914		dasd_path_set_tbvpm(device, hpfpm);
3915		dasd_schedule_device_bh(device);
3916		dasd_schedule_requeue(device);
3917	} else if (!dasd_path_get_opm(device) && ifccpm) {
3918		/*
3919		 * device has no operational paths but at least one path is
3920		 * disabled due to IFCC errors
3921		 * trigger path verification on paths with IFCC errors
3922		 */
3923		dasd_path_set_tbvpm(device, ifccpm);
3924		dasd_schedule_device_bh(device);
3925	}
3926	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
3927		dev_warn(&device->cdev->dev,
3928			 "No verified channel paths remain for the device\n");
3929		DBF_DEV_EVENT(DBF_WARNING, device,
3930			      "%s", "last verified path gone");
3931		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
3932		dasd_device_set_stop_bits(device,
3933					  DASD_STOPPED_DC_WAIT);
3934	}
3935	dasd_put_device(device);
3936}
3937EXPORT_SYMBOL_GPL(dasd_generic_path_event);
3938
3939int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
3940{
3941	if (!dasd_path_get_opm(device) && lpm) {
3942		dasd_path_set_opm(device, lpm);
3943		dasd_generic_path_operational(device);
3944	} else
3945		dasd_path_add_opm(device, lpm);
3946	return 0;
3947}
3948EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
3949
3950void dasd_generic_space_exhaust(struct dasd_device *device,
3951				struct dasd_ccw_req *cqr)
3952{
3953	dasd_eer_write(device, NULL, DASD_EER_NOSPC);
3954
3955	if (device->state < DASD_STATE_BASIC)
3956		return;
3957
3958	if (cqr->status == DASD_CQR_IN_IO ||
3959	    cqr->status == DASD_CQR_CLEAR_PENDING) {
3960		cqr->status = DASD_CQR_QUEUED;
3961		cqr->retries++;
3962	}
3963	dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
3964	dasd_device_clear_timer(device);
3965	dasd_schedule_device_bh(device);
3966}
3967EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);
3968
3969void dasd_generic_space_avail(struct dasd_device *device)
3970{
3971	dev_info(&device->cdev->dev, "Extent pool space is available\n");
3972	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");
3973
3974	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
3975	dasd_schedule_device_bh(device);
3976
3977	if (device->block) {
3978		dasd_schedule_block_bh(device->block);
3979		if (device->block->request_queue)
3980			blk_mq_run_hw_queues(device->block->request_queue, true);
3981	}
3982	if (!device->stopped)
3983		wake_up(&generic_waitq);
3984}
3985EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
3986
3987/*
3988 * clear active requests and requeue them to block layer if possible
3989 */
3990static int dasd_generic_requeue_all_requests(struct dasd_device *device)
3991{
3992	struct dasd_block *block = device->block;
3993	struct list_head requeue_queue;
3994	struct dasd_ccw_req *cqr, *n;
3995	int rc;
3996
3997	if (!block)
3998		return 0;
3999
4000	INIT_LIST_HEAD(&requeue_queue);
4001	rc = _dasd_requests_to_flushqueue(block, &requeue_queue);
4002
4003	/* Now call the callback function of flushed requests */
4004restart_cb:
4005	list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) {
4006		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
4007		/* Process finished ERP request. */
4008		if (cqr->refers) {
4009			spin_lock_bh(&block->queue_lock);
4010			__dasd_process_erp(block->base, cqr);
4011			spin_unlock_bh(&block->queue_lock);
4012			/* restart list_for_xx loop since dasd_process_erp
4013			 * might remove multiple elements
4014			 */
4015			goto restart_cb;
4016		}
4017		_dasd_requeue_request(cqr);
4018		list_del_init(&cqr->blocklist);
4019		cqr->block->base->discipline->free_cp(
4020			cqr, (struct request *) cqr->callback_data);
4021	}
4022	dasd_schedule_device_bh(device);
4023	return rc;
4024}
4025
4026static void do_requeue_requests(struct work_struct *work)
4027{
4028	struct dasd_device *device = container_of(work, struct dasd_device,
4029						  requeue_requests);
4030	dasd_generic_requeue_all_requests(device);
4031	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
4032	if (device->block)
4033		dasd_schedule_block_bh(device->block);
4034	dasd_put_device(device);
4035}
4036
4037void dasd_schedule_requeue(struct dasd_device *device)
4038{
4039	dasd_get_device(device);
4040	/* queue call to dasd_reload_device to the kernel event daemon. */
4041	if (!schedule_work(&device->requeue_requests))
4042		dasd_put_device(device);
4043}
4044EXPORT_SYMBOL(dasd_schedule_requeue);
4045
4046int dasd_generic_pm_freeze(struct ccw_device *cdev)
4047{
4048	struct dasd_device *device = dasd_device_from_cdev(cdev);
4049
4050	if (IS_ERR(device))
4051		return PTR_ERR(device);
4052
4053	/* mark device as suspended */
4054	set_bit(DASD_FLAG_SUSPENDED, &device->flags);
4055
4056	if (device->discipline->freeze)
4057		device->discipline->freeze(device);
4058
4059	/* disallow new I/O  */
4060	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
4061
4062	return dasd_generic_requeue_all_requests(device);
4063}
4064EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
4065
4066int dasd_generic_restore_device(struct ccw_device *cdev)
4067{
4068	struct dasd_device *device = dasd_device_from_cdev(cdev);
4069	int rc = 0;
4070
4071	if (IS_ERR(device))
4072		return PTR_ERR(device);
4073
4074	/* allow new IO again */
4075	dasd_device_remove_stop_bits(device,
4076				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));
4077
4078	dasd_schedule_device_bh(device);
4079
4080	/*
4081	 * call discipline restore function
4082	 * if device is stopped do nothing e.g. for disconnected devices
4083	 */
4084	if (device->discipline->restore && !(device->stopped))
4085		rc = device->discipline->restore(device);
4086	if (rc || device->stopped)
4087		/*
4088		 * if the resume failed for the DASD we put it in
4089		 * an UNRESUMED stop state
4090		 */
4091		device->stopped |= DASD_UNRESUMED_PM;
4092
4093	if (device->block) {
4094		dasd_schedule_block_bh(device->block);
4095		if (device->block->request_queue)
4096			blk_mq_run_hw_queues(device->block->request_queue,
4097					     true);
4098	}
4099
4100	clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
4101	dasd_put_device(device);
4102	return 0;
4103}
4104EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
4105
4106static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
4107						   int rdc_buffer_size,
4108						   int magic)
4109{
4110	struct dasd_ccw_req *cqr;
4111	struct ccw1 *ccw;
4112
4113	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
4114				   NULL);
4115
4116	if (IS_ERR(cqr)) {
4117		/* internal error 13 - Allocating the RDC request failed*/
4118		dev_err(&device->cdev->dev,
4119			 "An error occurred in the DASD device driver, "
4120			 "reason=%s\n", "13");
4121		return cqr;
4122	}
4123
4124	ccw = cqr->cpaddr;
4125	ccw->cmd_code = CCW_CMD_RDC;
4126	ccw->cda = (__u32)(addr_t) cqr->data;
4127	ccw->flags = 0;
4128	ccw->count = rdc_buffer_size;
4129	cqr->startdev = device;
4130	cqr->memdev = device;
4131	cqr->expires = 10*HZ;
4132	cqr->retries = 256;
4133	cqr->buildclk = get_tod_clock();
4134	cqr->status = DASD_CQR_FILLED;
4135	return cqr;
4136}
4137
4138
4139int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
4140				void *rdc_buffer, int rdc_buffer_size)
4141{
4142	int ret;
4143	struct dasd_ccw_req *cqr;
4144
4145	cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
4146	if (IS_ERR(cqr))
4147		return PTR_ERR(cqr);
4148
4149	ret = dasd_sleep_on(cqr);
4150	if (ret == 0)
4151		memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
4152	dasd_sfree_request(cqr, cqr->memdev);
4153	return ret;
4154}
4155EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
4156
4157/*
4158 *   In command mode and transport mode we need to look for sense
4159 *   data in different places. The sense data itself is allways
4160 *   an array of 32 bytes, so we can unify the sense data access
4161 *   for both modes.
4162 */
4163char *dasd_get_sense(struct irb *irb)
4164{
4165	struct tsb *tsb = NULL;
4166	char *sense = NULL;
4167
4168	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
4169		if (irb->scsw.tm.tcw)
4170			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
4171					  irb->scsw.tm.tcw);
4172		if (tsb && tsb->length == 64 && tsb->flags)
4173			switch (tsb->flags & 0x07) {
4174			case 1:	/* tsa_iostat */
4175				sense = tsb->tsa.iostat.sense;
4176				break;
4177			case 2: /* tsa_ddpc */
4178				sense = tsb->tsa.ddpc.sense;
4179				break;
4180			default:
4181				/* currently we don't use interrogate data */
4182				break;
4183			}
4184	} else if (irb->esw.esw0.erw.cons) {
4185		sense = irb->ecw;
4186	}
4187	return sense;
4188}
4189EXPORT_SYMBOL_GPL(dasd_get_sense);
4190
4191void dasd_generic_shutdown(struct ccw_device *cdev)
4192{
4193	struct dasd_device *device;
4194
4195	device = dasd_device_from_cdev(cdev);
4196	if (IS_ERR(device))
4197		return;
4198
4199	if (device->block)
4200		dasd_schedule_block_bh(device->block);
4201
4202	dasd_schedule_device_bh(device);
4203
4204	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
4205}
4206EXPORT_SYMBOL_GPL(dasd_generic_shutdown);
4207
4208static int __init dasd_init(void)
4209{
4210	int rc;
4211
4212	init_waitqueue_head(&dasd_init_waitq);
4213	init_waitqueue_head(&dasd_flush_wq);
4214	init_waitqueue_head(&generic_waitq);
4215	init_waitqueue_head(&shutdown_waitq);
4216
4217	/* register 'common' DASD debug area, used for all DBF_XXX calls */
4218	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
4219	if (dasd_debug_area == NULL) {
4220		rc = -ENOMEM;
4221		goto failed;
4222	}
4223	debug_register_view(dasd_debug_area, &debug_sprintf_view);
4224	debug_set_level(dasd_debug_area, DBF_WARNING);
4225
4226	DBF_EVENT(DBF_EMERG, "%s", "debug area created");
4227
4228	dasd_diag_discipline_pointer = NULL;
4229
4230	dasd_statistics_createroot();
4231
4232	rc = dasd_devmap_init();
4233	if (rc)
4234		goto failed;
4235	rc = dasd_gendisk_init();
4236	if (rc)
4237		goto failed;
4238	rc = dasd_parse();
4239	if (rc)
4240		goto failed;
4241	rc = dasd_eer_init();
4242	if (rc)
4243		goto failed;
4244#ifdef CONFIG_PROC_FS
4245	rc = dasd_proc_init();
4246	if (rc)
4247		goto failed;
4248#endif
4249
4250	return 0;
4251failed:
4252	pr_info("The DASD device driver could not be initialized\n");
4253	dasd_exit();
4254	return rc;
4255}
4256
4257module_init(dasd_init);
4258module_exit(dasd_exit);
4259