cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dasd.c (113122B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_DIAG_MOD		"dasd_diag_mod"

static unsigned int queue_depth = 32;
static unsigned int nr_hw_queues = 4;

module_param(queue_depth, uint, 0444);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");

module_param(nr_hw_queues, uint, 0444);
MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");

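/*
 * Usage sketch (editorial addition, not in the original source): both
 * parameters are declared 0444, i.e. read-only at runtime, so they can
 * only be set when the module is loaded. Assuming the usual module
 * name dasd_mod:
 *
 *   modprobe dasd_mod queue_depth=64 nr_hw_queues=8
 *
 * The values are picked up when the block queue for a device is
 * allocated, so they apply to devices set online afterwards.
 */
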
/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int  dasd_alloc_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(struct timer_list *);
static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get two pages for ese format. */
	device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ese_mem) {
		free_page((unsigned long) device->erp_mem);
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet, dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	timer_setup(&device->timer, dasd_device_timeout, 0);
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	INIT_WORK(&device->requeue_requests, do_requeue_requests);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_pages((unsigned long) device->ese_mem, 1);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet, dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	INIT_LIST_HEAD(&block->format_list);
	spin_lock_init(&block->format_lock);
	timer_setup(&block->timer, dasd_block_timeout, 0);
	spin_lock_init(&block->profile.lock);

	return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);
	dasd_hosts_init(device->debugfs_dentry, device);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->discipline->basic_to_known) {
		rc = device->discipline->basic_to_known(device);
		if (rc)
			return rc;
	}

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	dasd_hosts_exit(device);
	debugfs_remove(device->debugfs_dentry);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;
	struct gendisk *disk;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				device->state = DASD_STATE_UNFMT;
				disk = device->block->gdp;
				kobject_uevent(&disk_to_dev(disk)->kobj,
					       KOBJ_CHANGE);
				goto out;
			}
			return rc;
		}
		if (device->discipline->setup_blk_queue)
			device->discipline->setup_blk_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
					KOBJ_CHANGE);
			return 0;
		}
		disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}

	device->state = DASD_STATE_READY;
	if (device->block && !(device->features & DASD_FEATURE_USERAW))
		disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}

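/*
 * Illustration (editorial addition): the ladder walked by
 * dasd_increase_state()/dasd_decrease_state(), with the numeric
 * ordering of the DASD_STATE_* constants assumed from dasd_int.h:
 *
 *   NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *                        \
 *                         -> UNFMT  (analysis failed; the only legal
 *                                    move is back down to BASIC,
 *                                    anything else yields -EPERM)
 *
 * dasd_change_state() advances one step at a time toward
 * device->target; a step returning -EAGAIN leaves state and target
 * untouched so the transition can be retried later, e.g. via
 * dasd_kick_device().
 */
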
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_kick_device to the kernel event daemon. */
	if (!schedule_work(&device->kick_work))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);
	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_reload_device to the kernel event daemon. */
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}

/*
 * Enable a device and wait until it has reached its target state.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile dasd_global_profile = {
	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
};
static struct dentry *dasd_debugfs_global_entry;

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_global_profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
	device = cqr->startdev;
	if (device->profile.data) {
		counter = 1; /* request is not yet queued on the start device */
		list_for_each(l, &device->ccw_queue)
			if (++counter >= 31)
				break;
	}
	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		device->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			device->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&device->profile.lock);
}

/*
 * Add profiling information for cqr after execution.
 */

#define dasd_profile_counter(value, index)			   \
{								   \
	for (index = 0; index < 31 && value >> (2+index); index++) \
		;						   \
}

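/*
 * Worked example (editorial addition): dasd_profile_counter() yields
 * the smallest index for which value >> (2 + index) == 0, capped at 31,
 * i.e. a coarse log2 bucket for the 32-slot histograms below:
 *
 *   value 0..3    -> index 0
 *   value 4..7    -> index 1
 *   value 8..15   -> index 2
 *   value 16..31  -> index 3
 *   ...            (bucket i >= 1 covers [2^(i+1), 2^(i+2)))
 */
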
static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
{
	/* in case of an overflow, reset the whole profile */
	if (data->dasd_io_reqs == UINT_MAX) {
		memset(data, 0, sizeof(*data));
		ktime_get_real_ts64(&data->starttod);
	}
	data->dasd_io_reqs++;
	data->dasd_io_sects += sectors;
	if (is_alias)
		data->dasd_io_alias++;
	if (is_tpm)
		data->dasd_io_tpm++;

	data->dasd_io_secs[sectors_ind]++;
	data->dasd_io_times[tottime_ind]++;
	data->dasd_io_timps[tottimeps_ind]++;
	data->dasd_io_time1[strtime_ind]++;
	data->dasd_io_time2[irqtime_ind]++;
	data->dasd_io_time2ps[irqtimeps_ind]++;
	data->dasd_io_time3[endtime_ind]++;

	if (is_read) {
		data->dasd_read_reqs++;
		data->dasd_read_sects += sectors;
		if (is_alias)
			data->dasd_read_alias++;
		if (is_tpm)
			data->dasd_read_tpm++;
		data->dasd_read_secs[sectors_ind]++;
		data->dasd_read_times[tottime_ind]++;
		data->dasd_read_time1[strtime_ind]++;
		data->dasd_read_time2[irqtime_ind]++;
		data->dasd_read_time3[endtime_ind]++;
	}
}

static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	unsigned long strtime, irqtime, endtime, tottime;
	unsigned long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;
	struct dasd_profile_info *data;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		data = dasd_global_profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(dasd_global_profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		data = block->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		data = device->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
	ktime_get_real_ts64(&data->starttod);
	spin_unlock_bh(&profile->lock);
}

int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	ktime_get_real_ts64(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
	spin_lock_bh(&profile->lock);
	kfree(profile->data);
	profile->data = NULL;
	spin_unlock_bh(&profile->lock);
}

char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}

static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (rc)
			goto out;
		rc = user_len;
		if (prof == &dasd_global_profile) {
			dasd_profile_reset(prof);
			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
		}
	} else if (strncmp(str, "off", 3) == 0) {
		if (prof == &dasd_global_profile)
			dasd_global_profile_level = DASD_PROFILE_OFF;
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
out:
	vfree(buffer);
	return rc;
}

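/*
 * Usage sketch (editorial addition; paths follow from
 * dasd_statistics_createroot() and dasd_profile_init() below): with
 * debugfs mounted at /sys/kernel/debug, the commands parsed above can
 * be driven from the shell, e.g. for the global profile:
 *
 *   echo on    > /sys/kernel/debug/dasd/global/statistics
 *   cat          /sys/kernel/debug/dasd/global/statistics
 *   echo reset > /sys/kernel/debug/dasd/global/statistics
 *   echo off   > /sys/kernel/debug/dasd/global/statistics
 *
 * Matching per-device and per-block files live under the ccw bus-id
 * and the disk name, respectively (see dasd_state_known_to_basic()).
 */
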
static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int i;

	for (i = 0; i < 32; i++)
		seq_printf(m, "%u ", array[i]);
	seq_putc(m, '\n');
}

static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
	seq_printf(m, "start_time %lld.%09ld\n",
		   (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
	seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
	seq_puts(m, "histogram_sectors ");
	dasd_stats_array(m, data->dasd_io_secs);
	seq_puts(m, "histogram_io_times ");
	dasd_stats_array(m, data->dasd_io_times);
	seq_puts(m, "histogram_io_times_weighted ");
	dasd_stats_array(m, data->dasd_io_timps);
	seq_puts(m, "histogram_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_io_time1);
	seq_puts(m, "histogram_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_io_time2);
	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
	dasd_stats_array(m, data->dasd_io_time2ps);
	seq_puts(m, "histogram_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_io_time3);
	seq_puts(m, "histogram_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
	seq_puts(m, "histogram_read_sectors ");
	dasd_stats_array(m, data->dasd_read_secs);
	seq_puts(m, "histogram_read_times ");
	dasd_stats_array(m, data->dasd_read_times);
	seq_puts(m, "histogram_read_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_read_time1);
	seq_puts(m, "histogram_read_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_read_time2);
	seq_puts(m, "histogram_read_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_read_time3);
	seq_puts(m, "histogram_read_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_puts(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;
	return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	umode_t mode;
	struct dentry *pde;

	if (!base_dentry)
		return;
	profile->dentry = NULL;
	profile->data = NULL;
	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, base_dentry,
				  profile, &dasd_stats_raw_fops);
	if (pde && !IS_ERR(pde))
		profile->dentry = pde;
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	dasd_profile_off(profile);
	debugfs_remove(profile->dentry);
	profile->dentry = NULL;
}

static void dasd_statistics_removeroot(void)
{
	dasd_global_profile_level = DASD_PROFILE_OFF;
	dasd_profile_exit(&dasd_global_profile);
	debugfs_remove(dasd_debugfs_global_entry);
	debugfs_remove(dasd_debugfs_root_entry);
}

static void dasd_statistics_createroot(void)
{
	struct dentry *pde;

	dasd_debugfs_root_entry = NULL;
	pde = debugfs_create_dir("dasd", NULL);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_root_entry = pde;
	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_global_entry = pde;
	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
	return;

error:
	DBF_EVENT(DBF_ERR, "%s",
		  "Creation of the dasd debugfs interface failed");
	dasd_statistics_removeroot();
	return;
}

#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
	seq_puts(m, "Statistics are not activated in this kernel\n");
	return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

#endif				/* CONFIG_DASD_PROFILE */

static int dasd_hosts_show(struct seq_file *m, void *v)
{
	struct dasd_device *device;
	int rc = -EOPNOTSUPP;

	device = m->private;
	dasd_get_device(device);

	if (device->discipline->hosts_print)
		rc = device->discipline->hosts_print(device, m);

	dasd_put_device(device);
	return rc;
}

DEFINE_SHOW_ATTRIBUTE(dasd_hosts);

static void dasd_hosts_exit(struct dasd_device *device)
{
	debugfs_remove(device->hosts_dentry);
	device->hosts_dentry = NULL;
}

static void dasd_hosts_init(struct dentry *base_dentry,
			    struct dasd_device *device)
{
	struct dentry *pde;
	umode_t mode;

	if (!base_dentry)
		return;

	mode = S_IRUSR | S_IFREG;
	pde = debugfs_create_file("host_access_list", mode, base_dentry,
				  device, &dasd_hosts_fops);
	if (pde && !IS_ERR(pde))
		device->hosts_dentry = pde;
}

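/*
 * Usage note (editorial addition): the file created above is read-only
 * and delegates to the discipline's hosts_print() callback (the ECKD
 * discipline provides one); with an assumed bus-id of 0.0.4711 it
 * would be read as:
 *
 *   cat /sys/kernel/debug/dasd/0.0.4711/host_access_list
 */
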
struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
					  struct dasd_device *device,
					  struct dasd_ccw_req *cqr)
{
	unsigned long flags;
	char *data, *chunk;
	int size = 0;

	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	if (!cqr)
		size += (sizeof(*cqr) + 7L) & -8L;

	spin_lock_irqsave(&device->mem_lock, flags);
	data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	if (!cqr) {
		cqr = (void *) data;
		data += (sizeof(*cqr) + 7L) & -8L;
	}
	memset(cqr, 0, sizeof(*cqr));
	cqr->mem_chunk = chunk;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);

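/*
 * Editorial note on the size arithmetic above: (sizeof(*cqr) + 7L) & -8L
 * rounds the struct size up to the next multiple of 8 (-8L is the
 * two's-complement spelling of ~7L), so the ccw1 array placed behind
 * the cqr in the same chunk stays 8-byte aligned. For an assumed
 * sizeof(*cqr) of 140:
 *
 *   (140 + 7) & -8 == 147 & ~7 == 144
 */
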
struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int size, cqr_size;
	char *data;

	cqr_size = (sizeof(*cqr) + 7L) & -8L;
	size = cqr_size;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;

	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = dasd_alloc_chunk(&device->ese_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!cqr)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(*cqr));
	data = (char *)cqr + cqr_size;
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}

	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);

	return cqr;
}
EXPORT_SYMBOL(dasd_fmalloc_request);

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);

void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ese_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_ffree_request);

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			    " dasd_ccw_req 0x%08x magic doesn't match"
			    " discipline 0x%08x",
			    cqr->magic,
			    *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EINVAL:
			/*
			 * device not valid so no I/O could be running
			 * handle CQR as termination successful
			 */
			cqr->status = DASD_CQR_CLEARED;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			/* no retries for invalid devices */
			cqr->retries = -1;
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "EINVAL, handle as terminated");
			/* fake rc to success */
			rc = 0;
			break;
		default:
			/* internal error 10 - unknown rc */
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}
EXPORT_SYMBOL(dasd_term_IO);

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO ran out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_tod_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
		cqr->lpm &= dasd_path_get_opm(device);
		if (!cqr->lpm)
			cqr->lpm = dasd_path_get_opm(device);
	}
	/*
	 * remember the number of formatted tracks to prevent double format on
	 * ESE devices
	 */
	if (cqr->block)
		cqr->trkcount = atomic_read(&cqr->block->trkcount);

	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g. the ppm) then
		 * we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
		 */
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
		} else if (cqr->lpm != dasd_path_get_opm(device)) {
			cqr->lpm = dasd_path_get_opm(device);
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
			dasd_path_no_path(device);
			dasd_path_set_tbvpm(device,
					  ccw_device_get_path_mask(
						  device->cdev));
		}
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}
EXPORT_SYMBOL(dasd_start_IO);

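/*
 * Caller sketch (editorial addition, loosely modeled on the
 * __dasd_device_start_head() logic later in this file; treat the
 * details as an assumption rather than a copy): a busy channel is
 * handled by re-arming the device timer instead of spinning:
 *
 *	rc = device->discipline->start_IO(cqr);
 *	if (rc == 0)
 *		dasd_device_set_timer(device, cqr->expires);
 *	else if (rc == -EACCES)
 *		dasd_schedule_device_bh(device);
 *	else
 *		dasd_device_set_timer(device, 50);  // retry in 1/2 sec at HZ=100
 */
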
/*
 * Timeout function for dasd devices. This is used for different purposes
 *  1) missing interrupt handler for normal operation
 *  2) delayed start of request where start_IO failed with -EBUSY
 *  3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(struct timer_list *t)
{
	unsigned long flags;
	struct dasd_device *device;

	device = from_timer(device, t, timer);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_device_set_timer);

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}
EXPORT_SYMBOL(dasd_device_clear_timer);

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue,
					     true);
	}
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);

static int dasd_check_hpf_error(struct irb *irb)
{
	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
	    (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
	     irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
}

static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
{
	struct dasd_device *device = NULL;
	u8 *sense = NULL;

	if (!block)
		return 0;
	device = block->base;
	if (!device || !device->discipline->is_ese)
		return 0;
	if (!device->discipline->is_ese(device))
		return 0;

	sense = dasd_get_sense(irb);
	if (!sense)
		return 0;

	return !!(sense[1] & SNS1_NO_REC_FOUND) ||
		!!(sense[1] & SNS1_FILE_PROTECTED) ||
		scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
}

static int dasd_ese_oos_cond(u8 *sense)
{
	return sense[0] & SNS0_EQUIPMENT_CHECK &&
		sense[1] & SNS1_PERM_ERR &&
		sense[1] & SNS1_WRITE_INHIBITED &&
		sense[25] == 0x01;
}

   1638/*
   1639 * Interrupt handler for "normal" ssch-io based dasd devices.
   1640 */
   1641void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
   1642		      struct irb *irb)
   1643{
   1644	struct dasd_ccw_req *cqr, *next, *fcqr;
   1645	struct dasd_device *device;
   1646	unsigned long now;
   1647	int nrf_suppressed = 0;
   1648	int fp_suppressed = 0;
   1649	struct request *req;
   1650	u8 *sense = NULL;
   1651	int expires;
   1652
   1653	cqr = (struct dasd_ccw_req *) intparm;
   1654	if (IS_ERR(irb)) {
   1655		switch (PTR_ERR(irb)) {
   1656		case -EIO:
   1657			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
   1658				device = cqr->startdev;
   1659				cqr->status = DASD_CQR_CLEARED;
   1660				dasd_device_clear_timer(device);
   1661				wake_up(&dasd_flush_wq);
   1662				dasd_schedule_device_bh(device);
   1663				return;
   1664			}
   1665			break;
   1666		case -ETIMEDOUT:
   1667			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
   1668					"request timed out\n", __func__);
   1669			break;
   1670		default:
   1671			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
   1672					"unknown error %ld\n", __func__,
   1673					PTR_ERR(irb));
   1674		}
   1675		dasd_handle_killed_request(cdev, intparm);
   1676		return;
   1677	}
   1678
   1679	now = get_tod_clock();
   1680	/* check for conditions that should be handled immediately */
   1681	if (!cqr ||
   1682	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
   1683	      scsw_cstat(&irb->scsw) == 0)) {
   1684		if (cqr)
   1685			memcpy(&cqr->irb, irb, sizeof(*irb));
   1686		device = dasd_device_from_cdev_locked(cdev);
   1687		if (IS_ERR(device))
   1688			return;
   1689		/* ignore unsolicited interrupts for DIAG discipline */
   1690		if (device->discipline == dasd_diag_discipline_pointer) {
   1691			dasd_put_device(device);
   1692			return;
   1693		}
   1694
   1695		/*
   1696		 * In some cases 'File Protected' or 'No Record Found' errors
    1697		 * might be expected; debug log messages for the
    1698		 * corresponding interrupts should then be suppressed.
    1699		 * Check if either of the respective suppress bits is set.
   1700		 */
   1701		sense = dasd_get_sense(irb);
   1702		if (sense) {
   1703			fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
   1704				test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
   1705			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
   1706				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
   1707
   1708			/*
   1709			 * Extent pool probably out-of-space.
   1710			 * Stop device and check exhaust level.
   1711			 */
   1712			if (dasd_ese_oos_cond(sense)) {
   1713				dasd_generic_space_exhaust(device, cqr);
   1714				device->discipline->ext_pool_exhaust(device, cqr);
   1715				dasd_put_device(device);
   1716				return;
   1717			}
   1718		}
   1719		if (!(fp_suppressed || nrf_suppressed))
   1720			device->discipline->dump_sense_dbf(device, irb, "int");
   1721
   1722		if (device->features & DASD_FEATURE_ERPLOG)
   1723			device->discipline->dump_sense(device, cqr, irb);
   1724		device->discipline->check_for_device_change(device, cqr, irb);
   1725		dasd_put_device(device);
   1726	}
   1727
    1728	/* check for attention message */
   1729	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
   1730		device = dasd_device_from_cdev_locked(cdev);
   1731		if (!IS_ERR(device)) {
   1732			device->discipline->check_attention(device,
   1733							    irb->esw.esw1.lpum);
   1734			dasd_put_device(device);
   1735		}
   1736	}
   1737
   1738	if (!cqr)
   1739		return;
   1740
   1741	device = (struct dasd_device *) cqr->startdev;
   1742	if (!device ||
   1743	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
   1744		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
   1745				"invalid device in request");
   1746		return;
   1747	}
   1748
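        		/*
        		 * A request on a thin-provisioned (ESE) volume may hit a
        		 * track that is not formatted yet. Reads are completed by
        		 * the discipline's ese_read(); for writes a format request
        		 * is queued ahead of the original request, which is retried
        		 * once formatting is done.
        		 */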
   1749	if (dasd_ese_needs_format(cqr->block, irb)) {
   1750		req = dasd_get_callback_data(cqr);
   1751		if (!req) {
   1752			cqr->status = DASD_CQR_ERROR;
   1753			return;
   1754		}
   1755		if (rq_data_dir(req) == READ) {
   1756			device->discipline->ese_read(cqr, irb);
   1757			cqr->status = DASD_CQR_SUCCESS;
   1758			cqr->stopclk = now;
   1759			dasd_device_clear_timer(device);
   1760			dasd_schedule_device_bh(device);
   1761			return;
   1762		}
   1763		fcqr = device->discipline->ese_format(device, cqr, irb);
   1764		if (IS_ERR(fcqr)) {
   1765			if (PTR_ERR(fcqr) == -EINVAL) {
   1766				cqr->status = DASD_CQR_ERROR;
   1767				return;
   1768			}
   1769			/*
   1770			 * If we can't format now, let the request go
   1771			 * one extra round. Maybe we can format later.
   1772			 */
   1773			cqr->status = DASD_CQR_QUEUED;
   1774			dasd_schedule_device_bh(device);
   1775			return;
   1776		} else {
   1777			fcqr->status = DASD_CQR_QUEUED;
   1778			cqr->status = DASD_CQR_QUEUED;
   1779			list_add(&fcqr->devlist, &device->ccw_queue);
   1780			dasd_schedule_device_bh(device);
   1781			return;
   1782		}
   1783	}
   1784
   1785	/* Check for clear pending */
   1786	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
   1787	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
   1788		cqr->status = DASD_CQR_CLEARED;
   1789		dasd_device_clear_timer(device);
   1790		wake_up(&dasd_flush_wq);
   1791		dasd_schedule_device_bh(device);
   1792		return;
   1793	}
   1794
    1795	/* check status - the request might have been killed by dynamic detach */
   1796	if (cqr->status != DASD_CQR_IN_IO) {
   1797		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
   1798			      "status %02x", dev_name(&cdev->dev), cqr->status);
   1799		return;
   1800	}
   1801
   1802	next = NULL;
   1803	expires = 0;
   1804	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
   1805	    scsw_cstat(&irb->scsw) == 0) {
   1806		/* request was completed successfully */
   1807		cqr->status = DASD_CQR_SUCCESS;
   1808		cqr->stopclk = now;
   1809		/* Start first request on queue if possible -> fast_io. */
   1810		if (cqr->devlist.next != &device->ccw_queue) {
   1811			next = list_entry(cqr->devlist.next,
   1812					  struct dasd_ccw_req, devlist);
   1813		}
   1814	} else {  /* error */
    1815		/* Check for an HPF error; if one occurred, call the
    1816		 * discipline function to requeue all requests and disable
    1817		 * HPF accordingly.
   1818		 */
   1819		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
   1820		    device->discipline->handle_hpf_error)
   1821			device->discipline->handle_hpf_error(device, irb);
   1822		/*
   1823		 * If we don't want complex ERP for this request, then just
   1824		 * reset this and retry it in the fastpath
   1825		 */
   1826		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
   1827		    cqr->retries > 0) {
   1828			if (cqr->lpm == dasd_path_get_opm(device))
   1829				DBF_DEV_EVENT(DBF_DEBUG, device,
   1830					      "default ERP in fastpath "
   1831					      "(%i retries left)",
   1832					      cqr->retries);
   1833			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
   1834				cqr->lpm = dasd_path_get_opm(device);
   1835			cqr->status = DASD_CQR_QUEUED;
   1836			next = cqr;
   1837		} else
   1838			cqr->status = DASD_CQR_ERROR;
   1839	}
   1840	if (next && (next->status == DASD_CQR_QUEUED) &&
   1841	    (!device->stopped)) {
   1842		if (device->discipline->start_IO(next) == 0)
   1843			expires = next->expires;
   1844	}
   1845	if (expires != 0)
   1846		dasd_device_set_timer(device, expires);
   1847	else
   1848		dasd_device_clear_timer(device);
   1849	dasd_schedule_device_bh(device);
   1850}
   1851EXPORT_SYMBOL(dasd_int_handler);
   1852
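        	/*
        	 * Generic unit check handler for DASD ccw devices: unless the
        	 * device is going offline, forward the unit check to the
        	 * discipline's device change check, and always ask the CIO
        	 * layer to retry the request.
        	 */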
   1853enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
   1854{
   1855	struct dasd_device *device;
   1856
   1857	device = dasd_device_from_cdev_locked(cdev);
   1858
   1859	if (IS_ERR(device))
   1860		goto out;
   1861	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
   1862	   device->state != device->target ||
   1863	   !device->discipline->check_for_device_change){
   1864		dasd_put_device(device);
   1865		goto out;
   1866	}
   1867	if (device->discipline->dump_sense_dbf)
   1868		device->discipline->dump_sense_dbf(device, irb, "uc");
   1869	device->discipline->check_for_device_change(device, NULL, irb);
   1870	dasd_put_device(device);
   1871out:
   1872	return UC_TODO_RETRY;
   1873}
   1874EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
   1875
   1876/*
   1877 * If we have an error on a dasd_block layer request then we cancel
   1878 * and return all further requests from the same dasd_block as well.
   1879 */
   1880static void __dasd_device_recovery(struct dasd_device *device,
   1881				   struct dasd_ccw_req *ref_cqr)
   1882{
   1883	struct list_head *l, *n;
   1884	struct dasd_ccw_req *cqr;
   1885
   1886	/*
    1887	 * only requeue requests that came from the dasd_block layer
   1888	 */
   1889	if (!ref_cqr->block)
   1890		return;
   1891
   1892	list_for_each_safe(l, n, &device->ccw_queue) {
   1893		cqr = list_entry(l, struct dasd_ccw_req, devlist);
   1894		if (cqr->status == DASD_CQR_QUEUED &&
   1895		    ref_cqr->block == cqr->block) {
   1896			cqr->status = DASD_CQR_CLEARED;
   1897		}
   1898	}
    1899}
   1900
   1901/*
   1902 * Remove those ccw requests from the queue that need to be returned
   1903 * to the upper layer.
   1904 */
   1905static void __dasd_device_process_ccw_queue(struct dasd_device *device,
   1906					    struct list_head *final_queue)
   1907{
   1908	struct list_head *l, *n;
   1909	struct dasd_ccw_req *cqr;
   1910
   1911	/* Process request with final status. */
   1912	list_for_each_safe(l, n, &device->ccw_queue) {
   1913		cqr = list_entry(l, struct dasd_ccw_req, devlist);
   1914
   1915		/* Skip any non-final request. */
   1916		if (cqr->status == DASD_CQR_QUEUED ||
   1917		    cqr->status == DASD_CQR_IN_IO ||
   1918		    cqr->status == DASD_CQR_CLEAR_PENDING)
   1919			continue;
   1920		if (cqr->status == DASD_CQR_ERROR) {
   1921			__dasd_device_recovery(device, cqr);
   1922		}
   1923		/* Rechain finished requests to final queue */
   1924		list_move_tail(&cqr->devlist, final_queue);
   1925	}
   1926}
   1927
   1928static void __dasd_process_cqr(struct dasd_device *device,
   1929			       struct dasd_ccw_req *cqr)
   1930{
   1931	char errorstring[ERRORLENGTH];
   1932
   1933	switch (cqr->status) {
   1934	case DASD_CQR_SUCCESS:
   1935		cqr->status = DASD_CQR_DONE;
   1936		break;
   1937	case DASD_CQR_ERROR:
   1938		cqr->status = DASD_CQR_NEED_ERP;
   1939		break;
   1940	case DASD_CQR_CLEARED:
   1941		cqr->status = DASD_CQR_TERMINATED;
   1942		break;
   1943	default:
    1944		/* internal error 12 - wrong cqr status */
    1945		snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
   1946		dev_err(&device->cdev->dev,
   1947			"An error occurred in the DASD device driver, "
   1948			"reason=%s\n", errorstring);
   1949		BUG();
   1950	}
   1951	if (cqr->callback)
   1952		cqr->callback(cqr, cqr->callback_data);
   1953}
   1954
   1955/*
    1956 * The cqrs from the final queue are returned to the upper layer
   1957 * by setting a dasd_block state and calling the callback function
   1958 */
   1959static void __dasd_device_process_final_queue(struct dasd_device *device,
   1960					      struct list_head *final_queue)
   1961{
   1962	struct list_head *l, *n;
   1963	struct dasd_ccw_req *cqr;
   1964	struct dasd_block *block;
   1965
   1966	list_for_each_safe(l, n, final_queue) {
   1967		cqr = list_entry(l, struct dasd_ccw_req, devlist);
   1968		list_del_init(&cqr->devlist);
   1969		block = cqr->block;
   1970		if (!block) {
   1971			__dasd_process_cqr(device, cqr);
   1972		} else {
   1973			spin_lock_bh(&block->queue_lock);
   1974			__dasd_process_cqr(device, cqr);
   1975			spin_unlock_bh(&block->queue_lock);
   1976		}
   1977	}
   1978}
   1979
   1980/*
   1981 * Take a look at the first request on the ccw queue and check
   1982 * if it reached its expire time. If so, terminate the IO.
   1983 */
   1984static void __dasd_device_check_expire(struct dasd_device *device)
   1985{
   1986	struct dasd_ccw_req *cqr;
   1987
   1988	if (list_empty(&device->ccw_queue))
   1989		return;
   1990	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
   1991	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
   1992	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
   1993		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
   1994			/*
   1995			 * IO in safe offline processing should not
   1996			 * run out of retries
   1997			 */
   1998			cqr->retries++;
   1999		}
   2000		if (device->discipline->term_IO(cqr) != 0) {
   2001			/* Hmpf, try again in 5 sec */
   2002			dev_err(&device->cdev->dev,
   2003				"cqr %p timed out (%lus) but cannot be "
   2004				"ended, retrying in 5 s\n",
   2005				cqr, (cqr->expires/HZ));
   2006			cqr->expires += 5*HZ;
   2007			dasd_device_set_timer(device, 5*HZ);
   2008		} else {
   2009			dev_err(&device->cdev->dev,
   2010				"cqr %p timed out (%lus), %i retries "
   2011				"remaining\n", cqr, (cqr->expires/HZ),
   2012				cqr->retries);
   2013		}
   2014	}
   2015}
   2016
   2017/*
    2018 * Return 1 if the device is not eligible for I/O, i.e. the cqr cannot be started.
   2019 */
   2020static int __dasd_device_is_unusable(struct dasd_device *device,
   2021				     struct dasd_ccw_req *cqr)
   2022{
   2023	int mask = ~(DASD_STOPPED_DC_WAIT | DASD_STOPPED_NOSPC);
   2024
   2025	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
   2026	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
   2027		/*
   2028		 * dasd is being set offline
   2029		 * but it is no safe offline where we have to allow I/O
   2030		 */
   2031		return 1;
   2032	}
   2033	if (device->stopped) {
   2034		if (device->stopped & mask) {
    2035			/* the device is stopped and this CQR will not change that. */
   2036			return 1;
   2037		}
   2038		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
   2039			/* CQR is not able to change device to
   2040			 * operational. */
   2041			return 1;
   2042		}
   2043		/* CQR required to get device operational. */
   2044	}
   2045	return 0;
   2046}
   2047
   2048/*
   2049 * Take a look at the first request on the ccw queue and check
   2050 * if it needs to be started.
   2051 */
   2052static void __dasd_device_start_head(struct dasd_device *device)
   2053{
   2054	struct dasd_ccw_req *cqr;
   2055	int rc;
   2056
   2057	if (list_empty(&device->ccw_queue))
   2058		return;
   2059	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
   2060	if (cqr->status != DASD_CQR_QUEUED)
   2061		return;
   2062	/* if device is not usable return request to upper layer */
   2063	if (__dasd_device_is_unusable(device, cqr)) {
   2064		cqr->intrc = -EAGAIN;
   2065		cqr->status = DASD_CQR_CLEARED;
   2066		dasd_schedule_device_bh(device);
   2067		return;
   2068	}
   2069
   2070	rc = device->discipline->start_IO(cqr);
   2071	if (rc == 0)
   2072		dasd_device_set_timer(device, cqr->expires);
   2073	else if (rc == -EACCES) {
   2074		dasd_schedule_device_bh(device);
   2075	} else
   2076		/* Hmpf, try again in 1/2 sec */
   2077		dasd_device_set_timer(device, 50);
   2078}
   2079
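        	/*
        	 * Check for pending path events: hand the paths flagged for
        	 * verification (tbvpm) or FC endpoint security evaluation
        	 * (fcsecpm) to the discipline's pe_handler. If that fails,
        	 * restore the flags and retry via the device timer.
        	 */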
   2080static void __dasd_device_check_path_events(struct dasd_device *device)
   2081{
   2082	__u8 tbvpm, fcsecpm;
   2083	int rc;
   2084
   2085	tbvpm = dasd_path_get_tbvpm(device);
   2086	fcsecpm = dasd_path_get_fcsecpm(device);
   2087
   2088	if (!tbvpm && !fcsecpm)
   2089		return;
   2090
   2091	if (device->stopped & ~(DASD_STOPPED_DC_WAIT))
   2092		return;
   2093
   2094	dasd_path_clear_all_verify(device);
   2095	dasd_path_clear_all_fcsec(device);
   2096
   2097	rc = device->discipline->pe_handler(device, tbvpm, fcsecpm);
   2098	if (rc) {
   2099		dasd_path_add_tbvpm(device, tbvpm);
   2100		dasd_path_add_fcsecpm(device, fcsecpm);
   2101		dasd_device_set_timer(device, 50);
   2102	}
    2103}
   2104
   2105/*
    2106 * Go through all requests on the dasd_device request queue,
   2107 * terminate them on the cdev if necessary, and return them to the
   2108 * submitting layer via callback.
   2109 * Note:
   2110 * Make sure that all 'submitting layers' still exist when
    2111 * this function is called! In other words, when 'device' is a base
    2112 * device, all block layer requests must have been removed beforehand
    2113 * via dasd_flush_block_queue.
   2114 */
   2115int dasd_flush_device_queue(struct dasd_device *device)
   2116{
   2117	struct dasd_ccw_req *cqr, *n;
   2118	int rc;
   2119	struct list_head flush_queue;
   2120
   2121	INIT_LIST_HEAD(&flush_queue);
   2122	spin_lock_irq(get_ccwdev_lock(device->cdev));
   2123	rc = 0;
   2124	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
   2125		/* Check status and move request to flush_queue */
   2126		switch (cqr->status) {
   2127		case DASD_CQR_IN_IO:
   2128			rc = device->discipline->term_IO(cqr);
   2129			if (rc) {
    2130				/* unable to terminate request */
   2131				dev_err(&device->cdev->dev,
   2132					"Flushing the DASD request queue "
   2133					"failed for request %p\n", cqr);
   2134				/* stop flush processing */
   2135				goto finished;
   2136			}
   2137			break;
   2138		case DASD_CQR_QUEUED:
   2139			cqr->stopclk = get_tod_clock();
   2140			cqr->status = DASD_CQR_CLEARED;
   2141			break;
   2142		default: /* no need to modify the others */
   2143			break;
   2144		}
   2145		list_move_tail(&cqr->devlist, &flush_queue);
   2146	}
   2147finished:
   2148	spin_unlock_irq(get_ccwdev_lock(device->cdev));
   2149	/*
   2150	 * After this point all requests must be in state CLEAR_PENDING,
   2151	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
   2152	 * one of the others.
   2153	 */
   2154	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
   2155		wait_event(dasd_flush_wq,
   2156			   (cqr->status != DASD_CQR_CLEAR_PENDING));
   2157	/*
   2158	 * Now set each request back to TERMINATED, DONE or NEED_ERP
   2159	 * and call the callback function of flushed requests
   2160	 */
   2161	__dasd_device_process_final_queue(device, &flush_queue);
   2162	return rc;
   2163}
   2164EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
   2165
   2166/*
   2167 * Acquire the device lock and process queues for the device.
   2168 */
   2169static void dasd_device_tasklet(unsigned long data)
   2170{
   2171	struct dasd_device *device = (struct dasd_device *) data;
   2172	struct list_head final_queue;
   2173
   2174	atomic_set (&device->tasklet_scheduled, 0);
   2175	INIT_LIST_HEAD(&final_queue);
   2176	spin_lock_irq(get_ccwdev_lock(device->cdev));
   2177	/* Check expire time of first request on the ccw queue. */
   2178	__dasd_device_check_expire(device);
   2179	/* find final requests on ccw queue */
   2180	__dasd_device_process_ccw_queue(device, &final_queue);
   2181	__dasd_device_check_path_events(device);
   2182	spin_unlock_irq(get_ccwdev_lock(device->cdev));
   2183	/* Now call the callback function of requests with final status */
   2184	__dasd_device_process_final_queue(device, &final_queue);
   2185	spin_lock_irq(get_ccwdev_lock(device->cdev));
   2186	/* Now check if the head of the ccw queue needs to be started. */
   2187	__dasd_device_start_head(device);
   2188	spin_unlock_irq(get_ccwdev_lock(device->cdev));
   2189	if (waitqueue_active(&shutdown_waitq))
   2190		wake_up(&shutdown_waitq);
   2191	dasd_put_device(device);
   2192}
   2193
   2194/*
    2195 * Schedules a run of dasd_device_tasklet over the device tasklet.
   2196 */
   2197void dasd_schedule_device_bh(struct dasd_device *device)
   2198{
   2199	/* Protect against rescheduling. */
   2200	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
   2201		return;
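        		/*
        		 * The reference taken here is dropped by
        		 * dasd_device_tasklet() after it has run, keeping the
        		 * device alive while a tasklet run is pending.
        		 */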
   2202	dasd_get_device(device);
   2203	tasklet_hi_schedule(&device->tasklet);
   2204}
   2205EXPORT_SYMBOL(dasd_schedule_device_bh);
   2206
   2207void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
   2208{
   2209	device->stopped |= bits;
   2210}
   2211EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
   2212
   2213void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
   2214{
   2215	device->stopped &= ~bits;
   2216	if (!device->stopped)
   2217		wake_up(&generic_waitq);
   2218}
   2219EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
   2220
   2221/*
   2222 * Queue a request to the head of the device ccw_queue.
   2223 * Start the I/O if possible.
   2224 */
   2225void dasd_add_request_head(struct dasd_ccw_req *cqr)
   2226{
   2227	struct dasd_device *device;
   2228	unsigned long flags;
   2229
   2230	device = cqr->startdev;
   2231	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
   2232	cqr->status = DASD_CQR_QUEUED;
   2233	list_add(&cqr->devlist, &device->ccw_queue);
   2234	/* let the bh start the request to keep them in order */
   2235	dasd_schedule_device_bh(device);
   2236	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
   2237}
   2238EXPORT_SYMBOL(dasd_add_request_head);
   2239
   2240/*
   2241 * Queue a request to the tail of the device ccw_queue.
   2242 * Start the I/O if possible.
   2243 */
   2244void dasd_add_request_tail(struct dasd_ccw_req *cqr)
   2245{
   2246	struct dasd_device *device;
   2247	unsigned long flags;
   2248
   2249	device = cqr->startdev;
   2250	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
   2251	cqr->status = DASD_CQR_QUEUED;
   2252	list_add_tail(&cqr->devlist, &device->ccw_queue);
   2253	/* let the bh start the request to keep them in order */
   2254	dasd_schedule_device_bh(device);
   2255	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
   2256}
   2257EXPORT_SYMBOL(dasd_add_request_tail);
   2258
   2259/*
   2260 * Wakeup helper for the 'sleep_on' functions.
   2261 */
   2262void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
   2263{
   2264	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
   2265	cqr->callback_data = DASD_SLEEPON_END_TAG;
   2266	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
   2267	wake_up(&generic_waitq);
   2268}
   2269EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
   2270
   2271static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
   2272{
   2273	struct dasd_device *device;
   2274	int rc;
   2275
   2276	device = cqr->startdev;
   2277	spin_lock_irq(get_ccwdev_lock(device->cdev));
   2278	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
   2279	spin_unlock_irq(get_ccwdev_lock(device->cdev));
   2280	return rc;
   2281}
   2282
   2283/*
    2284 * Check if error recovery is necessary; return 1 if yes, 0 otherwise.
   2285 */
   2286static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
   2287{
   2288	struct dasd_device *device;
   2289	dasd_erp_fn_t erp_fn;
   2290
   2291	if (cqr->status == DASD_CQR_FILLED)
   2292		return 0;
   2293	device = cqr->startdev;
   2294	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
   2295		if (cqr->status == DASD_CQR_TERMINATED) {
   2296			device->discipline->handle_terminated_request(cqr);
   2297			return 1;
   2298		}
   2299		if (cqr->status == DASD_CQR_NEED_ERP) {
   2300			erp_fn = device->discipline->erp_action(cqr);
   2301			erp_fn(cqr);
   2302			return 1;
   2303		}
   2304		if (cqr->status == DASD_CQR_FAILED)
   2305			dasd_log_sense(cqr, &cqr->irb);
   2306		if (cqr->refers) {
   2307			__dasd_process_erp(device, cqr);
   2308			return 1;
   2309		}
   2310	}
   2311	return 0;
   2312}
   2313
   2314static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
   2315{
   2316	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
   2317		if (cqr->refers) /* erp is not done yet */
   2318			return 1;
   2319		return ((cqr->status != DASD_CQR_DONE) &&
   2320			(cqr->status != DASD_CQR_FAILED));
   2321	} else
   2322		return (cqr->status == DASD_CQR_FILLED);
   2323}
   2324
   2325static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
   2326{
   2327	struct dasd_device *device;
   2328	int rc;
   2329	struct list_head ccw_queue;
   2330	struct dasd_ccw_req *cqr;
   2331
   2332	INIT_LIST_HEAD(&ccw_queue);
   2333	maincqr->status = DASD_CQR_FILLED;
   2334	device = maincqr->startdev;
   2335	list_add(&maincqr->blocklist, &ccw_queue);
   2336	for (cqr = maincqr;  __dasd_sleep_on_loop_condition(cqr);
   2337	     cqr = list_first_entry(&ccw_queue,
   2338				    struct dasd_ccw_req, blocklist)) {
   2339
   2340		if (__dasd_sleep_on_erp(cqr))
   2341			continue;
   2342		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
   2343			continue;
   2344		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
   2345		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
   2346			cqr->status = DASD_CQR_FAILED;
   2347			cqr->intrc = -EPERM;
   2348			continue;
   2349		}
   2350		/* Non-temporary stop condition will trigger fail fast */
   2351		if (device->stopped & ~DASD_STOPPED_PENDING &&
   2352		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
   2353		    (!dasd_eer_enabled(device))) {
   2354			cqr->status = DASD_CQR_FAILED;
   2355			cqr->intrc = -ENOLINK;
   2356			continue;
   2357		}
   2358		/*
   2359		 * Don't try to start requests if device is in
   2360		 * offline processing, it might wait forever
   2361		 */
   2362		if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
   2363			cqr->status = DASD_CQR_FAILED;
   2364			cqr->intrc = -ENODEV;
   2365			continue;
   2366		}
   2367		/*
   2368		 * Don't try to start requests if device is stopped
   2369		 * except path verification requests
   2370		 */
   2371		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
   2372			if (interruptible) {
   2373				rc = wait_event_interruptible(
   2374					generic_waitq, !(device->stopped));
   2375				if (rc == -ERESTARTSYS) {
   2376					cqr->status = DASD_CQR_FAILED;
   2377					maincqr->intrc = rc;
   2378					continue;
   2379				}
   2380			} else
   2381				wait_event(generic_waitq, !(device->stopped));
   2382		}
   2383		if (!cqr->callback)
   2384			cqr->callback = dasd_wakeup_cb;
   2385
   2386		cqr->callback_data = DASD_SLEEPON_START_TAG;
   2387		dasd_add_request_tail(cqr);
   2388		if (interruptible) {
   2389			rc = wait_event_interruptible(
   2390				generic_waitq, _wait_for_wakeup(cqr));
   2391			if (rc == -ERESTARTSYS) {
   2392				dasd_cancel_req(cqr);
   2393				/* wait (non-interruptible) for final status */
   2394				wait_event(generic_waitq,
   2395					   _wait_for_wakeup(cqr));
   2396				cqr->status = DASD_CQR_FAILED;
   2397				maincqr->intrc = rc;
   2398				continue;
   2399			}
   2400		} else
   2401			wait_event(generic_waitq, _wait_for_wakeup(cqr));
   2402	}
   2403
   2404	maincqr->endclk = get_tod_clock();
   2405	if ((maincqr->status != DASD_CQR_DONE) &&
   2406	    (maincqr->intrc != -ERESTARTSYS))
   2407		dasd_log_sense(maincqr, &maincqr->irb);
   2408	if (maincqr->status == DASD_CQR_DONE)
   2409		rc = 0;
   2410	else if (maincqr->intrc)
   2411		rc = maincqr->intrc;
   2412	else
   2413		rc = -EIO;
   2414	return rc;
   2415}
   2416
   2417static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
   2418{
   2419	struct dasd_ccw_req *cqr;
   2420
   2421	list_for_each_entry(cqr, ccw_queue, blocklist) {
   2422		if (cqr->callback_data != DASD_SLEEPON_END_TAG)
   2423			return 0;
   2424	}
   2425
   2426	return 1;
   2427}
   2428
   2429static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
   2430{
   2431	struct dasd_device *device;
   2432	struct dasd_ccw_req *cqr, *n;
   2433	u8 *sense = NULL;
   2434	int rc;
   2435
   2436retry:
   2437	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
   2438		device = cqr->startdev;
    2439		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
   2440			continue;
   2441
   2442		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
   2443		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
   2444			cqr->status = DASD_CQR_FAILED;
   2445			cqr->intrc = -EPERM;
   2446			continue;
   2447		}
    2448		/* Non-temporary stop condition will trigger fail fast */
   2449		if (device->stopped & ~DASD_STOPPED_PENDING &&
   2450		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
   2451		    !dasd_eer_enabled(device)) {
   2452			cqr->status = DASD_CQR_FAILED;
   2453			cqr->intrc = -EAGAIN;
   2454			continue;
   2455		}
   2456
    2457		/* Don't try to start requests if device is stopped */
   2458		if (interruptible) {
   2459			rc = wait_event_interruptible(
   2460				generic_waitq, !device->stopped);
   2461			if (rc == -ERESTARTSYS) {
   2462				cqr->status = DASD_CQR_FAILED;
   2463				cqr->intrc = rc;
   2464				continue;
   2465			}
   2466		} else
   2467			wait_event(generic_waitq, !(device->stopped));
   2468
   2469		if (!cqr->callback)
   2470			cqr->callback = dasd_wakeup_cb;
   2471		cqr->callback_data = DASD_SLEEPON_START_TAG;
   2472		dasd_add_request_tail(cqr);
   2473	}
   2474
   2475	wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));
   2476
   2477	rc = 0;
   2478	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
   2479		/*
    2480		 * In some cases the 'File Protected' or 'Incorrect Length'
    2481		 * error might be expected and error recovery would then be
    2482		 * unnecessary. Check if the corresponding suppress bit is
    2483		 * set.
   2484		 */
   2485		sense = dasd_get_sense(&cqr->irb);
   2486		if (sense && sense[1] & SNS1_FILE_PROTECTED &&
   2487		    test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
   2488			continue;
   2489		if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
   2490		    test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
   2491			continue;
   2492
   2493		/*
    2494		 * For alias devices, simplify error recovery and
    2495		 * return to the upper layer instead;
    2496		 * do not skip ERP requests.
   2497		 */
   2498		if (cqr->startdev != cqr->basedev && !cqr->refers &&
   2499		    (cqr->status == DASD_CQR_TERMINATED ||
   2500		     cqr->status == DASD_CQR_NEED_ERP))
   2501			return -EAGAIN;
   2502
   2503		/* normal recovery for basedev IO */
   2504		if (__dasd_sleep_on_erp(cqr))
   2505			/* handle erp first */
   2506			goto retry;
   2507	}
   2508
   2509	return 0;
   2510}
   2511
   2512/*
   2513 * Queue a request to the tail of the device ccw_queue and wait for
    2514 * its completion.
   2515 */
   2516int dasd_sleep_on(struct dasd_ccw_req *cqr)
   2517{
   2518	return _dasd_sleep_on(cqr, 0);
   2519}
   2520EXPORT_SYMBOL(dasd_sleep_on);
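        	/*
        	 * Illustrative sketch of a synchronous caller (hypothetical,
        	 * not code from this file):
        	 *
        	 *	cqr = ...;			// discipline-built channel program
        	 *	rc = dasd_sleep_on(cqr);	// queue, wait and drive ERP
        	 *	if (rc)
        	 *		...;			// failed, sense data is in cqr->irb
        	 *
        	 * dasd_sleep_on() returns 0 once the request reaches
        	 * DASD_CQR_DONE, cqr->intrc if that is set, or -EIO otherwise.
        	 */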
   2521
   2522/*
   2523 * Start requests from a ccw_queue and wait for their completion.
   2524 */
   2525int dasd_sleep_on_queue(struct list_head *ccw_queue)
   2526{
   2527	return _dasd_sleep_on_queue(ccw_queue, 0);
   2528}
   2529EXPORT_SYMBOL(dasd_sleep_on_queue);
   2530
   2531/*
   2532 * Start requests from a ccw_queue and wait interruptible for their completion.
   2533 */
   2534int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue)
   2535{
   2536	return _dasd_sleep_on_queue(ccw_queue, 1);
   2537}
   2538EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible);
   2539
   2540/*
   2541 * Queue a request to the tail of the device ccw_queue and wait
    2542 * interruptibly for its completion.
   2543 */
   2544int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
   2545{
   2546	return _dasd_sleep_on(cqr, 1);
   2547}
   2548EXPORT_SYMBOL(dasd_sleep_on_interruptible);
   2549
   2550/*
    2551 * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
    2552 * for eckd devices) the currently running request has to be terminated
    2553 * and put back to status 'queued' before the special request is added
   2554 * to the head of the queue. Then the special request is waited on normally.
   2555 */
   2556static inline int _dasd_term_running_cqr(struct dasd_device *device)
   2557{
   2558	struct dasd_ccw_req *cqr;
   2559	int rc;
   2560
   2561	if (list_empty(&device->ccw_queue))
   2562		return 0;
   2563	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
   2564	rc = device->discipline->term_IO(cqr);
   2565	if (!rc)
   2566		/*
   2567		 * CQR terminated because a more important request is pending.
   2568		 * Undo decreasing of retry counter because this is
   2569		 * not an error case.
   2570		 */
   2571		cqr->retries++;
   2572	return rc;
   2573}
   2574
   2575int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
   2576{
   2577	struct dasd_device *device;
   2578	int rc;
   2579
   2580	device = cqr->startdev;
   2581	if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
   2582	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
   2583		cqr->status = DASD_CQR_FAILED;
   2584		cqr->intrc = -EPERM;
   2585		return -EIO;
   2586	}
   2587	spin_lock_irq(get_ccwdev_lock(device->cdev));
   2588	rc = _dasd_term_running_cqr(device);
   2589	if (rc) {
   2590		spin_unlock_irq(get_ccwdev_lock(device->cdev));
   2591		return rc;
   2592	}
   2593	cqr->callback = dasd_wakeup_cb;
   2594	cqr->callback_data = DASD_SLEEPON_START_TAG;
   2595	cqr->status = DASD_CQR_QUEUED;
   2596	/*
    2597	 * add the new request as second on the queue -
    2598	 * the terminated cqr needs to be finished first
   2599	 */
   2600	list_add(&cqr->devlist, device->ccw_queue.next);
   2601
   2602	/* let the bh start the request to keep them in order */
   2603	dasd_schedule_device_bh(device);
   2604
   2605	spin_unlock_irq(get_ccwdev_lock(device->cdev));
   2606
   2607	wait_event(generic_waitq, _wait_for_wakeup(cqr));
   2608
   2609	if (cqr->status == DASD_CQR_DONE)
   2610		rc = 0;
   2611	else if (cqr->intrc)
   2612		rc = cqr->intrc;
   2613	else
   2614		rc = -EIO;
   2615
   2616	/* kick tasklets */
   2617	dasd_schedule_device_bh(device);
   2618	if (device->block)
   2619		dasd_schedule_block_bh(device->block);
   2620
   2621	return rc;
   2622}
   2623EXPORT_SYMBOL(dasd_sleep_on_immediatly);
   2624
   2625/*
    2626 * Cancels a request that was started with one of the dasd_sleep_on
    2627 * functions. This is useful for timing out requests. The request
    2628 * will be terminated if it is currently in I/O.
   2629 * Returns 0 if request termination was successful
   2630 *	   negative error code if termination failed
   2631 * Cancellation of a request is an asynchronous operation! The calling
   2632 * function has to wait until the request is properly returned via callback.
   2633 */
   2634static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
   2635{
   2636	struct dasd_device *device = cqr->startdev;
   2637	int rc = 0;
   2638
   2639	switch (cqr->status) {
   2640	case DASD_CQR_QUEUED:
   2641		/* request was not started - just set to cleared */
   2642		cqr->status = DASD_CQR_CLEARED;
   2643		break;
   2644	case DASD_CQR_IN_IO:
   2645		/* request in IO - terminate IO and release again */
   2646		rc = device->discipline->term_IO(cqr);
   2647		if (rc) {
   2648			dev_err(&device->cdev->dev,
   2649				"Cancelling request %p failed with rc=%d\n",
   2650				cqr, rc);
   2651		} else {
   2652			cqr->stopclk = get_tod_clock();
   2653		}
   2654		break;
   2655	default: /* already finished or clear pending - do nothing */
   2656		break;
   2657	}
   2658	dasd_schedule_device_bh(device);
   2659	return rc;
   2660}
   2661
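        	/* Locked wrapper around __dasd_cancel_req(), see above. */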
   2662int dasd_cancel_req(struct dasd_ccw_req *cqr)
   2663{
   2664	struct dasd_device *device = cqr->startdev;
   2665	unsigned long flags;
   2666	int rc;
   2667
   2668	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
   2669	rc = __dasd_cancel_req(cqr);
   2670	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
   2671	return rc;
   2672}
   2673
   2674/*
   2675 * SECTION: Operations of the dasd_block layer.
   2676 */
   2677
   2678/*
   2679 * Timeout function for dasd_block. This is used when the block layer
    2680 * is waiting for something that may not come reliably (e.g. a state
    2681 * change interrupt).
   2682 */
   2683static void dasd_block_timeout(struct timer_list *t)
   2684{
   2685	unsigned long flags;
   2686	struct dasd_block *block;
   2687
   2688	block = from_timer(block, t, timer);
   2689	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
   2690	/* re-activate request queue */
   2691	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
   2692	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
   2693	dasd_schedule_block_bh(block);
   2694	blk_mq_run_hw_queues(block->request_queue, true);
   2695}
   2696
   2697/*
    2698 * Set up the timeout for a dasd_block, in jiffies.
   2699 */
   2700void dasd_block_set_timer(struct dasd_block *block, int expires)
   2701{
   2702	if (expires == 0)
   2703		del_timer(&block->timer);
   2704	else
   2705		mod_timer(&block->timer, jiffies + expires);
   2706}
   2707EXPORT_SYMBOL(dasd_block_set_timer);
   2708
   2709/*
   2710 * Clear timeout for a dasd_block.
   2711 */
   2712void dasd_block_clear_timer(struct dasd_block *block)
   2713{
   2714	del_timer(&block->timer);
   2715}
   2716EXPORT_SYMBOL(dasd_block_clear_timer);
   2717
   2718/*
   2719 * Process finished error recovery ccw.
   2720 */
   2721static void __dasd_process_erp(struct dasd_device *device,
   2722			       struct dasd_ccw_req *cqr)
   2723{
   2724	dasd_erp_fn_t erp_fn;
   2725
   2726	if (cqr->status == DASD_CQR_DONE)
   2727		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
   2728	else
   2729		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
   2730	erp_fn = device->discipline->erp_postaction(cqr);
   2731	erp_fn(cqr);
   2732}
   2733
   2734static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
   2735{
   2736	struct request *req;
   2737	blk_status_t error = BLK_STS_OK;
   2738	unsigned int proc_bytes;
   2739	int status;
   2740
   2741	req = (struct request *) cqr->callback_data;
   2742	dasd_profile_end(cqr->block, cqr, req);
   2743
   2744	proc_bytes = cqr->proc_bytes;
   2745	status = cqr->block->base->discipline->free_cp(cqr, req);
   2746	if (status < 0)
   2747		error = errno_to_blk_status(status);
   2748	else if (status == 0) {
   2749		switch (cqr->intrc) {
   2750		case -EPERM:
   2751			error = BLK_STS_NEXUS;
   2752			break;
   2753		case -ENOLINK:
   2754			error = BLK_STS_TRANSPORT;
   2755			break;
   2756		case -ETIMEDOUT:
   2757			error = BLK_STS_TIMEOUT;
   2758			break;
   2759		default:
   2760			error = BLK_STS_IOERR;
   2761			break;
   2762		}
   2763	}
   2764
   2765	/*
    2766	 * We need to take care of ETIMEDOUT errors here since the
   2767	 * complete callback does not get called in this case.
   2768	 * Take care of all errors here and avoid additional code to
   2769	 * transfer the error value to the complete callback.
   2770	 */
   2771	if (error) {
   2772		blk_mq_end_request(req, error);
   2773		blk_mq_run_hw_queues(req->q, true);
   2774	} else {
   2775		/*
    2776		 * Partially completed requests can happen with ESE devices.
    2777		 * During a read we might have gotten an NRF error and have to
   2778		 * complete a request partially.
   2779		 */
   2780		if (proc_bytes) {
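        			/*
        			 * Complete the first proc_bytes of the request and
        			 * hand the remainder back to blk-mq for re-issue.
        			 */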
   2781			blk_update_request(req, BLK_STS_OK, proc_bytes);
   2782			blk_mq_requeue_request(req, true);
   2783		} else if (likely(!blk_should_fake_timeout(req->q))) {
   2784			blk_mq_complete_request(req);
   2785		}
   2786	}
   2787}
   2788
   2789/*
   2790 * Process ccw request queue.
   2791 */
   2792static void __dasd_process_block_ccw_queue(struct dasd_block *block,
   2793					   struct list_head *final_queue)
   2794{
   2795	struct list_head *l, *n;
   2796	struct dasd_ccw_req *cqr;
   2797	dasd_erp_fn_t erp_fn;
   2798	unsigned long flags;
   2799	struct dasd_device *base = block->base;
   2800
   2801restart:
   2802	/* Process request with final status. */
   2803	list_for_each_safe(l, n, &block->ccw_queue) {
   2804		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
   2805		if (cqr->status != DASD_CQR_DONE &&
   2806		    cqr->status != DASD_CQR_FAILED &&
   2807		    cqr->status != DASD_CQR_NEED_ERP &&
   2808		    cqr->status != DASD_CQR_TERMINATED)
   2809			continue;
   2810
   2811		if (cqr->status == DASD_CQR_TERMINATED) {
   2812			base->discipline->handle_terminated_request(cqr);
   2813			goto restart;
   2814		}
   2815
   2816		/*  Process requests that may be recovered */
   2817		if (cqr->status == DASD_CQR_NEED_ERP) {
   2818			erp_fn = base->discipline->erp_action(cqr);
   2819			if (IS_ERR(erp_fn(cqr)))
   2820				continue;
   2821			goto restart;
   2822		}
   2823
   2824		/* log sense for fatal error */
   2825		if (cqr->status == DASD_CQR_FAILED) {
   2826			dasd_log_sense(cqr, &cqr->irb);
   2827		}
   2828
   2829		/* First of all call extended error reporting. */
   2830		if (dasd_eer_enabled(base) &&
   2831		    cqr->status == DASD_CQR_FAILED) {
   2832			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
   2833
   2834			/* restart request  */
   2835			cqr->status = DASD_CQR_FILLED;
   2836			cqr->retries = 255;
   2837			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
   2838			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
   2839			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
   2840					       flags);
   2841			goto restart;
   2842		}
   2843
   2844		/* Process finished ERP request. */
   2845		if (cqr->refers) {
   2846			__dasd_process_erp(base, cqr);
   2847			goto restart;
   2848		}
   2849
   2850		/* Rechain finished requests to final queue */
   2851		cqr->endclk = get_tod_clock();
   2852		list_move_tail(&cqr->blocklist, final_queue);
   2853	}
   2854}
   2855
   2856static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
   2857{
   2858	dasd_schedule_block_bh(cqr->block);
   2859}
   2860
   2861static void __dasd_block_start_head(struct dasd_block *block)
   2862{
   2863	struct dasd_ccw_req *cqr;
   2864
   2865	if (list_empty(&block->ccw_queue))
   2866		return;
    2867	/* We always begin with the first request on the queue, as some
    2868	 * of the previously started requests have to be enqueued on a
   2869	 * dasd_device again for error recovery.
   2870	 */
   2871	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
   2872		if (cqr->status != DASD_CQR_FILLED)
   2873			continue;
   2874		if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
   2875		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
   2876			cqr->status = DASD_CQR_FAILED;
   2877			cqr->intrc = -EPERM;
   2878			dasd_schedule_block_bh(block);
   2879			continue;
   2880		}
   2881		/* Non-temporary stop condition will trigger fail fast */
   2882		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
   2883		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
   2884		    (!dasd_eer_enabled(block->base))) {
   2885			cqr->status = DASD_CQR_FAILED;
   2886			cqr->intrc = -ENOLINK;
   2887			dasd_schedule_block_bh(block);
   2888			continue;
   2889		}
   2890		/* Don't try to start requests if device is stopped */
   2891		if (block->base->stopped)
   2892			return;
   2893
   2894		/* just a fail safe check, should not happen */
   2895		if (!cqr->startdev)
   2896			cqr->startdev = block->base;
   2897
   2898		/* make sure that the requests we submit find their way back */
   2899		cqr->callback = dasd_return_cqr_cb;
   2900
   2901		dasd_add_request_tail(cqr);
   2902	}
   2903}
   2904
   2905/*
   2906 * Central dasd_block layer routine. Takes requests from the generic
   2907 * block layer request queue, creates ccw requests, enqueues them on
   2908 * a dasd_device and processes ccw requests that have been returned.
   2909 */
   2910static void dasd_block_tasklet(unsigned long data)
   2911{
   2912	struct dasd_block *block = (struct dasd_block *) data;
   2913	struct list_head final_queue;
   2914	struct list_head *l, *n;
   2915	struct dasd_ccw_req *cqr;
   2916	struct dasd_queue *dq;
   2917
   2918	atomic_set(&block->tasklet_scheduled, 0);
   2919	INIT_LIST_HEAD(&final_queue);
   2920	spin_lock_irq(&block->queue_lock);
   2921	/* Finish off requests on ccw queue */
   2922	__dasd_process_block_ccw_queue(block, &final_queue);
   2923	spin_unlock_irq(&block->queue_lock);
   2924
   2925	/* Now call the callback function of requests with final status */
   2926	list_for_each_safe(l, n, &final_queue) {
   2927		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
   2928		dq = cqr->dq;
   2929		spin_lock_irq(&dq->lock);
   2930		list_del_init(&cqr->blocklist);
   2931		__dasd_cleanup_cqr(cqr);
   2932		spin_unlock_irq(&dq->lock);
   2933	}
   2934
   2935	spin_lock_irq(&block->queue_lock);
   2936	/* Now check if the head of the ccw queue needs to be started. */
   2937	__dasd_block_start_head(block);
   2938	spin_unlock_irq(&block->queue_lock);
   2939
   2940	if (waitqueue_active(&shutdown_waitq))
   2941		wake_up(&shutdown_waitq);
   2942	dasd_put_device(block->base);
   2943}
   2944
   2945static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
   2946{
   2947	wake_up(&dasd_flush_wq);
   2948}
   2949
   2950/*
    2951 * Requeue a request back to the block request queue;
    2952 * this only works for block layer requests.
   2953 */
   2954static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
   2955{
   2956	struct dasd_block *block = cqr->block;
   2957	struct request *req;
   2958
   2959	if (!block)
   2960		return -EINVAL;
   2961	/*
   2962	 * If the request is an ERP request there is nothing to requeue.
   2963	 * This will be done with the remaining original request.
   2964	 */
   2965	if (cqr->refers)
   2966		return 0;
   2967	spin_lock_irq(&cqr->dq->lock);
   2968	req = (struct request *) cqr->callback_data;
   2969	blk_mq_requeue_request(req, false);
   2970	spin_unlock_irq(&cqr->dq->lock);
   2971
   2972	return 0;
   2973}
   2974
   2975/*
    2976 * Go through all requests on the dasd_block request queue, cancel them
   2977 * on the respective dasd_device, and return them to the generic
   2978 * block layer.
   2979 */
   2980static int dasd_flush_block_queue(struct dasd_block *block)
   2981{
   2982	struct dasd_ccw_req *cqr, *n;
   2983	int rc, i;
   2984	struct list_head flush_queue;
   2985	unsigned long flags;
   2986
   2987	INIT_LIST_HEAD(&flush_queue);
   2988	spin_lock_bh(&block->queue_lock);
   2989	rc = 0;
   2990restart:
   2991	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
    2992		/* if this request is currently owned by a dasd_device, cancel it */
   2993		if (cqr->status >= DASD_CQR_QUEUED)
   2994			rc = dasd_cancel_req(cqr);
   2995		if (rc < 0)
   2996			break;
   2997		/* Rechain request (including erp chain) so it won't be
   2998		 * touched by the dasd_block_tasklet anymore.
   2999		 * Replace the callback so we notice when the request
   3000		 * is returned from the dasd_device layer.
   3001		 */
   3002		cqr->callback = _dasd_wake_block_flush_cb;
   3003		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
   3004			list_move_tail(&cqr->blocklist, &flush_queue);
   3005		if (i > 1)
   3006			/* moved more than one request - need to restart */
   3007			goto restart;
   3008	}
   3009	spin_unlock_bh(&block->queue_lock);
   3010	/* Now call the callback function of flushed requests */
   3011restart_cb:
   3012	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
   3013		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
   3014		/* Process finished ERP request. */
   3015		if (cqr->refers) {
   3016			spin_lock_bh(&block->queue_lock);
   3017			__dasd_process_erp(block->base, cqr);
   3018			spin_unlock_bh(&block->queue_lock);
    3019			/* restart the list_for_each loop since __dasd_process_erp
    3020			 * might remove multiple elements */
   3021			goto restart_cb;
   3022		}
   3023		/* call the callback function */
   3024		spin_lock_irqsave(&cqr->dq->lock, flags);
   3025		cqr->endclk = get_tod_clock();
   3026		list_del_init(&cqr->blocklist);
   3027		__dasd_cleanup_cqr(cqr);
   3028		spin_unlock_irqrestore(&cqr->dq->lock, flags);
   3029	}
   3030	return rc;
   3031}
   3032
   3033/*
    3034 * Schedules a run of dasd_block_tasklet over the block tasklet.
   3035 */
   3036void dasd_schedule_block_bh(struct dasd_block *block)
   3037{
   3038	/* Protect against rescheduling. */
   3039	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
   3040		return;
    3041	/* life cycle of block is bound to its base device */
   3042	dasd_get_device(block->base);
   3043	tasklet_hi_schedule(&block->tasklet);
   3044}
   3045EXPORT_SYMBOL(dasd_schedule_block_bh);
   3046
   3047
   3048/*
   3049 * SECTION: external block device operations
   3050 * (request queue handling, open, release, etc.)
   3051 */
   3052
   3053/*
    3054 * Dasd request queue function. Called from the block layer (blk-mq).
   3055 */
   3056static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
   3057				    const struct blk_mq_queue_data *qd)
   3058{
   3059	struct dasd_block *block = hctx->queue->queuedata;
   3060	struct dasd_queue *dq = hctx->driver_data;
   3061	struct request *req = qd->rq;
   3062	struct dasd_device *basedev;
   3063	struct dasd_ccw_req *cqr;
   3064	blk_status_t rc = BLK_STS_OK;
   3065
   3066	basedev = block->base;
   3067	spin_lock_irq(&dq->lock);
   3068	if (basedev->state < DASD_STATE_READY ||
   3069	    test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) {
   3070		DBF_DEV_EVENT(DBF_ERR, basedev,
   3071			      "device not ready for request %p", req);
   3072		rc = BLK_STS_IOERR;
   3073		goto out;
   3074	}
   3075
   3076	/*
    3077	 * if the device is stopped, do not fetch new requests
    3078	 * unless failfast is active, which will let requests fail
   3079	 * immediately in __dasd_block_start_head()
   3080	 */
   3081	if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) {
   3082		DBF_DEV_EVENT(DBF_ERR, basedev,
   3083			      "device stopped request %p", req);
   3084		rc = BLK_STS_RESOURCE;
   3085		goto out;
   3086	}
   3087
   3088	if (basedev->features & DASD_FEATURE_READONLY &&
   3089	    rq_data_dir(req) == WRITE) {
   3090		DBF_DEV_EVENT(DBF_ERR, basedev,
   3091			      "Rejecting write request %p", req);
   3092		rc = BLK_STS_IOERR;
   3093		goto out;
   3094	}
   3095
   3096	if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
   3097	    (basedev->features & DASD_FEATURE_FAILFAST ||
   3098	     blk_noretry_request(req))) {
   3099		DBF_DEV_EVENT(DBF_ERR, basedev,
   3100			      "Rejecting failfast request %p", req);
   3101		rc = BLK_STS_IOERR;
   3102		goto out;
   3103	}
   3104
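        		/* Translate the block layer request into a channel program. */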
   3105	cqr = basedev->discipline->build_cp(basedev, block, req);
   3106	if (IS_ERR(cqr)) {
   3107		if (PTR_ERR(cqr) == -EBUSY ||
   3108		    PTR_ERR(cqr) == -ENOMEM ||
   3109		    PTR_ERR(cqr) == -EAGAIN) {
   3110			rc = BLK_STS_RESOURCE;
   3111			goto out;
   3112		}
   3113		DBF_DEV_EVENT(DBF_ERR, basedev,
   3114			      "CCW creation failed (rc=%ld) on request %p",
   3115			      PTR_ERR(cqr), req);
   3116		rc = BLK_STS_IOERR;
   3117		goto out;
   3118	}
   3119	/*
    3120	 * Note: the callback is set to dasd_return_cqr_cb in
    3121	 * __dasd_block_start_head to cover ERP requests as well
   3122	 */
   3123	cqr->callback_data = req;
   3124	cqr->status = DASD_CQR_FILLED;
   3125	cqr->dq = dq;
   3126
   3127	blk_mq_start_request(req);
   3128	spin_lock(&block->queue_lock);
   3129	list_add_tail(&cqr->blocklist, &block->ccw_queue);
   3130	INIT_LIST_HEAD(&cqr->devlist);
   3131	dasd_profile_start(block, cqr, req);
   3132	dasd_schedule_block_bh(block);
   3133	spin_unlock(&block->queue_lock);
   3134
   3135out:
   3136	spin_unlock_irq(&dq->lock);
   3137	return rc;
   3138}
   3139
   3140/*
   3141 * Block timeout callback, called from the block layer
   3142 *
   3143 * Return values:
   3144 * BLK_EH_RESET_TIMER if the request should be left running
   3145 * BLK_EH_DONE if the request is handled or terminated
   3146 *		      by the driver.
   3147 */
   3148enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
   3149{
   3150	struct dasd_block *block = req->q->queuedata;
   3151	struct dasd_device *device;
   3152	struct dasd_ccw_req *cqr;
   3153	unsigned long flags;
   3154	int rc = 0;
   3155
   3156	cqr = blk_mq_rq_to_pdu(req);
   3157	if (!cqr)
   3158		return BLK_EH_DONE;
   3159
   3160	spin_lock_irqsave(&cqr->dq->lock, flags);
   3161	device = cqr->startdev ? cqr->startdev : block->base;
   3162	if (!device->blk_timeout) {
   3163		spin_unlock_irqrestore(&cqr->dq->lock, flags);
   3164		return BLK_EH_RESET_TIMER;
   3165	}
   3166	DBF_DEV_EVENT(DBF_WARNING, device,
   3167		      " dasd_times_out cqr %p status %x",
   3168		      cqr, cqr->status);
   3169
   3170	spin_lock(&block->queue_lock);
   3171	spin_lock(get_ccwdev_lock(device->cdev));
   3172	cqr->retries = -1;
   3173	cqr->intrc = -ETIMEDOUT;
   3174	if (cqr->status >= DASD_CQR_QUEUED) {
   3175		rc = __dasd_cancel_req(cqr);
   3176	} else if (cqr->status == DASD_CQR_FILLED ||
   3177		   cqr->status == DASD_CQR_NEED_ERP) {
   3178		cqr->status = DASD_CQR_TERMINATED;
   3179	} else if (cqr->status == DASD_CQR_IN_ERP) {
   3180		struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
   3181
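        		/*
        		 * Find the ERP request whose refers chain leads back to
        		 * this cqr and cancel or terminate it as well.
        		 */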
   3182		list_for_each_entry_safe(searchcqr, nextcqr,
   3183					 &block->ccw_queue, blocklist) {
   3184			tmpcqr = searchcqr;
   3185			while (tmpcqr->refers)
   3186				tmpcqr = tmpcqr->refers;
   3187			if (tmpcqr != cqr)
   3188				continue;
   3189			/* searchcqr is an ERP request for cqr */
   3190			searchcqr->retries = -1;
   3191			searchcqr->intrc = -ETIMEDOUT;
   3192			if (searchcqr->status >= DASD_CQR_QUEUED) {
   3193				rc = __dasd_cancel_req(searchcqr);
   3194			} else if ((searchcqr->status == DASD_CQR_FILLED) ||
   3195				   (searchcqr->status == DASD_CQR_NEED_ERP)) {
   3196				searchcqr->status = DASD_CQR_TERMINATED;
   3197				rc = 0;
   3198			} else if (searchcqr->status == DASD_CQR_IN_ERP) {
   3199				/*
   3200				 * Shouldn't happen; most recent ERP
   3201				 * request is at the front of queue
   3202				 */
   3203				continue;
   3204			}
   3205			break;
   3206		}
   3207	}
   3208	spin_unlock(get_ccwdev_lock(device->cdev));
   3209	dasd_schedule_block_bh(block);
   3210	spin_unlock(&block->queue_lock);
   3211	spin_unlock_irqrestore(&cqr->dq->lock, flags);
   3212
   3213	return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
   3214}
   3215
   3216static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
   3217			  unsigned int idx)
   3218{
   3219	struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL);
   3220
   3221	if (!dq)
   3222		return -ENOMEM;
   3223
   3224	spin_lock_init(&dq->lock);
   3225	hctx->driver_data = dq;
   3226
   3227	return 0;
   3228}
   3229
   3230static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
   3231{
   3232	kfree(hctx->driver_data);
   3233	hctx->driver_data = NULL;
   3234}
   3235
   3236static void dasd_request_done(struct request *req)
   3237{
   3238	blk_mq_end_request(req, 0);
   3239	blk_mq_run_hw_queues(req->q, true);
   3240}
   3241
   3242static struct blk_mq_ops dasd_mq_ops = {
   3243	.queue_rq = do_dasd_request,
   3244	.complete = dasd_request_done,
   3245	.timeout = dasd_times_out,
   3246	.init_hctx = dasd_init_hctx,
   3247	.exit_hctx = dasd_exit_hctx,
   3248};
   3249
   3250/*
   3251 * Allocate and initialize request queue and default I/O scheduler.
   3252 */
   3253static int dasd_alloc_queue(struct dasd_block *block)
   3254{
   3255	int rc;
   3256
   3257	block->tag_set.ops = &dasd_mq_ops;
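        		/*
        		 * cmd_size reserves per-request PDU space for a dasd_ccw_req;
        		 * dasd_times_out() retrieves it via blk_mq_rq_to_pdu().
        		 */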
   3258	block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
   3259	block->tag_set.nr_hw_queues = nr_hw_queues;
   3260	block->tag_set.queue_depth = queue_depth;
   3261	block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
   3262	block->tag_set.numa_node = NUMA_NO_NODE;
   3263
   3264	rc = blk_mq_alloc_tag_set(&block->tag_set);
   3265	if (rc)
   3266		return rc;
   3267
   3268	block->request_queue = blk_mq_init_queue(&block->tag_set);
   3269	if (IS_ERR(block->request_queue))
   3270		return PTR_ERR(block->request_queue);
   3271
   3272	block->request_queue->queuedata = block;
   3273
   3274	return 0;
   3275}
   3276
   3277/*
   3278 * Deactivate and free request queue.
   3279 */
   3280static void dasd_free_queue(struct dasd_block *block)
   3281{
   3282	if (block->request_queue) {
   3283		blk_cleanup_queue(block->request_queue);
   3284		blk_mq_free_tag_set(&block->tag_set);
   3285		block->request_queue = NULL;
   3286	}
   3287}
   3288
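        	/*
        	 * Open a DASD block device: take an open reference and reject
        	 * the open if the device is offline, not yet recognized, in
        	 * probeonly mode, or read-only while a writable open is
        	 * requested.
        	 */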
   3289static int dasd_open(struct block_device *bdev, fmode_t mode)
   3290{
   3291	struct dasd_device *base;
   3292	int rc;
   3293
   3294	base = dasd_device_from_gendisk(bdev->bd_disk);
   3295	if (!base)
   3296		return -ENODEV;
   3297
   3298	atomic_inc(&base->block->open_count);
   3299	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
   3300		rc = -ENODEV;
   3301		goto unlock;
   3302	}
   3303
   3304	if (!try_module_get(base->discipline->owner)) {
   3305		rc = -EINVAL;
   3306		goto unlock;
   3307	}
   3308
   3309	if (dasd_probeonly) {
   3310		dev_info(&base->cdev->dev,
   3311			 "Accessing the DASD failed because it is in "
   3312			 "probeonly mode\n");
   3313		rc = -EPERM;
   3314		goto out;
   3315	}
   3316
   3317	if (base->state <= DASD_STATE_BASIC) {
   3318		DBF_DEV_EVENT(DBF_ERR, base, " %s",
   3319			      " Cannot open unrecognized device");
   3320		rc = -ENODEV;
   3321		goto out;
   3322	}
   3323
   3324	if ((mode & FMODE_WRITE) &&
   3325	    (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
   3326	     (base->features & DASD_FEATURE_READONLY))) {
   3327		rc = -EROFS;
   3328		goto out;
   3329	}
   3330
   3331	dasd_put_device(base);
   3332	return 0;
   3333
   3334out:
   3335	module_put(base->discipline->owner);
   3336unlock:
   3337	atomic_dec(&base->block->open_count);
   3338	dasd_put_device(base);
   3339	return rc;
   3340}
   3341
   3342static void dasd_release(struct gendisk *disk, fmode_t mode)
   3343{
   3344	struct dasd_device *base = dasd_device_from_gendisk(disk);
   3345	if (base) {
   3346		atomic_dec(&base->block->open_count);
   3347		module_put(base->discipline->owner);
   3348		dasd_put_device(base);
   3349	}
   3350}
   3351
   3352/*
   3353 * Return disk geometry.
   3354 */
   3355static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
   3356{
   3357	struct dasd_device *base;
   3358
   3359	base = dasd_device_from_gendisk(bdev->bd_disk);
   3360	if (!base)
   3361		return -ENODEV;
   3362
   3363	if (!base->discipline ||
   3364	    !base->discipline->fill_geometry) {
   3365		dasd_put_device(base);
   3366		return -EINVAL;
   3367	}
   3368	base->discipline->fill_geometry(base->block, geo);
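       	/* get_start_sect() counts 512-byte sectors; the shift converts to device blocks. */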
   3369	geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
   3370	dasd_put_device(base);
   3371	return 0;
   3372}
   3373
   3374const struct block_device_operations
   3375dasd_device_operations = {
   3376	.owner		= THIS_MODULE,
   3377	.open		= dasd_open,
   3378	.release	= dasd_release,
   3379	.ioctl		= dasd_ioctl,
   3380	.compat_ioctl	= dasd_ioctl,
   3381	.getgeo		= dasd_getgeo,
   3382	.set_read_only	= dasd_set_read_only,
   3383};
   3384
   3385/*******************************************************************************
   3386 * end of block device operations
   3387 */
   3388
   3389static void
   3390dasd_exit(void)
   3391{
   3392#ifdef CONFIG_PROC_FS
   3393	dasd_proc_exit();
   3394#endif
   3395	dasd_eer_exit();
   3396	kmem_cache_destroy(dasd_page_cache);
   3397	dasd_page_cache = NULL;
   3398	dasd_gendisk_exit();
   3399	dasd_devmap_exit();
   3400	if (dasd_debug_area != NULL) {
   3401		debug_unregister(dasd_debug_area);
   3402		dasd_debug_area = NULL;
   3403	}
   3404	dasd_statistics_removeroot();
   3405}
   3406
   3407/*
   3408 * SECTION: common functions for ccw_driver use
   3409 */
   3410
   3411/*
   3412 * Is the device read-only?
   3413 * Note that this function does not report the setting of the
   3414 * readonly device attribute, but how it is configured in z/VM.
   3415 */
   3416int dasd_device_is_ro(struct dasd_device *device)
   3417{
   3418	struct ccw_dev_id dev_id;
   3419	struct diag210 diag_data;
   3420	int rc;
   3421
   3422	if (!MACHINE_IS_VM)
   3423		return 0;
   3424	ccw_device_get_id(device->cdev, &dev_id);
   3425	memset(&diag_data, 0, sizeof(diag_data));
   3426	diag_data.vrdcdvno = dev_id.devno;
   3427	diag_data.vrdclen = sizeof(diag_data);
   3428	rc = diag210(&diag_data);
   3429	if (rc == 0 || rc == 2) {
   3430		return diag_data.vrdcvfla & 0x80;
   3431	} else {
   3432		DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
   3433			  dev_id.devno, rc);
   3434		return 0;
   3435	}
   3436}
   3437EXPORT_SYMBOL_GPL(dasd_device_is_ro);
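
       /*
        * Hypothetical caller sketch (names taken from this file): a
        * discipline could mirror the z/VM read-only state into the device
        * flags, e.g.:
        *
        *	if (dasd_device_is_ro(device))
        *		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
        */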
   3438
   3439static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
   3440{
   3441	struct ccw_device *cdev = data;
   3442	int ret;
   3443
   3444	ret = ccw_device_set_online(cdev);
   3445	if (ret)
   3446		pr_warn("%s: Setting the DASD online failed with rc=%d\n",
   3447			dev_name(&cdev->dev), ret);
   3448}
   3449
   3450/*
   3451 * Initial attempt at a probe function. This can be simplified once
   3452 * the other detection code is gone.
   3453 */
   3454int dasd_generic_probe(struct ccw_device *cdev)
   3455{
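       	/* All channel interrupts for this device go to the DASD interrupt handler. */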
   3456	cdev->handler = &dasd_int_handler;
   3457
   3458	/*
   3459	 * Automatically online either all dasd devices (dasd_autodetect)
   3460	 * or all devices specified with dasd= parameters during
   3461	 * initial probe.
   3462	 */
   3463	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
   3464	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
   3465		async_schedule(dasd_generic_auto_online, cdev);
   3466	return 0;
   3467}
   3468EXPORT_SYMBOL_GPL(dasd_generic_probe);
   3469
   3470void dasd_generic_free_discipline(struct dasd_device *device)
   3471{
   3472	/* Forget the discipline information. */
   3473	if (device->discipline) {
   3474		if (device->discipline->uncheck_device)
   3475			device->discipline->uncheck_device(device);
   3476		module_put(device->discipline->owner);
   3477		device->discipline = NULL;
   3478	}
   3479	if (device->base_discipline) {
   3480		module_put(device->base_discipline->owner);
   3481		device->base_discipline = NULL;
   3482	}
   3483}
   3484EXPORT_SYMBOL_GPL(dasd_generic_free_discipline);
   3485
   3486/*
   3487 * This will one day be called from a global not_oper handler.
   3488 * It is also used by driver_unregister during module unload.
   3489 */
   3490void dasd_generic_remove(struct ccw_device *cdev)
   3491{
   3492	struct dasd_device *device;
   3493	struct dasd_block *block;
   3494
   3495	device = dasd_device_from_cdev(cdev);
   3496	if (IS_ERR(device))
   3497		return;
   3498
   3499	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
   3500	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
   3501		/* Already doing offline processing */
   3502		dasd_put_device(device);
   3503		return;
   3504	}
   3505	/*
   3506	 * This device is removed unconditionally. Set offline
   3507	 * flag to prevent dasd_open from opening it while it is
   3508	 * no quite down yet.
   3509	 * not quite down yet.
   3510	dasd_set_target_state(device, DASD_STATE_NEW);
   3511	cdev->handler = NULL;
   3512	/* dasd_delete_device destroys the device reference. */
   3513	block = device->block;
   3514	dasd_delete_device(device);
   3515	/*
   3516	 * life cycle of block is bound to device, so delete it after
   3517	 * device was safely removed
   3518	 */
   3519	if (block)
   3520		dasd_free_block(block);
   3521}
   3522EXPORT_SYMBOL_GPL(dasd_generic_remove);
   3523
   3524/*
   3525 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
   3526 * the device is detected for the first time and is supposed to be used
   3527 * or the user has started activation through sysfs.
   3528 */
   3529int dasd_generic_set_online(struct ccw_device *cdev,
   3530			    struct dasd_discipline *base_discipline)
   3531{
   3532	struct dasd_discipline *discipline;
   3533	struct dasd_device *device;
   3534	int rc;
   3535
   3536	/* first online clears initial online feature flag */
   3537	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
   3538	device = dasd_create_device(cdev);
   3539	if (IS_ERR(device))
   3540		return PTR_ERR(device);
   3541
   3542	discipline = base_discipline;
   3543	if (device->features & DASD_FEATURE_USEDIAG) {
   3544		if (!dasd_diag_discipline_pointer) {
   3545			/* Try to load the required module. */
   3546			rc = request_module(DASD_DIAG_MOD);
   3547			if (rc) {
   3548				pr_warn("%s Setting the DASD online failed "
   3549					"because the required module %s "
   3550					"could not be loaded (rc=%d)\n",
   3551					dev_name(&cdev->dev), DASD_DIAG_MOD,
   3552					rc);
   3553				dasd_delete_device(device);
   3554				return -ENODEV;
   3555			}
   3556		}
   3557		/* Module init could have failed, so check again here after
   3558		 * request_module(). */
   3559		if (!dasd_diag_discipline_pointer) {
   3560			pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
   3561				dev_name(&cdev->dev));
   3562			dasd_delete_device(device);
   3563			return -ENODEV;
   3564		}
   3565		discipline = dasd_diag_discipline_pointer;
   3566	}
   3567	if (!try_module_get(base_discipline->owner)) {
   3568		dasd_delete_device(device);
   3569		return -EINVAL;
   3570	}
   3571	if (!try_module_get(discipline->owner)) {
   3572		module_put(base_discipline->owner);
   3573		dasd_delete_device(device);
   3574		return -EINVAL;
   3575	}
   3576	device->base_discipline = base_discipline;
   3577	device->discipline = discipline;
   3578
   3579	/* check_device will allocate block device if necessary */
   3580	rc = discipline->check_device(device);
   3581	if (rc) {
   3582		pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
   3583			dev_name(&cdev->dev), discipline->name, rc);
   3584		module_put(discipline->owner);
   3585		module_put(base_discipline->owner);
   3586		dasd_delete_device(device);
   3587		return rc;
   3588	}
   3589
   3590	dasd_set_target_state(device, DASD_STATE_ONLINE);
   3591	if (device->state <= DASD_STATE_KNOWN) {
   3592		pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
   3593			dev_name(&cdev->dev));
   3594		rc = -ENODEV;
   3595		dasd_set_target_state(device, DASD_STATE_NEW);
   3596		if (device->block)
   3597			dasd_free_block(device->block);
   3598		dasd_delete_device(device);
   3599	} else
   3600		pr_debug("dasd_generic device %s found\n",
   3601				dev_name(&cdev->dev));
   3602
   3603	wait_event(dasd_init_waitq, _wait_for_device(device));
   3604
   3605	dasd_put_device(device);
   3606	return rc;
   3607}
   3608EXPORT_SYMBOL_GPL(dasd_generic_set_online);
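
       /*
        * Illustrative sketch (hypothetical names): discipline drivers such
        * as dasd_eckd or dasd_fba typically wrap this helper in their
        * ccw_driver set_online callback:
        *
        *	static int example_set_online(struct ccw_device *cdev)
        *	{
        *		return dasd_generic_set_online(cdev, &example_discipline);
        *	}
        */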
   3609
   3610int dasd_generic_set_offline(struct ccw_device *cdev)
   3611{
   3612	struct dasd_device *device;
   3613	struct dasd_block *block;
   3614	int max_count, open_count, rc;
   3615	unsigned long flags;
   3616
   3617	rc = 0;
   3618	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
   3619	device = dasd_device_from_cdev_locked(cdev);
   3620	if (IS_ERR(device)) {
   3621		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
   3622		return PTR_ERR(device);
   3623	}
   3624
   3625	/*
   3626	 * We must make sure that this device is currently not in use.
   3627	 * The open_count is increased for every opener; that includes
   3628	 * the blkdev_get in dasd_scan_partitions. We are only interested
   3629	 * in the other openers.
   3630	 */
   3631	if (device->block) {
   3632		max_count = device->block->bdev ? 0 : -1;
   3633		open_count = atomic_read(&device->block->open_count);
   3634		if (open_count > max_count) {
   3635			if (open_count > 0)
   3636				pr_warn("%s: The DASD cannot be set offline with open count %i\n",
   3637					dev_name(&cdev->dev), open_count);
   3638			else
   3639				pr_warn("%s: The DASD cannot be set offline while it is in use\n",
   3640					dev_name(&cdev->dev));
   3641			rc = -EBUSY;
   3642			goto out_err;
   3643		}
   3644	}
   3645
   3646	/*
   3647	 * Test if the offline processing is already running and exit if so.
   3648	 * If a safe offline is being processed, this can only be a normal
   3649	 * offline that is allowed to overtake the safe offline and cancel
   3650	 * any I/O we no longer want to wait for.
   3651	 */
   3652	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
   3653		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
   3654			clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
   3655				  &device->flags);
   3656		} else {
   3657			rc = -EBUSY;
   3658			goto out_err;
   3659		}
   3660	}
   3661	set_bit(DASD_FLAG_OFFLINE, &device->flags);
   3662
   3663	/*
   3664	 * If safe_offline was requested, set the safe_offline_running flag
   3665	 * and clear safe_offline so that a later call to normal offline
   3666	 * can overtake the safe_offline processing.
   3667	 */
   3668	if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
   3669	    !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
   3670		/* need to unlock here to wait for outstanding I/O */
   3671		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
   3672		/*
   3673		 * If we want to set the device safe offline, all I/O operations
   3674		 * should be finished before continuing the offline process, so
   3675		 * sync the bdev first and then wait for our queues to become
   3676		 * empty.
   3677		 */
   3678		if (device->block) {
   3679			rc = fsync_bdev(device->block->bdev);
   3680			if (rc != 0)
   3681				goto interrupted;
   3682		}
   3683		dasd_schedule_device_bh(device);
   3684		rc = wait_event_interruptible(shutdown_waitq,
   3685					      _wait_for_empty_queues(device));
   3686		if (rc != 0)
   3687			goto interrupted;
   3688
   3689		/*
   3690		 * Check if a normal offline process overtook the safe offline
   3691		 * processing. In this case simply do nothing besides returning
   3692		 * that we got interrupted.
   3693		 * Otherwise mark safe offline as not running any longer and
   3694		 * continue with normal offline.
   3695		 */
   3696		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
   3697		if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
   3698			rc = -ERESTARTSYS;
   3699			goto out_err;
   3700		}
   3701		clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
   3702	}
   3703	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
   3704
   3705	dasd_set_target_state(device, DASD_STATE_NEW);
   3706	/* dasd_delete_device destroys the device reference. */
   3707	block = device->block;
   3708	dasd_delete_device(device);
   3709	/*
   3710	 * life cycle of block is bound to device, so delete it after
   3711	 * device was safely removed
   3712	 */
   3713	if (block)
   3714		dasd_free_block(block);
   3715
   3716	return 0;
   3717
   3718interrupted:
   3719	/* interrupted by signal */
   3720	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
   3721	clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
   3722	clear_bit(DASD_FLAG_OFFLINE, &device->flags);
   3723out_err:
   3724	dasd_put_device(device);
   3725	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
   3726	return rc;
   3727}
   3728EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
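
       /*
        * Flag interplay in the offline paths above, summarized from the
        * code (not a normative spec):
        *
        *	normal offline:	set DASD_FLAG_OFFLINE and tear the device down
        *	safe offline:	consume DASD_FLAG_SAFE_OFFLINE, set
        *			DASD_FLAG_SAFE_OFFLINE_RUNNING, sync the bdev
        *			and drain the queues before the teardown
        *	overtake:	a later normal offline clears
        *			DASD_FLAG_SAFE_OFFLINE_RUNNING; the waiting
        *			safe offline then returns -ERESTARTSYS
        */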
   3729
   3730int dasd_generic_last_path_gone(struct dasd_device *device)
   3731{
   3732	struct dasd_ccw_req *cqr;
   3733
   3734	dev_warn(&device->cdev->dev, "No operational channel path is left "
   3735		 "for the device\n");
   3736	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
   3737	/* First of all call extended error reporting. */
   3738	dasd_eer_write(device, NULL, DASD_EER_NOPATH);
   3739
   3740	if (device->state < DASD_STATE_BASIC)
   3741		return 0;
   3742	/* Device is active. We want to keep it. */
   3743	list_for_each_entry(cqr, &device->ccw_queue, devlist)
   3744		if ((cqr->status == DASD_CQR_IN_IO) ||
   3745		    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
   3746			cqr->status = DASD_CQR_QUEUED;
   3747			cqr->retries++;
   3748		}
   3749	dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
   3750	dasd_device_clear_timer(device);
   3751	dasd_schedule_device_bh(device);
   3752	return 1;
   3753}
   3754EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
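
       /*
        * Note: the DASD_STOPPED_DC_WAIT bit set above is removed again in
        * dasd_generic_path_operational() once a channel path comes back.
        */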
   3755
   3756int dasd_generic_path_operational(struct dasd_device *device)
   3757{
   3758	dev_info(&device->cdev->dev, "A channel path to the device has become "
   3759		 "operational\n");
   3760	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
   3761	dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
   3762	dasd_schedule_device_bh(device);
   3763	if (device->block) {
   3764		dasd_schedule_block_bh(device->block);
   3765		if (device->block->request_queue)
   3766			blk_mq_run_hw_queues(device->block->request_queue,
   3767					     true);
   3768	}
   3769
   3770	if (!device->stopped)
   3771		wake_up(&generic_waitq);
   3772
   3773	return 1;
   3774}
   3775EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
   3776
   3777int dasd_generic_notify(struct ccw_device *cdev, int event)
   3778{
   3779	struct dasd_device *device;
   3780	int ret;
   3781
   3782	device = dasd_device_from_cdev_locked(cdev);
   3783	if (IS_ERR(device))
   3784		return 0;
   3785	ret = 0;
   3786	switch (event) {
   3787	case CIO_GONE:
   3788	case CIO_BOXED:
   3789	case CIO_NO_PATH:
   3790		dasd_path_no_path(device);
   3791		ret = dasd_generic_last_path_gone(device);
   3792		break;
   3793	case CIO_OPER:
   3794		ret = 1;
   3795		if (dasd_path_get_opm(device))
   3796			ret = dasd_generic_path_operational(device);
   3797		break;
   3798	}
   3799	dasd_put_device(device);
   3800	return ret;
   3801}
   3802EXPORT_SYMBOL_GPL(dasd_generic_notify);
   3803
   3804void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
   3805{
   3806	struct dasd_device *device;
   3807	int chp, oldopm, hpfpm, ifccpm;
   3808
   3809	device = dasd_device_from_cdev_locked(cdev);
   3810	if (IS_ERR(device))
   3811		return;
   3812
   3813	oldopm = dasd_path_get_opm(device);
   3814	for (chp = 0; chp < 8; chp++) {
   3815		if (path_event[chp] & PE_PATH_GONE) {
   3816			dasd_path_notoper(device, chp);
   3817		}
   3818		if (path_event[chp] & PE_PATH_AVAILABLE) {
   3819			dasd_path_available(device, chp);
   3820			dasd_schedule_device_bh(device);
   3821		}
   3822		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
   3823			if (!dasd_path_is_operational(device, chp) &&
   3824			    !dasd_path_need_verify(device, chp)) {
   3825				/*
   3826				 * We cannot establish a pathgroup on an
   3827				 * unavailable path, so trigger a path
   3828				 * verification first.
   3829				 */
   3830				dasd_path_available(device, chp);
   3831				dasd_schedule_device_bh(device);
   3832			}
   3833			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
   3834				      "Pathgroup re-established\n");
   3835			if (device->discipline->kick_validate)
   3836				device->discipline->kick_validate(device);
   3837		}
   3838		if (path_event[chp] & PE_PATH_FCES_EVENT) {
   3839			dasd_path_fcsec_update(device, chp);
   3840			dasd_schedule_device_bh(device);
   3841		}
   3842	}
   3843	hpfpm = dasd_path_get_hpfpm(device);
   3844	ifccpm = dasd_path_get_ifccpm(device);
   3845	if (!dasd_path_get_opm(device) && hpfpm) {
   3846		/*
   3847		 * The device has no operational paths, but at least one path is
   3848		 * disabled due to HPF errors.
   3849		 * Disable HPF entirely and use the path(s) again.
   3850		 */
   3851		if (device->discipline->disable_hpf)
   3852			device->discipline->disable_hpf(device);
   3853		dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
   3854		dasd_path_set_tbvpm(device, hpfpm);
   3855		dasd_schedule_device_bh(device);
   3856		dasd_schedule_requeue(device);
   3857	} else if (!dasd_path_get_opm(device) && ifccpm) {
   3858		/*
   3859		 * The device has no operational paths, but at least one path is
   3860		 * disabled due to IFCC errors.
   3861		 * Trigger path verification on the paths with IFCC errors.
   3862		 */
   3863		dasd_path_set_tbvpm(device, ifccpm);
   3864		dasd_schedule_device_bh(device);
   3865	}
   3866	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
   3867		dev_warn(&device->cdev->dev,
   3868			 "No verified channel paths remain for the device\n");
   3869		DBF_DEV_EVENT(DBF_WARNING, device,
   3870			      "%s", "last verified path gone");
   3871		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
   3872		dasd_device_set_stop_bits(device,
   3873					  DASD_STOPPED_DC_WAIT);
   3874	}
   3875	dasd_put_device(device);
   3876}
   3877EXPORT_SYMBOL_GPL(dasd_generic_path_event);
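
       /*
        * Note (derived from the code above): path_event[] carries one PE_*
        * bitmask per channel path; the per-path loop reacts to individual
        * events, while the aggregated masks (opm/hpfpm/ifccpm) drive the
        * device-wide decisions afterwards.
        */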
   3878
   3879int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
   3880{
   3881	if (!dasd_path_get_opm(device) && lpm) {
   3882		dasd_path_set_opm(device, lpm);
   3883		dasd_generic_path_operational(device);
   3884	} else
   3885		dasd_path_add_opm(device, lpm);
   3886	return 0;
   3887}
   3888EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
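
       /*
        * Note: the first path to become operational restarts I/O via
        * dasd_generic_path_operational(); any further operational paths
        * are merely added to the operational path mask.
        */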
   3889
   3890void dasd_generic_space_exhaust(struct dasd_device *device,
   3891				struct dasd_ccw_req *cqr)
   3892{
   3893	dasd_eer_write(device, NULL, DASD_EER_NOSPC);
   3894
   3895	if (device->state < DASD_STATE_BASIC)
   3896		return;
   3897
   3898	if (cqr->status == DASD_CQR_IN_IO ||
   3899	    cqr->status == DASD_CQR_CLEAR_PENDING) {
   3900		cqr->status = DASD_CQR_QUEUED;
   3901		cqr->retries++;
   3902	}
   3903	dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
   3904	dasd_device_clear_timer(device);
   3905	dasd_schedule_device_bh(device);
   3906}
   3907EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);
   3908
   3909void dasd_generic_space_avail(struct dasd_device *device)
   3910{
   3911	dev_info(&device->cdev->dev, "Extent pool space is available\n");
   3912	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");
   3913
   3914	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
   3915	dasd_schedule_device_bh(device);
   3916
   3917	if (device->block) {
   3918		dasd_schedule_block_bh(device->block);
   3919		if (device->block->request_queue)
   3920			blk_mq_run_hw_queues(device->block->request_queue, true);
   3921	}
   3922	if (!device->stopped)
   3923		wake_up(&generic_waitq);
   3924}
   3925EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
   3926
   3927/*
   3928 * Clear active requests and requeue them to the block layer if possible.
   3929 */
   3930static int dasd_generic_requeue_all_requests(struct dasd_device *device)
   3931{
   3932	struct list_head requeue_queue;
   3933	struct dasd_ccw_req *cqr, *n;
   3934	struct dasd_ccw_req *refers;
   3935	int rc;
   3936
   3937	INIT_LIST_HEAD(&requeue_queue);
   3938	spin_lock_irq(get_ccwdev_lock(device->cdev));
   3939	rc = 0;
   3940	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
   3941		/* Check status and move request to requeue_queue */
   3942		if (cqr->status == DASD_CQR_IN_IO) {
   3943			rc = device->discipline->term_IO(cqr);
   3944			if (rc) {
   3945				/* unable to terminate request */
   3946				dev_err(&device->cdev->dev,
   3947					"Unable to terminate request %p "
   3948					"on suspend\n", cqr);
   3949				spin_unlock_irq(get_ccwdev_lock(device->cdev));
   3950				dasd_put_device(device);
   3951				return rc;
   3952			}
   3953		}
   3954		list_move_tail(&cqr->devlist, &requeue_queue);
   3955	}
   3956	spin_unlock_irq(get_ccwdev_lock(device->cdev));
   3957
   3958	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
   3959		wait_event(dasd_flush_wq,
   3960			   (cqr->status != DASD_CQR_CLEAR_PENDING));
   3961
   3962		/*
   3963		 * Requeueing requests to the block layer only works
   3964		 * for block device requests.
   3965		 */
   3966		if (_dasd_requeue_request(cqr))
   3967			continue;
   3968
   3969		/* remove requests from device and block queue */
   3970		list_del_init(&cqr->devlist);
   3971		while (cqr->refers != NULL) {
   3972			refers = cqr->refers;
   3973			/* remove the request from the block queue */
   3974			list_del(&cqr->blocklist);
   3975			/* free the finished erp request */
   3976			dasd_free_erp_request(cqr, cqr->memdev);
   3977			cqr = refers;
   3978		}
   3979
   3980		/*
   3981		 * _dasd_requeue_request already checked for a valid
   3982		 * blockdevice, no need to check again.
   3983		 * All ERP requests (cqr->refers) have a cqr->block
   3984		 * pointer copied from the original cqr.
   3985		 */
   3986		list_del_init(&cqr->blocklist);
   3987		cqr->block->base->discipline->free_cp(
   3988			cqr, (struct request *) cqr->callback_data);
   3989	}
   3990
   3991	/*
   3992	 * If requests remain, they are internal requests
   3993	 * and go back to the device queue.
   3994	 */
   3995	if (!list_empty(&requeue_queue)) {
   3996		/* move requeue_queue to the end of the ccw_queue */
   3997		spin_lock_irq(get_ccwdev_lock(device->cdev));
   3998		list_splice_tail(&requeue_queue, &device->ccw_queue);
   3999		spin_unlock_irq(get_ccwdev_lock(device->cdev));
   4000	}
   4001	dasd_schedule_device_bh(device);
   4002	return rc;
   4003}
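
       /*
        * ERP chain layout unwound above (derived from the code): each ERP
        * request points back to the request it was built for via
        * cqr->refers:
        *
        *	erp2 --refers--> erp1 --refers--> original cqr (refers == NULL)
        *
        * The loop frees the ERP requests and finally hands the original
        * request back to the block layer via the discipline's free_cp().
        */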
   4004
   4005static void do_requeue_requests(struct work_struct *work)
   4006{
   4007	struct dasd_device *device = container_of(work, struct dasd_device,
   4008						  requeue_requests);
   4009	dasd_generic_requeue_all_requests(device);
   4010	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
   4011	if (device->block)
   4012		dasd_schedule_block_bh(device->block);
   4013	dasd_put_device(device);
   4014}
   4015
   4016void dasd_schedule_requeue(struct dasd_device *device)
   4017{
   4018	dasd_get_device(device);
   4019	/* queue call to do_requeue_requests to the kernel event daemon. */
   4020	if (!schedule_work(&device->requeue_requests))
   4021		dasd_put_device(device);
   4022}
   4023EXPORT_SYMBOL(dasd_schedule_requeue);
   4024
   4025static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
   4026						   int rdc_buffer_size,
   4027						   int magic)
   4028{
   4029	struct dasd_ccw_req *cqr;
   4030	struct ccw1 *ccw;
   4031
   4032	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
   4033				   NULL);
   4034
   4035	if (IS_ERR(cqr)) {
   4036		/* internal error 13 - Allocating the RDC request failed */
   4037		dev_err(&device->cdev->dev,
   4038			 "An error occurred in the DASD device driver, "
   4039			 "reason=%s\n", "13");
   4040		return cqr;
   4041	}
   4042
   4043	ccw = cqr->cpaddr;
   4044	ccw->cmd_code = CCW_CMD_RDC;
   4045	ccw->cda = (__u32)(addr_t) cqr->data;
   4046	ccw->flags = 0;
   4047	ccw->count = rdc_buffer_size;
   4048	cqr->startdev = device;
   4049	cqr->memdev = device;
   4050	cqr->expires = 10*HZ;
   4051	cqr->retries = 256;
   4052	cqr->buildclk = get_tod_clock();
   4053	cqr->status = DASD_CQR_FILLED;
   4054	return cqr;
   4055}
   4056
   4057
   4058int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
   4059				void *rdc_buffer, int rdc_buffer_size)
   4060{
   4061	int ret;
   4062	struct dasd_ccw_req *cqr;
   4063
   4064	cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
   4065	if (IS_ERR(cqr))
   4066		return PTR_ERR(cqr);
   4067
   4068	ret = dasd_sleep_on(cqr);
   4069	if (ret == 0)
   4070		memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
   4071	dasd_sfree_request(cqr, cqr->memdev);
   4072	return ret;
   4073}
   4074EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
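
       /*
        * Hypothetical caller sketch (buffer type and magic are
        * illustrative):
        *
        *	struct example_rdc_data rdc;
        *	int rc;
        *
        *	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
        *					 &rdc, sizeof(rdc));
        *	if (rc)
        *		dev_warn(&device->cdev->dev,
        *			 "Reading device characteristics failed, rc=%d\n",
        *			 rc);
        */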
   4075
   4076/*
   4077 *   In command mode and transport mode we need to look for sense
   4078 *   data in different places. The sense data itself is always
   4079 *   an array of 32 bytes, so we can unify the sense data access
   4080 *   for both modes.
   4081 */
   4082char *dasd_get_sense(struct irb *irb)
   4083{
   4084	struct tsb *tsb = NULL;
   4085	char *sense = NULL;
   4086
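       	/* Transport mode: the sense data is carried in the TSB of the TCW. */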
   4087	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
   4088		if (irb->scsw.tm.tcw)
   4089			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
   4090					  irb->scsw.tm.tcw);
   4091		if (tsb && tsb->length == 64 && tsb->flags)
   4092			switch (tsb->flags & 0x07) {
   4093			case 1:	/* tsa_iostat */
   4094				sense = tsb->tsa.iostat.sense;
   4095				break;
   4096			case 2: /* tsa_ddpc */
   4097				sense = tsb->tsa.ddpc.sense;
   4098				break;
   4099			default:
   4100				/* currently we don't use interrogate data */
   4101				break;
   4102			}
   4103	} else if (irb->esw.esw0.erw.cons) {
   4104		sense = irb->ecw;
   4105	}
   4106	return sense;
   4107}
   4108EXPORT_SYMBOL_GPL(dasd_get_sense);
   4109
   4110void dasd_generic_shutdown(struct ccw_device *cdev)
   4111{
   4112	struct dasd_device *device;
   4113
   4114	device = dasd_device_from_cdev(cdev);
   4115	if (IS_ERR(device))
   4116		return;
   4117
   4118	if (device->block)
   4119		dasd_schedule_block_bh(device->block);
   4120
   4121	dasd_schedule_device_bh(device);
   4122
   4123	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
   4124}
   4125EXPORT_SYMBOL_GPL(dasd_generic_shutdown);
   4126
   4127static int __init dasd_init(void)
   4128{
   4129	int rc;
   4130
   4131	init_waitqueue_head(&dasd_init_waitq);
   4132	init_waitqueue_head(&dasd_flush_wq);
   4133	init_waitqueue_head(&generic_waitq);
   4134	init_waitqueue_head(&shutdown_waitq);
   4135
   4136	/* register 'common' DASD debug area, used for all DBF_XXX calls */
   4137	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
   4138	if (dasd_debug_area == NULL) {
   4139		rc = -ENOMEM;
   4140		goto failed;
   4141	}
   4142	debug_register_view(dasd_debug_area, &debug_sprintf_view);
   4143	debug_set_level(dasd_debug_area, DBF_WARNING);
   4144
   4145	DBF_EVENT(DBF_EMERG, "%s", "debug area created");
   4146
   4147	dasd_diag_discipline_pointer = NULL;
   4148
   4149	dasd_statistics_createroot();
   4150
   4151	rc = dasd_devmap_init();
   4152	if (rc)
   4153		goto failed;
   4154	rc = dasd_gendisk_init();
   4155	if (rc)
   4156		goto failed;
   4157	rc = dasd_parse();
   4158	if (rc)
   4159		goto failed;
   4160	rc = dasd_eer_init();
   4161	if (rc)
   4162		goto failed;
   4163#ifdef CONFIG_PROC_FS
   4164	rc = dasd_proc_init();
   4165	if (rc)
   4166		goto failed;
   4167#endif
   4168
   4169	return 0;
   4170failed:
   4171	pr_info("The DASD device driver could not be initialized\n");
   4172	dasd_exit();
   4173	return rc;
   4174}
   4175
   4176module_init(dasd_init);
   4177module_exit(dasd_exit);