cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tape_core.c (33674B)


// SPDX-License-Identifier: GPL-2.0
/*
 *    basic function of the tape device driver
 *
 *  S390 and zSeries version
 *    Copyright IBM Corp. 2001, 2009
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *		 Michael Holzheu <holzheu@de.ibm.com>
 *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		 Stefan Bader <shbader@de.ibm.com>
 */

#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>	     // for kernel parameters
#include <linux/kmod.h>	     // for requesting modules
#include <linux/spinlock.h>  // for locks
#include <linux/vmalloc.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/types.h>	     // for variable types

#define TAPE_DBF_AREA	tape_core_dbf

#include "tape.h"
#include "tape_std.h"

#define LONG_BUSY_TIMEOUT 180 /* seconds */

static void __tape_do_irq(struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(struct work_struct *);
static void tape_long_busy_timeout(struct timer_list *t);

/*
 * One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major.
 * The list is protected by the rwlock tape_device_lock.
 */
static LIST_HEAD(tape_device_list);
static DEFINE_RWLOCK(tape_device_lock);

/*
 * Pointer to debug area.
 */
debug_info_t *TAPE_DBF_AREA = NULL;
EXPORT_SYMBOL(TAPE_DBF_AREA);

/*
 * Printable strings for tape enumerations.
 */
const char *tape_state_verbose[TS_SIZE] =
{
	[TS_UNUSED]   = "UNUSED",
	[TS_IN_USE]   = "IN_USE",
	[TS_BLKUSE]   = "BLKUSE",
	[TS_INIT]     = "INIT  ",
	[TS_NOT_OPER] = "NOT_OP"
};

const char *tape_op_verbose[TO_SIZE] =
{
	[TO_BLOCK] = "BLK",	[TO_BSB] = "BSB",
	[TO_BSF] = "BSF",	[TO_DSE] = "DSE",
	[TO_FSB] = "FSB",	[TO_FSF] = "FSF",
	[TO_LBL] = "LBL",	[TO_NOP] = "NOP",
	[TO_RBA] = "RBA",	[TO_RBI] = "RBI",
	[TO_RFO] = "RFO",	[TO_REW] = "REW",
	[TO_RUN] = "RUN",	[TO_WRI] = "WRI",
	[TO_WTM] = "WTM",	[TO_MSEN] = "MSN",
	[TO_LOAD] = "LOA",	[TO_READ_CONFIG] = "RCF",
	[TO_READ_ATTMSG] = "RAT",
	[TO_DIS] = "DIS",	[TO_ASSIGN] = "ASS",
	[TO_UNASSIGN] = "UAS",  [TO_CRYPT_ON] = "CON",
	[TO_CRYPT_OFF] = "COF",	[TO_KEKL_SET] = "KLS",
	[TO_KEKL_QUERY] = "KLQ",[TO_RDC] = "RDC",
};

static int devid_to_int(struct ccw_dev_id *dev_id)
{
	return dev_id->devno + (dev_id->ssid << 16);
}
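
/*
 * Editor's note: a worked example of the packing above (illustrative, not
 * part of the driver). The subchannel-set id lands in bits 16 and up and
 * the device number in the low 16 bits, so devno 0x1234 in ssid 1 maps to
 * 0x1234 + (1 << 16) = 0x00011234.
 */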

/*
 * Some channel attached tape specific attributes.
 *
 * FIXME: In the future the first_minor and blocksize attributes should be
 *        replaced by a link to the cdev tree.
 */
static ssize_t
tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
}

static
DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);

static ssize_t
tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
}

static
DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);

static ssize_t
tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
		"OFFLINE" : tape_state_verbose[tdev->tape_state]);
}

static
DEVICE_ATTR(state, 0444, tape_state_show, NULL);

static ssize_t
tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;
	ssize_t rc;

	tdev = dev_get_drvdata(dev);
	if (tdev->first_minor < 0)
		return scnprintf(buf, PAGE_SIZE, "N/A\n");

	spin_lock_irq(get_ccwdev_lock(tdev->cdev));
	if (list_empty(&tdev->req_queue))
		rc = scnprintf(buf, PAGE_SIZE, "---\n");
	else {
		struct tape_request *req;

		req = list_entry(tdev->req_queue.next, struct tape_request,
			list);
		rc = scnprintf(buf, PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
	}
	spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
	return rc;
}

static
DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);

static ssize_t
tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
}

static
DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);

static struct attribute *tape_attrs[] = {
	&dev_attr_medium_state.attr,
	&dev_attr_first_minor.attr,
	&dev_attr_state.attr,
	&dev_attr_operation.attr,
	&dev_attr_blocksize.attr,
	NULL
};

static const struct attribute_group tape_attr_group = {
	.attrs = tape_attrs,
};
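
/*
 * Editor's sketch (illustrative; assumes the standard ccw bus layout):
 * once tape_generic_probe() registers this group, the attributes appear
 * as read-only sysfs files below the ccw device, for example:
 *
 *	/sys/bus/ccw/devices/0.0.0181/state
 *	/sys/bus/ccw/devices/0.0.0181/operation
 *
 * where 0.0.0181 is a hypothetical bus id. "cat state" then prints one of
 * the tape_state_verbose strings, or "OFFLINE" if no minor is assigned.
 */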

/*
 * Tape state functions
 */
void
tape_state_set(struct tape_device *device, enum tape_state newstate)
{
	const char *str;

	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(3, "ts_set err: not oper\n");
		return;
	}
	DBF_EVENT(4, "ts. dev:	%x\n", device->first_minor);
	DBF_EVENT(4, "old ts:\t\n");
	if (device->tape_state < TS_SIZE && device->tape_state >= 0)
		str = tape_state_verbose[device->tape_state];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "%s\n", str);
	DBF_EVENT(4, "new ts:\t\n");
	if (newstate < TS_SIZE && newstate >= 0)
		str = tape_state_verbose[newstate];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "%s\n", str);
	device->tape_state = newstate;
	wake_up(&device->state_change_wq);
}

struct tape_med_state_work_data {
	struct tape_device *device;
	enum tape_medium_state state;
	struct work_struct  work;
};

static void
tape_med_state_work_handler(struct work_struct *work)
{
	static char env_state_loaded[] = "MEDIUM_STATE=LOADED";
	static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED";
	struct tape_med_state_work_data *p =
		container_of(work, struct tape_med_state_work_data, work);
	struct tape_device *device = p->device;
	char *envp[] = { NULL, NULL };

	switch (p->state) {
	case MS_UNLOADED:
		pr_info("%s: The tape cartridge has been successfully "
			"unloaded\n", dev_name(&device->cdev->dev));
		envp[0] = env_state_unloaded;
		kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
		break;
	case MS_LOADED:
		pr_info("%s: A tape cartridge has been mounted\n",
			dev_name(&device->cdev->dev));
		envp[0] = env_state_loaded;
		kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
		break;
	default:
		break;
	}
	tape_put_device(device);
	kfree(p);
}

static void
tape_med_state_work(struct tape_device *device, enum tape_medium_state state)
{
	struct tape_med_state_work_data *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (p) {
		INIT_WORK(&p->work, tape_med_state_work_handler);
		p->device = tape_get_device(device);
		p->state = state;
		schedule_work(&p->work);
	}
}

void
tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
{
	enum tape_medium_state oldstate;

	oldstate = device->medium_state;
	if (oldstate == newstate)
		return;
	device->medium_state = newstate;
	switch (newstate) {
	case MS_UNLOADED:
		device->tape_generic_status |= GMT_DR_OPEN(~0);
		if (oldstate == MS_LOADED)
			tape_med_state_work(device, MS_UNLOADED);
		break;
	case MS_LOADED:
		device->tape_generic_status &= ~GMT_DR_OPEN(~0);
		if (oldstate == MS_UNLOADED)
			tape_med_state_work(device, MS_LOADED);
		break;
	default:
		break;
	}
	wake_up(&device->state_change_wq);
}

/*
 * Stop running ccw. Has to be called with the device lock held.
 */
static int
__tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int retries;
	int rc;

	/* Check if interrupt has already been processed */
	if (request->callback == NULL)
		return 0;

	rc = 0;
	for (retries = 0; retries < 5; retries++) {
		rc = ccw_device_clear(device->cdev, (long) request);

		switch (rc) {
			case 0:
				request->status	= TAPE_REQUEST_DONE;
				return 0;
			case -EBUSY:
				request->status	= TAPE_REQUEST_CANCEL;
				schedule_delayed_work(&device->tape_dnr, 0);
				return 0;
			case -ENODEV:
				DBF_EXCEPTION(2, "device gone, retry\n");
				break;
			case -EIO:
				DBF_EXCEPTION(2, "I/O error, retry\n");
				break;
			default:
				BUG();
		}
	}

	return rc;
}

/*
 * Add device into the sorted list, giving it the first
 * available minor number.
 */
static int
tape_assign_minor(struct tape_device *device)
{
	struct tape_device *tmp;
	int minor;

	minor = 0;
	write_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (minor < tmp->first_minor)
			break;
		minor += TAPE_MINORS_PER_DEV;
	}
	if (minor >= 256) {
		write_unlock(&tape_device_lock);
		return -ENODEV;
	}
	device->first_minor = minor;
	list_add_tail(&device->node, &tmp->node);
	write_unlock(&tape_device_lock);
	return 0;
}
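
/*
 * Editor's worked example for tape_assign_minor() (illustrative; assumes
 * TAPE_MINORS_PER_DEV == 2): with devices already holding first_minor 0
 * and 4, the scan stops at the gap in front of 4 and the new device gets
 * first_minor 2; list_add_tail() before "tmp" keeps the list sorted.
 */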

/* remove device from the list */
static void
tape_remove_minor(struct tape_device *device)
{
	write_lock(&tape_device_lock);
	list_del_init(&device->node);
	device->first_minor = -1;
	write_unlock(&tape_device_lock);
}

/*
 * Set a device online.
 *
 * This function is called by the common I/O layer to move a device from the
 * detected-but-offline state into the online state.
 * If we return an error (RC < 0) the device remains in the offline state. This
 * can happen if the device is assigned somewhere else, for example.
 */
int
tape_generic_online(struct tape_device *device,
		   struct tape_discipline *discipline)
{
	int rc;

	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);

	if (device->tape_state != TS_INIT) {
		DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
		return -EINVAL;
	}

	timer_setup(&device->lb_timeout, tape_long_busy_timeout, 0);

	/* Let the discipline have a go at the device. */
	device->discipline = discipline;
	if (!try_module_get(discipline->owner)) {
		return -EINVAL;
	}

	rc = discipline->setup_device(device);
	if (rc)
		goto out;
	rc = tape_assign_minor(device);
	if (rc)
		goto out_discipline;

	rc = tapechar_setup_device(device);
	if (rc)
		goto out_minor;

	tape_state_set(device, TS_UNUSED);

	DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);

	return 0;

out_minor:
	tape_remove_minor(device);
out_discipline:
	device->discipline->cleanup_device(device);
	device->discipline = NULL;
out:
	module_put(discipline->owner);
	return rc;
}

static void
tape_cleanup_device(struct tape_device *device)
{
	tapechar_cleanup_device(device);
	device->discipline->cleanup_device(device);
	module_put(device->discipline->owner);
	tape_remove_minor(device);
	tape_med_state_set(device, MS_UNKNOWN);
}

/*
 * Set device offline.
 *
 * Called by the common I/O layer if the drive should be set offline on user
 * request. We may prevent this by returning an error.
 * Manual offline is only allowed while the drive is not in use.
 */
int
tape_generic_offline(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
		device->cdev_id, device);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
		case TS_INIT:
		case TS_NOT_OPER:
			spin_unlock_irq(get_ccwdev_lock(device->cdev));
			break;
		case TS_UNUSED:
			tape_state_set(device, TS_INIT);
			spin_unlock_irq(get_ccwdev_lock(device->cdev));
			tape_cleanup_device(device);
			break;
		default:
			DBF_EVENT(3, "(%08x): Set offline failed "
				"- drive in use.\n",
				device->cdev_id);
			spin_unlock_irq(get_ccwdev_lock(device->cdev));
			return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
	return 0;
}

/*
 * Allocate memory for a new device structure.
 */
static struct tape_device *
tape_alloc_device(void)
{
	struct tape_device *device;

	device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
	if (device == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		return ERR_PTR(-ENOMEM);
	}
	device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
	if (device->modeset_byte == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&device->mutex);
	INIT_LIST_HEAD(&device->req_queue);
	INIT_LIST_HEAD(&device->node);
	init_waitqueue_head(&device->state_change_wq);
	init_waitqueue_head(&device->wait_queue);
	device->tape_state = TS_INIT;
	device->medium_state = MS_UNKNOWN;
	*device->modeset_byte = 0;
	device->first_minor = -1;
	atomic_set(&device->ref_count, 1);
	INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);

	return device;
}

/*
 * Get a reference to an existing device structure. This will automatically
 * increment the reference count.
 */
struct tape_device *
tape_get_device(struct tape_device *device)
{
	int count;

	count = atomic_inc_return(&device->ref_count);
	DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);
	return device;
}

/*
 * Decrease the reference counter of a device structure. If the
 * reference counter reaches zero, free the device structure.
 */
void
tape_put_device(struct tape_device *device)
{
	int count;

	count = atomic_dec_return(&device->ref_count);
	DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);
	BUG_ON(count < 0);
	if (count == 0) {
		kfree(device->modeset_byte);
		kfree(device);
	}
}
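
/*
 * Editor's sketch of the reference discipline (illustrative): every
 * tape_get_device() must be balanced by exactly one tape_put_device().
 * A typical caller looks like:
 *
 *	struct tape_device *device;
 *
 *	device = tape_find_device(0);	// takes a reference on success
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 *	// ... use the device ...
 *	tape_put_device(device);	// drops it; may free the structure
 */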

/*
 * Find tape device by a device index.
 */
struct tape_device *
tape_find_device(int devindex)
{
	struct tape_device *device, *tmp;

	device = ERR_PTR(-ENODEV);
	read_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
			device = tape_get_device(tmp);
			break;
		}
	}
	read_unlock(&tape_device_lock);
	return device;
}

/*
 * Driverfs tape probe function.
 */
int
tape_generic_probe(struct ccw_device *cdev)
{
	struct tape_device *device;
	int ret;
	struct ccw_dev_id dev_id;

	device = tape_alloc_device();
	if (IS_ERR(device))
		return -ENODEV;
	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
				     CCWDEV_DO_MULTIPATH);
	ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
	if (ret) {
		tape_put_device(device);
		return ret;
	}
	dev_set_drvdata(&cdev->dev, device);
	cdev->handler = __tape_do_irq;
	device->cdev = cdev;
	ccw_device_get_id(cdev, &dev_id);
	device->cdev_id = devid_to_int(&dev_id);
	return ret;
}

static void
__tape_discard_requests(struct tape_device *device)
{
	struct tape_request *request;
	struct list_head *l, *n;

	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);
		if (request->status == TAPE_REQUEST_IN_IO)
			request->status = TAPE_REQUEST_DONE;
		list_del(&request->list);

		/* Decrease ref_count for removed request. */
		request->device = NULL;
		tape_put_device(device);
		request->rc = -EIO;
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}

/*
 * Driverfs tape remove function.
 *
 * This function is called whenever the common I/O layer detects the device
 * gone. This can happen at any time and we cannot refuse.
 */
void
tape_generic_remove(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return;
	}
	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
		case TS_INIT:
			tape_state_set(device, TS_NOT_OPER);
			fallthrough;
		case TS_NOT_OPER:
			/*
			 * Nothing to do.
			 */
			spin_unlock_irq(get_ccwdev_lock(device->cdev));
			break;
		case TS_UNUSED:
			/*
			 * Need only to release the device.
			 */
			tape_state_set(device, TS_NOT_OPER);
			spin_unlock_irq(get_ccwdev_lock(device->cdev));
			tape_cleanup_device(device);
			break;
		default:
			/*
			 * There may be requests on the queue. We will not get
			 * an interrupt for a request that was running. So we
			 * just post them all as I/O errors.
			 */
			DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
				device->cdev_id);
			pr_warn("%s: A tape unit was detached while in use\n",
				dev_name(&device->cdev->dev));
			tape_state_set(device, TS_NOT_OPER);
			__tape_discard_requests(device);
			spin_unlock_irq(get_ccwdev_lock(device->cdev));
			tape_cleanup_device(device);
	}

	device = dev_get_drvdata(&cdev->dev);
	if (device) {
		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
		dev_set_drvdata(&cdev->dev, NULL);
		tape_put_device(device);
	}
}

/*
 * Allocate a new tape ccw request
 */
struct tape_request *
tape_alloc_request(int cplength, int datasize)
{
	struct tape_request *request;

	BUG_ON(datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);

	request = kzalloc(sizeof(struct tape_request), GFP_KERNEL);
	if (request == NULL) {
		DBF_EXCEPTION(1, "cqra nomem\n");
		return ERR_PTR(-ENOMEM);
	}
	/* allocate channel program */
	if (cplength > 0) {
		request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
					  GFP_ATOMIC | GFP_DMA);
		if (request->cpaddr == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	/* alloc small kernel buffer */
	if (datasize > 0) {
		request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA);
		if (request->cpdata == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request->cpaddr);
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
		request->cpdata);

	return request;
}

/*
 * Free tape ccw request
 */
void
tape_free_request(struct tape_request *request)
{
	DBF_LH(6, "Free request %p\n", request);

	if (request->device)
		tape_put_device(request->device);
	kfree(request->cpdata);
	kfree(request->cpaddr);
	kfree(request);
}
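
/*
 * Editor's sketch of the request life cycle (illustrative; real channel
 * programs are built by the disciplines, and "device" is assumed to be a
 * referenced struct tape_device):
 *
 *	struct tape_request *request;
 *	int rc;
 *
 *	request = tape_alloc_request(1, 0);	// one CCW, no data buffer
 *	if (IS_ERR(request))
 *		return PTR_ERR(request);
 *	request->op = TO_NOP;
 *	// ... fill request->cpaddr[0] with a channel command ...
 *	rc = tape_do_io(device, request);	// queue and wait, see below
 *	tape_free_request(request);
 *	return rc;
 */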

static int
__tape_start_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	rc = ccw_device_start(
		device->cdev,
		request->cpaddr,
		(unsigned long) request,
		0x00,
		request->options
	);
	if (rc == 0) {
		request->status = TAPE_REQUEST_IN_IO;
	} else if (rc == -EBUSY) {
		/* The common I/O subsystem is currently busy. Retry later. */
		request->status = TAPE_REQUEST_QUEUED;
		schedule_delayed_work(&device->tape_dnr, 0);
		rc = 0;
	} else {
		/* Start failed. Remove request and indicate failure. */
		DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
	}
	return rc;
}

static void
__tape_start_next_request(struct tape_device *device)
{
	struct list_head *l, *n;
	struct tape_request *request;
	int rc;

	DBF_LH(6, "__tape_start_next_request(%p)\n", device);
	/*
	 * Try to start each request on the request queue until one is
	 * started successfully.
	 */
	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);

		/*
		 * Avoid race condition if bottom-half was triggered more than
		 * once.
		 */
		if (request->status == TAPE_REQUEST_IN_IO)
			return;
		/*
		 * Request has already been stopped. We have to wait until
		 * the request is removed from the queue in the interrupt
		 * handling.
		 */
		if (request->status == TAPE_REQUEST_DONE)
			return;

		/*
		 * We wanted to cancel the request but the common I/O layer
		 * was busy at that time. This can only happen if this
		 * function is called by delayed_next_request.
		 * Otherwise we start the next request on the queue.
		 */
		if (request->status == TAPE_REQUEST_CANCEL) {
			rc = __tape_cancel_io(device, request);
		} else {
			rc = __tape_start_io(device, request);
		}
		if (rc == 0)
			return;

		/* Set ending status. */
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}

static void
tape_delayed_next_request(struct work_struct *work)
{
	struct tape_device *device =
		container_of(work, struct tape_device, tape_dnr.work);

	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	__tape_start_next_request(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

static void tape_long_busy_timeout(struct timer_list *t)
{
	struct tape_device *device = from_timer(device, t, lb_timeout);
	struct tape_request *request;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	request = list_entry(device->req_queue.next, struct tape_request, list);
	BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
	DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
	__tape_start_next_request(device);
	tape_put_device(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

static void
__tape_end_request(struct tape_device *device, struct tape_request *request,
		   int rc)
{
	DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
	if (request) {
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}

	/* Start next request. */
	if (!list_empty(&device->req_queue))
		__tape_start_next_request(device);
}

/*
 * Write sense data to dbf
 */
void
tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
		    struct irb *irb)
{
	unsigned int *sptr;
	const char *op;

	if (request != NULL)
		op = tape_op_verbose[request->op];
	else
		op = "---";
	DBF_EVENT(3, "DSTAT : %02x   CSTAT: %02x\n",
		  irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
	DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
	sptr = (unsigned int *) irb->ecw;
	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
	DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
	DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
	DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
}

/*
 * I/O helper function. Adds the request to the request queue
 * and starts it if the tape is idle. Has to be called with
 * the device lock held.
 */
static int
__tape_start_request(struct tape_device *device, struct tape_request *request)
{
	int rc;

	switch (request->op) {
		case TO_MSEN:
		case TO_ASSIGN:
		case TO_UNASSIGN:
		case TO_READ_ATTMSG:
		case TO_RDC:
			if (device->tape_state == TS_INIT)
				break;
			if (device->tape_state == TS_UNUSED)
				break;
			fallthrough;
		default:
			if (device->tape_state == TS_BLKUSE)
				break;
			if (device->tape_state != TS_IN_USE)
				return -ENODEV;
	}

	/* Increase use count of device for the added request. */
	request->device = tape_get_device(device);

	if (list_empty(&device->req_queue)) {
		/* No other requests are on the queue. Start this one. */
		rc = __tape_start_io(device, request);
		if (rc)
			return rc;

		DBF_LH(5, "Request %p added for execution.\n", request);
		list_add(&request->list, &device->req_queue);
	} else {
		DBF_LH(5, "Request %p add to queue.\n", request);
		request->status = TAPE_REQUEST_QUEUED;
		list_add_tail(&request->list, &device->req_queue);
	}
	return 0;
}

/*
 * Add the request to the request queue, try to start it if the
 * tape is idle. Return without waiting for end of I/O.
 */
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
	int rc;

	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * tape_do_io/__tape_wake_up
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait uninterruptibly for its completion.
 */
static void
__tape_wake_up(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up((wait_queue_head_t *) data);
}

int
tape_do_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up;
	request->callback_data = &device->wait_queue;
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	wait_event(device->wait_queue, (request->callback == NULL));
	/* Get rc from request */
	return request->rc;
}
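
/*
 * Editor's note (illustrative): tape_do_io() blocks by using the request
 * callback as a completion flag. __tape_wake_up() clears
 * request->callback and wakes device->wait_queue, so the wait_event()
 * condition (request->callback == NULL) doubles as "interrupt handled".
 * Callers that cannot sleep use tape_do_io_async() instead and supply
 * their own callback via request->callback / request->callback_data.
 */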

/*
 * tape_do_io_interruptible/__tape_wake_up_interruptible
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait interruptibly for its completion.
 */
static void
__tape_wake_up_interruptible(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up_interruptible((wait_queue_head_t *) data);
}

int
tape_do_io_interruptible(struct tape_device *device,
			 struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up_interruptible;
	request->callback_data = &device->wait_queue;
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	rc = wait_event_interruptible(device->wait_queue,
				      (request->callback == NULL));
	if (rc != -ERESTARTSYS)
		/* Request finished normally. */
		return request->rc;

	/* Interrupted by a signal. We have to stop the current request. */
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc == 0) {
		/* Wait for the interrupt that acknowledges the halt. */
		do {
			rc = wait_event_interruptible(
				device->wait_queue,
				(request->callback == NULL)
			);
		} while (rc == -ERESTARTSYS);

		DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
		rc = -ERESTARTSYS;
	}
	return rc;
}

/*
 * Stop running ccw.
 */
int
tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Tape interrupt routine, called from the ccw_device layer
 */
static void
__tape_do_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct tape_device *device;
	struct tape_request *request;
	int rc;

	device = dev_get_drvdata(&cdev->dev);
	if (device == NULL) {
		return;
	}
	request = (struct tape_request *) intparm;

	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);

	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb)) {
		/* FIXME: What to do with the request? */
		switch (PTR_ERR(irb)) {
			case -ETIMEDOUT:
				DBF_LH(1, "(%08x): Request timed out\n",
				       device->cdev_id);
				fallthrough;
			case -EIO:
				__tape_end_request(device, request, -EIO);
				break;
			default:
				DBF_LH(1, "(%08x): Unexpected i/o error %li\n",
				       device->cdev_id, PTR_ERR(irb));
		}
		return;
	}

	/*
	 * If the condition code is not zero and the start function bit is
	 * still set, this is a deferred error and the last start I/O did
	 * not succeed. At this point the condition that caused the deferred
	 * error might still apply. So we just schedule the request to be
	 * started later.
	 */
	if (irb->scsw.cmd.cc != 0 &&
	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
	    (request->status == TAPE_REQUEST_IN_IO)) {
		DBF_EVENT(3, "(%08x): deferred cc=%i, fctl=%i. restarting\n",
			device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
		request->status = TAPE_REQUEST_QUEUED;
		schedule_delayed_work(&device->tape_dnr, HZ);
		return;
	}

	/* May be an unsolicited irq */
	if (request != NULL)
		request->rescnt = irb->scsw.cmd.count;
	else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
		 !list_empty(&device->req_queue)) {
		/* Not Ready to Ready after long busy? */
		struct tape_request *req;
		req = list_entry(device->req_queue.next,
				 struct tape_request, list);
		if (req->status == TAPE_REQUEST_LONG_BUSY) {
			DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
			if (del_timer(&device->lb_timeout)) {
				tape_put_device(device);
				__tape_start_next_request(device);
			}
			return;
		}
	}
	if (irb->scsw.cmd.dstat != 0x0c) {
		/* Set the 'ONLINE' flag depending on sense byte 1 */
		if (*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
			device->tape_generic_status |= GMT_ONLINE(~0);
		else
			device->tape_generic_status &= ~GMT_ONLINE(~0);

		/*
		 * Any request that does not come back with channel end
		 * and device end is unusual. Log the sense data.
		 */
		DBF_EVENT(3, "-- Tape Interrupthandler --\n");
		tape_dump_sense_dbf(device, request, irb);
	} else {
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
	}
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "tape:device is not operational\n");
		return;
	}

	/*
	 * Requests that were canceled still come back with an interrupt.
	 * To detect these requests the state will be set to TAPE_REQUEST_DONE.
	 */
	if (request != NULL && request->status == TAPE_REQUEST_DONE) {
		__tape_end_request(device, request, -EIO);
		return;
	}

	rc = device->discipline->irq(device, request, irb);
	/*
	 * rc < 0 : request finished unsuccessfully.
	 * rc == TAPE_IO_SUCCESS: request finished successfully.
	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
	 * rc == TAPE_IO_RETRY: request finished but needs another go.
	 * rc == TAPE_IO_STOP: request needs to get terminated.
	 */
	switch (rc) {
		case TAPE_IO_SUCCESS:
			/* Upon normal completion the device _is_ online */
			device->tape_generic_status |= GMT_ONLINE(~0);
			__tape_end_request(device, request, rc);
			break;
		case TAPE_IO_PENDING:
			break;
		case TAPE_IO_LONG_BUSY:
			device->lb_timeout.expires = jiffies +
				LONG_BUSY_TIMEOUT * HZ;
			DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
			add_timer(&device->lb_timeout);
			request->status = TAPE_REQUEST_LONG_BUSY;
			break;
		case TAPE_IO_RETRY:
			rc = __tape_start_io(device, request);
			if (rc)
				__tape_end_request(device, request, rc);
			break;
		case TAPE_IO_STOP:
			rc = __tape_cancel_io(device, request);
			if (rc)
				__tape_end_request(device, request, rc);
			break;
		default:
			if (rc > 0) {
				DBF_EVENT(6, "xunknownrc\n");
				__tape_end_request(device, request, -EIO);
			} else {
				__tape_end_request(device, request, rc);
			}
			break;
	}
}
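
/*
 * Editor's sketch of the discipline irq contract (hypothetical handler,
 * not from this file; DEV_STAT_* are the s390 cio device-status flags):
 *
 *	static int
 *	example_irq(struct tape_device *device, struct tape_request *request,
 *		    struct irb *irb)
 *	{
 *		if (irb->scsw.cmd.dstat ==
 *		    (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
 *			return TAPE_IO_SUCCESS;
 *		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
 *			return TAPE_IO_RETRY;	// or decode the sense first
 *		return TAPE_IO_PENDING;
 *	}
 *
 * The switch above then ends, retries, or keeps the request accordingly.
 */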

/*
 * Tape device open function used by tape_char frontend.
 */
int
tape_open(struct tape_device *device)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "TAPE:nodev\n");
		rc = -ENODEV;
	} else if (device->tape_state == TS_IN_USE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->tape_state == TS_BLKUSE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->discipline != NULL &&
		   !try_module_get(device->discipline->owner)) {
		DBF_EVENT(6, "TAPE:nodisc\n");
		rc = -ENODEV;
	} else {
		tape_state_set(device, TS_IN_USE);
		rc = 0;
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Tape device release function used by tape_char frontend.
 */
int
tape_release(struct tape_device *device)
{
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_IN_USE)
		tape_state_set(device, TS_UNUSED);
	module_put(device->discipline->owner);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return 0;
}

/*
 * Execute a magnetic tape command a number of times.
 */
int
tape_mtop(struct tape_device *device, int mt_op, int mt_count)
{
	tape_mtop_fn fn;
	int rc;

	DBF_EVENT(6, "TAPE:mtio\n");
	DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
	DBF_EVENT(6, "TAPE:arg:	 %x\n", mt_count);

	if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
		return -EINVAL;
	fn = device->discipline->mtop_array[mt_op];
	if (fn == NULL)
		return -EINVAL;

	/* We assume that the backends can handle count up to 500. */
	if (mt_op == MTBSR  || mt_op == MTFSR  || mt_op == MTFSF  ||
	    mt_op == MTBSF  || mt_op == MTFSFM || mt_op == MTBSFM) {
		rc = 0;
		for (; mt_count > 500; mt_count -= 500)
			if ((rc = fn(device, 500)) != 0)
				break;
		if (rc == 0)
			rc = fn(device, mt_count);
	} else
		rc = fn(device, mt_count);
	return rc;
}
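
/*
 * Editor's worked example (illustrative): tape_mtop(device, MTFSF, 1200)
 * calls fn(device, 500) twice and then fn(device, 200), stopping at the
 * first non-zero return code.
 */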

/*
 * Tape init function.
 */
static int
tape_init(void)
{
	TAPE_DBF_AREA = debug_register("tape", 2, 2, 4 * sizeof(long));
	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
#ifdef DBF_LIKE_HELL
	debug_set_level(TAPE_DBF_AREA, 6);
#endif
	DBF_EVENT(3, "tape init\n");
	tape_proc_init();
	tapechar_init();
	return 0;
}

/*
 * Tape exit function.
 */
static void
tape_exit(void)
{
	DBF_EVENT(6, "tape exit\n");

	/* Get rid of the frontends */
	tapechar_exit();
	tape_proc_cleanup();
	debug_unregister(TAPE_DBF_AREA);
}

MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
	      "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
MODULE_LICENSE("GPL");

module_init(tape_init);
module_exit(tape_exit);

EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_cancel_io);
EXPORT_SYMBOL(tape_mtop);