cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

device.c (49100B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 *  bus driver for ccw devices
      4 *
      5 *    Copyright IBM Corp. 2002, 2008
      6 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
      7 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
      8 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
      9 */
     10
     11#define KMSG_COMPONENT "cio"
     12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
     13
     14#include <linux/export.h>
     15#include <linux/init.h>
     16#include <linux/spinlock.h>
     17#include <linux/errno.h>
     18#include <linux/err.h>
     19#include <linux/slab.h>
     20#include <linux/list.h>
     21#include <linux/device.h>
     22#include <linux/workqueue.h>
     23#include <linux/delay.h>
     24#include <linux/timer.h>
     25#include <linux/kernel_stat.h>
     26#include <linux/sched/signal.h>
     27#include <linux/dma-mapping.h>
     28
     29#include <asm/ccwdev.h>
     30#include <asm/cio.h>
     31#include <asm/param.h>		/* HZ */
     32#include <asm/cmb.h>
     33#include <asm/isc.h>
     34
     35#include "chp.h"
     36#include "cio.h"
     37#include "cio_debug.h"
     38#include "css.h"
     39#include "device.h"
     40#include "ioasm.h"
     41#include "io_sch.h"
     42#include "blacklist.h"
     43#include "chsc.h"
     44
     45static struct timer_list recovery_timer;
     46static DEFINE_SPINLOCK(recovery_lock);
     47static int recovery_phase;
     48static const unsigned long recovery_delay[] = { 3, 30, 300 };
     49
     50static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
     51static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
     52static struct bus_type ccw_bus_type;
     53
     54/******************* bus type handling ***********************/
     55
     56/* The Linux driver model distinguishes between a bus type and
     57 * the bus itself. Of course we only have one channel
     58 * subsystem driver and one channel system per machine, but
     59 * we still use the abstraction. T.R. says it's a good idea. */
     60static int
     61ccw_bus_match (struct device * dev, struct device_driver * drv)
     62{
     63	struct ccw_device *cdev = to_ccwdev(dev);
     64	struct ccw_driver *cdrv = to_ccwdrv(drv);
     65	const struct ccw_device_id *ids = cdrv->ids, *found;
     66
     67	if (!ids)
     68		return 0;
     69
     70	found = ccw_device_id_match(ids, &cdev->id);
     71	if (!found)
     72		return 0;
     73
     74	cdev->id.driver_info = found->driver_info;
     75
     76	return 1;
     77}
     78
     79/* Store modalias string delimited by prefix/suffix string into buffer with
     80 * specified size. Return length of resulting string (excluding trailing '\0')
     81 * even if string doesn't fit buffer (snprintf semantics). */
     82static int snprint_alias(char *buf, size_t size,
     83			 struct ccw_device_id *id, const char *suffix)
     84{
     85	int len;
     86
     87	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
     88	if (len > size)
     89		return len;
     90	buf += len;
     91	size -= len;
     92
     93	if (id->dev_type != 0)
     94		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
     95				id->dev_model, suffix);
     96	else
     97		len += snprintf(buf, size, "dtdm%s", suffix);
     98
     99	return len;
    100}
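/*
 * Example derived from the format strings above: a control unit of type
 * 0x3990, model 0xE9, with an attached device of type 0x3390, model 0x0C,
 * produces
 *
 *	ccw:t3990mE9dt3390dm0C
 *
 * while a device that never answered sense ID (dev_type == 0) produces
 *
 *	ccw:t3990mE9dtdm
 */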
    101
    102/* Set up environment variables for ccw device uevent. Return 0 on success,
    103 * non-zero otherwise. */
    104static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
    105{
    106	struct ccw_device *cdev = to_ccwdev(dev);
    107	struct ccw_device_id *id = &(cdev->id);
    108	int ret;
    109	char modalias_buf[30];
    110
    111	/* CU_TYPE= */
    112	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
    113	if (ret)
    114		return ret;
    115
    116	/* CU_MODEL= */
    117	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
    118	if (ret)
    119		return ret;
    120
    121	/* The next two can be zero, that's ok for us */
    122	/* DEV_TYPE= */
    123	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
    124	if (ret)
    125		return ret;
    126
    127	/* DEV_MODEL= */
    128	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
    129	if (ret)
    130		return ret;
    131
    132	/* MODALIAS=  */
    133	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
    134	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
    135	return ret;
    136}
    137
    138static void io_subchannel_irq(struct subchannel *);
    139static int io_subchannel_probe(struct subchannel *);
    140static void io_subchannel_remove(struct subchannel *);
    141static void io_subchannel_shutdown(struct subchannel *);
    142static int io_subchannel_sch_event(struct subchannel *, int);
    143static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
    144				   int);
    145static void recovery_func(struct timer_list *unused);
    146
    147static struct css_device_id io_subchannel_ids[] = {
    148	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
    149	{ /* end of list */ },
    150};
    151
    152static int io_subchannel_settle(void)
    153{
    154	int ret;
    155
    156	ret = wait_event_interruptible(ccw_device_init_wq,
    157				atomic_read(&ccw_device_init_count) == 0);
    158	if (ret)
    159		return -EINTR;
    160	flush_workqueue(cio_work_q);
    161	return 0;
    162}
    163
    164static struct css_driver io_subchannel_driver = {
    165	.drv = {
    166		.owner = THIS_MODULE,
    167		.name = "io_subchannel",
    168	},
    169	.subchannel_type = io_subchannel_ids,
    170	.irq = io_subchannel_irq,
    171	.sch_event = io_subchannel_sch_event,
    172	.chp_event = io_subchannel_chp_event,
    173	.probe = io_subchannel_probe,
    174	.remove = io_subchannel_remove,
    175	.shutdown = io_subchannel_shutdown,
    176	.settle = io_subchannel_settle,
    177};
    178
    179int __init io_subchannel_init(void)
    180{
    181	int ret;
    182
    183	timer_setup(&recovery_timer, recovery_func, 0);
    184	ret = bus_register(&ccw_bus_type);
    185	if (ret)
    186		return ret;
    187	ret = css_driver_register(&io_subchannel_driver);
    188	if (ret)
    189		bus_unregister(&ccw_bus_type);
    190
    191	return ret;
    192}
    193
    194
    195/************************ device handling **************************/
    196
    197static ssize_t
    198devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
    199{
    200	struct ccw_device *cdev = to_ccwdev(dev);
    201	struct ccw_device_id *id = &(cdev->id);
    202
    203	if (id->dev_type != 0)
    204		return sprintf(buf, "%04x/%02x\n",
    205				id->dev_type, id->dev_model);
    206	else
    207		return sprintf(buf, "n/a\n");
    208}
    209
    210static ssize_t
    211cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
    212{
    213	struct ccw_device *cdev = to_ccwdev(dev);
    214	struct ccw_device_id *id = &(cdev->id);
    215
    216	return sprintf(buf, "%04x/%02x\n",
    217		       id->cu_type, id->cu_model);
    218}
    219
    220static ssize_t
    221modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
    222{
    223	struct ccw_device *cdev = to_ccwdev(dev);
    224	struct ccw_device_id *id = &(cdev->id);
    225	int len;
    226
    227	len = snprint_alias(buf, PAGE_SIZE, id, "\n");
    228
    229	return len > PAGE_SIZE ? PAGE_SIZE : len;
    230}
    231
    232static ssize_t
    233online_show (struct device *dev, struct device_attribute *attr, char *buf)
    234{
    235	struct ccw_device *cdev = to_ccwdev(dev);
    236
    237	return sprintf(buf, cdev->online ? "1\n" : "0\n");
    238}
    239
    240int ccw_device_is_orphan(struct ccw_device *cdev)
    241{
    242	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
    243}
    244
    245static void ccw_device_unregister(struct ccw_device *cdev)
    246{
    247	if (device_is_registered(&cdev->dev)) {
    248		/* Undo device_add(). */
    249		device_del(&cdev->dev);
    250	}
    251	if (cdev->private->flags.initialized) {
    252		cdev->private->flags.initialized = 0;
    253		/* Release reference from device_initialize(). */
    254		put_device(&cdev->dev);
    255	}
    256}
    257
    258static void io_subchannel_quiesce(struct subchannel *);
    259
    260/**
    261 * ccw_device_set_offline() - disable a ccw device for I/O
    262 * @cdev: target ccw device
    263 *
    264 * This function calls the driver's set_offline() function for @cdev, if
    265 * given, and then disables @cdev.
    266 * Returns:
    267 *   %0 on success and a negative error value on failure.
    268 * Context:
    269 *  enabled, ccw device lock not held
    270 */
    271int ccw_device_set_offline(struct ccw_device *cdev)
    272{
    273	struct subchannel *sch;
    274	int ret, state;
    275
    276	if (!cdev)
    277		return -ENODEV;
    278	if (!cdev->online || !cdev->drv)
    279		return -EINVAL;
    280
    281	if (cdev->drv->set_offline) {
    282		ret = cdev->drv->set_offline(cdev);
    283		if (ret != 0)
    284			return ret;
    285	}
    286	spin_lock_irq(cdev->ccwlock);
    287	sch = to_subchannel(cdev->dev.parent);
    288	cdev->online = 0;
    289	/* Wait until a final state or DISCONNECTED is reached */
    290	while (!dev_fsm_final_state(cdev) &&
    291	       cdev->private->state != DEV_STATE_DISCONNECTED) {
    292		spin_unlock_irq(cdev->ccwlock);
    293		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
    294			   cdev->private->state == DEV_STATE_DISCONNECTED));
    295		spin_lock_irq(cdev->ccwlock);
    296	}
    297	do {
    298		ret = ccw_device_offline(cdev);
    299		if (!ret)
    300			break;
    301		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
    302			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
    303			      cdev->private->dev_id.devno);
    304		if (ret != -EBUSY)
    305			goto error;
    306		state = cdev->private->state;
    307		spin_unlock_irq(cdev->ccwlock);
    308		io_subchannel_quiesce(sch);
    309		spin_lock_irq(cdev->ccwlock);
    310		cdev->private->state = state;
    311	} while (ret == -EBUSY);
    312	spin_unlock_irq(cdev->ccwlock);
    313	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
    314		   cdev->private->state == DEV_STATE_DISCONNECTED));
    315	/* Inform the user if set offline failed. */
    316	if (cdev->private->state == DEV_STATE_BOXED) {
    317		pr_warn("%s: The device entered boxed state while being set offline\n",
    318			dev_name(&cdev->dev));
    319	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
    320		pr_warn("%s: The device stopped operating while being set offline\n",
    321			dev_name(&cdev->dev));
    322	}
    323	/* Give up reference from ccw_device_set_online(). */
    324	put_device(&cdev->dev);
    325	return 0;
    326
    327error:
    328	cdev->private->state = DEV_STATE_OFFLINE;
    329	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
    330	spin_unlock_irq(cdev->ccwlock);
    331	/* Give up reference from ccw_device_set_online(). */
    332	put_device(&cdev->dev);
    333	return -ENODEV;
    334}
    335
    336/**
    337 * ccw_device_set_online() - enable a ccw device for I/O
    338 * @cdev: target ccw device
    339 *
    340 * This function first enables @cdev and then calls the driver's set_online()
    341 * function for @cdev, if given. If set_online() returns an error, @cdev is
    342 * disabled again.
    343 * Returns:
    344 *   %0 on success and a negative error value on failure.
    345 * Context:
    346 *  enabled, ccw device lock not held
    347 */
    348int ccw_device_set_online(struct ccw_device *cdev)
    349{
    350	int ret;
    351	int ret2;
    352
    353	if (!cdev)
    354		return -ENODEV;
    355	if (cdev->online || !cdev->drv)
    356		return -EINVAL;
    357	/* Hold on to an extra reference while device is online. */
    358	if (!get_device(&cdev->dev))
    359		return -ENODEV;
    360
    361	spin_lock_irq(cdev->ccwlock);
    362	ret = ccw_device_online(cdev);
    363	spin_unlock_irq(cdev->ccwlock);
    364	if (ret == 0)
    365		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
    366	else {
    367		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
    368			      "device 0.%x.%04x\n",
    369			      ret, cdev->private->dev_id.ssid,
    370			      cdev->private->dev_id.devno);
    371		/* Give up online reference since onlining failed. */
    372		put_device(&cdev->dev);
    373		return ret;
    374	}
    375	spin_lock_irq(cdev->ccwlock);
    376	/* Check if online processing was successful */
    377	if ((cdev->private->state != DEV_STATE_ONLINE) &&
    378	    (cdev->private->state != DEV_STATE_W4SENSE)) {
    379		spin_unlock_irq(cdev->ccwlock);
    380		/* Inform the user that set online failed. */
    381		if (cdev->private->state == DEV_STATE_BOXED) {
    382			pr_warn("%s: Setting the device online failed because it is boxed\n",
    383				dev_name(&cdev->dev));
    384		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
    385			pr_warn("%s: Setting the device online failed because it is not operational\n",
    386				dev_name(&cdev->dev));
    387		}
    388		/* Give up online reference since onlining failed. */
    389		put_device(&cdev->dev);
    390		return -ENODEV;
    391	}
    392	spin_unlock_irq(cdev->ccwlock);
    393	if (cdev->drv->set_online)
    394		ret = cdev->drv->set_online(cdev);
    395	if (ret)
    396		goto rollback;
    397
    398	spin_lock_irq(cdev->ccwlock);
    399	cdev->online = 1;
    400	spin_unlock_irq(cdev->ccwlock);
    401	return 0;
    402
    403rollback:
    404	spin_lock_irq(cdev->ccwlock);
    405	/* Wait until a final state or DISCONNECTED is reached */
    406	while (!dev_fsm_final_state(cdev) &&
    407	       cdev->private->state != DEV_STATE_DISCONNECTED) {
    408		spin_unlock_irq(cdev->ccwlock);
    409		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
    410			   cdev->private->state == DEV_STATE_DISCONNECTED));
    411		spin_lock_irq(cdev->ccwlock);
    412	}
    413	ret2 = ccw_device_offline(cdev);
    414	if (ret2)
    415		goto error;
    416	spin_unlock_irq(cdev->ccwlock);
    417	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
    418		   cdev->private->state == DEV_STATE_DISCONNECTED));
    419	/* Give up online reference since onlining failed. */
    420	put_device(&cdev->dev);
    421	return ret;
    422
    423error:
    424	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
    425		      "device 0.%x.%04x\n",
    426		      ret2, cdev->private->dev_id.ssid,
    427		      cdev->private->dev_id.devno);
    428	cdev->private->state = DEV_STATE_OFFLINE;
    429	spin_unlock_irq(cdev->ccwlock);
    430	/* Give up online reference since onlining failed. */
    431	put_device(&cdev->dev);
    432	return ret;
    433}
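/*
 * A minimal sketch (hypothetical driver, not part of this file) of the
 * set_online() callback that ccw_device_set_online() invokes above. The
 * device FSM is already online when this runs; returning non-zero makes
 * the bus roll the device back offline.
 */
static int foo_set_online(struct ccw_device *cdev)
{
	dev_info(&cdev->dev, "starting device initialization\n");
	/* A real driver would issue its setup channel programs here. */
	return 0;
}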
    434
    435static int online_store_handle_offline(struct ccw_device *cdev)
    436{
    437	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
    438		spin_lock_irq(cdev->ccwlock);
    439		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
    440		spin_unlock_irq(cdev->ccwlock);
    441		return 0;
    442	}
    443	if (cdev->drv && cdev->drv->set_offline)
    444		return ccw_device_set_offline(cdev);
    445	return -EINVAL;
    446}
    447
    448static int online_store_recog_and_online(struct ccw_device *cdev)
    449{
    450	/* Do device recognition, if needed. */
    451	if (cdev->private->state == DEV_STATE_BOXED) {
    452		spin_lock_irq(cdev->ccwlock);
    453		ccw_device_recognition(cdev);
    454		spin_unlock_irq(cdev->ccwlock);
    455		wait_event(cdev->private->wait_q,
    456			   cdev->private->flags.recog_done);
    457		if (cdev->private->state != DEV_STATE_OFFLINE)
    458			/* recognition failed */
    459			return -EAGAIN;
    460	}
    461	if (cdev->drv && cdev->drv->set_online)
    462		return ccw_device_set_online(cdev);
    463	return -EINVAL;
    464}
    465
    466static int online_store_handle_online(struct ccw_device *cdev, int force)
    467{
    468	int ret;
    469
    470	ret = online_store_recog_and_online(cdev);
    471	if (ret && !force)
    472		return ret;
    473	if (force && cdev->private->state == DEV_STATE_BOXED) {
    474		ret = ccw_device_stlck(cdev);
    475		if (ret)
    476			return ret;
    477		if (cdev->id.cu_type == 0)
    478			cdev->private->state = DEV_STATE_NOT_OPER;
    479		ret = online_store_recog_and_online(cdev);
    480		if (ret)
    481			return ret;
    482	}
    483	return 0;
    484}
    485
    486static ssize_t online_store (struct device *dev, struct device_attribute *attr,
    487			     const char *buf, size_t count)
    488{
    489	struct ccw_device *cdev = to_ccwdev(dev);
    490	int force, ret;
    491	unsigned long i;
    492
    493	/* Prevent conflict between multiple on-/offline processing requests. */
    494	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
    495		return -EAGAIN;
    496	/* Prevent conflict between internal I/Os and on-/offline processing. */
    497	if (!dev_fsm_final_state(cdev) &&
    498	    cdev->private->state != DEV_STATE_DISCONNECTED) {
    499		ret = -EAGAIN;
    500		goto out;
    501	}
    502	/* Prevent conflict between pending work and on-/offline processing.*/
    503	if (work_pending(&cdev->private->todo_work)) {
    504		ret = -EAGAIN;
    505		goto out;
    506	}
    507	if (!strncmp(buf, "force\n", count)) {
    508		force = 1;
    509		i = 1;
    510		ret = 0;
    511	} else {
    512		force = 0;
    513		ret = kstrtoul(buf, 16, &i);
    514	}
    515	if (ret)
    516		goto out;
    517
    518	device_lock(dev);
    519	switch (i) {
    520	case 0:
    521		ret = online_store_handle_offline(cdev);
    522		break;
    523	case 1:
    524		ret = online_store_handle_online(cdev, force);
    525		break;
    526	default:
    527		ret = -EINVAL;
    528	}
    529	device_unlock(dev);
    530
    531out:
    532	atomic_set(&cdev->private->onoff, 0);
    533	return (ret < 0) ? ret : count;
    534}
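/*
 * Userspace view of the store function above (the bus id 0.0.1234 is
 * hypothetical):
 *
 *	echo 0     > /sys/bus/ccw/devices/0.0.1234/online
 *	echo 1     > /sys/bus/ccw/devices/0.0.1234/online
 *	echo force > /sys/bus/ccw/devices/0.0.1234/online
 *
 * "force" additionally steals the device lock via ccw_device_stlck() so
 * that even a boxed device can be brought online.
 */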
    535
    536static ssize_t
    537available_show (struct device *dev, struct device_attribute *attr, char *buf)
    538{
    539	struct ccw_device *cdev = to_ccwdev(dev);
    540	struct subchannel *sch;
    541
    542	if (ccw_device_is_orphan(cdev))
    543		return sprintf(buf, "no device\n");
    544	switch (cdev->private->state) {
    545	case DEV_STATE_BOXED:
    546		return sprintf(buf, "boxed\n");
    547	case DEV_STATE_DISCONNECTED:
    548	case DEV_STATE_DISCONNECTED_SENSE_ID:
    549	case DEV_STATE_NOT_OPER:
    550		sch = to_subchannel(dev->parent);
    551		if (!sch->lpm)
    552			return sprintf(buf, "no path\n");
    553		else
    554			return sprintf(buf, "no device\n");
    555	default:
    556		/* All other states considered fine. */
    557		return sprintf(buf, "good\n");
    558	}
    559}
    560
    561static ssize_t
    562initiate_logging(struct device *dev, struct device_attribute *attr,
    563		 const char *buf, size_t count)
    564{
    565	struct subchannel *sch = to_subchannel(dev);
    566	int rc;
    567
    568	rc = chsc_siosl(sch->schid);
    569	if (rc < 0) {
    570		pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
    571			sch->schid.ssid, sch->schid.sch_no, rc);
    572		return rc;
    573	}
    574	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
    575		  sch->schid.ssid, sch->schid.sch_no);
    576	return count;
    577}
    578
    579static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
    580			char *buf)
    581{
    582	struct subchannel *sch = to_subchannel(dev);
    583
    584	return sprintf(buf, "%02x\n", sch->vpm);
    585}
    586
    587static DEVICE_ATTR_RO(devtype);
    588static DEVICE_ATTR_RO(cutype);
    589static DEVICE_ATTR_RO(modalias);
    590static DEVICE_ATTR_RW(online);
    591static DEVICE_ATTR(availability, 0444, available_show, NULL);
    592static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
    593static DEVICE_ATTR_RO(vpm);
    594
    595static struct attribute *io_subchannel_attrs[] = {
    596	&dev_attr_logging.attr,
    597	&dev_attr_vpm.attr,
    598	NULL,
    599};
    600
    601static const struct attribute_group io_subchannel_attr_group = {
    602	.attrs = io_subchannel_attrs,
    603};
    604
    605static struct attribute * ccwdev_attrs[] = {
    606	&dev_attr_devtype.attr,
    607	&dev_attr_cutype.attr,
    608	&dev_attr_modalias.attr,
    609	&dev_attr_online.attr,
    610	&dev_attr_cmb_enable.attr,
    611	&dev_attr_availability.attr,
    612	NULL,
    613};
    614
    615static const struct attribute_group ccwdev_attr_group = {
    616	.attrs = ccwdev_attrs,
    617};
    618
    619static const struct attribute_group *ccwdev_attr_groups[] = {
    620	&ccwdev_attr_group,
    621	NULL,
    622};
    623
    624static int match_dev_id(struct device *dev, const void *data)
    625{
    626	struct ccw_device *cdev = to_ccwdev(dev);
    627	struct ccw_dev_id *dev_id = (void *)data;
    628
    629	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
    630}
    631
    632/**
    633 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
    634 * @dev_id: id of the device to be searched
    635 *
    636 * This function searches all devices attached to the ccw bus for a device
    637 * matching @dev_id.
    638 * Returns:
    639 *  If a device is found its reference count is increased and returned;
    640 *  else %NULL is returned.
    641 */
    642struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
    643{
    644	struct device *dev;
    645
    646	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
    647
    648	return dev ? to_ccwdev(dev) : NULL;
    649}
    650EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
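/*
 * Illustrative caller, assuming a hypothetical helper: look up device
 * number 0x4711 on ssid 0 and drop the reference that
 * get_ccwdev_by_dev_id() took.
 */
static bool foo_devno_present(void)
{
	struct ccw_dev_id dev_id = { .ssid = 0, .devno = 0x4711 };
	struct ccw_device *cdev = get_ccwdev_by_dev_id(&dev_id);

	if (!cdev)
		return false;
	put_device(&cdev->dev);
	return true;
}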
    651
    652static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
    653{
    654	int ret;
    655
    656	if (device_is_registered(&cdev->dev)) {
    657		device_release_driver(&cdev->dev);
    658		ret = device_attach(&cdev->dev);
    659		WARN_ON(ret == -ENODEV);
    660	}
    661}
    662
    663static void
    664ccw_device_release(struct device *dev)
    665{
    666	struct ccw_device *cdev;
    667
    668	cdev = to_ccwdev(dev);
    669	cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
    670			sizeof(*cdev->private->dma_area));
    671	cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
    672	/* Release reference of parent subchannel. */
    673	put_device(cdev->dev.parent);
    674	kfree(cdev->private);
    675	kfree(cdev);
    676}
    677
    678static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
    679{
    680	struct ccw_device *cdev;
    681	struct gen_pool *dma_pool;
    682	int ret;
    683
     684	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
    685	if (!cdev) {
    686		ret = -ENOMEM;
    687		goto err_cdev;
    688	}
    689	cdev->private = kzalloc(sizeof(struct ccw_device_private),
    690				GFP_KERNEL | GFP_DMA);
    691	if (!cdev->private) {
    692		ret = -ENOMEM;
    693		goto err_priv;
    694	}
    695
    696	cdev->dev.dma_mask = sch->dev.dma_mask;
    697	ret = dma_set_coherent_mask(&cdev->dev, sch->dev.coherent_dma_mask);
    698	if (ret)
    699		goto err_coherent_mask;
    700
    701	dma_pool = cio_gp_dma_create(&cdev->dev, 1);
    702	if (!dma_pool) {
    703		ret = -ENOMEM;
    704		goto err_dma_pool;
    705	}
    706	cdev->private->dma_pool = dma_pool;
    707	cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
    708					sizeof(*cdev->private->dma_area));
    709	if (!cdev->private->dma_area) {
    710		ret = -ENOMEM;
    711		goto err_dma_area;
    712	}
    713	return cdev;
    714err_dma_area:
    715	cio_gp_dma_destroy(dma_pool, &cdev->dev);
    716err_dma_pool:
    717err_coherent_mask:
    718	kfree(cdev->private);
    719err_priv:
    720	kfree(cdev);
    721err_cdev:
    722	return ERR_PTR(ret);
    723}
    724
    725static void ccw_device_todo(struct work_struct *work);
    726
    727static int io_subchannel_initialize_dev(struct subchannel *sch,
    728					struct ccw_device *cdev)
    729{
    730	struct ccw_device_private *priv = cdev->private;
    731	int ret;
    732
    733	priv->cdev = cdev;
    734	priv->int_class = IRQIO_CIO;
    735	priv->state = DEV_STATE_NOT_OPER;
    736	priv->dev_id.devno = sch->schib.pmcw.dev;
    737	priv->dev_id.ssid = sch->schid.ssid;
    738
    739	INIT_WORK(&priv->todo_work, ccw_device_todo);
    740	INIT_LIST_HEAD(&priv->cmb_list);
    741	init_waitqueue_head(&priv->wait_q);
    742	timer_setup(&priv->timer, ccw_device_timeout, 0);
    743
    744	atomic_set(&priv->onoff, 0);
    745	cdev->ccwlock = sch->lock;
    746	cdev->dev.parent = &sch->dev;
    747	cdev->dev.release = ccw_device_release;
    748	cdev->dev.bus = &ccw_bus_type;
    749	cdev->dev.groups = ccwdev_attr_groups;
    750	/* Do first half of device_register. */
    751	device_initialize(&cdev->dev);
    752	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
    753			   cdev->private->dev_id.devno);
    754	if (ret)
    755		goto out_put;
    756	if (!get_device(&sch->dev)) {
    757		ret = -ENODEV;
    758		goto out_put;
    759	}
    760	priv->flags.initialized = 1;
    761	spin_lock_irq(sch->lock);
    762	sch_set_cdev(sch, cdev);
    763	spin_unlock_irq(sch->lock);
    764	return 0;
    765
    766out_put:
    767	/* Release reference from device_initialize(). */
    768	put_device(&cdev->dev);
    769	return ret;
    770}
    771
    772static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
    773{
    774	struct ccw_device *cdev;
    775	int ret;
    776
    777	cdev = io_subchannel_allocate_dev(sch);
    778	if (!IS_ERR(cdev)) {
    779		ret = io_subchannel_initialize_dev(sch, cdev);
    780		if (ret)
    781			cdev = ERR_PTR(ret);
    782	}
    783	return cdev;
    784}
    785
    786static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
    787
    788static void sch_create_and_recog_new_device(struct subchannel *sch)
    789{
    790	struct ccw_device *cdev;
    791
    792	/* Need to allocate a new ccw device. */
    793	cdev = io_subchannel_create_ccwdev(sch);
    794	if (IS_ERR(cdev)) {
    795		/* OK, we did everything we could... */
    796		css_sch_device_unregister(sch);
    797		return;
    798	}
    799	/* Start recognition for the new ccw device. */
    800	io_subchannel_recog(cdev, sch);
    801}
    802
    803/*
    804 * Register recognized device.
    805 */
    806static void io_subchannel_register(struct ccw_device *cdev)
    807{
    808	struct subchannel *sch;
    809	int ret, adjust_init_count = 1;
    810	unsigned long flags;
    811
    812	sch = to_subchannel(cdev->dev.parent);
    813	/*
    814	 * Check if subchannel is still registered. It may have become
    815	 * unregistered if a machine check hit us after finishing
    816	 * device recognition but before the register work could be
    817	 * queued.
    818	 */
    819	if (!device_is_registered(&sch->dev))
    820		goto out_err;
    821	css_update_ssd_info(sch);
    822	/*
    823	 * io_subchannel_register() will also be called after device
    824	 * recognition has been done for a boxed device (which will already
    825	 * be registered). We need to reprobe since we may now have sense id
    826	 * information.
    827	 */
    828	if (device_is_registered(&cdev->dev)) {
    829		if (!cdev->drv) {
    830			ret = device_reprobe(&cdev->dev);
    831			if (ret)
    832				/* We can't do much here. */
    833				CIO_MSG_EVENT(0, "device_reprobe() returned"
    834					      " %d for 0.%x.%04x\n", ret,
    835					      cdev->private->dev_id.ssid,
    836					      cdev->private->dev_id.devno);
    837		}
    838		adjust_init_count = 0;
    839		goto out;
    840	}
    841	/* make it known to the system */
    842	ret = device_add(&cdev->dev);
    843	if (ret) {
    844		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
    845			      cdev->private->dev_id.ssid,
    846			      cdev->private->dev_id.devno, ret);
    847		spin_lock_irqsave(sch->lock, flags);
    848		sch_set_cdev(sch, NULL);
    849		spin_unlock_irqrestore(sch->lock, flags);
    850		/* Release initial device reference. */
    851		put_device(&cdev->dev);
    852		goto out_err;
    853	}
    854out:
    855	cdev->private->flags.recog_done = 1;
    856	wake_up(&cdev->private->wait_q);
    857out_err:
    858	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
    859		wake_up(&ccw_device_init_wq);
    860}
    861
    862/*
    863 * subchannel recognition done. Called from the state machine.
    864 */
    865void
    866io_subchannel_recog_done(struct ccw_device *cdev)
    867{
    868	if (css_init_done == 0) {
    869		cdev->private->flags.recog_done = 1;
    870		return;
    871	}
    872	switch (cdev->private->state) {
    873	case DEV_STATE_BOXED:
    874		/* Device did not respond in time. */
    875	case DEV_STATE_NOT_OPER:
    876		cdev->private->flags.recog_done = 1;
    877		/* Remove device found not operational. */
    878		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
    879		if (atomic_dec_and_test(&ccw_device_init_count))
    880			wake_up(&ccw_device_init_wq);
    881		break;
    882	case DEV_STATE_OFFLINE:
    883		/*
    884		 * We can't register the device in interrupt context so
    885		 * we schedule a work item.
    886		 */
    887		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
    888		break;
    889	}
    890}
    891
    892static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
    893{
    894	/* Increase counter of devices currently in recognition. */
    895	atomic_inc(&ccw_device_init_count);
    896
    897	/* Start async. device sensing. */
    898	spin_lock_irq(sch->lock);
    899	ccw_device_recognition(cdev);
    900	spin_unlock_irq(sch->lock);
    901}
    902
    903static int ccw_device_move_to_sch(struct ccw_device *cdev,
    904				  struct subchannel *sch)
    905{
    906	struct subchannel *old_sch;
    907	int rc, old_enabled = 0;
    908
    909	old_sch = to_subchannel(cdev->dev.parent);
    910	/* Obtain child reference for new parent. */
    911	if (!get_device(&sch->dev))
    912		return -ENODEV;
    913
    914	if (!sch_is_pseudo_sch(old_sch)) {
    915		spin_lock_irq(old_sch->lock);
    916		old_enabled = old_sch->schib.pmcw.ena;
    917		rc = 0;
    918		if (old_enabled)
    919			rc = cio_disable_subchannel(old_sch);
    920		spin_unlock_irq(old_sch->lock);
    921		if (rc == -EBUSY) {
    922			/* Release child reference for new parent. */
    923			put_device(&sch->dev);
    924			return rc;
    925		}
    926	}
    927
    928	mutex_lock(&sch->reg_mutex);
    929	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
    930	mutex_unlock(&sch->reg_mutex);
    931	if (rc) {
    932		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
    933			      cdev->private->dev_id.ssid,
    934			      cdev->private->dev_id.devno, sch->schid.ssid,
    935			      sch->schib.pmcw.dev, rc);
    936		if (old_enabled) {
    937			/* Try to reenable the old subchannel. */
    938			spin_lock_irq(old_sch->lock);
    939			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
    940			spin_unlock_irq(old_sch->lock);
    941		}
    942		/* Release child reference for new parent. */
    943		put_device(&sch->dev);
    944		return rc;
    945	}
    946	/* Clean up old subchannel. */
    947	if (!sch_is_pseudo_sch(old_sch)) {
    948		spin_lock_irq(old_sch->lock);
    949		sch_set_cdev(old_sch, NULL);
    950		spin_unlock_irq(old_sch->lock);
    951		css_schedule_eval(old_sch->schid);
    952	}
    953	/* Release child reference for old parent. */
    954	put_device(&old_sch->dev);
    955	/* Initialize new subchannel. */
    956	spin_lock_irq(sch->lock);
    957	cdev->ccwlock = sch->lock;
    958	if (!sch_is_pseudo_sch(sch))
    959		sch_set_cdev(sch, cdev);
    960	spin_unlock_irq(sch->lock);
    961	if (!sch_is_pseudo_sch(sch))
    962		css_update_ssd_info(sch);
    963	return 0;
    964}
    965
    966static int ccw_device_move_to_orph(struct ccw_device *cdev)
    967{
    968	struct subchannel *sch = to_subchannel(cdev->dev.parent);
    969	struct channel_subsystem *css = to_css(sch->dev.parent);
    970
    971	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
    972}
    973
    974static void io_subchannel_irq(struct subchannel *sch)
    975{
    976	struct ccw_device *cdev;
    977
    978	cdev = sch_get_cdev(sch);
    979
    980	CIO_TRACE_EVENT(6, "IRQ");
    981	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
    982	if (cdev)
    983		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
    984	else
    985		inc_irq_stat(IRQIO_CIO);
    986}
    987
    988void io_subchannel_init_config(struct subchannel *sch)
    989{
    990	memset(&sch->config, 0, sizeof(sch->config));
    991	sch->config.csense = 1;
    992}
    993
    994static void io_subchannel_init_fields(struct subchannel *sch)
    995{
    996	if (cio_is_console(sch->schid))
    997		sch->opm = 0xff;
    998	else
    999		sch->opm = chp_get_sch_opm(sch);
   1000	sch->lpm = sch->schib.pmcw.pam & sch->opm;
   1001	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
   1002
   1003	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
   1004		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
   1005		      sch->schib.pmcw.dev, sch->schid.ssid,
   1006		      sch->schid.sch_no, sch->schib.pmcw.pim,
   1007		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);
   1008
   1009	io_subchannel_init_config(sch);
   1010}
   1011
   1012/*
   1013 * Note: We always return 0 so that we bind to the device even on error.
   1014 * This is needed so that our remove function is called on unregister.
   1015 */
   1016static int io_subchannel_probe(struct subchannel *sch)
   1017{
   1018	struct io_subchannel_private *io_priv;
   1019	struct ccw_device *cdev;
   1020	int rc;
   1021
   1022	if (cio_is_console(sch->schid)) {
   1023		rc = sysfs_create_group(&sch->dev.kobj,
   1024					&io_subchannel_attr_group);
   1025		if (rc)
   1026			CIO_MSG_EVENT(0, "Failed to create io subchannel "
   1027				      "attributes for subchannel "
   1028				      "0.%x.%04x (rc=%d)\n",
   1029				      sch->schid.ssid, sch->schid.sch_no, rc);
   1030		/*
    1031		 * The console subchannel already has an associated ccw_device.
    1032		 * Register it and exit.
    1033		 */
   1034		cdev = sch_get_cdev(sch);
   1035		rc = device_add(&cdev->dev);
   1036		if (rc) {
   1037			/* Release online reference. */
   1038			put_device(&cdev->dev);
   1039			goto out_schedule;
   1040		}
   1041		if (atomic_dec_and_test(&ccw_device_init_count))
   1042			wake_up(&ccw_device_init_wq);
   1043		return 0;
   1044	}
   1045	io_subchannel_init_fields(sch);
   1046	rc = cio_commit_config(sch);
   1047	if (rc)
   1048		goto out_schedule;
   1049	rc = sysfs_create_group(&sch->dev.kobj,
   1050				&io_subchannel_attr_group);
   1051	if (rc)
   1052		goto out_schedule;
   1053	/* Allocate I/O subchannel private data. */
   1054	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
   1055	if (!io_priv)
   1056		goto out_schedule;
   1057
   1058	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
   1059				sizeof(*io_priv->dma_area),
   1060				&io_priv->dma_area_dma, GFP_KERNEL);
   1061	if (!io_priv->dma_area) {
   1062		kfree(io_priv);
   1063		goto out_schedule;
   1064	}
   1065
   1066	set_io_private(sch, io_priv);
   1067	css_schedule_eval(sch->schid);
   1068	return 0;
   1069
   1070out_schedule:
   1071	spin_lock_irq(sch->lock);
   1072	css_sched_sch_todo(sch, SCH_TODO_UNREG);
   1073	spin_unlock_irq(sch->lock);
   1074	return 0;
   1075}
   1076
   1077static void io_subchannel_remove(struct subchannel *sch)
   1078{
   1079	struct io_subchannel_private *io_priv = to_io_private(sch);
   1080	struct ccw_device *cdev;
   1081
   1082	cdev = sch_get_cdev(sch);
   1083	if (!cdev)
   1084		goto out_free;
   1085
   1086	ccw_device_unregister(cdev);
   1087	spin_lock_irq(sch->lock);
   1088	sch_set_cdev(sch, NULL);
   1089	set_io_private(sch, NULL);
   1090	spin_unlock_irq(sch->lock);
   1091out_free:
   1092	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
   1093			  io_priv->dma_area, io_priv->dma_area_dma);
   1094	kfree(io_priv);
   1095	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
   1096}
   1097
   1098static void io_subchannel_verify(struct subchannel *sch)
   1099{
   1100	struct ccw_device *cdev;
   1101
   1102	cdev = sch_get_cdev(sch);
   1103	if (cdev)
   1104		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
   1105}
   1106
   1107static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
   1108{
   1109	struct ccw_device *cdev;
   1110
   1111	cdev = sch_get_cdev(sch);
   1112	if (!cdev)
   1113		return;
   1114	if (cio_update_schib(sch))
   1115		goto err;
   1116	/* Check for I/O on path. */
   1117	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
   1118		goto out;
   1119	if (cdev->private->state == DEV_STATE_ONLINE) {
   1120		ccw_device_kill_io(cdev);
   1121		goto out;
   1122	}
   1123	if (cio_clear(sch))
   1124		goto err;
   1125out:
   1126	/* Trigger path verification. */
   1127	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
   1128	return;
   1129
   1130err:
   1131	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
   1132}
   1133
   1134static int io_subchannel_chp_event(struct subchannel *sch,
   1135				   struct chp_link *link, int event)
   1136{
   1137	struct ccw_device *cdev = sch_get_cdev(sch);
   1138	int mask, chpid, valid_bit;
   1139	int path_event[8];
   1140
   1141	mask = chp_ssd_get_mask(&sch->ssd_info, link);
   1142	if (!mask)
   1143		return 0;
   1144	switch (event) {
   1145	case CHP_VARY_OFF:
   1146		sch->opm &= ~mask;
   1147		sch->lpm &= ~mask;
   1148		if (cdev)
   1149			cdev->private->path_gone_mask |= mask;
   1150		io_subchannel_terminate_path(sch, mask);
   1151		break;
   1152	case CHP_VARY_ON:
   1153		sch->opm |= mask;
   1154		sch->lpm |= mask;
   1155		if (cdev)
   1156			cdev->private->path_new_mask |= mask;
   1157		io_subchannel_verify(sch);
   1158		break;
   1159	case CHP_OFFLINE:
   1160		if (cio_update_schib(sch))
   1161			return -ENODEV;
   1162		if (cdev)
   1163			cdev->private->path_gone_mask |= mask;
   1164		io_subchannel_terminate_path(sch, mask);
   1165		break;
   1166	case CHP_ONLINE:
   1167		if (cio_update_schib(sch))
   1168			return -ENODEV;
   1169		sch->lpm |= mask & sch->opm;
   1170		if (cdev)
   1171			cdev->private->path_new_mask |= mask;
   1172		io_subchannel_verify(sch);
   1173		break;
   1174	case CHP_FCES_EVENT:
   1175		/* Forward Endpoint Security event */
   1176		for (chpid = 0, valid_bit = 0x80; chpid < 8; chpid++,
   1177				valid_bit >>= 1) {
   1178			if (mask & valid_bit)
   1179				path_event[chpid] = PE_PATH_FCES_EVENT;
   1180			else
   1181				path_event[chpid] = PE_NONE;
   1182		}
   1183		if (cdev && cdev->drv && cdev->drv->path_event)
   1184			cdev->drv->path_event(cdev, path_event);
   1185		break;
   1186	}
   1187	return 0;
   1188}
   1189
   1190static void io_subchannel_quiesce(struct subchannel *sch)
   1191{
   1192	struct ccw_device *cdev;
   1193	int ret;
   1194
   1195	spin_lock_irq(sch->lock);
   1196	cdev = sch_get_cdev(sch);
   1197	if (cio_is_console(sch->schid))
   1198		goto out_unlock;
   1199	if (!sch->schib.pmcw.ena)
   1200		goto out_unlock;
   1201	ret = cio_disable_subchannel(sch);
   1202	if (ret != -EBUSY)
   1203		goto out_unlock;
   1204	if (cdev->handler)
   1205		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
   1206	while (ret == -EBUSY) {
   1207		cdev->private->state = DEV_STATE_QUIESCE;
   1208		cdev->private->iretry = 255;
   1209		ret = ccw_device_cancel_halt_clear(cdev);
   1210		if (ret == -EBUSY) {
   1211			ccw_device_set_timeout(cdev, HZ/10);
   1212			spin_unlock_irq(sch->lock);
   1213			wait_event(cdev->private->wait_q,
   1214				   cdev->private->state != DEV_STATE_QUIESCE);
   1215			spin_lock_irq(sch->lock);
   1216		}
   1217		ret = cio_disable_subchannel(sch);
   1218	}
   1219out_unlock:
   1220	spin_unlock_irq(sch->lock);
   1221}
   1222
   1223static void io_subchannel_shutdown(struct subchannel *sch)
   1224{
   1225	io_subchannel_quiesce(sch);
   1226}
   1227
   1228static int device_is_disconnected(struct ccw_device *cdev)
   1229{
   1230	if (!cdev)
   1231		return 0;
   1232	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
   1233		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
   1234}
   1235
   1236static int recovery_check(struct device *dev, void *data)
   1237{
   1238	struct ccw_device *cdev = to_ccwdev(dev);
   1239	struct subchannel *sch;
   1240	int *redo = data;
   1241
   1242	spin_lock_irq(cdev->ccwlock);
   1243	switch (cdev->private->state) {
   1244	case DEV_STATE_ONLINE:
   1245		sch = to_subchannel(cdev->dev.parent);
   1246		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
   1247			break;
   1248		fallthrough;
   1249	case DEV_STATE_DISCONNECTED:
   1250		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
   1251			      cdev->private->dev_id.ssid,
   1252			      cdev->private->dev_id.devno);
   1253		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
   1254		*redo = 1;
   1255		break;
   1256	case DEV_STATE_DISCONNECTED_SENSE_ID:
   1257		*redo = 1;
   1258		break;
   1259	}
   1260	spin_unlock_irq(cdev->ccwlock);
   1261
   1262	return 0;
   1263}
   1264
   1265static void recovery_work_func(struct work_struct *unused)
   1266{
   1267	int redo = 0;
   1268
   1269	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
   1270	if (redo) {
   1271		spin_lock_irq(&recovery_lock);
   1272		if (!timer_pending(&recovery_timer)) {
   1273			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
   1274				recovery_phase++;
   1275			mod_timer(&recovery_timer, jiffies +
   1276				  recovery_delay[recovery_phase] * HZ);
   1277		}
   1278		spin_unlock_irq(&recovery_lock);
   1279	} else
   1280		CIO_MSG_EVENT(3, "recovery: end\n");
   1281}
   1282
   1283static DECLARE_WORK(recovery_work, recovery_work_func);
   1284
   1285static void recovery_func(struct timer_list *unused)
   1286{
   1287	/*
   1288	 * We can't do our recovery in softirq context and it's not
   1289	 * performance critical, so we schedule it.
   1290	 */
   1291	schedule_work(&recovery_work);
   1292}
   1293
   1294void ccw_device_schedule_recovery(void)
   1295{
   1296	unsigned long flags;
   1297
   1298	CIO_MSG_EVENT(3, "recovery: schedule\n");
   1299	spin_lock_irqsave(&recovery_lock, flags);
   1300	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
   1301		recovery_phase = 0;
   1302		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
   1303	}
   1304	spin_unlock_irqrestore(&recovery_lock, flags);
   1305}
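/*
 * Net effect of the two functions above: the first recovery pass runs
 * recovery_delay[0] = 3 seconds after scheduling; as long as
 * recovery_check() keeps finding devices to redo, recovery_work_func()
 * re-arms the timer with 30 and then 300 seconds, capping the backoff at
 * the last array entry.
 */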
   1306
   1307static int purge_fn(struct device *dev, void *data)
   1308{
   1309	struct ccw_device *cdev = to_ccwdev(dev);
   1310	struct ccw_dev_id *id = &cdev->private->dev_id;
   1311	struct subchannel *sch = to_subchannel(cdev->dev.parent);
   1312
   1313	spin_lock_irq(cdev->ccwlock);
   1314	if (is_blacklisted(id->ssid, id->devno) &&
   1315	    (cdev->private->state == DEV_STATE_OFFLINE) &&
   1316	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
   1317		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
   1318			      id->devno);
   1319		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
   1320		css_sched_sch_todo(sch, SCH_TODO_UNREG);
   1321		atomic_set(&cdev->private->onoff, 0);
   1322	}
   1323	spin_unlock_irq(cdev->ccwlock);
   1324	/* Abort loop in case of pending signal. */
   1325	if (signal_pending(current))
   1326		return -EINTR;
   1327
   1328	return 0;
   1329}
   1330
   1331/**
   1332 * ccw_purge_blacklisted - purge unused, blacklisted devices
   1333 *
   1334 * Unregister all ccw devices that are offline and on the blacklist.
   1335 */
   1336int ccw_purge_blacklisted(void)
   1337{
   1338	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
   1339	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
   1340	return 0;
   1341}
   1342
   1343void ccw_device_set_disconnected(struct ccw_device *cdev)
   1344{
   1345	if (!cdev)
   1346		return;
   1347	ccw_device_set_timeout(cdev, 0);
   1348	cdev->private->flags.fake_irb = 0;
   1349	cdev->private->state = DEV_STATE_DISCONNECTED;
   1350	if (cdev->online)
   1351		ccw_device_schedule_recovery();
   1352}
   1353
   1354void ccw_device_set_notoper(struct ccw_device *cdev)
   1355{
   1356	struct subchannel *sch = to_subchannel(cdev->dev.parent);
   1357
   1358	CIO_TRACE_EVENT(2, "notoper");
   1359	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
   1360	ccw_device_set_timeout(cdev, 0);
   1361	cio_disable_subchannel(sch);
   1362	cdev->private->state = DEV_STATE_NOT_OPER;
   1363}
   1364
   1365enum io_sch_action {
   1366	IO_SCH_UNREG,
   1367	IO_SCH_ORPH_UNREG,
   1368	IO_SCH_ATTACH,
   1369	IO_SCH_UNREG_ATTACH,
   1370	IO_SCH_ORPH_ATTACH,
   1371	IO_SCH_REPROBE,
   1372	IO_SCH_VERIFY,
   1373	IO_SCH_DISC,
   1374	IO_SCH_NOP,
   1375};
   1376
   1377static enum io_sch_action sch_get_action(struct subchannel *sch)
   1378{
   1379	struct ccw_device *cdev;
   1380
   1381	cdev = sch_get_cdev(sch);
   1382	if (cio_update_schib(sch)) {
   1383		/* Not operational. */
   1384		if (!cdev)
   1385			return IO_SCH_UNREG;
   1386		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
   1387			return IO_SCH_UNREG;
   1388		return IO_SCH_ORPH_UNREG;
   1389	}
   1390	/* Operational. */
   1391	if (!cdev)
   1392		return IO_SCH_ATTACH;
   1393	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
   1394		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
   1395			return IO_SCH_UNREG_ATTACH;
   1396		return IO_SCH_ORPH_ATTACH;
   1397	}
   1398	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
   1399		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
   1400			return IO_SCH_UNREG;
   1401		return IO_SCH_DISC;
   1402	}
   1403	if (device_is_disconnected(cdev))
   1404		return IO_SCH_REPROBE;
   1405	if (cdev->online)
   1406		return IO_SCH_VERIFY;
   1407	if (cdev->private->state == DEV_STATE_NOT_OPER)
   1408		return IO_SCH_UNREG_ATTACH;
   1409	return IO_SCH_NOP;
   1410}
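/*
 * In short: a subchannel that stopped being operational is unregistered
 * (moving the device to the orphanage first if the driver asked to keep
 * it); an operational subchannel whose device number changed re-attaches;
 * one without usable paths disconnects; the remaining cases reprobe,
 * verify paths or do nothing.
 */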
   1411
   1412/**
   1413 * io_subchannel_sch_event - process subchannel event
   1414 * @sch: subchannel
   1415 * @process: non-zero if function is called in process context
   1416 *
   1417 * An unspecified event occurred for this subchannel. Adjust data according
   1418 * to the current operational state of the subchannel and device. Return
   1419 * zero when the event has been handled sufficiently or -EAGAIN when this
   1420 * function should be called again in process context.
   1421 */
   1422static int io_subchannel_sch_event(struct subchannel *sch, int process)
   1423{
   1424	unsigned long flags;
   1425	struct ccw_device *cdev;
   1426	struct ccw_dev_id dev_id;
   1427	enum io_sch_action action;
   1428	int rc = -EAGAIN;
   1429
   1430	spin_lock_irqsave(sch->lock, flags);
   1431	if (!device_is_registered(&sch->dev))
   1432		goto out_unlock;
   1433	if (work_pending(&sch->todo_work))
   1434		goto out_unlock;
   1435	cdev = sch_get_cdev(sch);
   1436	if (cdev && work_pending(&cdev->private->todo_work))
   1437		goto out_unlock;
   1438	action = sch_get_action(sch);
   1439	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
   1440		      sch->schid.ssid, sch->schid.sch_no, process,
   1441		      action);
   1442	/* Perform immediate actions while holding the lock. */
   1443	switch (action) {
   1444	case IO_SCH_REPROBE:
   1445		/* Trigger device recognition. */
   1446		ccw_device_trigger_reprobe(cdev);
   1447		rc = 0;
   1448		goto out_unlock;
   1449	case IO_SCH_VERIFY:
   1450		/* Trigger path verification. */
   1451		io_subchannel_verify(sch);
   1452		rc = 0;
   1453		goto out_unlock;
   1454	case IO_SCH_DISC:
   1455		ccw_device_set_disconnected(cdev);
   1456		rc = 0;
   1457		goto out_unlock;
   1458	case IO_SCH_ORPH_UNREG:
   1459	case IO_SCH_ORPH_ATTACH:
   1460		ccw_device_set_disconnected(cdev);
   1461		break;
   1462	case IO_SCH_UNREG_ATTACH:
   1463	case IO_SCH_UNREG:
   1464		if (!cdev)
   1465			break;
   1466		if (cdev->private->state == DEV_STATE_SENSE_ID) {
   1467			/*
   1468			 * Note: delayed work triggered by this event
   1469			 * and repeated calls to sch_event are synchronized
   1470			 * by the above check for work_pending(cdev).
   1471			 */
   1472			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
   1473		} else
   1474			ccw_device_set_notoper(cdev);
   1475		break;
   1476	case IO_SCH_NOP:
   1477		rc = 0;
   1478		goto out_unlock;
   1479	default:
   1480		break;
   1481	}
   1482	spin_unlock_irqrestore(sch->lock, flags);
   1483	/* All other actions require process context. */
   1484	if (!process)
   1485		goto out;
   1486	/* Handle attached ccw device. */
   1487	switch (action) {
   1488	case IO_SCH_ORPH_UNREG:
   1489	case IO_SCH_ORPH_ATTACH:
   1490		/* Move ccw device to orphanage. */
   1491		rc = ccw_device_move_to_orph(cdev);
   1492		if (rc)
   1493			goto out;
   1494		break;
   1495	case IO_SCH_UNREG_ATTACH:
   1496		spin_lock_irqsave(sch->lock, flags);
   1497		sch_set_cdev(sch, NULL);
   1498		spin_unlock_irqrestore(sch->lock, flags);
   1499		/* Unregister ccw device. */
   1500		ccw_device_unregister(cdev);
   1501		break;
   1502	default:
   1503		break;
   1504	}
   1505	/* Handle subchannel. */
   1506	switch (action) {
   1507	case IO_SCH_ORPH_UNREG:
   1508	case IO_SCH_UNREG:
   1509		css_sch_device_unregister(sch);
   1510		break;
   1511	case IO_SCH_ORPH_ATTACH:
   1512	case IO_SCH_UNREG_ATTACH:
   1513	case IO_SCH_ATTACH:
   1514		dev_id.ssid = sch->schid.ssid;
   1515		dev_id.devno = sch->schib.pmcw.dev;
   1516		cdev = get_ccwdev_by_dev_id(&dev_id);
   1517		if (!cdev) {
   1518			sch_create_and_recog_new_device(sch);
   1519			break;
   1520		}
   1521		rc = ccw_device_move_to_sch(cdev, sch);
   1522		if (rc) {
   1523			/* Release reference from get_ccwdev_by_dev_id() */
   1524			put_device(&cdev->dev);
   1525			goto out;
   1526		}
   1527		spin_lock_irqsave(sch->lock, flags);
   1528		ccw_device_trigger_reprobe(cdev);
   1529		spin_unlock_irqrestore(sch->lock, flags);
   1530		/* Release reference from get_ccwdev_by_dev_id() */
   1531		put_device(&cdev->dev);
   1532		break;
   1533	default:
   1534		break;
   1535	}
   1536	return 0;
   1537
   1538out_unlock:
   1539	spin_unlock_irqrestore(sch->lock, flags);
   1540out:
   1541	return rc;
   1542}
   1543
   1544static void ccw_device_set_int_class(struct ccw_device *cdev)
   1545{
   1546	struct ccw_driver *cdrv = cdev->drv;
   1547
   1548	/* Note: we interpret class 0 in this context as an uninitialized
   1549	 * field since it translates to a non-I/O interrupt class. */
   1550	if (cdrv->int_class != 0)
   1551		cdev->private->int_class = cdrv->int_class;
   1552	else
   1553		cdev->private->int_class = IRQIO_CIO;
   1554}
   1555
   1556#ifdef CONFIG_CCW_CONSOLE
   1557int __init ccw_device_enable_console(struct ccw_device *cdev)
   1558{
   1559	struct subchannel *sch = to_subchannel(cdev->dev.parent);
   1560	int rc;
   1561
   1562	if (!cdev->drv || !cdev->handler)
   1563		return -EINVAL;
   1564
   1565	io_subchannel_init_fields(sch);
   1566	rc = cio_commit_config(sch);
   1567	if (rc)
   1568		return rc;
   1569	sch->driver = &io_subchannel_driver;
   1570	io_subchannel_recog(cdev, sch);
   1571	/* Now wait for the async. recognition to come to an end. */
   1572	spin_lock_irq(cdev->ccwlock);
   1573	while (!dev_fsm_final_state(cdev))
   1574		ccw_device_wait_idle(cdev);
   1575
   1576	/* Hold on to an extra reference while device is online. */
   1577	get_device(&cdev->dev);
   1578	rc = ccw_device_online(cdev);
   1579	if (rc)
   1580		goto out_unlock;
   1581
   1582	while (!dev_fsm_final_state(cdev))
   1583		ccw_device_wait_idle(cdev);
   1584
   1585	if (cdev->private->state == DEV_STATE_ONLINE)
   1586		cdev->online = 1;
   1587	else
   1588		rc = -EIO;
   1589out_unlock:
   1590	spin_unlock_irq(cdev->ccwlock);
   1591	if (rc) /* Give up online reference since onlining failed. */
   1592		put_device(&cdev->dev);
   1593	return rc;
   1594}
   1595
   1596struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
   1597{
   1598	struct io_subchannel_private *io_priv;
   1599	struct ccw_device *cdev;
   1600	struct subchannel *sch;
   1601
   1602	sch = cio_probe_console();
   1603	if (IS_ERR(sch))
   1604		return ERR_CAST(sch);
   1605
   1606	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
   1607	if (!io_priv)
   1608		goto err_priv;
   1609	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
   1610				sizeof(*io_priv->dma_area),
   1611				&io_priv->dma_area_dma, GFP_KERNEL);
   1612	if (!io_priv->dma_area)
   1613		goto err_dma_area;
   1614	set_io_private(sch, io_priv);
   1615	cdev = io_subchannel_create_ccwdev(sch);
   1616	if (IS_ERR(cdev)) {
   1617		dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
   1618				  io_priv->dma_area, io_priv->dma_area_dma);
   1619		set_io_private(sch, NULL);
   1620		put_device(&sch->dev);
   1621		kfree(io_priv);
   1622		return cdev;
   1623	}
   1624	cdev->drv = drv;
   1625	ccw_device_set_int_class(cdev);
   1626	return cdev;
   1627
   1628err_dma_area:
   1629	kfree(io_priv);
   1630err_priv:
   1631	put_device(&sch->dev);
   1632	return ERR_PTR(-ENOMEM);
   1633}
   1634
   1635void __init ccw_device_destroy_console(struct ccw_device *cdev)
   1636{
   1637	struct subchannel *sch = to_subchannel(cdev->dev.parent);
   1638	struct io_subchannel_private *io_priv = to_io_private(sch);
   1639
   1640	set_io_private(sch, NULL);
   1641	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
   1642			  io_priv->dma_area, io_priv->dma_area_dma);
   1643	put_device(&sch->dev);
   1644	put_device(&cdev->dev);
   1645	kfree(io_priv);
   1646}
   1647
   1648/**
   1649 * ccw_device_wait_idle() - busy wait for device to become idle
   1650 * @cdev: ccw device
   1651 *
   1652 * Poll until activity control is zero, that is, no function or data
   1653 * transfer is pending/active.
   1654 * Called with device lock being held.
   1655 */
   1656void ccw_device_wait_idle(struct ccw_device *cdev)
   1657{
   1658	struct subchannel *sch = to_subchannel(cdev->dev.parent);
   1659
   1660	while (1) {
   1661		cio_tsch(sch);
   1662		if (sch->schib.scsw.cmd.actl == 0)
   1663			break;
   1664		udelay(100);
   1665	}
   1666}
   1667#endif
   1668
   1669/**
   1670 * get_ccwdev_by_busid() - obtain device from a bus id
   1671 * @cdrv: driver the device is owned by
   1672 * @bus_id: bus id of the device to be searched
   1673 *
   1674 * This function searches all devices owned by @cdrv for a device with a bus
   1675 * id matching @bus_id.
   1676 * Returns:
   1677 *  If a match is found, its reference count of the found device is increased
   1678 *  and it is returned; else %NULL is returned.
   1679 */
   1680struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
   1681				       const char *bus_id)
   1682{
   1683	struct device *dev;
   1684
   1685	dev = driver_find_device_by_name(&cdrv->driver, bus_id);
   1686
   1687	return dev ? to_ccwdev(dev) : NULL;
   1688}
   1689
   1690/************************** device driver handling ************************/
   1691
   1692/* This is the implementation of the ccw_driver class. The probe, remove
   1693 * and release methods are initially very similar to the device_driver
   1694 * implementations, with the difference that they have ccw_device
   1695 * arguments.
   1696 *
   1697 * A ccw driver also contains the information that is needed for
   1698 * device matching.
   1699 */
   1700static int
   1701ccw_device_probe (struct device *dev)
   1702{
   1703	struct ccw_device *cdev = to_ccwdev(dev);
   1704	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
   1705	int ret;
   1706
   1707	cdev->drv = cdrv; /* to let the driver call _set_online */
   1708	ccw_device_set_int_class(cdev);
   1709	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
   1710	if (ret) {
   1711		cdev->drv = NULL;
   1712		cdev->private->int_class = IRQIO_CIO;
   1713		return ret;
   1714	}
   1715
   1716	return 0;
   1717}
   1718
   1719static void ccw_device_remove(struct device *dev)
   1720{
   1721	struct ccw_device *cdev = to_ccwdev(dev);
   1722	struct ccw_driver *cdrv = cdev->drv;
   1723	struct subchannel *sch;
   1724	int ret;
   1725
   1726	if (cdrv->remove)
   1727		cdrv->remove(cdev);
   1728
   1729	spin_lock_irq(cdev->ccwlock);
   1730	if (cdev->online) {
   1731		cdev->online = 0;
   1732		ret = ccw_device_offline(cdev);
   1733		spin_unlock_irq(cdev->ccwlock);
   1734		if (ret == 0)
   1735			wait_event(cdev->private->wait_q,
   1736				   dev_fsm_final_state(cdev));
   1737		else
   1738			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
   1739				      "device 0.%x.%04x\n",
   1740				      ret, cdev->private->dev_id.ssid,
   1741				      cdev->private->dev_id.devno);
   1742		/* Give up reference obtained in ccw_device_set_online(). */
   1743		put_device(&cdev->dev);
   1744		spin_lock_irq(cdev->ccwlock);
   1745	}
   1746	ccw_device_set_timeout(cdev, 0);
   1747	cdev->drv = NULL;
   1748	cdev->private->int_class = IRQIO_CIO;
   1749	sch = to_subchannel(cdev->dev.parent);
   1750	spin_unlock_irq(cdev->ccwlock);
   1751	io_subchannel_quiesce(sch);
   1752	__disable_cmf(cdev);
   1753}
   1754
   1755static void ccw_device_shutdown(struct device *dev)
   1756{
   1757	struct ccw_device *cdev;
   1758
   1759	cdev = to_ccwdev(dev);
   1760	if (cdev->drv && cdev->drv->shutdown)
   1761		cdev->drv->shutdown(cdev);
   1762	__disable_cmf(cdev);
   1763}
   1764
   1765static struct bus_type ccw_bus_type = {
   1766	.name   = "ccw",
   1767	.match  = ccw_bus_match,
   1768	.uevent = ccw_uevent,
   1769	.probe  = ccw_device_probe,
   1770	.remove = ccw_device_remove,
   1771	.shutdown = ccw_device_shutdown,
   1772};
   1773
   1774/**
   1775 * ccw_driver_register() - register a ccw driver
   1776 * @cdriver: driver to be registered
   1777 *
   1778 * This function is mainly a wrapper around driver_register().
   1779 * Returns:
   1780 *   %0 on success and a negative error value on failure.
   1781 */
   1782int ccw_driver_register(struct ccw_driver *cdriver)
   1783{
   1784	struct device_driver *drv = &cdriver->driver;
   1785
   1786	drv->bus = &ccw_bus_type;
   1787
   1788	return driver_register(drv);
   1789}
   1790
   1791/**
   1792 * ccw_driver_unregister() - deregister a ccw driver
   1793 * @cdriver: driver to be deregistered
   1794 *
   1795 * This function is mainly a wrapper around driver_unregister().
   1796 */
   1797void ccw_driver_unregister(struct ccw_driver *cdriver)
   1798{
   1799	driver_unregister(&cdriver->driver);
   1800}
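/*
 * A minimal sketch of the registration interface above for a hypothetical
 * driver "foo" that binds to any 3990 control unit; all names below are
 * made up.
 */
#include <linux/module.h>
#include <asm/ccwdev.h>

static struct ccw_device_id foo_ids[] = {
	{ CCW_DEVICE(0x3990, 0) },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ccw, foo_ids);

static int foo_probe(struct ccw_device *cdev)
{
	dev_info(&cdev->dev, "foo bound\n");
	return 0;
}

static void foo_remove(struct ccw_device *cdev)
{
	dev_info(&cdev->dev, "foo unbound\n");
}

static struct ccw_driver foo_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "foo",
	},
	.ids	= foo_ids,
	.probe	= foo_probe,
	.remove	= foo_remove,
};

static int __init foo_init(void)
{
	return ccw_driver_register(&foo_driver);
}

static void __exit foo_exit(void)
{
	ccw_driver_unregister(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");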
   1801
   1802static void ccw_device_todo(struct work_struct *work)
   1803{
   1804	struct ccw_device_private *priv;
   1805	struct ccw_device *cdev;
   1806	struct subchannel *sch;
   1807	enum cdev_todo todo;
   1808
   1809	priv = container_of(work, struct ccw_device_private, todo_work);
   1810	cdev = priv->cdev;
   1811	sch = to_subchannel(cdev->dev.parent);
   1812	/* Find out todo. */
   1813	spin_lock_irq(cdev->ccwlock);
   1814	todo = priv->todo;
   1815	priv->todo = CDEV_TODO_NOTHING;
   1816	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
   1817		      priv->dev_id.ssid, priv->dev_id.devno, todo);
   1818	spin_unlock_irq(cdev->ccwlock);
   1819	/* Perform todo. */
   1820	switch (todo) {
   1821	case CDEV_TODO_ENABLE_CMF:
   1822		cmf_reenable(cdev);
   1823		break;
   1824	case CDEV_TODO_REBIND:
   1825		ccw_device_do_unbind_bind(cdev);
   1826		break;
   1827	case CDEV_TODO_REGISTER:
   1828		io_subchannel_register(cdev);
   1829		break;
   1830	case CDEV_TODO_UNREG_EVAL:
   1831		if (!sch_is_pseudo_sch(sch))
   1832			css_schedule_eval(sch->schid);
   1833		fallthrough;
   1834	case CDEV_TODO_UNREG:
   1835		spin_lock_irq(sch->lock);
   1836		sch_set_cdev(sch, NULL);
   1837		spin_unlock_irq(sch->lock);
   1838		ccw_device_unregister(cdev);
   1839		break;
   1840	default:
   1841		break;
   1842	}
   1843	/* Release workqueue ref. */
   1844	put_device(&cdev->dev);
   1845}
   1846
   1847/**
   1848 * ccw_device_sched_todo - schedule ccw device operation
   1849 * @cdev: ccw device
   1850 * @todo: todo
   1851 *
   1852 * Schedule the operation identified by @todo to be performed on the slow path
   1853 * workqueue. Do nothing if another operation with higher priority is already
   1854 * scheduled. Needs to be called with ccwdev lock held.
   1855 */
   1856void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
   1857{
   1858	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
   1859		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
   1860		      todo);
   1861	if (cdev->private->todo >= todo)
   1862		return;
   1863	cdev->private->todo = todo;
   1864	/* Get workqueue ref. */
   1865	if (!get_device(&cdev->dev))
   1866		return;
   1867	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
   1868		/* Already queued, release workqueue ref. */
   1869		put_device(&cdev->dev);
   1870	}
   1871}
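/*
 * The priority test above relies on the numeric ordering of enum cdev_todo:
 * a numerically higher, i.e. more drastic, pending operation such as
 * CDEV_TODO_UNREG is never replaced by a lower one such as
 * CDEV_TODO_REBIND.
 */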
   1872
   1873/**
   1874 * ccw_device_siosl() - initiate logging
   1875 * @cdev: ccw device
   1876 *
   1877 * This function is used to invoke model-dependent logging within the channel
   1878 * subsystem.
   1879 */
   1880int ccw_device_siosl(struct ccw_device *cdev)
   1881{
   1882	struct subchannel *sch = to_subchannel(cdev->dev.parent);
   1883
   1884	return chsc_siosl(sch->schid);
   1885}
   1886EXPORT_SYMBOL_GPL(ccw_device_siosl);
   1887
   1888EXPORT_SYMBOL(ccw_device_set_online);
   1889EXPORT_SYMBOL(ccw_device_set_offline);
   1890EXPORT_SYMBOL(ccw_driver_register);
   1891EXPORT_SYMBOL(ccw_driver_unregister);
   1892EXPORT_SYMBOL(get_ccwdev_by_busid);