cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

chp.c (19936B)


// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 1999, 2010
 *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 *		 Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/bug.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/crw.h>

#include "cio.h"
#include "css.h"
#include "ioasm.h"
#include "cio_debug.h"
#include "chp.h"

#define to_channelpath(device) container_of(device, struct channel_path, dev)
#define CHP_INFO_UPDATE_INTERVAL	1*HZ

enum cfg_task_t {
	cfg_none,
	cfg_configure,
	cfg_deconfigure
};

/* Map for pending configure tasks. */
static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
static DEFINE_SPINLOCK(cfg_lock);

/* Map for channel-path status. */
static struct sclp_chp_info chp_info;
static DEFINE_MUTEX(info_lock);

/* Time after which channel-path status may be outdated. */
static unsigned long chp_info_expires;

static struct work_struct cfg_work;

/* Wait queue for configure completion events. */
static DECLARE_WAIT_QUEUE_HEAD(cfg_wait_queue);

/* Set vary state for given chpid. */
static void set_chp_logically_online(struct chp_id chpid, int onoff)
{
	chpid_to_chp(chpid)->state = onoff;
}

/* On success return 0 if channel-path is varied offline, 1 if it is varied
 * online. Return -ENODEV if channel-path is not registered. */
int chp_get_status(struct chp_id chpid)
{
	return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
}

/**
 * chp_get_sch_opm - return opm for subchannel
 * @sch: subchannel
 *
 * Calculate and return the operational path mask (opm) based on the chpids
 * used by the subchannel and the status of the associated channel-paths.
 */
u8 chp_get_sch_opm(struct subchannel *sch)
{
	struct chp_id chpid;
	int opm;
	int i;

	opm = 0;
	chp_id_init(&chpid);
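	/* Build the opm MSB-first: bit 7 ends up corresponding to pmcw.chpid[0]. */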
	for (i = 0; i < 8; i++) {
		opm <<= 1;
		chpid.id = sch->schib.pmcw.chpid[i];
		if (chp_get_status(chpid) != 0)
			opm |= 1;
	}
	return opm;
}
EXPORT_SYMBOL_GPL(chp_get_sch_opm);

/**
 * chp_is_registered - check if a channel-path is registered
 * @chpid: channel-path ID
 *
 * Return non-zero if a channel-path with the given chpid is registered,
 * zero otherwise.
 */
int chp_is_registered(struct chp_id chpid)
{
	return chpid_to_chp(chpid) != NULL;
}

/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
static int s390_vary_chpid(struct chp_id chpid, int on)
{
	char dbf_text[15];
	int status;

	sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
		chpid.id);
	CIO_TRACE_EVENT(2, dbf_text);

	status = chp_get_status(chpid);
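	/* Varying offline a path that is already logically offline is a no-op. */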
	if (!on && !status)
		return 0;

	set_chp_logically_online(chpid, on);
	chsc_chp_vary(chpid, on);
	return 0;
}

/*
 * Channel measurement related functions
 */
static ssize_t chp_measurement_chars_read(struct file *filp,
					  struct kobject *kobj,
					  struct bin_attribute *bin_attr,
					  char *buf, loff_t off, size_t count)
{
	struct channel_path *chp;
	struct device *device;

	device = kobj_to_dev(kobj);
	chp = to_channelpath(device);
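	/* cmg == -1 means no measurement characteristics are available. */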
	if (chp->cmg == -1)
		return 0;

	return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars,
				       sizeof(chp->cmg_chars));
}

static const struct bin_attribute chp_measurement_chars_attr = {
	.attr = {
		.name = "measurement_chars",
		.mode = S_IRUSR,
	},
	.size = sizeof(struct cmg_chars),
	.read = chp_measurement_chars_read,
};

static void chp_measurement_copy_block(struct cmg_entry *buf,
				       struct channel_subsystem *css,
				       struct chp_id chpid)
{
	void *area;
	struct cmg_entry *entry, reference_buf;
	int idx;

	if (chpid.id < 128) {
		area = css->cub_addr1;
		idx = chpid.id;
	} else {
		area = css->cub_addr2;
		idx = chpid.id - 128;
	}
	entry = area + (idx * sizeof(struct cmg_entry));
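	/*
	 * Re-read until two consecutive copies agree on the first value, so
	 * a measurement block that is being updated concurrently is not
	 * returned half-written.
	 */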
	do {
		memcpy(buf, entry, sizeof(*entry));
		memcpy(&reference_buf, entry, sizeof(*entry));
	} while (reference_buf.values[0] != buf->values[0]);
}

static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *bin_attr,
				    char *buf, loff_t off, size_t count)
{
	struct channel_path *chp;
	struct channel_subsystem *css;
	struct device *device;
	unsigned int size;

	device = kobj_to_dev(kobj);
	chp = to_channelpath(device);
	css = to_css(chp->dev.parent);

	size = sizeof(struct cmg_entry);

	/* Only allow single reads. */
	if (off || count < size)
		return 0;
	chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
	count = size;
	return count;
}

static const struct bin_attribute chp_measurement_attr = {
	.attr = {
		.name = "measurement",
		.mode = S_IRUSR,
	},
	.size = sizeof(struct cmg_entry),
	.read = chp_measurement_read,
};

void chp_remove_cmg_attr(struct channel_path *chp)
{
	device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
	device_remove_bin_file(&chp->dev, &chp_measurement_attr);
}

int chp_add_cmg_attr(struct channel_path *chp)
{
	int ret;

	ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
	if (ret)
		return ret;
	ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
	if (ret)
		device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
	return ret;
}

/*
 * Files for the channel path entries.
 */
static ssize_t chp_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);
	int status;

	mutex_lock(&chp->lock);
	status = chp->state;
	mutex_unlock(&chp->lock);

	return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
}

static ssize_t chp_status_write(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct channel_path *cp = to_channelpath(dev);
	char cmd[10];
	int num_args;
	int error;

	num_args = sscanf(buf, "%5s", cmd);
	if (!num_args)
		return count;

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();

	if (!strncasecmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
		mutex_lock(&cp->lock);
		error = s390_vary_chpid(cp->chpid, 1);
		mutex_unlock(&cp->lock);
	} else if (!strncasecmp(cmd, "off", 3) || !strcmp(cmd, "0")) {
		mutex_lock(&cp->lock);
		error = s390_vary_chpid(cp->chpid, 0);
		mutex_unlock(&cp->lock);
	} else
		error = -EINVAL;

	return error < 0 ? error : count;
}

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);

static ssize_t chp_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct channel_path *cp;
	int status;

	cp = to_channelpath(dev);
	status = chp_info_get_status(cp->chpid);
	if (status < 0)
		return status;

	return sysfs_emit(buf, "%d\n", status);
}

static int cfg_wait_idle(void);

static ssize_t chp_configure_write(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct channel_path *cp;
	int val;
	char delim;

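	/* Accept a single integer only; trailing non-whitespace input is rejected. */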
	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	cp = to_channelpath(dev);
	chp_cfg_schedule(cp->chpid, val);
	cfg_wait_idle();

	return count;
}

static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);

static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct channel_path *chp = to_channelpath(dev);
	u8 type;

	mutex_lock(&chp->lock);
	type = chp->desc.desc;
	mutex_unlock(&chp->lock);
	return sprintf(buf, "%x\n", type);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->cmg == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->cmg);
}

static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);

static ssize_t chp_shared_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->shared == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->shared);
}

static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);

static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct channel_path *chp = to_channelpath(dev);
	ssize_t rc;

	mutex_lock(&chp->lock);
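	/* Only report a CHID when flag 0x10 in the fmt1 descriptor marks it as valid. */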
	if (chp->desc_fmt1.flags & 0x10)
		rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid);
	else
		rc = 0;
	mutex_unlock(&chp->lock);

	return rc;
}
static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL);

static ssize_t chp_chid_external_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);
	ssize_t rc;

	mutex_lock(&chp->lock);
	if (chp->desc_fmt1.flags & 0x10)
		rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
	else
		rc = 0;
	mutex_unlock(&chp->lock);

	return rc;
}
static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL);

static ssize_t chp_esc_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);
	ssize_t rc;

	mutex_lock(&chp->lock);
	rc = sprintf(buf, "%x\n", chp->desc_fmt1.esc);
	mutex_unlock(&chp->lock);

	return rc;
}
static DEVICE_ATTR(esc, 0444, chp_esc_show, NULL);

static ssize_t util_string_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct channel_path *chp = to_channelpath(kobj_to_dev(kobj));
	ssize_t rc;

	mutex_lock(&chp->lock);
	rc = memory_read_from_buffer(buf, count, &off, chp->desc_fmt3.util_str,
				     sizeof(chp->desc_fmt3.util_str));
	mutex_unlock(&chp->lock);

	return rc;
}
static BIN_ATTR_RO(util_string,
		   sizeof(((struct channel_path_desc_fmt3 *)0)->util_str));

static struct bin_attribute *chp_bin_attrs[] = {
	&bin_attr_util_string,
	NULL,
};

static struct attribute *chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_configure.attr,
	&dev_attr_type.attr,
	&dev_attr_cmg.attr,
	&dev_attr_shared.attr,
	&dev_attr_chid.attr,
	&dev_attr_chid_external.attr,
	&dev_attr_esc.attr,
	NULL,
};
static struct attribute_group chp_attr_group = {
	.attrs = chp_attrs,
	.bin_attrs = chp_bin_attrs,
};
static const struct attribute_group *chp_attr_groups[] = {
	&chp_attr_group,
	NULL,
};

static void chp_release(struct device *dev)
{
	struct channel_path *cp;

	cp = to_channelpath(dev);
	kfree(cp);
}

/**
 * chp_update_desc - update channel-path description
 * @chp: channel-path
 *
 * Update the channel-path description of the specified channel-path
 * including channel measurement related information.
 * Return zero on success, non-zero otherwise.
 */
int chp_update_desc(struct channel_path *chp)
{
	int rc;

	rc = chsc_determine_fmt0_channel_path_desc(chp->chpid, &chp->desc);
	if (rc)
		return rc;

	/*
	 * Fetching the following data is optional. Not all machines or
	 * hypervisors implement the required chsc commands.
	 */
	chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
	chsc_determine_fmt3_channel_path_desc(chp->chpid, &chp->desc_fmt3);
	chsc_get_channel_measurement_chars(chp);

	return 0;
}

/**
 * chp_new - register a new channel-path
 * @chpid: channel-path ID
 *
 * Create and register data structure representing new channel-path. Return
 * zero on success, non-zero otherwise.
 */
int chp_new(struct chp_id chpid)
{
	struct channel_subsystem *css = css_by_id(chpid.cssid);
	struct channel_path *chp;
	int ret = 0;

	mutex_lock(&css->mutex);
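	/* Nothing to do if this channel path is already registered. */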
	if (chp_is_registered(chpid))
		goto out;

	chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp) {
		ret = -ENOMEM;
		goto out;
	}
	/* fill in status, etc. */
	chp->chpid = chpid;
	chp->state = 1;
	chp->dev.parent = &css->device;
	chp->dev.groups = chp_attr_groups;
	chp->dev.release = chp_release;
	mutex_init(&chp->lock);

	/* Obtain channel path description and fill it in. */
	ret = chp_update_desc(chp);
	if (ret)
		goto out_free;
	if ((chp->desc.flags & 0x80) == 0) {
		ret = -ENODEV;
		goto out_free;
	}
	dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);

	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
			      chpid.cssid, chpid.id, ret);
		put_device(&chp->dev);
		goto out;
	}

	if (css->cm_enabled) {
		ret = chp_add_cmg_attr(chp);
		if (ret) {
			device_unregister(&chp->dev);
			goto out;
		}
	}
	css->chps[chpid.id] = chp;
	goto out;
out_free:
	kfree(chp);
out:
	mutex_unlock(&css->mutex);
	return ret;
}

/**
 * chp_get_chp_desc - return newly allocated channel-path description
 * @chpid: channel-path ID
 *
 * On success return a newly allocated copy of the channel-path description
 * data associated with the given channel-path ID. Return %NULL on error.
 */
struct channel_path_desc_fmt0 *chp_get_chp_desc(struct chp_id chpid)
{
	struct channel_path *chp;
	struct channel_path_desc_fmt0 *desc;

	chp = chpid_to_chp(chpid);
	if (!chp)
		return NULL;
	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	mutex_lock(&chp->lock);
	memcpy(desc, &chp->desc, sizeof(*desc));
	mutex_unlock(&chp->lock);
	return desc;
}

/**
 * chp_process_crw - process channel-path status change
 * @crw0: channel report-word to handle
 * @crw1: second channel-report word (always NULL)
 * @overflow: crw overflow indication
 *
 * Handle channel-report-words indicating that the status of a channel-path
 * has changed.
 */
static void chp_process_crw(struct crw *crw0, struct crw *crw1,
			    int overflow)
{
	struct chp_id chpid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	/*
	 * Check for solicited machine checks. These are
	 * created by reset channel path and need not be
	 * handled here.
	 */
	if (crw0->slct) {
		CIO_CRW_EVENT(2, "solicited machine check for "
			      "channel path %02X\n", crw0->rsid);
		return;
	}
	chp_id_init(&chpid);
	chpid.id = crw0->rsid;
	switch (crw0->erc) {
	case CRW_ERC_IPARM: /* Path has come. */
	case CRW_ERC_INIT:
		chp_new(chpid);
		chsc_chp_online(chpid);
		break;
	case CRW_ERC_PERRI: /* Path has gone. */
	case CRW_ERC_PERRN:
		chsc_chp_offline(chpid);
		break;
	default:
		CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
			      crw0->erc);
	}
}

int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
{
	int i;
	int mask;
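	/* The SSD path mask is MSB-first: bit 0x80 corresponds to chpid[0]. */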
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(ssd->path_mask & mask))
			continue;
		if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
			continue;
		if ((ssd->fla_valid_mask & mask) &&
		    ((ssd->fla[i] & link->fla_mask) != link->fla))
			continue;
		return mask;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(chp_ssd_get_mask);

static inline int info_bit_num(struct chp_id id)
{
	return id.id + id.cssid * (__MAX_CHPID + 1);
}

/* Force chp_info refresh on next call to info_update(). */
static void info_expire(void)
{
	mutex_lock(&info_lock);
	chp_info_expires = jiffies - 1;
	mutex_unlock(&info_lock);
}

/* Ensure that chp_info is up-to-date. */
static int info_update(void)
{
	int rc;

	mutex_lock(&info_lock);
	rc = 0;
	if (time_after(jiffies, chp_info_expires)) {
		/* Data is too old, update. */
		rc = sclp_chp_read_info(&chp_info);
		chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
	}
	mutex_unlock(&info_lock);

	return rc;
}

/**
 * chp_info_get_status - retrieve configure status of a channel-path
 * @chpid: channel-path ID
 *
 * On success, return 0 for standby, 1 for configured, 2 for reserved,
 * 3 for not recognized. Return negative error code on error.
 */
int chp_info_get_status(struct chp_id chpid)
{
	int rc;
	int bit;

	rc = info_update();
	if (rc)
		return rc;

	bit = info_bit_num(chpid);
	mutex_lock(&info_lock);
	if (!chp_test_bit(chp_info.recognized, bit))
		rc = CHP_STATUS_NOT_RECOGNIZED;
	else if (chp_test_bit(chp_info.configured, bit))
		rc = CHP_STATUS_CONFIGURED;
	else if (chp_test_bit(chp_info.standby, bit))
		rc = CHP_STATUS_STANDBY;
	else
		rc = CHP_STATUS_RESERVED;
	mutex_unlock(&info_lock);

	return rc;
}

/* Return configure task for chpid. */
static enum cfg_task_t cfg_get_task(struct chp_id chpid)
{
	return chp_cfg_task[chpid.cssid][chpid.id];
}

/* Set configure task for chpid. */
static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
{
	chp_cfg_task[chpid.cssid][chpid.id] = cfg;
}

/* Fetch the first configure task. Set chpid accordingly. */
static enum cfg_task_t chp_cfg_fetch_task(struct chp_id *chpid)
{
	enum cfg_task_t t = cfg_none;

	chp_id_for_each(chpid) {
		t = cfg_get_task(*chpid);
		if (t != cfg_none)
			break;
	}

	return t;
}

/* Perform one configure/deconfigure request. Reschedule work function until
 * last request. */
static void cfg_func(struct work_struct *work)
{
	struct chp_id chpid;
	enum cfg_task_t t;
	int rc;

	spin_lock(&cfg_lock);
	t = chp_cfg_fetch_task(&chpid);
	spin_unlock(&cfg_lock);

	switch (t) {
	case cfg_configure:
		rc = sclp_chp_configure(chpid);
		if (rc)
			CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
				      "%d\n", chpid.cssid, chpid.id, rc);
		else {
			info_expire();
			chsc_chp_online(chpid);
		}
		break;
	case cfg_deconfigure:
		rc = sclp_chp_deconfigure(chpid);
		if (rc)
			CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
				      "%d\n", chpid.cssid, chpid.id, rc);
		else {
			info_expire();
			chsc_chp_offline(chpid);
		}
		break;
	case cfg_none:
		/* Get updated information after last change. */
		info_update();
		wake_up_interruptible(&cfg_wait_queue);
		return;
	}
	spin_lock(&cfg_lock);
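	/* Only clear the task if no new request was queued for this chpid meanwhile. */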
	if (t == cfg_get_task(chpid))
		cfg_set_task(chpid, cfg_none);
	spin_unlock(&cfg_lock);
	schedule_work(&cfg_work);
}

/**
 * chp_cfg_schedule - schedule chpid configuration request
 * @chpid: channel-path ID
 * @configure: Non-zero for configure, zero for deconfigure
 *
 * Schedule a channel-path configuration/deconfiguration request.
 */
void chp_cfg_schedule(struct chp_id chpid, int configure)
{
	CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
		      configure);
	spin_lock(&cfg_lock);
	cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
	spin_unlock(&cfg_lock);
	schedule_work(&cfg_work);
}

/**
 * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
 * @chpid: channel-path ID
 *
 * Cancel an active channel-path deconfiguration request if it has not yet
 * been performed.
 */
void chp_cfg_cancel_deconfigure(struct chp_id chpid)
{
	CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
	spin_lock(&cfg_lock);
	if (cfg_get_task(chpid) == cfg_deconfigure)
		cfg_set_task(chpid, cfg_none);
	spin_unlock(&cfg_lock);
}

static bool cfg_idle(void)
{
	struct chp_id chpid;
	enum cfg_task_t t;

	spin_lock(&cfg_lock);
	t = chp_cfg_fetch_task(&chpid);
	spin_unlock(&cfg_lock);

	return t == cfg_none;
}

static int cfg_wait_idle(void)
{
	if (wait_event_interruptible(cfg_wait_queue, cfg_idle()))
		return -ERESTARTSYS;
	return 0;
}

static int __init chp_init(void)
{
	struct chp_id chpid;
	int state, ret;

	ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
	if (ret)
		return ret;
	INIT_WORK(&cfg_work, cfg_func);
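	/*
	 * If no channel-path information is available from SCLP, skip the
	 * initial registration pass; paths can still be added later via CRW
	 * events handled by chp_process_crw().
	 */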
	if (info_update())
		return 0;
	/* Register available channel-paths. */
	chp_id_for_each(&chpid) {
		state = chp_info_get_status(chpid);
		if (state == CHP_STATUS_CONFIGURED ||
		    state == CHP_STATUS_STANDBY)
			chp_new(chpid);
	}

	return 0;
}

subsys_initcall(chp_init);