cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

css.c (33670B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * driver for channel subsystem
      4 *
      5 * Copyright IBM Corp. 2002, 2010
      6 *
      7 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
      8 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
      9 */
     10
     11#define KMSG_COMPONENT "cio"
     12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
     13
     14#include <linux/export.h>
     15#include <linux/init.h>
     16#include <linux/device.h>
     17#include <linux/slab.h>
     18#include <linux/errno.h>
     19#include <linux/list.h>
     20#include <linux/reboot.h>
     21#include <linux/proc_fs.h>
     22#include <linux/genalloc.h>
     23#include <linux/dma-mapping.h>
     24#include <asm/isc.h>
     25#include <asm/crw.h>
     26
     27#include "css.h"
     28#include "cio.h"
     29#include "blacklist.h"
     30#include "cio_debug.h"
     31#include "ioasm.h"
     32#include "chsc.h"
     33#include "device.h"
     34#include "idset.h"
     35#include "chp.h"
     36
     37int css_init_done = 0;
     38int max_ssid;
     39
     40#define MAX_CSS_IDX 0
     41struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
     42static struct bus_type css_bus_type;
     43
     44int
     45for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
     46{
     47	struct subchannel_id schid;
     48	int ret;
     49
     50	init_subchannel_id(&schid);
     51	do {
     52		do {
     53			ret = fn(schid, data);
     54			if (ret)
     55				break;
     56		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
     57		schid.sch_no = 0;
     58	} while (schid.ssid++ < max_ssid);
     59	return ret;
     60}
     61
     62struct cb_data {
     63	void *data;
     64	struct idset *set;
     65	int (*fn_known_sch)(struct subchannel *, void *);
     66	int (*fn_unknown_sch)(struct subchannel_id, void *);
     67};
     68
     69static int call_fn_known_sch(struct device *dev, void *data)
     70{
     71	struct subchannel *sch = to_subchannel(dev);
     72	struct cb_data *cb = data;
     73	int rc = 0;
     74
     75	if (cb->set)
     76		idset_sch_del(cb->set, sch->schid);
     77	if (cb->fn_known_sch)
     78		rc = cb->fn_known_sch(sch, cb->data);
     79	return rc;
     80}
     81
     82static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
     83{
     84	struct cb_data *cb = data;
     85	int rc = 0;
     86
     87	if (idset_sch_contains(cb->set, schid))
     88		rc = cb->fn_unknown_sch(schid, cb->data);
     89	return rc;
     90}
     91
     92static int call_fn_all_sch(struct subchannel_id schid, void *data)
     93{
     94	struct cb_data *cb = data;
     95	struct subchannel *sch;
     96	int rc = 0;
     97
     98	sch = get_subchannel_by_schid(schid);
     99	if (sch) {
    100		if (cb->fn_known_sch)
    101			rc = cb->fn_known_sch(sch, cb->data);
    102		put_device(&sch->dev);
    103	} else {
    104		if (cb->fn_unknown_sch)
    105			rc = cb->fn_unknown_sch(schid, cb->data);
    106	}
    107
    108	return rc;
    109}
    110
    111int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
    112			       int (*fn_unknown)(struct subchannel_id,
    113			       void *), void *data)
    114{
    115	struct cb_data cb;
    116	int rc;
    117
    118	cb.data = data;
    119	cb.fn_known_sch = fn_known;
    120	cb.fn_unknown_sch = fn_unknown;
    121
    122	if (fn_known && !fn_unknown) {
    123		/* Skip idset allocation in case of known-only loop. */
    124		cb.set = NULL;
    125		return bus_for_each_dev(&css_bus_type, NULL, &cb,
    126					call_fn_known_sch);
    127	}
    128
    129	cb.set = idset_sch_new();
    130	if (!cb.set)
    131		/* fall back to brute force scanning in case of oom */
    132		return for_each_subchannel(call_fn_all_sch, &cb);
    133
    134	idset_fill(cb.set);
    135
    136	/* Process registered subchannels. */
    137	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
    138	if (rc)
    139		goto out;
    140	/* Process unregistered subchannels. */
    141	if (fn_unknown)
    142		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
    143out:
    144	idset_free(cb.set);
    145
    146	return rc;
    147}
    148
    149static void css_sch_todo(struct work_struct *work);
    150
    151static int css_sch_create_locks(struct subchannel *sch)
    152{
    153	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
    154	if (!sch->lock)
    155		return -ENOMEM;
    156
    157	spin_lock_init(sch->lock);
    158	mutex_init(&sch->reg_mutex);
    159
    160	return 0;
    161}
    162
    163static void css_subchannel_release(struct device *dev)
    164{
    165	struct subchannel *sch = to_subchannel(dev);
    166
    167	sch->config.intparm = 0;
    168	cio_commit_config(sch);
    169	kfree(sch->driver_override);
    170	kfree(sch->lock);
    171	kfree(sch);
    172}
    173
    174static int css_validate_subchannel(struct subchannel_id schid,
    175				   struct schib *schib)
    176{
    177	int err;
    178
    179	switch (schib->pmcw.st) {
    180	case SUBCHANNEL_TYPE_IO:
    181	case SUBCHANNEL_TYPE_MSG:
    182		if (!css_sch_is_valid(schib))
    183			err = -ENODEV;
    184		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
    185			CIO_MSG_EVENT(6, "Blacklisted device detected "
    186				      "at devno %04X, subchannel set %x\n",
    187				      schib->pmcw.dev, schid.ssid);
    188			err = -ENODEV;
    189		} else
    190			err = 0;
    191		break;
    192	default:
    193		err = 0;
    194	}
    195	if (err)
    196		goto out;
    197
    198	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
    199		      schid.ssid, schid.sch_no, schib->pmcw.st);
    200out:
    201	return err;
    202}
    203
    204struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
    205					struct schib *schib)
    206{
    207	struct subchannel *sch;
    208	int ret;
    209
    210	ret = css_validate_subchannel(schid, schib);
    211	if (ret < 0)
    212		return ERR_PTR(ret);
    213
    214	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
    215	if (!sch)
    216		return ERR_PTR(-ENOMEM);
    217
    218	sch->schid = schid;
    219	sch->schib = *schib;
    220	sch->st = schib->pmcw.st;
    221
    222	ret = css_sch_create_locks(sch);
    223	if (ret)
    224		goto err;
    225
    226	INIT_WORK(&sch->todo_work, css_sch_todo);
    227	sch->dev.release = &css_subchannel_release;
    228	sch->dev.dma_mask = &sch->dma_mask;
    229	device_initialize(&sch->dev);
    230	/*
    231	 * The physical addresses for some of the dma structures that can
    232	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
    233	 */
    234	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
    235	if (ret)
    236		goto err;
    237	/*
    238	 * But we don't have such restrictions imposed on the stuff that
    239	 * is handled by the streaming API.
    240	 */
    241	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
    242	if (ret)
    243		goto err;
    244
    245	return sch;
    246
    247err:
    248	kfree(sch);
    249	return ERR_PTR(ret);
    250}
    251
    252static int css_sch_device_register(struct subchannel *sch)
    253{
    254	int ret;
    255
    256	mutex_lock(&sch->reg_mutex);
    257	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
    258		     sch->schid.sch_no);
    259	ret = device_add(&sch->dev);
    260	mutex_unlock(&sch->reg_mutex);
    261	return ret;
    262}
    263
    264/**
    265 * css_sch_device_unregister - unregister a subchannel
    266 * @sch: subchannel to be unregistered
    267 */
    268void css_sch_device_unregister(struct subchannel *sch)
    269{
    270	mutex_lock(&sch->reg_mutex);
    271	if (device_is_registered(&sch->dev))
    272		device_unregister(&sch->dev);
    273	mutex_unlock(&sch->reg_mutex);
    274}
    275EXPORT_SYMBOL_GPL(css_sch_device_unregister);
    276
    277static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
    278{
    279	int i;
    280	int mask;
    281
    282	memset(ssd, 0, sizeof(struct chsc_ssd_info));
    283	ssd->path_mask = pmcw->pim;
    284	for (i = 0; i < 8; i++) {
    285		mask = 0x80 >> i;
    286		if (pmcw->pim & mask) {
    287			chp_id_init(&ssd->chpid[i]);
    288			ssd->chpid[i].id = pmcw->chpid[i];
    289		}
    290	}
    291}
    292
    293static void ssd_register_chpids(struct chsc_ssd_info *ssd)
    294{
    295	int i;
    296	int mask;
    297
    298	for (i = 0; i < 8; i++) {
    299		mask = 0x80 >> i;
    300		if (ssd->path_mask & mask)
    301			chp_new(ssd->chpid[i]);
    302	}
    303}
    304
    305void css_update_ssd_info(struct subchannel *sch)
    306{
    307	int ret;
    308
    309	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
    310	if (ret)
    311		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
    312
    313	ssd_register_chpids(&sch->ssd_info);
    314}
    315
    316static ssize_t type_show(struct device *dev, struct device_attribute *attr,
    317			 char *buf)
    318{
    319	struct subchannel *sch = to_subchannel(dev);
    320
    321	return sprintf(buf, "%01x\n", sch->st);
    322}
    323
    324static DEVICE_ATTR_RO(type);
    325
    326static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
    327			     char *buf)
    328{
    329	struct subchannel *sch = to_subchannel(dev);
    330
    331	return sprintf(buf, "css:t%01X\n", sch->st);
    332}
    333
    334static DEVICE_ATTR_RO(modalias);
    335
    336static ssize_t driver_override_store(struct device *dev,
    337				     struct device_attribute *attr,
    338				     const char *buf, size_t count)
    339{
    340	struct subchannel *sch = to_subchannel(dev);
    341	int ret;
    342
    343	ret = driver_set_override(dev, &sch->driver_override, buf, count);
    344	if (ret)
    345		return ret;
    346
    347	return count;
    348}
    349
    350static ssize_t driver_override_show(struct device *dev,
    351				    struct device_attribute *attr, char *buf)
    352{
    353	struct subchannel *sch = to_subchannel(dev);
    354	ssize_t len;
    355
    356	device_lock(dev);
    357	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
    358	device_unlock(dev);
    359	return len;
    360}
    361static DEVICE_ATTR_RW(driver_override);
    362
    363static struct attribute *subch_attrs[] = {
    364	&dev_attr_type.attr,
    365	&dev_attr_modalias.attr,
    366	&dev_attr_driver_override.attr,
    367	NULL,
    368};
    369
    370static struct attribute_group subch_attr_group = {
    371	.attrs = subch_attrs,
    372};
    373
    374static const struct attribute_group *default_subch_attr_groups[] = {
    375	&subch_attr_group,
    376	NULL,
    377};
    378
    379static ssize_t chpids_show(struct device *dev,
    380			   struct device_attribute *attr,
    381			   char *buf)
    382{
    383	struct subchannel *sch = to_subchannel(dev);
    384	struct chsc_ssd_info *ssd = &sch->ssd_info;
    385	ssize_t ret = 0;
    386	int mask;
    387	int chp;
    388
    389	for (chp = 0; chp < 8; chp++) {
    390		mask = 0x80 >> chp;
    391		if (ssd->path_mask & mask)
    392			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
    393		else
    394			ret += sprintf(buf + ret, "00 ");
    395	}
    396	ret += sprintf(buf + ret, "\n");
    397	return ret;
    398}
    399static DEVICE_ATTR_RO(chpids);
    400
    401static ssize_t pimpampom_show(struct device *dev,
    402			      struct device_attribute *attr,
    403			      char *buf)
    404{
    405	struct subchannel *sch = to_subchannel(dev);
    406	struct pmcw *pmcw = &sch->schib.pmcw;
    407
    408	return sprintf(buf, "%02x %02x %02x\n",
    409		       pmcw->pim, pmcw->pam, pmcw->pom);
    410}
    411static DEVICE_ATTR_RO(pimpampom);
    412
    413static ssize_t dev_busid_show(struct device *dev,
    414			      struct device_attribute *attr,
    415			      char *buf)
    416{
    417	struct subchannel *sch = to_subchannel(dev);
    418	struct pmcw *pmcw = &sch->schib.pmcw;
    419
    420	if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
    421	    (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
    422		return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
    423				  pmcw->dev);
    424	else
    425		return sysfs_emit(buf, "none\n");
    426}
    427static DEVICE_ATTR_RO(dev_busid);
    428
    429static struct attribute *io_subchannel_type_attrs[] = {
    430	&dev_attr_chpids.attr,
    431	&dev_attr_pimpampom.attr,
    432	&dev_attr_dev_busid.attr,
    433	NULL,
    434};
    435ATTRIBUTE_GROUPS(io_subchannel_type);
    436
    437static const struct device_type io_subchannel_type = {
    438	.groups = io_subchannel_type_groups,
    439};
    440
    441int css_register_subchannel(struct subchannel *sch)
    442{
    443	int ret;
    444
    445	/* Initialize the subchannel structure */
    446	sch->dev.parent = &channel_subsystems[0]->device;
    447	sch->dev.bus = &css_bus_type;
    448	sch->dev.groups = default_subch_attr_groups;
    449
    450	if (sch->st == SUBCHANNEL_TYPE_IO)
    451		sch->dev.type = &io_subchannel_type;
    452
    453	css_update_ssd_info(sch);
    454	/* make it known to the system */
    455	ret = css_sch_device_register(sch);
    456	if (ret) {
    457		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
    458			      sch->schid.ssid, sch->schid.sch_no, ret);
    459		return ret;
    460	}
    461	return ret;
    462}
    463
    464static int css_probe_device(struct subchannel_id schid, struct schib *schib)
    465{
    466	struct subchannel *sch;
    467	int ret;
    468
    469	sch = css_alloc_subchannel(schid, schib);
    470	if (IS_ERR(sch))
    471		return PTR_ERR(sch);
    472
    473	ret = css_register_subchannel(sch);
    474	if (ret)
    475		put_device(&sch->dev);
    476
    477	return ret;
    478}
    479
    480static int
    481check_subchannel(struct device *dev, const void *data)
    482{
    483	struct subchannel *sch;
    484	struct subchannel_id *schid = (void *)data;
    485
    486	sch = to_subchannel(dev);
    487	return schid_equal(&sch->schid, schid);
    488}
    489
    490struct subchannel *
    491get_subchannel_by_schid(struct subchannel_id schid)
    492{
    493	struct device *dev;
    494
    495	dev = bus_find_device(&css_bus_type, NULL,
    496			      &schid, check_subchannel);
    497
    498	return dev ? to_subchannel(dev) : NULL;
    499}
    500
    501/**
    502 * css_sch_is_valid() - check if a subchannel is valid
    503 * @schib: subchannel information block for the subchannel
    504 */
    505int css_sch_is_valid(struct schib *schib)
    506{
    507	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
    508		return 0;
    509	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
    510		return 0;
    511	return 1;
    512}
    513EXPORT_SYMBOL_GPL(css_sch_is_valid);
    514
    515static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
    516{
    517	struct schib schib;
    518	int ccode;
    519
    520	if (!slow) {
    521		/* Will be done on the slow path. */
    522		return -EAGAIN;
    523	}
    524	/*
    525	 * The first subchannel that is not-operational (ccode==3)
    526	 * indicates that there aren't any more devices available.
    527	 * If stsch gets an exception, it means the current subchannel set
    528	 * is not valid.
    529	 */
    530	ccode = stsch(schid, &schib);
    531	if (ccode)
    532		return (ccode == 3) ? -ENXIO : ccode;
    533
    534	return css_probe_device(schid, &schib);
    535}
    536
    537static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
    538{
    539	int ret = 0;
    540
    541	if (sch->driver) {
    542		if (sch->driver->sch_event)
    543			ret = sch->driver->sch_event(sch, slow);
    544		else
    545			dev_dbg(&sch->dev,
    546				"Got subchannel machine check but "
    547				"no sch_event handler provided.\n");
    548	}
    549	if (ret != 0 && ret != -EAGAIN) {
    550		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
    551			      sch->schid.ssid, sch->schid.sch_no, ret);
    552	}
    553	return ret;
    554}
    555
    556static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
    557{
    558	struct subchannel *sch;
    559	int ret;
    560
    561	sch = get_subchannel_by_schid(schid);
    562	if (sch) {
    563		ret = css_evaluate_known_subchannel(sch, slow);
    564		put_device(&sch->dev);
    565	} else
    566		ret = css_evaluate_new_subchannel(schid, slow);
    567	if (ret == -EAGAIN)
    568		css_schedule_eval(schid);
    569}
    570
    571/**
    572 * css_sched_sch_todo - schedule a subchannel operation
    573 * @sch: subchannel
    574 * @todo: todo
    575 *
    576 * Schedule the operation identified by @todo to be performed on the slow path
    577 * workqueue. Do nothing if another operation with higher priority is already
    578 * scheduled. Needs to be called with subchannel lock held.
    579 */
    580void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
    581{
    582	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
    583		      sch->schid.ssid, sch->schid.sch_no, todo);
    584	if (sch->todo >= todo)
    585		return;
    586	/* Get workqueue ref. */
    587	if (!get_device(&sch->dev))
    588		return;
    589	sch->todo = todo;
    590	if (!queue_work(cio_work_q, &sch->todo_work)) {
    591		/* Already queued, release workqueue ref. */
    592		put_device(&sch->dev);
    593	}
    594}
    595EXPORT_SYMBOL_GPL(css_sched_sch_todo);
    596
    597static void css_sch_todo(struct work_struct *work)
    598{
    599	struct subchannel *sch;
    600	enum sch_todo todo;
    601	int ret;
    602
    603	sch = container_of(work, struct subchannel, todo_work);
    604	/* Find out todo. */
    605	spin_lock_irq(sch->lock);
    606	todo = sch->todo;
    607	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
    608		      sch->schid.sch_no, todo);
    609	sch->todo = SCH_TODO_NOTHING;
    610	spin_unlock_irq(sch->lock);
    611	/* Perform todo. */
    612	switch (todo) {
    613	case SCH_TODO_NOTHING:
    614		break;
    615	case SCH_TODO_EVAL:
    616		ret = css_evaluate_known_subchannel(sch, 1);
    617		if (ret == -EAGAIN) {
    618			spin_lock_irq(sch->lock);
    619			css_sched_sch_todo(sch, todo);
    620			spin_unlock_irq(sch->lock);
    621		}
    622		break;
    623	case SCH_TODO_UNREG:
    624		css_sch_device_unregister(sch);
    625		break;
    626	}
    627	/* Release workqueue ref. */
    628	put_device(&sch->dev);
    629}
    630
    631static struct idset *slow_subchannel_set;
    632static DEFINE_SPINLOCK(slow_subchannel_lock);
    633static DECLARE_WAIT_QUEUE_HEAD(css_eval_wq);
    634static atomic_t css_eval_scheduled;
    635
    636static int __init slow_subchannel_init(void)
    637{
    638	atomic_set(&css_eval_scheduled, 0);
    639	slow_subchannel_set = idset_sch_new();
    640	if (!slow_subchannel_set) {
    641		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
    642		return -ENOMEM;
    643	}
    644	return 0;
    645}
    646
    647static int slow_eval_known_fn(struct subchannel *sch, void *data)
    648{
    649	int eval;
    650	int rc;
    651
    652	spin_lock_irq(&slow_subchannel_lock);
    653	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
    654	idset_sch_del(slow_subchannel_set, sch->schid);
    655	spin_unlock_irq(&slow_subchannel_lock);
    656	if (eval) {
    657		rc = css_evaluate_known_subchannel(sch, 1);
    658		if (rc == -EAGAIN)
    659			css_schedule_eval(sch->schid);
    660		/*
    661		 * The loop might take long time for platforms with lots of
    662		 * known devices. Allow scheduling here.
    663		 */
    664		cond_resched();
    665	}
    666	return 0;
    667}
    668
    669static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
    670{
    671	int eval;
    672	int rc = 0;
    673
    674	spin_lock_irq(&slow_subchannel_lock);
    675	eval = idset_sch_contains(slow_subchannel_set, schid);
    676	idset_sch_del(slow_subchannel_set, schid);
    677	spin_unlock_irq(&slow_subchannel_lock);
    678	if (eval) {
    679		rc = css_evaluate_new_subchannel(schid, 1);
    680		switch (rc) {
    681		case -EAGAIN:
    682			css_schedule_eval(schid);
    683			rc = 0;
    684			break;
    685		case -ENXIO:
    686		case -ENOMEM:
    687		case -EIO:
    688			/* These should abort looping */
    689			spin_lock_irq(&slow_subchannel_lock);
    690			idset_sch_del_subseq(slow_subchannel_set, schid);
    691			spin_unlock_irq(&slow_subchannel_lock);
    692			break;
    693		default:
    694			rc = 0;
    695		}
    696		/* Allow scheduling here since the containing loop might
    697		 * take a while.  */
    698		cond_resched();
    699	}
    700	return rc;
    701}
    702
    703static void css_slow_path_func(struct work_struct *unused)
    704{
    705	unsigned long flags;
    706
    707	CIO_TRACE_EVENT(4, "slowpath");
    708	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
    709				   NULL);
    710	spin_lock_irqsave(&slow_subchannel_lock, flags);
    711	if (idset_is_empty(slow_subchannel_set)) {
    712		atomic_set(&css_eval_scheduled, 0);
    713		wake_up(&css_eval_wq);
    714	}
    715	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
    716}
    717
    718static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
    719struct workqueue_struct *cio_work_q;
    720
    721void css_schedule_eval(struct subchannel_id schid)
    722{
    723	unsigned long flags;
    724
    725	spin_lock_irqsave(&slow_subchannel_lock, flags);
    726	idset_sch_add(slow_subchannel_set, schid);
    727	atomic_set(&css_eval_scheduled, 1);
    728	queue_delayed_work(cio_work_q, &slow_path_work, 0);
    729	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
    730}
    731
    732void css_schedule_eval_all(void)
    733{
    734	unsigned long flags;
    735
    736	spin_lock_irqsave(&slow_subchannel_lock, flags);
    737	idset_fill(slow_subchannel_set);
    738	atomic_set(&css_eval_scheduled, 1);
    739	queue_delayed_work(cio_work_q, &slow_path_work, 0);
    740	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
    741}
    742
    743static int __unset_registered(struct device *dev, void *data)
    744{
    745	struct idset *set = data;
    746	struct subchannel *sch = to_subchannel(dev);
    747
    748	idset_sch_del(set, sch->schid);
    749	return 0;
    750}
    751
    752static int __unset_online(struct device *dev, void *data)
    753{
    754	struct idset *set = data;
    755	struct subchannel *sch = to_subchannel(dev);
    756	struct ccw_device *cdev;
    757
    758	if (sch->st == SUBCHANNEL_TYPE_IO) {
    759		cdev = sch_get_cdev(sch);
    760		if (cdev && cdev->online)
    761			idset_sch_del(set, sch->schid);
    762	}
    763
    764	return 0;
    765}
    766
    767void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
    768{
    769	unsigned long flags;
    770	struct idset *set;
    771
    772	/* Find unregistered subchannels. */
    773	set = idset_sch_new();
    774	if (!set) {
    775		/* Fallback. */
    776		css_schedule_eval_all();
    777		return;
    778	}
    779	idset_fill(set);
    780	switch (cond) {
    781	case CSS_EVAL_UNREG:
    782		bus_for_each_dev(&css_bus_type, NULL, set, __unset_registered);
    783		break;
    784	case CSS_EVAL_NOT_ONLINE:
    785		bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
    786		break;
    787	default:
    788		break;
    789	}
    790
    791	/* Apply to slow_subchannel_set. */
    792	spin_lock_irqsave(&slow_subchannel_lock, flags);
    793	idset_add_set(slow_subchannel_set, set);
    794	atomic_set(&css_eval_scheduled, 1);
    795	queue_delayed_work(cio_work_q, &slow_path_work, delay);
    796	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
    797	idset_free(set);
    798}
    799
    800void css_wait_for_slow_path(void)
    801{
    802	flush_workqueue(cio_work_q);
    803}
    804
    805/* Schedule reprobing of all unregistered subchannels. */
    806void css_schedule_reprobe(void)
    807{
    808	/* Schedule with a delay to allow merging of subsequent calls. */
    809	css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ);
    810}
    811EXPORT_SYMBOL_GPL(css_schedule_reprobe);
    812
    813/*
    814 * Called from the machine check handler for subchannel report words.
    815 */
    816static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
    817{
    818	struct subchannel_id mchk_schid;
    819	struct subchannel *sch;
    820
    821	if (overflow) {
    822		css_schedule_eval_all();
    823		return;
    824	}
    825	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
    826		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
    827		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
    828		      crw0->erc, crw0->rsid);
    829	if (crw1)
    830		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
    831			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
    832			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
    833			      crw1->anc, crw1->erc, crw1->rsid);
    834	init_subchannel_id(&mchk_schid);
    835	mchk_schid.sch_no = crw0->rsid;
    836	if (crw1)
    837		mchk_schid.ssid = (crw1->rsid >> 4) & 3;
    838
    839	if (crw0->erc == CRW_ERC_PMOD) {
    840		sch = get_subchannel_by_schid(mchk_schid);
    841		if (sch) {
    842			css_update_ssd_info(sch);
    843			put_device(&sch->dev);
    844		}
    845	}
    846	/*
    847	 * Since we are always presented with IPI in the CRW, we have to
    848	 * use stsch() to find out if the subchannel in question has come
    849	 * or gone.
    850	 */
    851	css_evaluate_subchannel(mchk_schid, 0);
    852}
    853
    854static void __init
    855css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
    856{
    857	struct cpuid cpu_id;
    858
    859	if (css_general_characteristics.mcss) {
    860		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
    861		css->global_pgid.pgid_high.ext_cssid.cssid =
    862			css->id_valid ? css->cssid : 0;
    863	} else {
    864		css->global_pgid.pgid_high.cpu_addr = stap();
    865	}
    866	get_cpu_id(&cpu_id);
    867	css->global_pgid.cpu_id = cpu_id.ident;
    868	css->global_pgid.cpu_model = cpu_id.machine;
    869	css->global_pgid.tod_high = tod_high;
    870}
    871
    872static void channel_subsystem_release(struct device *dev)
    873{
    874	struct channel_subsystem *css = to_css(dev);
    875
    876	mutex_destroy(&css->mutex);
    877	kfree(css);
    878}
    879
    880static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
    881			       char *buf)
    882{
    883	struct channel_subsystem *css = to_css(dev);
    884
    885	if (!css->id_valid)
    886		return -EINVAL;
    887
    888	return sprintf(buf, "%x\n", css->cssid);
    889}
    890static DEVICE_ATTR_RO(real_cssid);
    891
    892static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
    893			    const char *buf, size_t count)
    894{
    895	CIO_TRACE_EVENT(4, "usr-rescan");
    896
    897	css_schedule_eval_all();
    898	css_complete_work();
    899
    900	return count;
    901}
    902static DEVICE_ATTR_WO(rescan);
    903
    904static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
    905			      char *buf)
    906{
    907	struct channel_subsystem *css = to_css(dev);
    908	int ret;
    909
    910	mutex_lock(&css->mutex);
    911	ret = sprintf(buf, "%x\n", css->cm_enabled);
    912	mutex_unlock(&css->mutex);
    913	return ret;
    914}
    915
    916static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
    917			       const char *buf, size_t count)
    918{
    919	struct channel_subsystem *css = to_css(dev);
    920	unsigned long val;
    921	int ret;
    922
    923	ret = kstrtoul(buf, 16, &val);
    924	if (ret)
    925		return ret;
    926	mutex_lock(&css->mutex);
    927	switch (val) {
    928	case 0:
    929		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
    930		break;
    931	case 1:
    932		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
    933		break;
    934	default:
    935		ret = -EINVAL;
    936	}
    937	mutex_unlock(&css->mutex);
    938	return ret < 0 ? ret : count;
    939}
    940static DEVICE_ATTR_RW(cm_enable);
    941
    942static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
    943			      int index)
    944{
    945	return css_chsc_characteristics.secm ? attr->mode : 0;
    946}
    947
    948static struct attribute *cssdev_attrs[] = {
    949	&dev_attr_real_cssid.attr,
    950	&dev_attr_rescan.attr,
    951	NULL,
    952};
    953
    954static struct attribute_group cssdev_attr_group = {
    955	.attrs = cssdev_attrs,
    956};
    957
    958static struct attribute *cssdev_cm_attrs[] = {
    959	&dev_attr_cm_enable.attr,
    960	NULL,
    961};
    962
    963static struct attribute_group cssdev_cm_attr_group = {
    964	.attrs = cssdev_cm_attrs,
    965	.is_visible = cm_enable_mode,
    966};
    967
    968static const struct attribute_group *cssdev_attr_groups[] = {
    969	&cssdev_attr_group,
    970	&cssdev_cm_attr_group,
    971	NULL,
    972};
    973
    974static int __init setup_css(int nr)
    975{
    976	struct channel_subsystem *css;
    977	int ret;
    978
    979	css = kzalloc(sizeof(*css), GFP_KERNEL);
    980	if (!css)
    981		return -ENOMEM;
    982
    983	channel_subsystems[nr] = css;
    984	dev_set_name(&css->device, "css%x", nr);
    985	css->device.groups = cssdev_attr_groups;
    986	css->device.release = channel_subsystem_release;
    987	/*
    988	 * We currently allocate notifier bits with this (using
    989	 * css->device as the device argument with the DMA API)
    990	 * and are fine with 64 bit addresses.
    991	 */
    992	ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
    993	if (ret) {
    994		kfree(css);
    995		goto out_err;
    996	}
    997
    998	mutex_init(&css->mutex);
    999	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
   1000	if (!ret) {
   1001		css->id_valid = true;
   1002		pr_info("Partition identifier %01x.%01x\n", css->cssid,
   1003			css->iid);
   1004	}
   1005	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));
   1006
   1007	ret = device_register(&css->device);
   1008	if (ret) {
   1009		put_device(&css->device);
   1010		goto out_err;
   1011	}
   1012
   1013	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
   1014					 GFP_KERNEL);
   1015	if (!css->pseudo_subchannel) {
   1016		device_unregister(&css->device);
   1017		ret = -ENOMEM;
   1018		goto out_err;
   1019	}
   1020
   1021	css->pseudo_subchannel->dev.parent = &css->device;
   1022	css->pseudo_subchannel->dev.release = css_subchannel_release;
   1023	mutex_init(&css->pseudo_subchannel->reg_mutex);
   1024	ret = css_sch_create_locks(css->pseudo_subchannel);
   1025	if (ret) {
   1026		kfree(css->pseudo_subchannel);
   1027		device_unregister(&css->device);
   1028		goto out_err;
   1029	}
   1030
   1031	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
   1032	ret = device_register(&css->pseudo_subchannel->dev);
   1033	if (ret) {
   1034		put_device(&css->pseudo_subchannel->dev);
   1035		device_unregister(&css->device);
   1036		goto out_err;
   1037	}
   1038
   1039	return ret;
   1040out_err:
   1041	channel_subsystems[nr] = NULL;
   1042	return ret;
   1043}
   1044
   1045static int css_reboot_event(struct notifier_block *this,
   1046			    unsigned long event,
   1047			    void *ptr)
   1048{
   1049	struct channel_subsystem *css;
   1050	int ret;
   1051
   1052	ret = NOTIFY_DONE;
   1053	for_each_css(css) {
   1054		mutex_lock(&css->mutex);
   1055		if (css->cm_enabled)
   1056			if (chsc_secm(css, 0))
   1057				ret = NOTIFY_BAD;
   1058		mutex_unlock(&css->mutex);
   1059	}
   1060
   1061	return ret;
   1062}
   1063
   1064static struct notifier_block css_reboot_notifier = {
   1065	.notifier_call = css_reboot_event,
   1066};
   1067
   1068#define  CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
   1069static struct gen_pool *cio_dma_pool;
   1070
   1071/* Currently cio supports only a single css */
   1072struct device *cio_get_dma_css_dev(void)
   1073{
   1074	return &channel_subsystems[0]->device;
   1075}
   1076
   1077struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
   1078{
   1079	struct gen_pool *gp_dma;
   1080	void *cpu_addr;
   1081	dma_addr_t dma_addr;
   1082	int i;
   1083
   1084	gp_dma = gen_pool_create(3, -1);
   1085	if (!gp_dma)
   1086		return NULL;
   1087	for (i = 0; i < nr_pages; ++i) {
   1088		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
   1089					      CIO_DMA_GFP);
   1090		if (!cpu_addr)
   1091			return gp_dma;
   1092		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
   1093				  dma_addr, PAGE_SIZE, -1);
   1094	}
   1095	return gp_dma;
   1096}
   1097
   1098static void __gp_dma_free_dma(struct gen_pool *pool,
   1099			      struct gen_pool_chunk *chunk, void *data)
   1100{
   1101	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
   1102
   1103	dma_free_coherent((struct device *) data, chunk_size,
   1104			 (void *) chunk->start_addr,
   1105			 (dma_addr_t) chunk->phys_addr);
   1106}
   1107
   1108void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
   1109{
   1110	if (!gp_dma)
   1111		return;
   1112	/* this is quite ugly but no better idea */
   1113	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
   1114	gen_pool_destroy(gp_dma);
   1115}
   1116
   1117static int cio_dma_pool_init(void)
   1118{
   1119	/* No need to free up the resources: compiled in */
   1120	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
   1121	if (!cio_dma_pool)
   1122		return -ENOMEM;
   1123	return 0;
   1124}
   1125
   1126void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
   1127			size_t size)
   1128{
   1129	dma_addr_t dma_addr;
   1130	unsigned long addr;
   1131	size_t chunk_size;
   1132
   1133	if (!gp_dma)
   1134		return NULL;
   1135	addr = gen_pool_alloc(gp_dma, size);
   1136	while (!addr) {
   1137		chunk_size = round_up(size, PAGE_SIZE);
   1138		addr = (unsigned long) dma_alloc_coherent(dma_dev,
   1139					 chunk_size, &dma_addr, CIO_DMA_GFP);
   1140		if (!addr)
   1141			return NULL;
   1142		gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
   1143		addr = gen_pool_alloc(gp_dma, size);
   1144	}
   1145	return (void *) addr;
   1146}
   1147
   1148void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
   1149{
   1150	if (!cpu_addr)
   1151		return;
   1152	memset(cpu_addr, 0, size);
   1153	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
   1154}
   1155
   1156/*
   1157 * Allocate dma memory from the css global pool. Intended for memory not
   1158 * specific to any single device within the css. The allocated memory
   1159 * is not guaranteed to be 31-bit addressable.
   1160 *
   1161 * Caution: Not suitable for early stuff like console.
   1162 */
   1163void *cio_dma_zalloc(size_t size)
   1164{
   1165	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
   1166}
   1167
   1168void cio_dma_free(void *cpu_addr, size_t size)
   1169{
   1170	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
   1171}
   1172
   1173/*
   1174 * Now that the driver core is running, we can setup our channel subsystem.
   1175 * The struct subchannel's are created during probing.
   1176 */
   1177static int __init css_bus_init(void)
   1178{
   1179	int ret, i;
   1180
   1181	ret = chsc_init();
   1182	if (ret)
   1183		return ret;
   1184
   1185	chsc_determine_css_characteristics();
   1186	/* Try to enable MSS. */
   1187	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
   1188	if (ret)
   1189		max_ssid = 0;
   1190	else /* Success. */
   1191		max_ssid = __MAX_SSID;
   1192
   1193	ret = slow_subchannel_init();
   1194	if (ret)
   1195		goto out;
   1196
   1197	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
   1198	if (ret)
   1199		goto out;
   1200
   1201	if ((ret = bus_register(&css_bus_type)))
   1202		goto out;
   1203
   1204	/* Setup css structure. */
   1205	for (i = 0; i <= MAX_CSS_IDX; i++) {
   1206		ret = setup_css(i);
   1207		if (ret)
   1208			goto out_unregister;
   1209	}
   1210	ret = register_reboot_notifier(&css_reboot_notifier);
   1211	if (ret)
   1212		goto out_unregister;
   1213	ret = cio_dma_pool_init();
   1214	if (ret)
   1215		goto out_unregister_rn;
   1216	airq_init();
   1217	css_init_done = 1;
   1218
   1219	/* Enable default isc for I/O subchannels. */
   1220	isc_register(IO_SCH_ISC);
   1221
   1222	return 0;
   1223out_unregister_rn:
   1224	unregister_reboot_notifier(&css_reboot_notifier);
   1225out_unregister:
   1226	while (i-- > 0) {
   1227		struct channel_subsystem *css = channel_subsystems[i];
   1228		device_unregister(&css->pseudo_subchannel->dev);
   1229		device_unregister(&css->device);
   1230	}
   1231	bus_unregister(&css_bus_type);
   1232out:
   1233	crw_unregister_handler(CRW_RSC_SCH);
   1234	idset_free(slow_subchannel_set);
   1235	chsc_init_cleanup();
   1236	pr_alert("The CSS device driver initialization failed with "
   1237		 "errno=%d\n", ret);
   1238	return ret;
   1239}
   1240
   1241static void __init css_bus_cleanup(void)
   1242{
   1243	struct channel_subsystem *css;
   1244
   1245	for_each_css(css) {
   1246		device_unregister(&css->pseudo_subchannel->dev);
   1247		device_unregister(&css->device);
   1248	}
   1249	bus_unregister(&css_bus_type);
   1250	crw_unregister_handler(CRW_RSC_SCH);
   1251	idset_free(slow_subchannel_set);
   1252	chsc_init_cleanup();
   1253	isc_unregister(IO_SCH_ISC);
   1254}
   1255
   1256static int __init channel_subsystem_init(void)
   1257{
   1258	int ret;
   1259
   1260	ret = css_bus_init();
   1261	if (ret)
   1262		return ret;
   1263	cio_work_q = create_singlethread_workqueue("cio");
   1264	if (!cio_work_q) {
   1265		ret = -ENOMEM;
   1266		goto out_bus;
   1267	}
   1268	ret = io_subchannel_init();
   1269	if (ret)
   1270		goto out_wq;
   1271
   1272	/* Register subchannels which are already in use. */
   1273	cio_register_early_subchannels();
   1274	/* Start initial subchannel evaluation. */
   1275	css_schedule_eval_all();
   1276
   1277	return ret;
   1278out_wq:
   1279	destroy_workqueue(cio_work_q);
   1280out_bus:
   1281	css_bus_cleanup();
   1282	return ret;
   1283}
   1284subsys_initcall(channel_subsystem_init);
   1285
   1286static int css_settle(struct device_driver *drv, void *unused)
   1287{
   1288	struct css_driver *cssdrv = to_cssdriver(drv);
   1289
   1290	if (cssdrv->settle)
   1291		return cssdrv->settle();
   1292	return 0;
   1293}
   1294
   1295int css_complete_work(void)
   1296{
   1297	int ret;
   1298
   1299	/* Wait for the evaluation of subchannels to finish. */
   1300	ret = wait_event_interruptible(css_eval_wq,
   1301				       atomic_read(&css_eval_scheduled) == 0);
   1302	if (ret)
   1303		return -EINTR;
   1304	flush_workqueue(cio_work_q);
   1305	/* Wait for the subchannel type specific initialization to finish */
   1306	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
   1307}
   1308
   1309
   1310/*
   1311 * Wait for the initialization of devices to finish, to make sure we are
   1312 * done with our setup if the search for the root device starts.
   1313 */
   1314static int __init channel_subsystem_init_sync(void)
   1315{
   1316	css_complete_work();
   1317	return 0;
   1318}
   1319subsys_initcall_sync(channel_subsystem_init_sync);
   1320
   1321#ifdef CONFIG_PROC_FS
   1322static ssize_t cio_settle_write(struct file *file, const char __user *buf,
   1323				size_t count, loff_t *ppos)
   1324{
   1325	int ret;
   1326
   1327	/* Handle pending CRW's. */
   1328	crw_wait_for_channel_report();
   1329	ret = css_complete_work();
   1330
   1331	return ret ? ret : count;
   1332}
   1333
   1334static const struct proc_ops cio_settle_proc_ops = {
   1335	.proc_open	= nonseekable_open,
   1336	.proc_write	= cio_settle_write,
   1337	.proc_lseek	= no_llseek,
   1338};
   1339
   1340static int __init cio_settle_init(void)
   1341{
   1342	struct proc_dir_entry *entry;
   1343
   1344	entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
   1345	if (!entry)
   1346		return -ENOMEM;
   1347	return 0;
   1348}
   1349device_initcall(cio_settle_init);
   1350#endif /*CONFIG_PROC_FS*/
   1351
   1352int sch_is_pseudo_sch(struct subchannel *sch)
   1353{
   1354	if (!sch->dev.parent)
   1355		return 0;
   1356	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
   1357}
   1358
   1359static int css_bus_match(struct device *dev, struct device_driver *drv)
   1360{
   1361	struct subchannel *sch = to_subchannel(dev);
   1362	struct css_driver *driver = to_cssdriver(drv);
   1363	struct css_device_id *id;
   1364
   1365	/* When driver_override is set, only bind to the matching driver */
   1366	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
   1367		return 0;
   1368
   1369	for (id = driver->subchannel_type; id->match_flags; id++) {
   1370		if (sch->st == id->type)
   1371			return 1;
   1372	}
   1373
   1374	return 0;
   1375}
   1376
   1377static int css_probe(struct device *dev)
   1378{
   1379	struct subchannel *sch;
   1380	int ret;
   1381
   1382	sch = to_subchannel(dev);
   1383	sch->driver = to_cssdriver(dev->driver);
   1384	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
   1385	if (ret)
   1386		sch->driver = NULL;
   1387	return ret;
   1388}
   1389
   1390static void css_remove(struct device *dev)
   1391{
   1392	struct subchannel *sch;
   1393
   1394	sch = to_subchannel(dev);
   1395	if (sch->driver->remove)
   1396		sch->driver->remove(sch);
   1397	sch->driver = NULL;
   1398}
   1399
   1400static void css_shutdown(struct device *dev)
   1401{
   1402	struct subchannel *sch;
   1403
   1404	sch = to_subchannel(dev);
   1405	if (sch->driver && sch->driver->shutdown)
   1406		sch->driver->shutdown(sch);
   1407}
   1408
   1409static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
   1410{
   1411	struct subchannel *sch = to_subchannel(dev);
   1412	int ret;
   1413
   1414	ret = add_uevent_var(env, "ST=%01X", sch->st);
   1415	if (ret)
   1416		return ret;
   1417	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
   1418	return ret;
   1419}
   1420
   1421static struct bus_type css_bus_type = {
   1422	.name     = "css",
   1423	.match    = css_bus_match,
   1424	.probe    = css_probe,
   1425	.remove   = css_remove,
   1426	.shutdown = css_shutdown,
   1427	.uevent   = css_uevent,
   1428};
   1429
   1430/**
   1431 * css_driver_register - register a css driver
   1432 * @cdrv: css driver to register
   1433 *
   1434 * This is mainly a wrapper around driver_register that sets name
   1435 * and bus_type in the embedded struct device_driver correctly.
   1436 */
   1437int css_driver_register(struct css_driver *cdrv)
   1438{
   1439	cdrv->drv.bus = &css_bus_type;
   1440	return driver_register(&cdrv->drv);
   1441}
   1442EXPORT_SYMBOL_GPL(css_driver_register);
   1443
   1444/**
   1445 * css_driver_unregister - unregister a css driver
   1446 * @cdrv: css driver to unregister
   1447 *
   1448 * This is a wrapper around driver_unregister.
   1449 */
   1450void css_driver_unregister(struct css_driver *cdrv)
   1451{
   1452	driver_unregister(&cdrv->drv);
   1453}
   1454EXPORT_SYMBOL_GPL(css_driver_unregister);
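
The kernel-doc above describes css_driver_register() as a thin wrapper around driver_register() that hooks a driver onto the css bus. As a minimal sketch (not part of css.c), a subchannel driver for this bus would typically look like the following; the driver name "example_subchannel" and the probe callback are illustrative assumptions, while the struct css_driver and struct css_device_id fields follow their definitions in css.h of this tree.

/*
 * Hypothetical example module (not part of css.c): binds to I/O
 * subchannels on the css bus defined in this file.
 */
static struct css_device_id example_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};

/* Called by css_probe() once css_bus_match() has matched the type. */
static int example_probe(struct subchannel *sch)
{
	dev_info(&sch->dev, "example driver bound\n");
	return 0;
}

static struct css_driver example_css_driver = {
	.drv = {
		.name  = "example_subchannel",
		.owner = THIS_MODULE,
	},
	.subchannel_type = example_subchannel_ids,
	.probe           = example_probe,
};

static int __init example_init(void)
{
	return css_driver_register(&example_css_driver);
}

static void __exit example_exit(void)
{
	css_driver_unregister(&example_css_driver);
}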