cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack.
git clone https://git.sinitax.com/sinitax/cachepc-linux

bus.c (31945B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/libnvdimm.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fcntl.h>
#include <linux/async.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
#include "pfn.h"

int nvdimm_major;
static int nvdimm_bus_major;
struct class *nd_class;
static DEFINE_IDA(nd_ida);

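/*
 * Map an nd bus device to the ND_DEVICE_* type used to build its
 * MODALIAS: DIMMs, pmem regions, and dax devices have fixed types,
 * while namespaces inherit their type from the parent region.
 */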
static int to_nd_device_type(struct device *dev)
{
	if (is_nvdimm(dev))
		return ND_DEVICE_DIMM;
	else if (is_memory(dev))
		return ND_DEVICE_REGION_PMEM;
	else if (is_nd_dax(dev))
		return ND_DEVICE_DAX_PMEM;
	else if (is_nd_region(dev->parent))
		return nd_region_to_nstype(to_nd_region(dev->parent));

	return 0;
}

static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
			to_nd_device_type(dev));
}

static struct module *to_bus_provider(struct device *dev)
{
	/* pin bus providers while regions are enabled */
	if (is_nd_region(dev)) {
		struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

		return nvdimm_bus->nd_desc->module;
	}
	return NULL;
}

static void nvdimm_bus_probe_start(struct nvdimm_bus *nvdimm_bus)
{
	nvdimm_bus_lock(&nvdimm_bus->dev);
	nvdimm_bus->probe_active++;
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
{
	nvdimm_bus_lock(&nvdimm_bus->dev);
	if (--nvdimm_bus->probe_active == 0)
		wake_up(&nvdimm_bus->wait);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

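/*
 * Driver probe runs with the provider module pinned (so the bus
 * descriptor cannot go away underneath an enabled region) and inside a
 * probe_active window that wait_nvdimm_bus_probe_idle() can wait on.
 */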
static int nvdimm_bus_probe(struct device *dev)
{
	struct nd_device_driver *nd_drv = to_nd_device_driver(dev->driver);
	struct module *provider = to_bus_provider(dev);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	int rc;

	if (!try_module_get(provider))
		return -ENXIO;

	dev_dbg(&nvdimm_bus->dev, "START: %s.probe(%s)\n",
			dev->driver->name, dev_name(dev));

	nvdimm_bus_probe_start(nvdimm_bus);
	rc = nd_drv->probe(dev);
	if ((rc == 0 || rc == -EOPNOTSUPP) &&
			dev->parent && is_nd_region(dev->parent))
		nd_region_advance_seeds(to_nd_region(dev->parent), dev);
	nvdimm_bus_probe_end(nvdimm_bus);

	dev_dbg(&nvdimm_bus->dev, "END: %s.probe(%s) = %d\n", dev->driver->name,
			dev_name(dev), rc);

	if (rc != 0)
		module_put(provider);
	return rc;
}

static void nvdimm_bus_remove(struct device *dev)
{
	struct nd_device_driver *nd_drv = to_nd_device_driver(dev->driver);
	struct module *provider = to_bus_provider(dev);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (nd_drv->remove)
		nd_drv->remove(dev);

	dev_dbg(&nvdimm_bus->dev, "%s.remove(%s)\n", dev->driver->name,
			dev_name(dev));
	module_put(provider);
}

static void nvdimm_bus_shutdown(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nd_device_driver *nd_drv = NULL;

	if (dev->driver)
		nd_drv = to_nd_device_driver(dev->driver);

	if (nd_drv && nd_drv->shutdown) {
		nd_drv->shutdown(dev);
		dev_dbg(&nvdimm_bus->dev, "%s.shutdown(%s)\n",
				dev->driver->name, dev_name(dev));
	}
}

void nd_device_notify(struct device *dev, enum nvdimm_event event)
{
	device_lock(dev);
	if (dev->driver) {
		struct nd_device_driver *nd_drv;

		nd_drv = to_nd_device_driver(dev->driver);
		if (nd_drv->notify)
			nd_drv->notify(dev, event);
	}
	device_unlock(dev);
}
EXPORT_SYMBOL(nd_device_notify);

void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);

	if (!nvdimm_bus)
		return;

	/* caller is responsible for holding a reference on the device */
	nd_device_notify(&nd_region->dev, event);
}
EXPORT_SYMBOL_GPL(nvdimm_region_notify);

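/*
 * Poison clearing fans out in two steps: badrange_forget() trims the
 * bus-level badrange list, then each region whose span contains the
 * cleared range updates its badblocks and pokes the sysfs "badblocks"
 * state so userspace sees the change.
 */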
struct clear_badblocks_context {
	resource_size_t phys, cleared;
};

static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
{
	struct clear_badblocks_context *ctx = data;
	struct nd_region *nd_region;
	resource_size_t ndr_end;
	sector_t sector;

	/* make sure device is a region */
	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;

	/* make sure we are in the region */
	if (ctx->phys < nd_region->ndr_start ||
	    (ctx->phys + ctx->cleared - 1) > ndr_end)
		return 0;

	sector = (ctx->phys - nd_region->ndr_start) / 512;
	badblocks_clear(&nd_region->bb, sector, ctx->cleared / 512);

	if (nd_region->bb_state)
		sysfs_notify_dirent(nd_region->bb_state);

	return 0;
}

static void nvdimm_clear_badblocks_regions(struct nvdimm_bus *nvdimm_bus,
		phys_addr_t phys, u64 cleared)
{
	struct clear_badblocks_context ctx = {
		.phys = phys,
		.cleared = cleared,
	};

	device_for_each_child(&nvdimm_bus->dev, &ctx,
			nvdimm_clear_badblocks_region);
}

static void nvdimm_account_cleared_poison(struct nvdimm_bus *nvdimm_bus,
		phys_addr_t phys, u64 cleared)
{
	if (cleared > 0)
		badrange_forget(&nvdimm_bus->badrange, phys, cleared);

	if (cleared > 0 && cleared / 512)
		nvdimm_clear_badblocks_regions(nvdimm_bus, phys, cleared);
}

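/*
 * Clear poison (media errors) in a physical range: query ARS
 * capabilities for the clear-error unit, verify the request is
 * aligned to it, issue ND_CMD_CLEAR_ERROR, then reconcile the
 * badrange/badblocks bookkeeping with what firmware actually cleared.
 */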
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
		unsigned int len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc;
	struct nd_cmd_clear_error clear_err;
	struct nd_cmd_ars_cap ars_cap;
	u32 clear_err_unit, mask;
	unsigned int noio_flag;
	int cmd_rc, rc;

	if (!nvdimm_bus)
		return -ENXIO;

	nd_desc = nvdimm_bus->nd_desc;
	/*
	 * if ndctl does not exist, it's PMEM_LEGACY and
	 * we want to just pretend everything is handled.
	 */
	if (!nd_desc->ndctl)
		return len;

	memset(&ars_cap, 0, sizeof(ars_cap));
	ars_cap.address = phys;
	ars_cap.length = len;
	noio_flag = memalloc_noio_save();
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, &ars_cap,
			sizeof(ars_cap), &cmd_rc);
	memalloc_noio_restore(noio_flag);
	if (rc < 0)
		return rc;
	if (cmd_rc < 0)
		return cmd_rc;
	clear_err_unit = ars_cap.clear_err_unit;
	if (!clear_err_unit || !is_power_of_2(clear_err_unit))
		return -ENXIO;

	mask = clear_err_unit - 1;
	if ((phys | len) & mask)
		return -ENXIO;
	memset(&clear_err, 0, sizeof(clear_err));
	clear_err.address = phys;
	clear_err.length = len;
	noio_flag = memalloc_noio_save();
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CLEAR_ERROR, &clear_err,
			sizeof(clear_err), &cmd_rc);
	memalloc_noio_restore(noio_flag);
	if (rc < 0)
		return rc;
	if (cmd_rc < 0)
		return cmd_rc;

	nvdimm_account_cleared_poison(nvdimm_bus, phys, clear_err.cleared);

	return clear_err.cleared;
}
EXPORT_SYMBOL_GPL(nvdimm_clear_poison);

static int nvdimm_bus_match(struct device *dev, struct device_driver *drv);

static struct bus_type nvdimm_bus_type = {
	.name = "nd",
	.uevent = nvdimm_bus_uevent,
	.match = nvdimm_bus_match,
	.probe = nvdimm_bus_probe,
	.remove = nvdimm_bus_remove,
	.shutdown = nvdimm_bus_shutdown,
};

static void nvdimm_bus_release(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus;

	nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
	ida_simple_remove(&nd_ida, nvdimm_bus->id);
	kfree(nvdimm_bus);
}

static const struct device_type nvdimm_bus_dev_type = {
	.release = nvdimm_bus_release,
	.groups = nvdimm_bus_attribute_groups,
};

bool is_nvdimm_bus(struct device *dev)
{
	return dev->type == &nvdimm_bus_dev_type;
}

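/*
 * Every device on the nd bus hangs off an nvdimm_bus root device, so
 * the owning bus can always be found by walking dev->parent until
 * is_nvdimm_bus() matches.
 */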
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
{
	struct device *dev;

	for (dev = nd_dev; dev; dev = dev->parent)
		if (is_nvdimm_bus(dev))
			break;
	dev_WARN_ONCE(nd_dev, !dev, "invalid dev, not on nd bus\n");
	if (dev)
		return to_nvdimm_bus(dev);
	return NULL;
}

struct nvdimm_bus *to_nvdimm_bus(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus;

	nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
	WARN_ON(!is_nvdimm_bus(dev));
	return nvdimm_bus;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus);

struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm)
{
	return to_nvdimm_bus(nvdimm->dev.parent);
}
EXPORT_SYMBOL_GPL(nvdimm_to_bus);

static struct lock_class_key nvdimm_bus_key;

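/*
 * Allocate and register the root device for a new nd bus: take an id
 * from nd_ida, initialize the badrange and mapping bookkeeping, and
 * add the "ndbus%d" device. On any failure after device_initialize()
 * the put_device() path releases the id and frees the allocation via
 * nvdimm_bus_release().
 */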
struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
		struct nvdimm_bus_descriptor *nd_desc)
{
	struct nvdimm_bus *nvdimm_bus;
	int rc;

	nvdimm_bus = kzalloc(sizeof(*nvdimm_bus), GFP_KERNEL);
	if (!nvdimm_bus)
		return NULL;
	INIT_LIST_HEAD(&nvdimm_bus->list);
	INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
	init_waitqueue_head(&nvdimm_bus->wait);
	nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
	if (nvdimm_bus->id < 0) {
		kfree(nvdimm_bus);
		return NULL;
	}
	mutex_init(&nvdimm_bus->reconfig_mutex);
	badrange_init(&nvdimm_bus->badrange);
	nvdimm_bus->nd_desc = nd_desc;
	nvdimm_bus->dev.parent = parent;
	nvdimm_bus->dev.type = &nvdimm_bus_dev_type;
	nvdimm_bus->dev.groups = nd_desc->attr_groups;
	nvdimm_bus->dev.bus = &nvdimm_bus_type;
	nvdimm_bus->dev.of_node = nd_desc->of_node;
	device_initialize(&nvdimm_bus->dev);
	lockdep_set_class(&nvdimm_bus->dev.mutex, &nvdimm_bus_key);
	device_set_pm_not_required(&nvdimm_bus->dev);
	rc = dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
	if (rc)
		goto err;

	rc = device_add(&nvdimm_bus->dev);
	if (rc) {
		dev_dbg(&nvdimm_bus->dev, "registration failed: %d\n", rc);
		goto err;
	}

	return nvdimm_bus;
 err:
	put_device(&nvdimm_bus->dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_register);

void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
{
	if (!nvdimm_bus)
		return;
	device_unregister(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_unregister);

static int child_unregister(struct device *dev, void *data)
{
	/*
	 * the singular ndctl class device per bus needs to be
	 * "device_destroy"ed, so skip it here
	 *
	 * i.e. remove classless children
	 */
	if (dev->class)
		return 0;

	if (is_nvdimm(dev))
		nvdimm_delete(to_nvdimm(dev));
	else
		nd_device_unregister(dev, ND_SYNC);

	return 0;
}

static void free_badrange_list(struct list_head *badrange_list)
{
	struct badrange_entry *bre, *next;

	list_for_each_entry_safe(bre, next, badrange_list, list) {
		list_del(&bre->list);
		kfree(bre);
	}
	list_del_init(badrange_list);
}

static void nd_bus_remove(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	mutex_lock(&nvdimm_bus_list_mutex);
	list_del_init(&nvdimm_bus->list);
	mutex_unlock(&nvdimm_bus_list_mutex);

	wait_event(nvdimm_bus->wait,
			atomic_read(&nvdimm_bus->ioctl_active) == 0);

	nd_synchronize();
	device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);

	spin_lock(&nvdimm_bus->badrange.lock);
	free_badrange_list(&nvdimm_bus->badrange.list);
	spin_unlock(&nvdimm_bus->badrange.lock);

	nvdimm_bus_destroy_ndctl(nvdimm_bus);
}

static int nd_bus_probe(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	int rc;

	rc = nvdimm_bus_create_ndctl(nvdimm_bus);
	if (rc)
		return rc;

	mutex_lock(&nvdimm_bus_list_mutex);
	list_add_tail(&nvdimm_bus->list, &nvdimm_bus_list);
	mutex_unlock(&nvdimm_bus_list_mutex);

	/* enable bus provider attributes to look up their local context */
	dev_set_drvdata(dev, nvdimm_bus->nd_desc);

	return 0;
}

static struct nd_device_driver nd_bus_driver = {
	.probe = nd_bus_probe,
	.remove = nd_bus_remove,
	.drv = {
		.name = "nd_bus",
		.suppress_bind_attrs = true,
		.bus = &nvdimm_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static int nvdimm_bus_match(struct device *dev, struct device_driver *drv)
{
	struct nd_device_driver *nd_drv = to_nd_device_driver(drv);

	if (is_nvdimm_bus(dev) && nd_drv == &nd_bus_driver)
		return true;

	return !!test_bit(to_nd_device_type(dev), &nd_drv->type);
}

static ASYNC_DOMAIN_EXCLUSIVE(nd_async_domain);

void nd_synchronize(void)
{
	async_synchronize_full_domain(&nd_async_domain);
}
EXPORT_SYMBOL_GPL(nd_synchronize);

static void nd_async_device_register(void *d, async_cookie_t cookie)
{
	struct device *dev = d;

	if (device_add(dev) != 0) {
		dev_err(dev, "%s: failed\n", __func__);
		put_device(dev);
	}
	put_device(dev);
	if (dev->parent)
		put_device(dev->parent);
}

static void nd_async_device_unregister(void *d, async_cookie_t cookie)
{
	struct device *dev = d;

	/* flush bus operations before delete */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);

	device_unregister(dev);
	put_device(dev);
}

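/*
 * Device registration is deferred to the exclusive nd_async_domain:
 * nd_device_register() only primes the device (bus type, NUMA node,
 * references on the device and its parent) and queues
 * nd_async_device_register() to perform the actual device_add().
 */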
void nd_device_register(struct device *dev)
{
	if (!dev)
		return;

	/*
	 * Ensure that region devices always have their NUMA node set as
	 * early as possible. This way we are able to make certain that
	 * any memory associated with the creation and the creation
	 * itself of the region is associated with the correct node.
	 */
	if (is_nd_region(dev))
		set_dev_node(dev, to_nd_region(dev)->numa_node);

	dev->bus = &nvdimm_bus_type;
	device_set_pm_not_required(dev);
	if (dev->parent) {
		get_device(dev->parent);
		if (dev_to_node(dev) == NUMA_NO_NODE)
			set_dev_node(dev, dev_to_node(dev->parent));
	}
	get_device(dev);

	async_schedule_dev_domain(nd_async_device_register, dev,
				  &nd_async_domain);
}
EXPORT_SYMBOL(nd_device_register);

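/*
 * Unregistration uses kill_device() to claim the right to tear the
 * device down exactly once, whether the request arrives on the async
 * path (device lock already held) or the sync path (parent-driven
 * teardown).
 */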
void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
{
	bool killed;

	switch (mode) {
	case ND_ASYNC:
		/*
		 * In the async case this is being triggered with the
		 * device lock held and the unregistration work needs to
		 * be moved out of line iff this thread has won the
		 * race to schedule the deletion.
		 */
		if (!kill_device(dev))
			return;

		get_device(dev);
		async_schedule_domain(nd_async_device_unregister, dev,
				&nd_async_domain);
		break;
	case ND_SYNC:
		/*
		 * In the sync case the device is being unregistered due
		 * to a state change of the parent. Claim the kill state
		 * to synchronize against other unregistration requests,
		 * or otherwise let the async path handle it if the
		 * unregistration was already queued.
		 */
		device_lock(dev);
		killed = kill_device(dev);
		device_unlock(dev);

		if (!killed)
			return;

		nd_synchronize();
		device_unregister(dev);
		break;
	}
}
EXPORT_SYMBOL(nd_device_unregister);

/**
 * __nd_driver_register() - register a region or a namespace driver
 * @nd_drv: driver to register
 * @owner: automatically set by nd_driver_register() macro
 * @mod_name: automatically set by nd_driver_register() macro
 */
int __nd_driver_register(struct nd_device_driver *nd_drv, struct module *owner,
		const char *mod_name)
{
	struct device_driver *drv = &nd_drv->drv;

	if (!nd_drv->type) {
		pr_debug("driver type bitmask not set (%ps)\n",
				__builtin_return_address(0));
		return -EINVAL;
	}

	if (!nd_drv->probe) {
		pr_debug("%s ->probe() must be specified\n", mod_name);
		return -EINVAL;
	}

	drv->bus = &nvdimm_bus_type;
	drv->owner = owner;
	drv->mod_name = mod_name;

	return driver_register(drv);
}
EXPORT_SYMBOL(__nd_driver_register);

void nvdimm_check_and_set_ro(struct gendisk *disk)
{
	struct device *dev = disk_to_dev(disk)->parent;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int disk_ro = get_disk_ro(disk);

	/* catch the disk up with the region ro state */
	if (disk_ro == nd_region->ro)
		return;

	dev_info(dev, "%s read-%s, marking %s read-%s\n",
		 dev_name(&nd_region->dev), nd_region->ro ? "only" : "write",
		 disk->disk_name, nd_region->ro ? "only" : "write");
	set_disk_ro(disk, nd_region->ro);
}
EXPORT_SYMBOL(nvdimm_check_and_set_ro);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, ND_DEVICE_MODALIAS_FMT "\n",
			to_nd_device_type(dev));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static struct attribute *nd_device_attributes[] = {
	&dev_attr_modalias.attr,
	&dev_attr_devtype.attr,
	NULL,
};

/*
 * nd_device_attribute_group - generic attributes for all devices on an nd bus
 */
const struct attribute_group nd_device_attribute_group = {
	.attrs = nd_device_attributes,
};

static ssize_t numa_node_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static int nvdimm_dev_to_target_node(struct device *dev)
{
	struct device *parent = dev->parent;
	struct nd_region *nd_region = NULL;

	if (is_nd_region(dev))
		nd_region = to_nd_region(dev);
	else if (parent && is_nd_region(parent))
		nd_region = to_nd_region(parent);

	if (!nd_region)
		return NUMA_NO_NODE;
	return nd_region->target_node;
}

static ssize_t target_node_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", nvdimm_dev_to_target_node(dev));
}
static DEVICE_ATTR_RO(target_node);

static struct attribute *nd_numa_attributes[] = {
	&dev_attr_numa_node.attr,
	&dev_attr_target_node.attr,
	NULL,
};

static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a,
		int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);

	if (!IS_ENABLED(CONFIG_NUMA))
		return 0;

	if (a == &dev_attr_target_node.attr &&
			nvdimm_dev_to_target_node(dev) == NUMA_NO_NODE)
		return 0;

	return a->mode;
}

/*
 * nd_numa_attribute_group - NUMA attributes for all devices on an nd bus
 */
const struct attribute_group nd_numa_attribute_group = {
	.attrs = nd_numa_attributes,
	.is_visible = nd_numa_attr_visible,
};

static void ndctl_release(struct device *dev)
{
	kfree(dev);
}

static struct lock_class_key nvdimm_ndctl_key;

int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
{
	dev_t devt = MKDEV(nvdimm_bus_major, nvdimm_bus->id);
	struct device *dev;
	int rc;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &nvdimm_ndctl_key);
	device_set_pm_not_required(dev);
	dev->class = nd_class;
	dev->parent = &nvdimm_bus->dev;
	dev->devt = devt;
	dev->release = ndctl_release;
	rc = dev_set_name(dev, "ndctl%d", nvdimm_bus->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc) {
		dev_dbg(&nvdimm_bus->dev, "failed to register ndctl%d: %d\n",
				nvdimm_bus->id, rc);
		goto err;
	}
	return 0;

err:
	put_device(dev);
	return rc;
}

void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus)
{
	device_destroy(nd_class, MKDEV(nvdimm_bus_major, nvdimm_bus->id));
}

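/*
 * Command descriptor tables: for each DIMM or bus command they list
 * the number and size of the fixed input/output fields. A size of
 * UINT_MAX marks a variable-length field whose actual size is
 * derived from earlier fields by nd_cmd_in_size()/nd_cmd_out_size().
 */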
static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
	[ND_CMD_IMPLEMENTED] = { },
	[ND_CMD_SMART] = {
		.out_num = 2,
		.out_sizes = { 4, 128, },
	},
	[ND_CMD_SMART_THRESHOLD] = {
		.out_num = 2,
		.out_sizes = { 4, 8, },
	},
	[ND_CMD_DIMM_FLAGS] = {
		.out_num = 2,
		.out_sizes = { 4, 4 },
	},
	[ND_CMD_GET_CONFIG_SIZE] = {
		.out_num = 3,
		.out_sizes = { 4, 4, 4, },
	},
	[ND_CMD_GET_CONFIG_DATA] = {
		.in_num = 2,
		.in_sizes = { 4, 4, },
		.out_num = 2,
		.out_sizes = { 4, UINT_MAX, },
	},
	[ND_CMD_SET_CONFIG_DATA] = {
		.in_num = 3,
		.in_sizes = { 4, 4, UINT_MAX, },
		.out_num = 1,
		.out_sizes = { 4, },
	},
	[ND_CMD_VENDOR] = {
		.in_num = 3,
		.in_sizes = { 4, 4, UINT_MAX, },
		.out_num = 3,
		.out_sizes = { 4, 4, UINT_MAX, },
	},
	[ND_CMD_CALL] = {
		.in_num = 2,
		.in_sizes = { sizeof(struct nd_cmd_pkg), UINT_MAX, },
		.out_num = 1,
		.out_sizes = { UINT_MAX, },
	},
};

const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd)
{
	if (cmd < ARRAY_SIZE(__nd_cmd_dimm_descs))
		return &__nd_cmd_dimm_descs[cmd];
	return NULL;
}
EXPORT_SYMBOL_GPL(nd_cmd_dimm_desc);

static const struct nd_cmd_desc __nd_cmd_bus_descs[] = {
	[ND_CMD_IMPLEMENTED] = { },
	[ND_CMD_ARS_CAP] = {
		.in_num = 2,
		.in_sizes = { 8, 8, },
		.out_num = 4,
		.out_sizes = { 4, 4, 4, 4, },
	},
	[ND_CMD_ARS_START] = {
		.in_num = 5,
		.in_sizes = { 8, 8, 2, 1, 5, },
		.out_num = 2,
		.out_sizes = { 4, 4, },
	},
	[ND_CMD_ARS_STATUS] = {
		.out_num = 3,
		.out_sizes = { 4, 4, UINT_MAX, },
	},
	[ND_CMD_CLEAR_ERROR] = {
		.in_num = 2,
		.in_sizes = { 8, 8, },
		.out_num = 3,
		.out_sizes = { 4, 4, 8, },
	},
	[ND_CMD_CALL] = {
		.in_num = 2,
		.in_sizes = { sizeof(struct nd_cmd_pkg), UINT_MAX, },
		.out_num = 1,
		.out_sizes = { UINT_MAX, },
	},
};

const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd)
{
	if (cmd < ARRAY_SIZE(__nd_cmd_bus_descs))
		return &__nd_cmd_bus_descs[cmd];
	return NULL;
}
EXPORT_SYMBOL_GPL(nd_cmd_bus_desc);

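/*
 * nd_cmd_in_size()/nd_cmd_out_size() resolve the size of field @idx:
 * fixed sizes come straight from the descriptor, while variable-length
 * payloads (UINT_MAX) are read out of the command headers already
 * copied into the buffer.
 */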
u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
		const struct nd_cmd_desc *desc, int idx, void *buf)
{
	if (idx >= desc->in_num)
		return UINT_MAX;

	if (desc->in_sizes[idx] < UINT_MAX)
		return desc->in_sizes[idx];

	if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA && idx == 2) {
		struct nd_cmd_set_config_hdr *hdr = buf;

		return hdr->in_length;
	} else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2) {
		struct nd_cmd_vendor_hdr *hdr = buf;

		return hdr->in_length;
	} else if (cmd == ND_CMD_CALL) {
		struct nd_cmd_pkg *pkg = buf;

		return pkg->nd_size_in;
	}

	return UINT_MAX;
}
EXPORT_SYMBOL_GPL(nd_cmd_in_size);

u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
		const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
		const u32 *out_field, unsigned long remainder)
{
	if (idx >= desc->out_num)
		return UINT_MAX;

	if (desc->out_sizes[idx] < UINT_MAX)
		return desc->out_sizes[idx];

	if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && idx == 1)
		return in_field[1];
	else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
		return out_field[1];
	else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2) {
		/*
		 * Per table 9-276 ARS Data in ACPI 6.1, out_field[1] is
		 * "Size of Output Buffer in bytes, including this
		 * field."
		 */
		if (out_field[1] < 4)
			return 0;
		/*
		 * ACPI 6.1 is ambiguous if 'status' is included in the
		 * output size. If we encounter an output size that
		 * overshoots the remainder by 4 bytes, assume it was
		 * including 'status'.
		 */
		if (out_field[1] - 4 == remainder)
			return remainder;
		return out_field[1] - 8;
	} else if (cmd == ND_CMD_CALL) {
		struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;

		return pkg->nd_size_out;
	}

	return UINT_MAX;
}
EXPORT_SYMBOL_GPL(nd_cmd_out_size);

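/*
 * Wait for in-flight probes to finish. Both the device lock and the
 * bus lock must be dropped before sleeping (the probe path takes
 * them), then re-acquired before re-checking probe_active.
 */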
void wait_nvdimm_bus_probe_idle(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	do {
		if (nvdimm_bus->probe_active == 0)
			break;
		nvdimm_bus_unlock(dev);
		device_unlock(dev);
		wait_event(nvdimm_bus->wait,
				nvdimm_bus->probe_active == 0);
		device_lock(dev);
		nvdimm_bus_lock(dev);
	} while (true);
}

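/*
 * Helper for nd_cmd_clear_to_send(): return -EBUSY when the range to
 * be cleared covers an active namespace (or the namespace backing a
 * btt/pfn claim), since clearing errors there is expected to go
 * through the pmem driver's write path instead.
 */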
static int nd_pmem_forget_poison_check(struct device *dev, void *data)
{
	struct nd_cmd_clear_error *clear_err =
		(struct nd_cmd_clear_error *)data;
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
	struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
	struct nd_namespace_common *ndns = NULL;
	struct nd_namespace_io *nsio;
	resource_size_t offset = 0, end_trunc = 0, start, end, pstart, pend;

	if (nd_dax || !dev->driver)
		return 0;

	start = clear_err->address;
	end = clear_err->address + clear_err->cleared - 1;

	if (nd_btt || nd_pfn || nd_dax) {
		if (nd_btt)
			ndns = nd_btt->ndns;
		else if (nd_pfn)
			ndns = nd_pfn->ndns;
		else if (nd_dax)
			ndns = nd_dax->nd_pfn.ndns;

		if (!ndns)
			return 0;
	} else
		ndns = to_ndns(dev);

	nsio = to_nd_namespace_io(&ndns->dev);
	pstart = nsio->res.start + offset;
	pend = nsio->res.end - end_trunc;

	if ((pstart >= start) && (pend <= end))
		return -EBUSY;

	return 0;
}

static int nd_ns_forget_poison_check(struct device *dev, void *data)
{
	return device_for_each_child(dev, data, nd_pmem_forget_poison_check);
}

/* set_config requires an idle interleave set */
static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
		struct nvdimm *nvdimm, unsigned int cmd, void *data)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	/* ask the bus provider if it would like to block this request */
	if (nd_desc->clear_to_send) {
		int rc = nd_desc->clear_to_send(nd_desc, nvdimm, cmd, data);

		if (rc)
			return rc;
	}

	/* require clear error to go through the pmem driver */
	if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR)
		return device_for_each_child(&nvdimm_bus->dev, data,
				nd_ns_forget_poison_check);

	if (!nvdimm || cmd != ND_CMD_SET_CONFIG_DATA)
		return 0;

	/* prevent label manipulation while the kernel owns label updates */
	wait_nvdimm_bus_probe_idle(&nvdimm_bus->dev);
	if (atomic_read(&nvdimm->busy))
		return -EBUSY;
	return 0;
}

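/*
 * Core ioctl path for both the bus ("ndctl") and dimm ("dimmctl")
 * character devices: look up the command descriptor, size the input
 * and output envelopes from the user buffer, bounce the whole payload
 * into a kernel buffer, gate it through nd_cmd_clear_to_send(), and
 * finally hand it to the bus provider's ->ndctl() callback.
 */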
static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
		int read_only, unsigned int ioctl_cmd, unsigned long arg)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	const struct nd_cmd_desc *desc = NULL;
	unsigned int cmd = _IOC_NR(ioctl_cmd);
	struct device *dev = &nvdimm_bus->dev;
	void __user *p = (void __user *) arg;
	char *out_env = NULL, *in_env = NULL;
	const char *cmd_name, *dimm_name;
	u32 in_len = 0, out_len = 0;
	unsigned int func = cmd;
	unsigned long cmd_mask;
	struct nd_cmd_pkg pkg;
	int rc, i, cmd_rc;
	void *buf = NULL;
	u64 buf_len = 0;

	if (nvdimm) {
		desc = nd_cmd_dimm_desc(cmd);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm->cmd_mask;
		dimm_name = dev_name(&nvdimm->dev);
	} else {
		desc = nd_cmd_bus_desc(cmd);
		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dimm_name = "bus";
	}

	/* Validate command family support against bus declared support */
	if (cmd == ND_CMD_CALL) {
		unsigned long *mask;

		if (copy_from_user(&pkg, p, sizeof(pkg)))
			return -EFAULT;

		if (nvdimm) {
			if (pkg.nd_family > NVDIMM_FAMILY_MAX)
				return -EINVAL;
			mask = &nd_desc->dimm_family_mask;
		} else {
			if (pkg.nd_family > NVDIMM_BUS_FAMILY_MAX)
				return -EINVAL;
			mask = &nd_desc->bus_family_mask;
		}

		if (!test_bit(pkg.nd_family, mask))
			return -EINVAL;
	}

	if (!desc ||
	    (desc->out_num + desc->in_num == 0) ||
	    cmd > ND_CMD_CALL ||
	    !test_bit(cmd, &cmd_mask))
		return -ENOTTY;

	/* fail write commands (when read-only) */
	if (read_only)
		switch (cmd) {
		case ND_CMD_VENDOR:
		case ND_CMD_SET_CONFIG_DATA:
		case ND_CMD_ARS_START:
		case ND_CMD_CLEAR_ERROR:
		case ND_CMD_CALL:
			dev_dbg(dev, "'%s' command while read-only.\n",
					nvdimm ? nvdimm_cmd_name(cmd)
					: nvdimm_bus_cmd_name(cmd));
			return -EPERM;
		default:
			break;
		}

	/* process an input envelope */
	in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
	if (!in_env)
		return -ENOMEM;
	for (i = 0; i < desc->in_num; i++) {
		u32 in_size, copy;

		in_size = nd_cmd_in_size(nvdimm, cmd, desc, i, in_env);
		if (in_size == UINT_MAX) {
			dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		if (in_len < ND_CMD_MAX_ENVELOPE)
			copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size);
		else
			copy = 0;
		if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) {
			rc = -EFAULT;
			goto out;
		}
		in_len += in_size;
	}

	if (cmd == ND_CMD_CALL) {
		func = pkg.nd_command;
		dev_dbg(dev, "%s, idx: %llu, in: %u, out: %u, len %llu\n",
				dimm_name, pkg.nd_command,
				in_len, out_len, buf_len);
	}

	/* process an output envelope */
	out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
	if (!out_env) {
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
				(u32 *) in_env, (u32 *) out_env, 0);
		u32 copy;

		if (out_size == UINT_MAX) {
			dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			rc = -EFAULT;
			goto out;
		}
		if (out_len < ND_CMD_MAX_ENVELOPE)
			copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size);
		else
			copy = 0;
		if (copy && copy_from_user(&out_env[out_len],
					p + in_len + out_len, copy)) {
			rc = -EFAULT;
			goto out;
		}
		out_len += out_size;
	}

	buf_len = (u64) out_len + (u64) in_len;
	if (buf_len > ND_IOCTL_MAX_BUFLEN) {
		dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
				cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
		rc = -EINVAL;
		goto out;
	}

	buf = vmalloc(buf_len);
	if (!buf) {
		rc = -ENOMEM;
		goto out;
	}

	if (copy_from_user(buf, p, buf_len)) {
		rc = -EFAULT;
		goto out;
	}

	device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
	if (rc)
		goto out_unlock;

	rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, &cmd_rc);
	if (rc < 0)
		goto out_unlock;

	if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR && cmd_rc >= 0) {
		struct nd_cmd_clear_error *clear_err = buf;

		nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
				clear_err->cleared);
	}

	if (copy_to_user(p, buf, buf_len))
		rc = -EFAULT;

out_unlock:
	nvdimm_bus_unlock(dev);
	device_unlock(dev);
out:
	kfree(in_env);
	kfree(out_env);
	vfree(buf);
	return rc;
}

enum nd_ioctl_mode {
	BUS_IOCTL,
	DIMM_IOCTL,
};

static int match_dimm(struct device *dev, void *data)
{
	long id = (long) data;

	if (is_nvdimm(dev)) {
		struct nvdimm *nvdimm = to_nvdimm(dev);

		return nvdimm->id == id;
	}

	return 0;
}

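/*
 * Translate a file handle (chardev minor stashed in private_data by
 * nd_open()) into the owning nvdimm_bus and, for DIMM_IOCTL, the
 * target nvdimm. nd_bus_remove() waits for ioctl_active to drop to
 * zero, so the count taken here holds off bus teardown while the
 * ioctl runs.
 */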
static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
		enum nd_ioctl_mode mode)
{
	struct nvdimm_bus *nvdimm_bus, *found = NULL;
	long id = (long) file->private_data;
	struct nvdimm *nvdimm = NULL;
	int rc, ro;

	ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
	mutex_lock(&nvdimm_bus_list_mutex);
	list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
		if (mode == DIMM_IOCTL) {
			struct device *dev;

			dev = device_find_child(&nvdimm_bus->dev,
					file->private_data, match_dimm);
			if (!dev)
				continue;
			nvdimm = to_nvdimm(dev);
			found = nvdimm_bus;
		} else if (nvdimm_bus->id == id) {
			found = nvdimm_bus;
		}

		if (found) {
			atomic_inc(&nvdimm_bus->ioctl_active);
			break;
		}
	}
	mutex_unlock(&nvdimm_bus_list_mutex);

	if (!found)
		return -ENXIO;

	nvdimm_bus = found;
	rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);

	if (nvdimm)
		put_device(&nvdimm->dev);
	if (atomic_dec_and_test(&nvdimm_bus->ioctl_active))
		wake_up(&nvdimm_bus->wait);

	return rc;
}

static long bus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return nd_ioctl(file, cmd, arg, BUS_IOCTL);
}

static long dimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return nd_ioctl(file, cmd, arg, DIMM_IOCTL);
}

static int nd_open(struct inode *inode, struct file *file)
{
	long minor = iminor(inode);

	file->private_data = (void *) minor;
	return 0;
}

static const struct file_operations nvdimm_bus_fops = {
	.owner = THIS_MODULE,
	.open = nd_open,
	.unlocked_ioctl = bus_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.llseek = noop_llseek,
};

static const struct file_operations nvdimm_fops = {
	.owner = THIS_MODULE,
	.open = nd_open,
	.unlocked_ioctl = dimm_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.llseek = noop_llseek,
};

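/*
 * Subsystem init: register the bus type, the "ndctl" and "dimmctl"
 * chardev majors, the "nd" class, and finally the nd_bus driver.
 * Each error label unwinds exactly the steps that succeeded, in
 * reverse order.
 */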
int __init nvdimm_bus_init(void)
{
	int rc;

	rc = bus_register(&nvdimm_bus_type);
	if (rc)
		return rc;

	rc = register_chrdev(0, "ndctl", &nvdimm_bus_fops);
	if (rc < 0)
		goto err_bus_chrdev;
	nvdimm_bus_major = rc;

	rc = register_chrdev(0, "dimmctl", &nvdimm_fops);
	if (rc < 0)
		goto err_dimm_chrdev;
	nvdimm_major = rc;

	nd_class = class_create(THIS_MODULE, "nd");
	if (IS_ERR(nd_class)) {
		rc = PTR_ERR(nd_class);
		goto err_class;
	}

	rc = driver_register(&nd_bus_driver.drv);
	if (rc)
		goto err_nd_bus;

	return 0;

 err_nd_bus:
	class_destroy(nd_class);
 err_class:
	unregister_chrdev(nvdimm_major, "dimmctl");
 err_dimm_chrdev:
	unregister_chrdev(nvdimm_bus_major, "ndctl");
 err_bus_chrdev:
	bus_unregister(&nvdimm_bus_type);

	return rc;
}

void nvdimm_bus_exit(void)
{
	driver_unregister(&nd_bus_driver.drv);
	class_destroy(nd_class);
	unregister_chrdev(nvdimm_bus_major, "ndctl");
	unregister_chrdev(nvdimm_major, "dimmctl");
	bus_unregister(&nvdimm_bus_type);
	ida_destroy(&nd_ida);
}