cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dimm_devs.c (21341B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Retrieve the bus and dimm handle and return whether this bus supports
 * get_config_data commands
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_LABELING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

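/* Check that the DIMM backing @ndd can actually service config-data commands */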
static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%ps: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

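/*
 * Read @len bytes of label-area config data starting at @offset, splitting
 * the transfer into max_xfer-sized ND_CMD_GET_CONFIG_DATA calls.
 */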
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
			   size_t offset, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	size_t max_cmd_size, buf_offset;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len;
	     len -= cmd->in_length, buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);

		cmd_size = sizeof(*cmd) + cmd->in_length;

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}

		/* out_buf should be valid, copy it into our output buffer */
		memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
	}
	kvfree(cmd);

	return rc;
}

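/*
 * Write @len bytes of config data at @offset, chunked the same way as
 * nvdimm_get_config_data(); each ND_CMD_SET_CONFIG_DATA buffer carries a
 * trailing 4-byte status word.
 */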
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}

void nvdimm_set_labeling(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LABELING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

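/* kref release: free all DPA reservations, then drop the device reference */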
void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s\n",
			test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing; userspace should
	 * quiesce probing if it wants a static answer.
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
	struct device *dev;
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	dev = ndd->dev;
	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
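	/* nfree is unsigned, so "nfree - 1 > nfree" detects wraparound at 0 */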
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}

static ssize_t available_slots_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	ssize_t rc;

	device_lock(dev);
	rc = __available_slots_show(dev_get_drvdata(dev), buf);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(available_slots);

__weak ssize_t security_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
		return sprintf(buf, "overwrite\n");
	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return sprintf(buf, "disabled\n");
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "unlocked\n");
	if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "locked\n");
	return -ENOTTY;
}

static ssize_t frozen_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
				&nvdimm->sec.flags));
}
static DEVICE_ATTR_RO(frozen);

static ssize_t security_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	ssize_t rc;

	/*
	 * Require all userspace triggered security management to be
	 * done while probing is idle and the DIMM is not in active use
	 * in any region.
	 */
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = nvdimm_security_store(dev, buf, len);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(security);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	&dev_attr_security.attr,
	&dev_attr_frozen.attr,
	NULL,
};

static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
		return a->mode;
	if (!nvdimm->sec.flags)
		return 0;

	if (a == &dev_attr_security.attr) {
		/* Are there any state mutation ops (make writable)? */
		if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
				|| nvdimm->sec.ops->change_key
				|| nvdimm->sec.ops->erase
				|| nvdimm->sec.ops->overwrite)
			return a->mode;
		return 0444;
	}

	if (nvdimm->sec.ops->freeze)
		return a->mode;
	return 0;
}

static const struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
	.is_visible = nvdimm_visible,
};

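/* sysfs: report the outcome of the most recent firmware activation attempt */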
static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_result result;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	result = nvdimm->fw_ops->activate_result(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (result) {
	case NVDIMM_FWA_RESULT_NONE:
		return sprintf(buf, "none\n");
	case NVDIMM_FWA_RESULT_SUCCESS:
		return sprintf(buf, "success\n");
	case NVDIMM_FWA_RESULT_FAIL:
		return sprintf(buf, "fail\n");
	case NVDIMM_FWA_RESULT_NOTSTAGED:
		return sprintf(buf, "not_staged\n");
	case NVDIMM_FWA_RESULT_NEEDRESET:
		return sprintf(buf, "need_reset\n");
	default:
		return -ENXIO;
	}
}
static DEVICE_ATTR_ADMIN_RO(result);

static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_state state;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	state = nvdimm->fw_ops->activate_state(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	default:
		return -ENXIO;
	}
}

static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_trigger arg;
	int rc;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "arm"))
		arg = NVDIMM_FWA_ARM;
	else if (sysfs_streq(buf, "disarm"))
		arg = NVDIMM_FWA_DISARM;
	else
		return -EINVAL;

	nvdimm_bus_lock(dev);
	rc = nvdimm->fw_ops->arm(nvdimm, arg);
	nvdimm_bus_unlock(dev);

	if (rc < 0)
		return rc;
	return len;
}
static DEVICE_ATTR_ADMIN_RW(activate);

static struct attribute *nvdimm_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_result.attr,
	NULL,
};

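/*
 * Hide the firmware/ group unless both the bus and the DIMM implement
 * firmware-activate ops and the bus can at least quiesce I/O for activation.
 */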
static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return 0;
	if (!nvdimm->fw_ops)
		return 0;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	nvdimm_bus_unlock(dev);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}

static const struct attribute_group nvdimm_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_firmware_attributes,
	.is_visible = nvdimm_firmware_visible,
};

static const struct attribute_group *nvdimm_attribute_groups[] = {
	&nd_device_attribute_group,
	&nvdimm_attribute_group,
	&nvdimm_firmware_attribute_group,
	NULL,
};

static const struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
	.groups = nvdimm_attribute_groups,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

static struct lock_class_key nvdimm_key;

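/*
 * Allocate and register an nvdimm device: assign an ida-backed "nmem%d"
 * name, cache the provider's ops, and read the initial security state
 * before the device becomes visible.
 */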
struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq, const char *dimm_id,
		const struct nvdimm_security_ops *sec_ops,
		const struct nvdimm_fw_ops *fw_ops)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}

	nvdimm->dimm_id = dimm_id;
	nvdimm->provider_data = provider_data;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nvdimm->sec.ops = sec_ops;
	nvdimm->fw_ops = fw_ops;
	nvdimm->sec.overwrite_tmo = 0;
	INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
	/*
	 * Security state must be initialized before device_add() for
	 * attribute visibility.
	 */
	/* get security state and extended (master) state */
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &nvdimm_key);
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(__nvdimm_create);

void nvdimm_delete(struct nvdimm *nvdimm)
{
	struct device *dev = &nvdimm->dev;
	bool dev_put = false;

	/* We are shutting down. Make state frozen artificially. */
	nvdimm_bus_lock(dev);
	set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
	if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
		dev_put = true;
	nvdimm_bus_unlock(dev);
	cancel_delayed_work_sync(&nvdimm->dwork);
	if (dev_put)
		put_device(dev);
	nd_device_unregister(dev, ND_SYNC);
}
EXPORT_SYMBOL_GPL(nvdimm_delete);

static void shutdown_security_notify(void *data)
{
	struct nvdimm *nvdimm = data;

	sysfs_put(nvdimm->sec.overwrite_state);
}

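/*
 * Grab a sysfs handle to the "security" attribute so overwrite completion
 * can notify userspace; released via the devm action above.
 */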
int nvdimm_security_setup_events(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->sec.flags || !nvdimm->sec.ops
			|| !nvdimm->sec.ops->overwrite)
		return 0;
	nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
	if (!nvdimm->sec.overwrite_state)
		return -ENOMEM;

	return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
}
EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);

int nvdimm_in_overwrite(struct nvdimm *nvdimm)
{
	return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);

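/* Freeze the DIMM's security state; must be called under the bus lock */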
int nvdimm_security_freeze(struct nvdimm *nvdimm)
{
	int rc;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
		return -EOPNOTSUPP;

	if (!nvdimm->sec.flags)
		return -EIO;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
		return -EBUSY;
	}

	rc = nvdimm->sec.ops->freeze(nvdimm);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	return rc;
}

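/* Per-DIMM allocation granularity: the region alignment split across mappings */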
static unsigned long dpa_align(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;

	if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
				"bus lock required for capacity provision\n"))
		return 0;
	if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
				% nd_region->ndr_mappings,
				"invalid region align %#lx mappings: %d\n",
				nd_region->align, nd_region->ndr_mappings))
		return 0;
	return nd_region->align / nd_region->ndr_mappings;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			   contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
					   struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;
	unsigned long align;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		/* trim free space relative to current alignment setting */
		start = ALIGN(res->start, align);
		end = ALIGN_DOWN(res->end + 1, align) - 1;
		if (end < start)
			continue;
		if (end - start + 1 > max)
			max = end - start + 1;
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
				      struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t map_start, map_end, busy = 0;
	struct resource *res;
	unsigned long align;

	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if (start >= map_start && start < map_end) {
			if (end > map_end) {
				nd_dbg_dpa(nd_region, ndd, res,
					   "misaligned to iset\n");
				return 0;
			}
			busy += end - start + 1;
		} else if (end >= map_start && end <= map_end) {
			busy += end - start + 1;
		} else if (map_start > start && map_start < end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
		}
	}

	if (busy < nd_mapping->size)
		return ALIGN_DOWN(nd_mapping->size - busy, align);
	return 0;
}

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

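/* Reserve [start, start + n) in the DIMM's DPA space under @label_id's name */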
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form pmem-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;
	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}