cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

configfs.c (42902B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Configfs interface for the NVMe target.
      4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
      5 */
      6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
      7#include <linux/kernel.h>
      8#include <linux/module.h>
      9#include <linux/slab.h>
     10#include <linux/stat.h>
     11#include <linux/ctype.h>
     12#include <linux/pci.h>
     13#include <linux/pci-p2pdma.h>
     14
     15#include "nvmet.h"
     16
     17static const struct config_item_type nvmet_host_type;
     18static const struct config_item_type nvmet_subsys_type;
     19
     20static LIST_HEAD(nvmet_ports_list);
     21struct list_head *nvmet_ports = &nvmet_ports_list;
     22
     23struct nvmet_type_name_map {
     24	u8		type;
     25	const char	*name;
     26};
     27
     28static struct nvmet_type_name_map nvmet_transport[] = {
     29	{ NVMF_TRTYPE_RDMA,	"rdma" },
     30	{ NVMF_TRTYPE_FC,	"fc" },
     31	{ NVMF_TRTYPE_TCP,	"tcp" },
     32	{ NVMF_TRTYPE_LOOP,	"loop" },
     33};
     34
     35static const struct nvmet_type_name_map nvmet_addr_family[] = {
     36	{ NVMF_ADDR_FAMILY_PCI,		"pcie" },
     37	{ NVMF_ADDR_FAMILY_IP4,		"ipv4" },
     38	{ NVMF_ADDR_FAMILY_IP6,		"ipv6" },
     39	{ NVMF_ADDR_FAMILY_IB,		"ib" },
     40	{ NVMF_ADDR_FAMILY_FC,		"fc" },
     41	{ NVMF_ADDR_FAMILY_LOOP,	"loop" },
     42};
     43
     44static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
     45{
     46	if (p->enabled)
     47		pr_err("Disable port '%u' before changing attribute in %s\n",
     48		       le16_to_cpu(p->disc_addr.portid), caller);
     49	return p->enabled;
     50}
     51
     52/*
     53 * nvmet_port Generic ConfigFS definitions.
     54 * Used in any place in the ConfigFS tree that refers to an address.
     55 */
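/*
 * Rough usage sketch (mount point and values are illustrative only): each
 * show/store pair below backs one file in a port or referral directory,
 * and every store refuses changes with -EACCES while the port is enabled,
 * so the address is normally filled in before the port is enabled by
 * linking a subsystem into its subsystems/ directory:
 *
 *	echo ipv4      > addr_adrfam
 *	echo 127.0.0.1 > addr_traddr
 *	echo 4420      > addr_trsvcid
 *	echo tcp       > addr_trtype
 */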
     56static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
     57{
     58	u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
     59	int i;
     60
     61	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
     62		if (nvmet_addr_family[i].type == adrfam)
     63			return snprintf(page, PAGE_SIZE, "%s\n",
     64					nvmet_addr_family[i].name);
     65	}
     66
     67	return snprintf(page, PAGE_SIZE, "\n");
     68}
     69
     70static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
     71		const char *page, size_t count)
     72{
     73	struct nvmet_port *port = to_nvmet_port(item);
     74	int i;
     75
     76	if (nvmet_is_port_enabled(port, __func__))
     77		return -EACCES;
     78
     79	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
     80		if (sysfs_streq(page, nvmet_addr_family[i].name))
     81			goto found;
     82	}
     83
     84	pr_err("Invalid value '%s' for adrfam\n", page);
     85	return -EINVAL;
     86
     87found:
     88	port->disc_addr.adrfam = nvmet_addr_family[i].type;
     89	return count;
     90}
     91
     92CONFIGFS_ATTR(nvmet_, addr_adrfam);
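/*
 * CONFIGFS_ATTR(nvmet_, addr_adrfam) ties the show/store pair above to a
 * struct configfs_attribute named nvmet_attr_addr_adrfam; its expansion
 * (from include/linux/configfs.h) looks roughly like the sketch below,
 * and the same pattern repeats for every CONFIGFS_ATTR() use in this file:
 *
 *	static struct configfs_attribute nvmet_attr_addr_adrfam = {
 *		.ca_name	= "addr_adrfam",
 *		.ca_mode	= S_IRUGO | S_IWUSR,
 *		.ca_owner	= THIS_MODULE,
 *		.show		= nvmet_addr_adrfam_show,
 *		.store		= nvmet_addr_adrfam_store,
 *	};
 */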
     93
     94static ssize_t nvmet_addr_portid_show(struct config_item *item,
     95		char *page)
     96{
     97	__le16 portid = to_nvmet_port(item)->disc_addr.portid;
     98
     99	return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
    100}
    101
    102static ssize_t nvmet_addr_portid_store(struct config_item *item,
    103		const char *page, size_t count)
    104{
    105	struct nvmet_port *port = to_nvmet_port(item);
    106	u16 portid = 0;
    107
    108	if (kstrtou16(page, 0, &portid)) {
    109		pr_err("Invalid value '%s' for portid\n", page);
    110		return -EINVAL;
    111	}
    112
    113	if (nvmet_is_port_enabled(port, __func__))
    114		return -EACCES;
    115
    116	port->disc_addr.portid = cpu_to_le16(portid);
    117	return count;
    118}
    119
    120CONFIGFS_ATTR(nvmet_, addr_portid);
    121
    122static ssize_t nvmet_addr_traddr_show(struct config_item *item,
    123		char *page)
    124{
    125	struct nvmet_port *port = to_nvmet_port(item);
    126
    127	return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
    128}
    129
    130static ssize_t nvmet_addr_traddr_store(struct config_item *item,
    131		const char *page, size_t count)
    132{
    133	struct nvmet_port *port = to_nvmet_port(item);
    134
    135	if (count > NVMF_TRADDR_SIZE) {
    136		pr_err("Invalid value '%s' for traddr\n", page);
    137		return -EINVAL;
    138	}
    139
    140	if (nvmet_is_port_enabled(port, __func__))
    141		return -EACCES;
    142
    143	if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
    144		return -EINVAL;
    145	return count;
    146}
    147
    148CONFIGFS_ATTR(nvmet_, addr_traddr);
    149
    150static const struct nvmet_type_name_map nvmet_addr_treq[] = {
    151	{ NVMF_TREQ_NOT_SPECIFIED,	"not specified" },
    152	{ NVMF_TREQ_REQUIRED,		"required" },
    153	{ NVMF_TREQ_NOT_REQUIRED,	"not required" },
    154};
    155
    156static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
    157{
    158	u8 treq = to_nvmet_port(item)->disc_addr.treq &
    159		NVME_TREQ_SECURE_CHANNEL_MASK;
    160	int i;
    161
    162	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
    163		if (treq == nvmet_addr_treq[i].type)
    164			return snprintf(page, PAGE_SIZE, "%s\n",
    165					nvmet_addr_treq[i].name);
    166	}
    167
    168	return snprintf(page, PAGE_SIZE, "\n");
    169}
    170
    171static ssize_t nvmet_addr_treq_store(struct config_item *item,
    172		const char *page, size_t count)
    173{
    174	struct nvmet_port *port = to_nvmet_port(item);
    175	u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;
    176	int i;
    177
    178	if (nvmet_is_port_enabled(port, __func__))
    179		return -EACCES;
    180
    181	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
    182		if (sysfs_streq(page, nvmet_addr_treq[i].name))
    183			goto found;
    184	}
    185
    186	pr_err("Invalid value '%s' for treq\n", page);
    187	return -EINVAL;
    188
    189found:
    190	treq |= nvmet_addr_treq[i].type;
    191	port->disc_addr.treq = treq;
    192	return count;
    193}
    194
    195CONFIGFS_ATTR(nvmet_, addr_treq);
    196
    197static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
    198		char *page)
    199{
    200	struct nvmet_port *port = to_nvmet_port(item);
    201
    202	return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
    203}
    204
    205static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
    206		const char *page, size_t count)
    207{
    208	struct nvmet_port *port = to_nvmet_port(item);
    209
    210	if (count > NVMF_TRSVCID_SIZE) {
    211		pr_err("Invalid value '%s' for trsvcid\n", page);
    212		return -EINVAL;
    213	}
    214	if (nvmet_is_port_enabled(port, __func__))
    215		return -EACCES;
    216
    217	if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
    218		return -EINVAL;
    219	return count;
    220}
    221
    222CONFIGFS_ATTR(nvmet_, addr_trsvcid);
    223
    224static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
    225		char *page)
    226{
    227	struct nvmet_port *port = to_nvmet_port(item);
    228
    229	return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
    230}
    231
    232static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
    233		const char *page, size_t count)
    234{
    235	struct nvmet_port *port = to_nvmet_port(item);
    236	int ret;
    237
    238	if (nvmet_is_port_enabled(port, __func__))
    239		return -EACCES;
    240	ret = kstrtoint(page, 0, &port->inline_data_size);
    241	if (ret) {
    242		pr_err("Invalid value '%s' for inline_data_size\n", page);
    243		return -EINVAL;
    244	}
    245	return count;
    246}
    247
    248CONFIGFS_ATTR(nvmet_, param_inline_data_size);
    249
    250#ifdef CONFIG_BLK_DEV_INTEGRITY
    251static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
    252		char *page)
    253{
    254	struct nvmet_port *port = to_nvmet_port(item);
    255
    256	return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
    257}
    258
    259static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
    260		const char *page, size_t count)
    261{
    262	struct nvmet_port *port = to_nvmet_port(item);
    263	bool val;
    264
    265	if (strtobool(page, &val))
    266		return -EINVAL;
    267
    268	if (nvmet_is_port_enabled(port, __func__))
    269		return -EACCES;
    270
    271	port->pi_enable = val;
    272	return count;
    273}
    274
    275CONFIGFS_ATTR(nvmet_, param_pi_enable);
    276#endif
    277
    278static ssize_t nvmet_addr_trtype_show(struct config_item *item,
    279		char *page)
    280{
    281	struct nvmet_port *port = to_nvmet_port(item);
    282	int i;
    283
    284	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
    285		if (port->disc_addr.trtype == nvmet_transport[i].type)
    286			return snprintf(page, PAGE_SIZE,
    287					"%s\n", nvmet_transport[i].name);
    288	}
    289
    290	return sprintf(page, "\n");
    291}
    292
    293static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
    294{
    295	port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
    296	port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
    297	port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
    298}
    299
    300static ssize_t nvmet_addr_trtype_store(struct config_item *item,
    301		const char *page, size_t count)
    302{
    303	struct nvmet_port *port = to_nvmet_port(item);
    304	int i;
    305
    306	if (nvmet_is_port_enabled(port, __func__))
    307		return -EACCES;
    308
    309	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
    310		if (sysfs_streq(page, nvmet_transport[i].name))
    311			goto found;
    312	}
    313
    314	pr_err("Invalid value '%s' for trtype\n", page);
    315	return -EINVAL;
    316
    317found:
    318	memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
    319	port->disc_addr.trtype = nvmet_transport[i].type;
    320	if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
    321		nvmet_port_init_tsas_rdma(port);
    322	return count;
    323}
    324
    325CONFIGFS_ATTR(nvmet_, addr_trtype);
    326
    327/*
    328 * Namespace structures & file operation functions below
    329 */
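/*
 * A namespace is a directory created under <subsys>/namespaces/<nsid>
 * (see nvmet_ns_make() below) and is configured through the files defined
 * here, typically along these lines (device path illustrative only):
 *
 *	mkdir namespaces/1
 *	echo /dev/nvme0n1 > namespaces/1/device_path
 *	echo 1            > namespaces/1/enable
 *
 * device_path, device_uuid, device_nguid and buffered_io can only be
 * changed while the namespace is disabled.
 */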
    330static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
    331{
    332	return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
    333}
    334
    335static ssize_t nvmet_ns_device_path_store(struct config_item *item,
    336		const char *page, size_t count)
    337{
    338	struct nvmet_ns *ns = to_nvmet_ns(item);
    339	struct nvmet_subsys *subsys = ns->subsys;
    340	size_t len;
    341	int ret;
    342
    343	mutex_lock(&subsys->lock);
    344	ret = -EBUSY;
    345	if (ns->enabled)
    346		goto out_unlock;
    347
    348	ret = -EINVAL;
    349	len = strcspn(page, "\n");
    350	if (!len)
    351		goto out_unlock;
    352
    353	kfree(ns->device_path);
    354	ret = -ENOMEM;
    355	ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
    356	if (!ns->device_path)
    357		goto out_unlock;
    358
    359	mutex_unlock(&subsys->lock);
    360	return count;
    361
    362out_unlock:
    363	mutex_unlock(&subsys->lock);
    364	return ret;
    365}
    366
    367CONFIGFS_ATTR(nvmet_ns_, device_path);
    368
    369#ifdef CONFIG_PCI_P2PDMA
    370static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
    371{
    372	struct nvmet_ns *ns = to_nvmet_ns(item);
    373
    374	return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
    375}
    376
    377static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
    378		const char *page, size_t count)
    379{
    380	struct nvmet_ns *ns = to_nvmet_ns(item);
    381	struct pci_dev *p2p_dev = NULL;
    382	bool use_p2pmem;
    383	int ret = count;
    384	int error;
    385
    386	mutex_lock(&ns->subsys->lock);
    387	if (ns->enabled) {
    388		ret = -EBUSY;
    389		goto out_unlock;
    390	}
    391
    392	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
    393	if (error) {
    394		ret = error;
    395		goto out_unlock;
    396	}
    397
    398	ns->use_p2pmem = use_p2pmem;
    399	pci_dev_put(ns->p2p_dev);
    400	ns->p2p_dev = p2p_dev;
    401
    402out_unlock:
    403	mutex_unlock(&ns->subsys->lock);
    404
    405	return ret;
    406}
    407
    408CONFIGFS_ATTR(nvmet_ns_, p2pmem);
    409#endif /* CONFIG_PCI_P2PDMA */
    410
    411static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
    412{
    413	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
    414}
    415
    416static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
    417					  const char *page, size_t count)
    418{
    419	struct nvmet_ns *ns = to_nvmet_ns(item);
    420	struct nvmet_subsys *subsys = ns->subsys;
    421	int ret = 0;
    422
    423	mutex_lock(&subsys->lock);
    424	if (ns->enabled) {
    425		ret = -EBUSY;
    426		goto out_unlock;
    427	}
    428
    429	if (uuid_parse(page, &ns->uuid))
    430		ret = -EINVAL;
    431
    432out_unlock:
    433	mutex_unlock(&subsys->lock);
    434	return ret ? ret : count;
    435}
    436
    437CONFIGFS_ATTR(nvmet_ns_, device_uuid);
    438
    439static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
    440{
    441	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
    442}
    443
    444static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
    445		const char *page, size_t count)
    446{
    447	struct nvmet_ns *ns = to_nvmet_ns(item);
    448	struct nvmet_subsys *subsys = ns->subsys;
    449	u8 nguid[16];
    450	const char *p = page;
    451	int i;
    452	int ret = 0;
    453
    454	mutex_lock(&subsys->lock);
    455	if (ns->enabled) {
    456		ret = -EBUSY;
    457		goto out_unlock;
    458	}
    459
    460	for (i = 0; i < 16; i++) {
    461		if (p + 2 > page + count) {
    462			ret = -EINVAL;
    463			goto out_unlock;
    464		}
    465		if (!isxdigit(p[0]) || !isxdigit(p[1])) {
    466			ret = -EINVAL;
    467			goto out_unlock;
    468		}
    469
    470		nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
    471		p += 2;
    472
    473		if (*p == '-' || *p == ':')
    474			p++;
    475	}
    476
    477	memcpy(&ns->nguid, nguid, sizeof(nguid));
    478out_unlock:
    479	mutex_unlock(&subsys->lock);
    480	return ret ? ret : count;
    481}
    482
    483CONFIGFS_ATTR(nvmet_ns_, device_nguid);
    484
    485static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
    486{
    487	return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
    488}
    489
    490static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
    491		const char *page, size_t count)
    492{
    493	struct nvmet_ns *ns = to_nvmet_ns(item);
    494	u32 oldgrpid, newgrpid;
    495	int ret;
    496
    497	ret = kstrtou32(page, 0, &newgrpid);
    498	if (ret)
    499		return ret;
    500
    501	if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
    502		return -EINVAL;
    503
    504	down_write(&nvmet_ana_sem);
    505	oldgrpid = ns->anagrpid;
    506	nvmet_ana_group_enabled[newgrpid]++;
    507	ns->anagrpid = newgrpid;
    508	nvmet_ana_group_enabled[oldgrpid]--;
    509	nvmet_ana_chgcnt++;
    510	up_write(&nvmet_ana_sem);
    511
    512	nvmet_send_ana_event(ns->subsys, NULL);
    513	return count;
    514}
    515
    516CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
    517
    518static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
    519{
    520	return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
    521}
    522
    523static ssize_t nvmet_ns_enable_store(struct config_item *item,
    524		const char *page, size_t count)
    525{
    526	struct nvmet_ns *ns = to_nvmet_ns(item);
    527	bool enable;
    528	int ret = 0;
    529
    530	if (strtobool(page, &enable))
    531		return -EINVAL;
    532
    533	if (enable)
    534		ret = nvmet_ns_enable(ns);
    535	else
    536		nvmet_ns_disable(ns);
    537
    538	return ret ? ret : count;
    539}
    540
    541CONFIGFS_ATTR(nvmet_ns_, enable);
    542
    543static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
    544{
    545	return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
    546}
    547
    548static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
    549		const char *page, size_t count)
    550{
    551	struct nvmet_ns *ns = to_nvmet_ns(item);
    552	bool val;
    553
    554	if (strtobool(page, &val))
    555		return -EINVAL;
    556
    557	mutex_lock(&ns->subsys->lock);
    558	if (ns->enabled) {
    559		pr_err("disable ns before setting buffered_io value.\n");
    560		mutex_unlock(&ns->subsys->lock);
    561		return -EINVAL;
    562	}
    563
    564	ns->buffered_io = val;
    565	mutex_unlock(&ns->subsys->lock);
    566	return count;
    567}
    568
    569CONFIGFS_ATTR(nvmet_ns_, buffered_io);
    570
    571static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
    572		const char *page, size_t count)
    573{
    574	struct nvmet_ns *ns = to_nvmet_ns(item);
    575	bool val;
    576
    577	if (strtobool(page, &val))
    578		return -EINVAL;
    579
    580	if (!val)
    581		return -EINVAL;
    582
    583	mutex_lock(&ns->subsys->lock);
    584	if (!ns->enabled) {
    585		pr_err("enable ns before revalidate.\n");
    586		mutex_unlock(&ns->subsys->lock);
    587		return -EINVAL;
    588	}
    589	if (nvmet_ns_revalidate(ns))
    590		nvmet_ns_changed(ns->subsys, ns->nsid);
    591	mutex_unlock(&ns->subsys->lock);
    592	return count;
    593}
    594
    595CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
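/*
 * CONFIGFS_ATTR_WO() creates a write-only attribute (store handler only),
 * so revalidate_size acts as a trigger: writing a true boolean value to it
 * on an enabled namespace re-reads the backing device size and, if the
 * size changed, emits a namespace-changed event to connected controllers.
 */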
    596
    597static struct configfs_attribute *nvmet_ns_attrs[] = {
    598	&nvmet_ns_attr_device_path,
    599	&nvmet_ns_attr_device_nguid,
    600	&nvmet_ns_attr_device_uuid,
    601	&nvmet_ns_attr_ana_grpid,
    602	&nvmet_ns_attr_enable,
    603	&nvmet_ns_attr_buffered_io,
    604	&nvmet_ns_attr_revalidate_size,
    605#ifdef CONFIG_PCI_P2PDMA
    606	&nvmet_ns_attr_p2pmem,
    607#endif
    608	NULL,
    609};
    610
    611static void nvmet_ns_release(struct config_item *item)
    612{
    613	struct nvmet_ns *ns = to_nvmet_ns(item);
    614
    615	nvmet_ns_free(ns);
    616}
    617
    618static struct configfs_item_operations nvmet_ns_item_ops = {
    619	.release		= nvmet_ns_release,
    620};
    621
    622static const struct config_item_type nvmet_ns_type = {
    623	.ct_item_ops		= &nvmet_ns_item_ops,
    624	.ct_attrs		= nvmet_ns_attrs,
    625	.ct_owner		= THIS_MODULE,
    626};
    627
    628static struct config_group *nvmet_ns_make(struct config_group *group,
    629		const char *name)
    630{
    631	struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
    632	struct nvmet_ns *ns;
    633	int ret;
    634	u32 nsid;
    635
    636	ret = kstrtou32(name, 0, &nsid);
    637	if (ret)
    638		goto out;
    639
    640	ret = -EINVAL;
    641	if (nsid == 0 || nsid == NVME_NSID_ALL) {
    642		pr_err("invalid nsid %#x", nsid);
    643		goto out;
    644	}
    645
    646	ret = -ENOMEM;
    647	ns = nvmet_ns_alloc(subsys, nsid);
    648	if (!ns)
    649		goto out;
    650	config_group_init_type_name(&ns->group, name, &nvmet_ns_type);
    651
    652	pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);
    653
    654	return &ns->group;
    655out:
    656	return ERR_PTR(ret);
    657}
    658
    659static struct configfs_group_operations nvmet_namespaces_group_ops = {
    660	.make_group		= nvmet_ns_make,
    661};
    662
    663static const struct config_item_type nvmet_namespaces_type = {
    664	.ct_group_ops		= &nvmet_namespaces_group_ops,
    665	.ct_owner		= THIS_MODULE,
    666};
    667
    668#ifdef CONFIG_NVME_TARGET_PASSTHRU
    669
    670static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
    671		char *page)
    672{
    673	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
    674
    675	return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
    676}
    677
    678static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
    679		const char *page, size_t count)
    680{
    681	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
    682	size_t len;
    683	int ret;
    684
    685	mutex_lock(&subsys->lock);
    686
    687	ret = -EBUSY;
    688	if (subsys->passthru_ctrl)
    689		goto out_unlock;
    690
    691	ret = -EINVAL;
    692	len = strcspn(page, "\n");
    693	if (!len)
    694		goto out_unlock;
    695
    696	kfree(subsys->passthru_ctrl_path);
    697	ret = -ENOMEM;
    698	subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
    699	if (!subsys->passthru_ctrl_path)
    700		goto out_unlock;
    701
    702	mutex_unlock(&subsys->lock);
    703
    704	return count;
    705out_unlock:
    706	mutex_unlock(&subsys->lock);
    707	return ret;
    708}
    709CONFIGFS_ATTR(nvmet_passthru_, device_path);
    710
    711static ssize_t nvmet_passthru_enable_show(struct config_item *item,
    712		char *page)
    713{
    714	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
    715
    716	return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
    717}
    718
    719static ssize_t nvmet_passthru_enable_store(struct config_item *item,
    720		const char *page, size_t count)
    721{
    722	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
    723	bool enable;
    724	int ret = 0;
    725
    726	if (strtobool(page, &enable))
    727		return -EINVAL;
    728
    729	if (enable)
    730		ret = nvmet_passthru_ctrl_enable(subsys);
    731	else
    732		nvmet_passthru_ctrl_disable(subsys);
    733
    734	return ret ? ret : count;
    735}
    736CONFIGFS_ATTR(nvmet_passthru_, enable);
    737
    738static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
    739		char *page)
    740{
    741	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
    742}
    743
    744static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
    745		const char *page, size_t count)
    746{
    747	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
    748	unsigned int timeout;
    749
    750	if (kstrtouint(page, 0, &timeout))
    751		return -EINVAL;
    752	subsys->admin_timeout = timeout;
    753	return count;
    754}
    755CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);
    756
    757static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
    758		char *page)
    759{
    760	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout);
    761}
    762
    763static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
    764		const char *page, size_t count)
    765{
    766	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
    767	unsigned int timeout;
    768
    769	if (kstrtouint(page, 0, &timeout))
    770		return -EINVAL;
    771	subsys->io_timeout = timeout;
    772	return count;
    773}
    774CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
    775
    776static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
    777		char *page)
    778{
    779	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
    780}
    781
    782static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
    783		const char *page, size_t count)
    784{
    785	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
    786	unsigned int clear_ids;
    787
    788	if (kstrtouint(page, 0, &clear_ids))
    789		return -EINVAL;
    790	subsys->clear_ids = clear_ids;
    791	return count;
    792}
    793CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
    794
    795static struct configfs_attribute *nvmet_passthru_attrs[] = {
    796	&nvmet_passthru_attr_device_path,
    797	&nvmet_passthru_attr_enable,
    798	&nvmet_passthru_attr_admin_timeout,
    799	&nvmet_passthru_attr_io_timeout,
    800	&nvmet_passthru_attr_clear_ids,
    801	NULL,
    802};
    803
    804static const struct config_item_type nvmet_passthru_type = {
    805	.ct_attrs		= nvmet_passthru_attrs,
    806	.ct_owner		= THIS_MODULE,
    807};
    808
    809static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
    810{
    811	config_group_init_type_name(&subsys->passthru_group,
    812				    "passthru", &nvmet_passthru_type);
    813	configfs_add_default_group(&subsys->passthru_group,
    814				   &subsys->group);
    815}
    816
    817#else /* CONFIG_NVME_TARGET_PASSTHRU */
    818
    819static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
    820{
    821}
    822
    823#endif /* CONFIG_NVME_TARGET_PASSTHRU */
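/*
 * With CONFIG_NVME_TARGET_PASSTHRU, every subsystem carries a passthru/
 * default group whose files map to the attributes above; a rough setup
 * sketch (the device path is illustrative):
 *
 *	echo /dev/nvme0 > passthru/device_path
 *	echo 1          > passthru/enable
 *
 * device_path is rejected with -EBUSY once the passthru controller is
 * enabled; admin_timeout, io_timeout and clear_ids tune how commands are
 * forwarded to that controller.
 */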
    824
    825static int nvmet_port_subsys_allow_link(struct config_item *parent,
    826		struct config_item *target)
    827{
    828	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
    829	struct nvmet_subsys *subsys;
    830	struct nvmet_subsys_link *link, *p;
    831	int ret;
    832
    833	if (target->ci_type != &nvmet_subsys_type) {
     834		pr_err("can only link subsystems into the subsystems dir!\n");
    835		return -EINVAL;
    836	}
    837	subsys = to_subsys(target);
    838	link = kmalloc(sizeof(*link), GFP_KERNEL);
    839	if (!link)
    840		return -ENOMEM;
    841	link->subsys = subsys;
    842
    843	down_write(&nvmet_config_sem);
    844	ret = -EEXIST;
    845	list_for_each_entry(p, &port->subsystems, entry) {
    846		if (p->subsys == subsys)
    847			goto out_free_link;
    848	}
    849
    850	if (list_empty(&port->subsystems)) {
    851		ret = nvmet_enable_port(port);
    852		if (ret)
    853			goto out_free_link;
    854	}
    855
    856	list_add_tail(&link->entry, &port->subsystems);
    857	nvmet_port_disc_changed(port, subsys);
    858
    859	up_write(&nvmet_config_sem);
    860	return 0;
    861
    862out_free_link:
    863	up_write(&nvmet_config_sem);
    864	kfree(link);
    865	return ret;
    866}
    867
    868static void nvmet_port_subsys_drop_link(struct config_item *parent,
    869		struct config_item *target)
    870{
    871	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
    872	struct nvmet_subsys *subsys = to_subsys(target);
    873	struct nvmet_subsys_link *p;
    874
    875	down_write(&nvmet_config_sem);
    876	list_for_each_entry(p, &port->subsystems, entry) {
    877		if (p->subsys == subsys)
    878			goto found;
    879	}
    880	up_write(&nvmet_config_sem);
    881	return;
    882
    883found:
    884	list_del(&p->entry);
    885	nvmet_port_del_ctrls(port, subsys);
    886	nvmet_port_disc_changed(port, subsys);
    887
    888	if (list_empty(&port->subsystems))
    889		nvmet_disable_port(port);
    890	up_write(&nvmet_config_sem);
    891	kfree(p);
    892}
    893
    894static struct configfs_item_operations nvmet_port_subsys_item_ops = {
    895	.allow_link		= nvmet_port_subsys_allow_link,
    896	.drop_link		= nvmet_port_subsys_drop_link,
    897};
    898
    899static const struct config_item_type nvmet_port_subsys_type = {
    900	.ct_item_ops		= &nvmet_port_subsys_item_ops,
    901	.ct_owner		= THIS_MODULE,
    902};
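/*
 * A subsystem is exposed on a port by symlinking it into the port's
 * subsystems/ directory; the first link enables the port and removing the
 * last one disables it again (see allow_link/drop_link above).  Sketch
 * with an illustrative NQN and port id, assuming configfs is mounted at
 * /sys/kernel/config:
 *
 *	ln -s /sys/kernel/config/nvmet/subsystems/testnqn \
 *	      /sys/kernel/config/nvmet/ports/1/subsystems/testnqn
 */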
    903
    904static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
    905		struct config_item *target)
    906{
    907	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
    908	struct nvmet_host *host;
    909	struct nvmet_host_link *link, *p;
    910	int ret;
    911
    912	if (target->ci_type != &nvmet_host_type) {
    913		pr_err("can only link hosts into the allowed_hosts directory!\n");
    914		return -EINVAL;
    915	}
    916
    917	host = to_host(target);
    918	link = kmalloc(sizeof(*link), GFP_KERNEL);
    919	if (!link)
    920		return -ENOMEM;
    921	link->host = host;
    922
    923	down_write(&nvmet_config_sem);
    924	ret = -EINVAL;
    925	if (subsys->allow_any_host) {
    926		pr_err("can't add hosts when allow_any_host is set!\n");
    927		goto out_free_link;
    928	}
    929
    930	ret = -EEXIST;
    931	list_for_each_entry(p, &subsys->hosts, entry) {
    932		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
    933			goto out_free_link;
    934	}
    935	list_add_tail(&link->entry, &subsys->hosts);
    936	nvmet_subsys_disc_changed(subsys, host);
    937
    938	up_write(&nvmet_config_sem);
    939	return 0;
    940out_free_link:
    941	up_write(&nvmet_config_sem);
    942	kfree(link);
    943	return ret;
    944}
    945
    946static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
    947		struct config_item *target)
    948{
    949	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
    950	struct nvmet_host *host = to_host(target);
    951	struct nvmet_host_link *p;
    952
    953	down_write(&nvmet_config_sem);
    954	list_for_each_entry(p, &subsys->hosts, entry) {
    955		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
    956			goto found;
    957	}
    958	up_write(&nvmet_config_sem);
    959	return;
    960
    961found:
    962	list_del(&p->entry);
    963	nvmet_subsys_disc_changed(subsys, host);
    964
    965	up_write(&nvmet_config_sem);
    966	kfree(p);
    967}
    968
    969static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
    970	.allow_link		= nvmet_allowed_hosts_allow_link,
    971	.drop_link		= nvmet_allowed_hosts_drop_link,
    972};
    973
    974static const struct config_item_type nvmet_allowed_hosts_type = {
    975	.ct_item_ops		= &nvmet_allowed_hosts_item_ops,
    976	.ct_owner		= THIS_MODULE,
    977};
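/*
 * Host-based access control works the same way: host NQNs created under
 * the top-level hosts/ directory are symlinked into a subsystem's
 * allowed_hosts/ directory, and links are only accepted while
 * attr_allow_any_host is 0 (see the store handler below).  Sketch with an
 * illustrative host NQN:
 *
 *	mkdir /sys/kernel/config/nvmet/hosts/hostnqn-example
 *	ln -s /sys/kernel/config/nvmet/hosts/hostnqn-example \
 *	      /sys/kernel/config/nvmet/subsystems/testnqn/allowed_hosts/hostnqn-example
 */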
    978
    979static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
    980		char *page)
    981{
    982	return snprintf(page, PAGE_SIZE, "%d\n",
    983		to_subsys(item)->allow_any_host);
    984}
    985
    986static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
    987		const char *page, size_t count)
    988{
    989	struct nvmet_subsys *subsys = to_subsys(item);
    990	bool allow_any_host;
    991	int ret = 0;
    992
    993	if (strtobool(page, &allow_any_host))
    994		return -EINVAL;
    995
    996	down_write(&nvmet_config_sem);
    997	if (allow_any_host && !list_empty(&subsys->hosts)) {
    998		pr_err("Can't set allow_any_host when explicit hosts are set!\n");
    999		ret = -EINVAL;
   1000		goto out_unlock;
   1001	}
   1002
   1003	if (subsys->allow_any_host != allow_any_host) {
   1004		subsys->allow_any_host = allow_any_host;
   1005		nvmet_subsys_disc_changed(subsys, NULL);
   1006	}
   1007
   1008out_unlock:
   1009	up_write(&nvmet_config_sem);
   1010	return ret ? ret : count;
   1011}
   1012
   1013CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
   1014
   1015static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
   1016					      char *page)
   1017{
   1018	struct nvmet_subsys *subsys = to_subsys(item);
   1019
   1020	if (NVME_TERTIARY(subsys->ver))
   1021		return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
   1022				NVME_MAJOR(subsys->ver),
   1023				NVME_MINOR(subsys->ver),
   1024				NVME_TERTIARY(subsys->ver));
   1025
   1026	return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
   1027			NVME_MAJOR(subsys->ver),
   1028			NVME_MINOR(subsys->ver));
   1029}
   1030
   1031static ssize_t
   1032nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
   1033		const char *page, size_t count)
   1034{
   1035	int major, minor, tertiary = 0;
   1036	int ret;
   1037
   1038	if (subsys->subsys_discovered) {
   1039		if (NVME_TERTIARY(subsys->ver))
   1040			pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
   1041			       NVME_MAJOR(subsys->ver),
   1042			       NVME_MINOR(subsys->ver),
   1043			       NVME_TERTIARY(subsys->ver));
   1044		else
   1045			pr_err("Can't set version number. %llu.%llu is already assigned\n",
   1046			       NVME_MAJOR(subsys->ver),
   1047			       NVME_MINOR(subsys->ver));
   1048		return -EINVAL;
   1049	}
   1050
   1051	/* passthru subsystems use the underlying controller's version */
   1052	if (nvmet_is_passthru_subsys(subsys))
   1053		return -EINVAL;
   1054
   1055	ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
   1056	if (ret != 2 && ret != 3)
   1057		return -EINVAL;
   1058
   1059	subsys->ver = NVME_VS(major, minor, tertiary);
   1060
   1061	return count;
   1062}
   1063
   1064static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
   1065					       const char *page, size_t count)
   1066{
   1067	struct nvmet_subsys *subsys = to_subsys(item);
   1068	ssize_t ret;
   1069
   1070	down_write(&nvmet_config_sem);
   1071	mutex_lock(&subsys->lock);
   1072	ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
   1073	mutex_unlock(&subsys->lock);
   1074	up_write(&nvmet_config_sem);
   1075
   1076	return ret;
   1077}
   1078CONFIGFS_ATTR(nvmet_subsys_, attr_version);
   1079
   1080/* See Section 1.5 of NVMe 1.4 */
   1081static bool nvmet_is_ascii(const char c)
   1082{
   1083	return c >= 0x20 && c <= 0x7e;
   1084}
   1085
   1086static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
   1087					     char *page)
   1088{
   1089	struct nvmet_subsys *subsys = to_subsys(item);
   1090
   1091	return snprintf(page, PAGE_SIZE, "%.*s\n",
   1092			NVMET_SN_MAX_SIZE, subsys->serial);
   1093}
   1094
   1095static ssize_t
   1096nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
   1097		const char *page, size_t count)
   1098{
   1099	int pos, len = strcspn(page, "\n");
   1100
   1101	if (subsys->subsys_discovered) {
   1102		pr_err("Can't set serial number. %s is already assigned\n",
   1103		       subsys->serial);
   1104		return -EINVAL;
   1105	}
   1106
   1107	if (!len || len > NVMET_SN_MAX_SIZE) {
   1108		pr_err("Serial Number can not be empty or exceed %d Bytes\n",
   1109		       NVMET_SN_MAX_SIZE);
   1110		return -EINVAL;
   1111	}
   1112
   1113	for (pos = 0; pos < len; pos++) {
   1114		if (!nvmet_is_ascii(page[pos])) {
    1115			pr_err("Serial Number must contain only ASCII characters\n");
   1116			return -EINVAL;
   1117		}
   1118	}
   1119
   1120	memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');
   1121
   1122	return count;
   1123}
   1124
   1125static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
   1126					      const char *page, size_t count)
   1127{
   1128	struct nvmet_subsys *subsys = to_subsys(item);
   1129	ssize_t ret;
   1130
   1131	down_write(&nvmet_config_sem);
   1132	mutex_lock(&subsys->lock);
   1133	ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
   1134	mutex_unlock(&subsys->lock);
   1135	up_write(&nvmet_config_sem);
   1136
   1137	return ret;
   1138}
   1139CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
   1140
   1141static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
   1142						 char *page)
   1143{
   1144	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
   1145}
   1146
   1147static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
   1148						  const char *page, size_t cnt)
   1149{
   1150	u16 cntlid_min;
   1151
   1152	if (sscanf(page, "%hu\n", &cntlid_min) != 1)
   1153		return -EINVAL;
   1154
   1155	if (cntlid_min == 0)
   1156		return -EINVAL;
   1157
   1158	down_write(&nvmet_config_sem);
   1159	if (cntlid_min >= to_subsys(item)->cntlid_max)
   1160		goto out_unlock;
   1161	to_subsys(item)->cntlid_min = cntlid_min;
   1162	up_write(&nvmet_config_sem);
   1163	return cnt;
   1164
   1165out_unlock:
   1166	up_write(&nvmet_config_sem);
   1167	return -EINVAL;
   1168}
   1169CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
   1170
   1171static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
   1172						 char *page)
   1173{
   1174	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
   1175}
   1176
   1177static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
   1178						  const char *page, size_t cnt)
   1179{
   1180	u16 cntlid_max;
   1181
   1182	if (sscanf(page, "%hu\n", &cntlid_max) != 1)
   1183		return -EINVAL;
   1184
   1185	if (cntlid_max == 0)
   1186		return -EINVAL;
   1187
   1188	down_write(&nvmet_config_sem);
   1189	if (cntlid_max <= to_subsys(item)->cntlid_min)
   1190		goto out_unlock;
   1191	to_subsys(item)->cntlid_max = cntlid_max;
   1192	up_write(&nvmet_config_sem);
   1193	return cnt;
   1194
   1195out_unlock:
   1196	up_write(&nvmet_config_sem);
   1197	return -EINVAL;
   1198}
   1199CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
   1200
   1201static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
   1202					    char *page)
   1203{
   1204	struct nvmet_subsys *subsys = to_subsys(item);
   1205
   1206	return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
   1207}
   1208
   1209static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
   1210		const char *page, size_t count)
   1211{
   1212	int pos = 0, len;
   1213
   1214	if (subsys->subsys_discovered) {
   1215		pr_err("Can't set model number. %s is already assigned\n",
   1216		       subsys->model_number);
   1217		return -EINVAL;
   1218	}
   1219
   1220	len = strcspn(page, "\n");
   1221	if (!len)
   1222		return -EINVAL;
   1223
   1224	if (len > NVMET_MN_MAX_SIZE) {
   1225		pr_err("Model number size can not exceed %d Bytes\n",
   1226		       NVMET_MN_MAX_SIZE);
   1227		return -EINVAL;
   1228	}
   1229
   1230	for (pos = 0; pos < len; pos++) {
   1231		if (!nvmet_is_ascii(page[pos]))
   1232			return -EINVAL;
   1233	}
   1234
   1235	subsys->model_number = kmemdup_nul(page, len, GFP_KERNEL);
   1236	if (!subsys->model_number)
   1237		return -ENOMEM;
   1238	return count;
   1239}
   1240
   1241static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
   1242					     const char *page, size_t count)
   1243{
   1244	struct nvmet_subsys *subsys = to_subsys(item);
   1245	ssize_t ret;
   1246
   1247	down_write(&nvmet_config_sem);
   1248	mutex_lock(&subsys->lock);
   1249	ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
   1250	mutex_unlock(&subsys->lock);
   1251	up_write(&nvmet_config_sem);
   1252
   1253	return ret;
   1254}
   1255CONFIGFS_ATTR(nvmet_subsys_, attr_model);
   1256
   1257#ifdef CONFIG_BLK_DEV_INTEGRITY
   1258static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
   1259						char *page)
   1260{
   1261	return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
   1262}
   1263
   1264static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
   1265						 const char *page, size_t count)
   1266{
   1267	struct nvmet_subsys *subsys = to_subsys(item);
   1268	bool pi_enable;
   1269
   1270	if (strtobool(page, &pi_enable))
   1271		return -EINVAL;
   1272
   1273	subsys->pi_support = pi_enable;
   1274	return count;
   1275}
   1276CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
   1277#endif
   1278
   1279static struct configfs_attribute *nvmet_subsys_attrs[] = {
   1280	&nvmet_subsys_attr_attr_allow_any_host,
   1281	&nvmet_subsys_attr_attr_version,
   1282	&nvmet_subsys_attr_attr_serial,
   1283	&nvmet_subsys_attr_attr_cntlid_min,
   1284	&nvmet_subsys_attr_attr_cntlid_max,
   1285	&nvmet_subsys_attr_attr_model,
   1286#ifdef CONFIG_BLK_DEV_INTEGRITY
   1287	&nvmet_subsys_attr_attr_pi_enable,
   1288#endif
   1289	NULL,
   1290};
   1291
   1292/*
   1293 * Subsystem structures & folder operation functions below
   1294 */
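/*
 * A subsystem is created by making a directory named after its NQN under
 * the top-level subsystems/ group; nvmet_subsys_make() below populates it
 * with the namespaces/ and allowed_hosts/ default groups (plus passthru/
 * when that support is built in).  Minimal sketch with an illustrative
 * NQN:
 *
 *	mkdir /sys/kernel/config/nvmet/subsystems/testnqn
 *	echo 1 > /sys/kernel/config/nvmet/subsystems/testnqn/attr_allow_any_host
 */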
   1295static void nvmet_subsys_release(struct config_item *item)
   1296{
   1297	struct nvmet_subsys *subsys = to_subsys(item);
   1298
   1299	nvmet_subsys_del_ctrls(subsys);
   1300	nvmet_subsys_put(subsys);
   1301}
   1302
   1303static struct configfs_item_operations nvmet_subsys_item_ops = {
   1304	.release		= nvmet_subsys_release,
   1305};
   1306
   1307static const struct config_item_type nvmet_subsys_type = {
   1308	.ct_item_ops		= &nvmet_subsys_item_ops,
   1309	.ct_attrs		= nvmet_subsys_attrs,
   1310	.ct_owner		= THIS_MODULE,
   1311};
   1312
   1313static struct config_group *nvmet_subsys_make(struct config_group *group,
   1314		const char *name)
   1315{
   1316	struct nvmet_subsys *subsys;
   1317
   1318	if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
   1319		pr_err("can't create discovery subsystem through configfs\n");
   1320		return ERR_PTR(-EINVAL);
   1321	}
   1322
   1323	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
   1324	if (IS_ERR(subsys))
   1325		return ERR_CAST(subsys);
   1326
   1327	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);
   1328
   1329	config_group_init_type_name(&subsys->namespaces_group,
   1330			"namespaces", &nvmet_namespaces_type);
   1331	configfs_add_default_group(&subsys->namespaces_group, &subsys->group);
   1332
   1333	config_group_init_type_name(&subsys->allowed_hosts_group,
   1334			"allowed_hosts", &nvmet_allowed_hosts_type);
   1335	configfs_add_default_group(&subsys->allowed_hosts_group,
   1336			&subsys->group);
   1337
   1338	nvmet_add_passthru_group(subsys);
   1339
   1340	return &subsys->group;
   1341}
   1342
   1343static struct configfs_group_operations nvmet_subsystems_group_ops = {
   1344	.make_group		= nvmet_subsys_make,
   1345};
   1346
   1347static const struct config_item_type nvmet_subsystems_type = {
   1348	.ct_group_ops		= &nvmet_subsystems_group_ops,
   1349	.ct_owner		= THIS_MODULE,
   1350};
   1351
   1352static ssize_t nvmet_referral_enable_show(struct config_item *item,
   1353		char *page)
   1354{
   1355	return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
   1356}
   1357
   1358static ssize_t nvmet_referral_enable_store(struct config_item *item,
   1359		const char *page, size_t count)
   1360{
   1361	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
   1362	struct nvmet_port *port = to_nvmet_port(item);
   1363	bool enable;
   1364
   1365	if (strtobool(page, &enable))
   1366		goto inval;
   1367
   1368	if (enable)
   1369		nvmet_referral_enable(parent, port);
   1370	else
   1371		nvmet_referral_disable(parent, port);
   1372
   1373	return count;
   1374inval:
   1375	pr_err("Invalid value '%s' for enable\n", page);
   1376	return -EINVAL;
   1377}
   1378
   1379CONFIGFS_ATTR(nvmet_referral_, enable);
   1380
   1381/*
   1382 * Discovery Service subsystem definitions
   1383 */
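/*
 * A referral advertises an additional discovery log entry on a port: it is
 * a directory created under <port>/referrals/, carries the same address
 * attributes as a port, and is published once its enable file is set.
 * Rough sketch (names and addresses illustrative):
 *
 *	mkdir referrals/peer0
 *	echo tcp      > referrals/peer0/addr_trtype
 *	echo ipv4     > referrals/peer0/addr_adrfam
 *	echo 10.0.0.2 > referrals/peer0/addr_traddr
 *	echo 4420     > referrals/peer0/addr_trsvcid
 *	echo 1        > referrals/peer0/enable
 */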
   1384static struct configfs_attribute *nvmet_referral_attrs[] = {
   1385	&nvmet_attr_addr_adrfam,
   1386	&nvmet_attr_addr_portid,
   1387	&nvmet_attr_addr_treq,
   1388	&nvmet_attr_addr_traddr,
   1389	&nvmet_attr_addr_trsvcid,
   1390	&nvmet_attr_addr_trtype,
   1391	&nvmet_referral_attr_enable,
   1392	NULL,
   1393};
   1394
   1395static void nvmet_referral_notify(struct config_group *group,
   1396		struct config_item *item)
   1397{
   1398	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
   1399	struct nvmet_port *port = to_nvmet_port(item);
   1400
   1401	nvmet_referral_disable(parent, port);
   1402}
   1403
   1404static void nvmet_referral_release(struct config_item *item)
   1405{
   1406	struct nvmet_port *port = to_nvmet_port(item);
   1407
   1408	kfree(port);
   1409}
   1410
   1411static struct configfs_item_operations nvmet_referral_item_ops = {
   1412	.release	= nvmet_referral_release,
   1413};
   1414
   1415static const struct config_item_type nvmet_referral_type = {
   1416	.ct_owner	= THIS_MODULE,
   1417	.ct_attrs	= nvmet_referral_attrs,
   1418	.ct_item_ops	= &nvmet_referral_item_ops,
   1419};
   1420
   1421static struct config_group *nvmet_referral_make(
   1422		struct config_group *group, const char *name)
   1423{
   1424	struct nvmet_port *port;
   1425
   1426	port = kzalloc(sizeof(*port), GFP_KERNEL);
   1427	if (!port)
   1428		return ERR_PTR(-ENOMEM);
   1429
   1430	INIT_LIST_HEAD(&port->entry);
   1431	config_group_init_type_name(&port->group, name, &nvmet_referral_type);
   1432
   1433	return &port->group;
   1434}
   1435
   1436static struct configfs_group_operations nvmet_referral_group_ops = {
   1437	.make_group		= nvmet_referral_make,
   1438	.disconnect_notify	= nvmet_referral_notify,
   1439};
   1440
   1441static const struct config_item_type nvmet_referrals_type = {
   1442	.ct_owner	= THIS_MODULE,
   1443	.ct_group_ops	= &nvmet_referral_group_ops,
   1444};
   1445
   1446static struct nvmet_type_name_map nvmet_ana_state[] = {
   1447	{ NVME_ANA_OPTIMIZED,		"optimized" },
   1448	{ NVME_ANA_NONOPTIMIZED,	"non-optimized" },
   1449	{ NVME_ANA_INACCESSIBLE,	"inaccessible" },
   1450	{ NVME_ANA_PERSISTENT_LOSS,	"persistent-loss" },
   1451	{ NVME_ANA_CHANGE,		"change" },
   1452};
   1453
   1454static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
   1455		char *page)
   1456{
   1457	struct nvmet_ana_group *grp = to_ana_group(item);
   1458	enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
   1459	int i;
   1460
   1461	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
   1462		if (state == nvmet_ana_state[i].type)
   1463			return sprintf(page, "%s\n", nvmet_ana_state[i].name);
   1464	}
   1465
   1466	return sprintf(page, "\n");
   1467}
   1468
   1469static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
   1470		const char *page, size_t count)
   1471{
   1472	struct nvmet_ana_group *grp = to_ana_group(item);
   1473	enum nvme_ana_state *ana_state = grp->port->ana_state;
   1474	int i;
   1475
   1476	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
   1477		if (sysfs_streq(page, nvmet_ana_state[i].name))
   1478			goto found;
   1479	}
   1480
   1481	pr_err("Invalid value '%s' for ana_state\n", page);
   1482	return -EINVAL;
   1483
   1484found:
   1485	down_write(&nvmet_ana_sem);
   1486	ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
   1487	nvmet_ana_chgcnt++;
   1488	up_write(&nvmet_ana_sem);
   1489	nvmet_port_send_ana_event(grp->port);
   1490	return count;
   1491}
   1492
   1493CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
   1494
   1495static struct configfs_attribute *nvmet_ana_group_attrs[] = {
   1496	&nvmet_ana_group_attr_ana_state,
   1497	NULL,
   1498};
   1499
   1500static void nvmet_ana_group_release(struct config_item *item)
   1501{
   1502	struct nvmet_ana_group *grp = to_ana_group(item);
   1503
   1504	if (grp == &grp->port->ana_default_group)
   1505		return;
   1506
   1507	down_write(&nvmet_ana_sem);
   1508	grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
   1509	nvmet_ana_group_enabled[grp->grpid]--;
   1510	up_write(&nvmet_ana_sem);
   1511
   1512	nvmet_port_send_ana_event(grp->port);
   1513	kfree(grp);
   1514}
   1515
   1516static struct configfs_item_operations nvmet_ana_group_item_ops = {
   1517	.release		= nvmet_ana_group_release,
   1518};
   1519
   1520static const struct config_item_type nvmet_ana_group_type = {
   1521	.ct_item_ops		= &nvmet_ana_group_item_ops,
   1522	.ct_attrs		= nvmet_ana_group_attrs,
   1523	.ct_owner		= THIS_MODULE,
   1524};
   1525
   1526static struct config_group *nvmet_ana_groups_make_group(
   1527		struct config_group *group, const char *name)
   1528{
   1529	struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
   1530	struct nvmet_ana_group *grp;
   1531	u32 grpid;
   1532	int ret;
   1533
   1534	ret = kstrtou32(name, 0, &grpid);
   1535	if (ret)
   1536		goto out;
   1537
   1538	ret = -EINVAL;
   1539	if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
   1540		goto out;
   1541
   1542	ret = -ENOMEM;
   1543	grp = kzalloc(sizeof(*grp), GFP_KERNEL);
   1544	if (!grp)
   1545		goto out;
   1546	grp->port = port;
   1547	grp->grpid = grpid;
   1548
   1549	down_write(&nvmet_ana_sem);
   1550	nvmet_ana_group_enabled[grpid]++;
   1551	up_write(&nvmet_ana_sem);
   1552
   1553	nvmet_port_send_ana_event(grp->port);
   1554
   1555	config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
   1556	return &grp->group;
   1557out:
   1558	return ERR_PTR(ret);
   1559}
   1560
   1561static struct configfs_group_operations nvmet_ana_groups_group_ops = {
   1562	.make_group		= nvmet_ana_groups_make_group,
   1563};
   1564
   1565static const struct config_item_type nvmet_ana_groups_type = {
   1566	.ct_group_ops		= &nvmet_ana_groups_group_ops,
   1567	.ct_owner		= THIS_MODULE,
   1568};
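/*
 * Additional ANA groups are directories created under
 * <port>/ana_groups/<grpid> with 1 < grpid <= NVMET_MAX_ANAGRPS; group 1
 * (NVMET_DEFAULT_ANA_GRPID) is the always-present default group set up in
 * nvmet_ports_make().  Each group's state is driven through its ana_state
 * file, and a namespace joins a group by writing the grpid to its
 * ana_grpid file.  Rough sketch, relative to a port directory:
 *
 *	mkdir ana_groups/2
 *	echo non-optimized > ana_groups/2/ana_state
 */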
   1569
   1570/*
   1571 * Ports definitions.
   1572 */
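/*
 * A port is a directory created under the top-level ports/ group, named by
 * its numeric port id, e.g. "mkdir /sys/kernel/config/nvmet/ports/1"
 * (mount point illustrative).  nvmet_ports_make() below populates it with
 * the subsystems/, referrals/ and ana_groups/ default groups next to the
 * addr_* and param_* attribute files defined earlier in this file.
 */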
   1573static void nvmet_port_release(struct config_item *item)
   1574{
   1575	struct nvmet_port *port = to_nvmet_port(item);
   1576
   1577	/* Let inflight controllers teardown complete */
   1578	flush_workqueue(nvmet_wq);
   1579	list_del(&port->global_entry);
   1580
   1581	kfree(port->ana_state);
   1582	kfree(port);
   1583}
   1584
   1585static struct configfs_attribute *nvmet_port_attrs[] = {
   1586	&nvmet_attr_addr_adrfam,
   1587	&nvmet_attr_addr_treq,
   1588	&nvmet_attr_addr_traddr,
   1589	&nvmet_attr_addr_trsvcid,
   1590	&nvmet_attr_addr_trtype,
   1591	&nvmet_attr_param_inline_data_size,
   1592#ifdef CONFIG_BLK_DEV_INTEGRITY
   1593	&nvmet_attr_param_pi_enable,
   1594#endif
   1595	NULL,
   1596};
   1597
   1598static struct configfs_item_operations nvmet_port_item_ops = {
   1599	.release		= nvmet_port_release,
   1600};
   1601
   1602static const struct config_item_type nvmet_port_type = {
   1603	.ct_attrs		= nvmet_port_attrs,
   1604	.ct_item_ops		= &nvmet_port_item_ops,
   1605	.ct_owner		= THIS_MODULE,
   1606};
   1607
   1608static struct config_group *nvmet_ports_make(struct config_group *group,
   1609		const char *name)
   1610{
   1611	struct nvmet_port *port;
   1612	u16 portid;
   1613	u32 i;
   1614
   1615	if (kstrtou16(name, 0, &portid))
   1616		return ERR_PTR(-EINVAL);
   1617
   1618	port = kzalloc(sizeof(*port), GFP_KERNEL);
   1619	if (!port)
   1620		return ERR_PTR(-ENOMEM);
   1621
   1622	port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
   1623			sizeof(*port->ana_state), GFP_KERNEL);
   1624	if (!port->ana_state) {
   1625		kfree(port);
   1626		return ERR_PTR(-ENOMEM);
   1627	}
   1628
   1629	for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
   1630		if (i == NVMET_DEFAULT_ANA_GRPID)
   1631			port->ana_state[1] = NVME_ANA_OPTIMIZED;
   1632		else
   1633			port->ana_state[i] = NVME_ANA_INACCESSIBLE;
   1634	}
   1635
   1636	list_add(&port->global_entry, &nvmet_ports_list);
   1637
   1638	INIT_LIST_HEAD(&port->entry);
   1639	INIT_LIST_HEAD(&port->subsystems);
   1640	INIT_LIST_HEAD(&port->referrals);
   1641	port->inline_data_size = -1;	/* < 0 == let the transport choose */
   1642
   1643	port->disc_addr.portid = cpu_to_le16(portid);
   1644	port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
   1645	port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
   1646	config_group_init_type_name(&port->group, name, &nvmet_port_type);
   1647
   1648	config_group_init_type_name(&port->subsys_group,
   1649			"subsystems", &nvmet_port_subsys_type);
   1650	configfs_add_default_group(&port->subsys_group, &port->group);
   1651
   1652	config_group_init_type_name(&port->referrals_group,
   1653			"referrals", &nvmet_referrals_type);
   1654	configfs_add_default_group(&port->referrals_group, &port->group);
   1655
   1656	config_group_init_type_name(&port->ana_groups_group,
   1657			"ana_groups", &nvmet_ana_groups_type);
   1658	configfs_add_default_group(&port->ana_groups_group, &port->group);
   1659
   1660	port->ana_default_group.port = port;
   1661	port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
   1662	config_group_init_type_name(&port->ana_default_group.group,
   1663			__stringify(NVMET_DEFAULT_ANA_GRPID),
   1664			&nvmet_ana_group_type);
   1665	configfs_add_default_group(&port->ana_default_group.group,
   1666			&port->ana_groups_group);
   1667
   1668	return &port->group;
   1669}
   1670
   1671static struct configfs_group_operations nvmet_ports_group_ops = {
   1672	.make_group		= nvmet_ports_make,
   1673};
   1674
   1675static const struct config_item_type nvmet_ports_type = {
   1676	.ct_group_ops		= &nvmet_ports_group_ops,
   1677	.ct_owner		= THIS_MODULE,
   1678};
   1679
   1680static struct config_group nvmet_subsystems_group;
   1681static struct config_group nvmet_ports_group;
   1682
   1683static void nvmet_host_release(struct config_item *item)
   1684{
   1685	struct nvmet_host *host = to_host(item);
   1686
   1687	kfree(host);
   1688}
   1689
   1690static struct configfs_item_operations nvmet_host_item_ops = {
   1691	.release		= nvmet_host_release,
   1692};
   1693
   1694static const struct config_item_type nvmet_host_type = {
   1695	.ct_item_ops		= &nvmet_host_item_ops,
   1696	.ct_owner		= THIS_MODULE,
   1697};
   1698
   1699static struct config_group *nvmet_hosts_make_group(struct config_group *group,
   1700		const char *name)
   1701{
   1702	struct nvmet_host *host;
   1703
   1704	host = kzalloc(sizeof(*host), GFP_KERNEL);
   1705	if (!host)
   1706		return ERR_PTR(-ENOMEM);
   1707
   1708	config_group_init_type_name(&host->group, name, &nvmet_host_type);
   1709
   1710	return &host->group;
   1711}
   1712
   1713static struct configfs_group_operations nvmet_hosts_group_ops = {
   1714	.make_group		= nvmet_hosts_make_group,
   1715};
   1716
   1717static const struct config_item_type nvmet_hosts_type = {
   1718	.ct_group_ops		= &nvmet_hosts_group_ops,
   1719	.ct_owner		= THIS_MODULE,
   1720};
   1721
   1722static struct config_group nvmet_hosts_group;
   1723
   1724static const struct config_item_type nvmet_root_type = {
   1725	.ct_owner		= THIS_MODULE,
   1726};
   1727
   1728static struct configfs_subsystem nvmet_configfs_subsystem = {
   1729	.su_group = {
   1730		.cg_item = {
   1731			.ci_namebuf	= "nvmet",
   1732			.ci_type	= &nvmet_root_type,
   1733		},
   1734	},
   1735};
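/*
 * nvmet_init_configfs() below registers this subsystem together with its
 * three default groups, so with configfs mounted in the usual place the
 * top of the resulting tree looks roughly like:
 *
 *	/sys/kernel/config/nvmet/
 *		hosts/
 *		ports/
 *		subsystems/
 *
 * Everything else in this file hangs off directories created or symlinked
 * below these three.
 */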
   1736
   1737int __init nvmet_init_configfs(void)
   1738{
   1739	int ret;
   1740
   1741	config_group_init(&nvmet_configfs_subsystem.su_group);
   1742	mutex_init(&nvmet_configfs_subsystem.su_mutex);
   1743
   1744	config_group_init_type_name(&nvmet_subsystems_group,
   1745			"subsystems", &nvmet_subsystems_type);
   1746	configfs_add_default_group(&nvmet_subsystems_group,
   1747			&nvmet_configfs_subsystem.su_group);
   1748
   1749	config_group_init_type_name(&nvmet_ports_group,
   1750			"ports", &nvmet_ports_type);
   1751	configfs_add_default_group(&nvmet_ports_group,
   1752			&nvmet_configfs_subsystem.su_group);
   1753
   1754	config_group_init_type_name(&nvmet_hosts_group,
   1755			"hosts", &nvmet_hosts_type);
   1756	configfs_add_default_group(&nvmet_hosts_group,
   1757			&nvmet_configfs_subsystem.su_group);
   1758
   1759	ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
   1760	if (ret) {
   1761		pr_err("configfs_register_subsystem: %d\n", ret);
   1762		return ret;
   1763	}
   1764
   1765	return 0;
   1766}
   1767
   1768void __exit nvmet_exit_configfs(void)
   1769{
   1770	configfs_unregister_subsystem(&nvmet_configfs_subsystem);
   1771}