cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

target_core_configfs.c (100211B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*******************************************************************************
      3 * Filename:  target_core_configfs.c
      4 *
      5 * This file contains ConfigFS logic for the Generic Target Engine project.
      6 *
      7 * (c) Copyright 2008-2013 Datera, Inc.
      8 *
      9 * Nicholas A. Bellinger <nab@kernel.org>
     10 *
     11 * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
     12 *
     13 ****************************************************************************/
     14
     15#include <linux/module.h>
     16#include <linux/moduleparam.h>
     17#include <generated/utsrelease.h>
     18#include <linux/utsname.h>
     19#include <linux/init.h>
     20#include <linux/fs.h>
     21#include <linux/namei.h>
     22#include <linux/slab.h>
     23#include <linux/types.h>
     24#include <linux/delay.h>
     25#include <linux/unistd.h>
     26#include <linux/string.h>
     27#include <linux/parser.h>
     28#include <linux/syscalls.h>
     29#include <linux/configfs.h>
     30#include <linux/spinlock.h>
     31
     32#include <target/target_core_base.h>
     33#include <target/target_core_backend.h>
     34#include <target/target_core_fabric.h>
     35
     36#include "target_core_internal.h"
     37#include "target_core_alua.h"
     38#include "target_core_pr.h"
     39#include "target_core_rd.h"
     40#include "target_core_xcopy.h"
     41
     42#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
     43static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
     44{									\
     45	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
     46									\
     47	cit->ct_item_ops = _item_ops;					\
     48	cit->ct_group_ops = _group_ops;					\
     49	cit->ct_attrs = _attrs;						\
     50	cit->ct_owner = tb->ops->owner;					\
     51	pr_debug("Setup generic %s\n", __stringify(_name));		\
     52}
     53
     54#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops)			\
     55static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
     56{									\
     57	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
     58									\
     59	cit->ct_item_ops = _item_ops;					\
     60	cit->ct_group_ops = _group_ops;					\
     61	cit->ct_attrs = tb->ops->tb_##_name##_attrs;			\
     62	cit->ct_owner = tb->ops->owner;					\
     63	pr_debug("Setup generic %s\n", __stringify(_name));		\
     64}
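/*
 * Editor's note (illustrative): TB_CIT_SETUP(dev_wwn, NULL, NULL,
 * target_core_dev_wwn_attrs) expands to target_core_setup_dev_wwn_cit(),
 * which fills in tb->tb_dev_wwn_cit with the given ops and attributes.
 * The _DRV variant differs only in pulling ct_attrs from the backend's
 * own tb->ops->tb_<name>_attrs instead of a fixed attribute array.
 */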
     65
     66extern struct t10_alua_lu_gp *default_lu_gp;
     67
     68static LIST_HEAD(g_tf_list);
     69static DEFINE_MUTEX(g_tf_lock);
     70
     71static struct config_group target_core_hbagroup;
     72static struct config_group alua_group;
     73static struct config_group alua_lu_gps_group;
     74
     75static unsigned int target_devices;
     76static DEFINE_MUTEX(target_devices_lock);
     77
     78static inline struct se_hba *
     79item_to_hba(struct config_item *item)
     80{
     81	return container_of(to_config_group(item), struct se_hba, hba_group);
     82}
     83
     84/*
     85 * Attributes for /sys/kernel/config/target/
     86 */
     87static ssize_t target_core_item_version_show(struct config_item *item,
     88		char *page)
     89{
     90	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
     91		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION,
     92		utsname()->sysname, utsname()->machine);
     93}
     94
     95CONFIGFS_ATTR_RO(target_core_item_, version);
     96
     97char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT;
     98static char db_root_stage[DB_ROOT_LEN];
     99
    100static ssize_t target_core_item_dbroot_show(struct config_item *item,
    101					    char *page)
    102{
    103	return sprintf(page, "%s\n", db_root);
    104}
    105
    106static ssize_t target_core_item_dbroot_store(struct config_item *item,
    107					const char *page, size_t count)
    108{
    109	ssize_t read_bytes;
    110	struct file *fp;
    111	ssize_t r = -EINVAL;
    112
    113	mutex_lock(&target_devices_lock);
    114	if (target_devices) {
    115		pr_err("db_root: cannot be changed because it's in use\n");
    116		goto unlock;
    117	}
    118
    119	if (count > (DB_ROOT_LEN - 1)) {
    120		pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n",
    121		       (int)count, DB_ROOT_LEN - 1);
    122		goto unlock;
    123	}
    124
    125	read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
    126	if (!read_bytes)
    127		goto unlock;
    128
    129	if (db_root_stage[read_bytes - 1] == '\n')
    130		db_root_stage[read_bytes - 1] = '\0';
    131
    132	/* validate new db root before accepting it */
    133	fp = filp_open(db_root_stage, O_RDONLY, 0);
    134	if (IS_ERR(fp)) {
    135		pr_err("db_root: cannot open: %s\n", db_root_stage);
    136		goto unlock;
    137	}
    138	if (!S_ISDIR(file_inode(fp)->i_mode)) {
    139		filp_close(fp, NULL);
    140		pr_err("db_root: not a directory: %s\n", db_root_stage);
    141		goto unlock;
    142	}
    143	filp_close(fp, NULL);
    144
    145	strncpy(db_root, db_root_stage, read_bytes);
    146	pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
    147
    148	r = read_bytes;
    149
    150unlock:
    151	mutex_unlock(&target_devices_lock);
    152	return r;
    153}
    154
    155CONFIGFS_ATTR(target_core_item_, dbroot);
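/*
 * Usage sketch (assuming configfs is mounted at /sys/kernel/config):
 * db_root may only be changed while no target devices exist, and the
 * new value must name an existing directory, e.g.:
 *
 *   echo /var/target > /sys/kernel/config/target/dbroot
 */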
    156
    157static struct target_fabric_configfs *target_core_get_fabric(
    158	const char *name)
    159{
    160	struct target_fabric_configfs *tf;
    161
    162	if (!name)
    163		return NULL;
    164
    165	mutex_lock(&g_tf_lock);
    166	list_for_each_entry(tf, &g_tf_list, tf_list) {
    167		const char *cmp_name = tf->tf_ops->fabric_alias;
    168		if (!cmp_name)
    169			cmp_name = tf->tf_ops->fabric_name;
    170		if (!strcmp(cmp_name, name)) {
    171			atomic_inc(&tf->tf_access_cnt);
    172			mutex_unlock(&g_tf_lock);
    173			return tf;
    174		}
    175	}
    176	mutex_unlock(&g_tf_lock);
    177
    178	return NULL;
    179}
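/*
 * On success the returned *tf carries an elevated tf_access_cnt; the
 * matching decrement happens in target_core_deregister_fabric() below.
 */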
    180
    181/*
    182 * Called from struct target_core_group_ops->make_group()
    183 */
    184static struct config_group *target_core_register_fabric(
    185	struct config_group *group,
    186	const char *name)
    187{
    188	struct target_fabric_configfs *tf;
    189	int ret;
    190
    191	pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
    192			" %s\n", group, name);
    193
    194	tf = target_core_get_fabric(name);
    195	if (!tf) {
    196		pr_debug("target_core_register_fabric() trying autoload for %s\n",
    197			 name);
    198
    199		/*
     200		 * Below are some hardcoded request_module() calls to automatically
     201		 * load fabric modules when the following is called:
     202		 *
     203		 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
     204		 *
     205		 * Note that this does not limit which TCM fabric module can be
     206		 * registered, but simply provides autoloading logic for mkdir(2)
     207		 * system calls with known TCM fabric module names.
    208		 */
    209
    210		if (!strncmp(name, "iscsi", 5)) {
    211			/*
    212			 * Automatically load the LIO Target fabric module when the
    213			 * following is called:
    214			 *
    215			 * mkdir -p $CONFIGFS/target/iscsi
    216			 */
    217			ret = request_module("iscsi_target_mod");
    218			if (ret < 0) {
    219				pr_debug("request_module() failed for"
    220				         " iscsi_target_mod.ko: %d\n", ret);
    221				return ERR_PTR(-EINVAL);
    222			}
    223		} else if (!strncmp(name, "loopback", 8)) {
    224			/*
    225			 * Automatically load the tcm_loop fabric module when the
    226			 * following is called:
    227			 *
    228			 * mkdir -p $CONFIGFS/target/loopback
    229			 */
    230			ret = request_module("tcm_loop");
    231			if (ret < 0) {
    232				pr_debug("request_module() failed for"
    233				         " tcm_loop.ko: %d\n", ret);
    234				return ERR_PTR(-EINVAL);
    235			}
    236		}
    237
    238		tf = target_core_get_fabric(name);
    239	}
    240
    241	if (!tf) {
    242		pr_debug("target_core_get_fabric() failed for %s\n",
    243		         name);
    244		return ERR_PTR(-EINVAL);
    245	}
    246	pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
    247			" %s\n", tf->tf_ops->fabric_name);
    248	/*
     249	 * On a successful target_core_get_fabric() lookup, the returned
    250	 * struct target_fabric_configfs *tf will contain a usage reference.
    251	 */
    252	pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
    253			&tf->tf_wwn_cit);
    254
    255	config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);
    256
    257	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
    258			&tf->tf_discovery_cit);
    259	configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group);
    260
    261	pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n",
    262		 config_item_name(&tf->tf_group.cg_item));
    263	return &tf->tf_group;
    264}
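/*
 * Usage sketch: this make_group() callback runs when userspace creates
 * a fabric directory, e.g.:
 *
 *   mkdir -p /sys/kernel/config/target/iscsi
 */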
    265
    266/*
    267 * Called from struct target_core_group_ops->drop_item()
    268 */
    269static void target_core_deregister_fabric(
    270	struct config_group *group,
    271	struct config_item *item)
    272{
    273	struct target_fabric_configfs *tf = container_of(
    274		to_config_group(item), struct target_fabric_configfs, tf_group);
    275
    276	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
    277		" tf list\n", config_item_name(item));
    278
    279	pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
    280			" %s\n", tf->tf_ops->fabric_name);
    281	atomic_dec(&tf->tf_access_cnt);
    282
    283	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
    284			" %s\n", config_item_name(item));
    285
    286	configfs_remove_default_groups(&tf->tf_group);
    287	config_item_put(item);
    288}
    289
    290static struct configfs_group_operations target_core_fabric_group_ops = {
    291	.make_group	= &target_core_register_fabric,
    292	.drop_item	= &target_core_deregister_fabric,
    293};
    294
    295/*
     296 * All item attributes appearing in /sys/kernel/config/target/ appear here.
    297 */
    298static struct configfs_attribute *target_core_fabric_item_attrs[] = {
    299	&target_core_item_attr_version,
    300	&target_core_item_attr_dbroot,
    301	NULL,
    302};
    303
    304/*
    305 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
    306 */
    307static const struct config_item_type target_core_fabrics_item = {
    308	.ct_group_ops	= &target_core_fabric_group_ops,
    309	.ct_attrs	= target_core_fabric_item_attrs,
    310	.ct_owner	= THIS_MODULE,
    311};
    312
    313static struct configfs_subsystem target_core_fabrics = {
    314	.su_group = {
    315		.cg_item = {
    316			.ci_namebuf = "target",
    317			.ci_type = &target_core_fabrics_item,
    318		},
    319	},
    320};
    321
    322int target_depend_item(struct config_item *item)
    323{
    324	return configfs_depend_item(&target_core_fabrics, item);
    325}
    326EXPORT_SYMBOL(target_depend_item);
    327
    328void target_undepend_item(struct config_item *item)
    329{
    330	return configfs_undepend_item(item);
    331}
    332EXPORT_SYMBOL(target_undepend_item);
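/*
 * Editor's note: target_depend_item() pins a configfs item so it cannot
 * be removed via rmdir(2) while in use (e.g. by a persistent reservation);
 * target_undepend_item() releases that pin.
 */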
    333
    334/*##############################################################################
    335// Start functions called by external Target Fabrics Modules
    336//############################################################################*/
    337
    338static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
    339{
    340	if (tfo->fabric_alias) {
    341		if (strlen(tfo->fabric_alias) >= TARGET_FABRIC_NAME_SIZE) {
    342			pr_err("Passed alias: %s exceeds "
    343				"TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_alias);
    344			return -EINVAL;
    345		}
    346	}
    347	if (!tfo->fabric_name) {
    348		pr_err("Missing tfo->fabric_name\n");
    349		return -EINVAL;
    350	}
    351	if (strlen(tfo->fabric_name) >= TARGET_FABRIC_NAME_SIZE) {
    352		pr_err("Passed name: %s exceeds "
    353			"TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_name);
    354		return -EINVAL;
    355	}
    356	if (!tfo->tpg_get_wwn) {
    357		pr_err("Missing tfo->tpg_get_wwn()\n");
    358		return -EINVAL;
    359	}
    360	if (!tfo->tpg_get_tag) {
    361		pr_err("Missing tfo->tpg_get_tag()\n");
    362		return -EINVAL;
    363	}
    364	if (!tfo->tpg_check_demo_mode) {
    365		pr_err("Missing tfo->tpg_check_demo_mode()\n");
    366		return -EINVAL;
    367	}
    368	if (!tfo->tpg_check_demo_mode_cache) {
    369		pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
    370		return -EINVAL;
    371	}
    372	if (!tfo->tpg_check_demo_mode_write_protect) {
    373		pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
    374		return -EINVAL;
    375	}
    376	if (!tfo->tpg_check_prod_mode_write_protect) {
    377		pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
    378		return -EINVAL;
    379	}
    380	if (!tfo->tpg_get_inst_index) {
    381		pr_err("Missing tfo->tpg_get_inst_index()\n");
    382		return -EINVAL;
    383	}
    384	if (!tfo->release_cmd) {
    385		pr_err("Missing tfo->release_cmd()\n");
    386		return -EINVAL;
    387	}
    388	if (!tfo->sess_get_index) {
    389		pr_err("Missing tfo->sess_get_index()\n");
    390		return -EINVAL;
    391	}
    392	if (!tfo->write_pending) {
    393		pr_err("Missing tfo->write_pending()\n");
    394		return -EINVAL;
    395	}
    396	if (!tfo->set_default_node_attributes) {
    397		pr_err("Missing tfo->set_default_node_attributes()\n");
    398		return -EINVAL;
    399	}
    400	if (!tfo->get_cmd_state) {
    401		pr_err("Missing tfo->get_cmd_state()\n");
    402		return -EINVAL;
    403	}
    404	if (!tfo->queue_data_in) {
    405		pr_err("Missing tfo->queue_data_in()\n");
    406		return -EINVAL;
    407	}
    408	if (!tfo->queue_status) {
    409		pr_err("Missing tfo->queue_status()\n");
    410		return -EINVAL;
    411	}
    412	if (!tfo->queue_tm_rsp) {
    413		pr_err("Missing tfo->queue_tm_rsp()\n");
    414		return -EINVAL;
    415	}
    416	if (!tfo->aborted_task) {
    417		pr_err("Missing tfo->aborted_task()\n");
    418		return -EINVAL;
    419	}
    420	if (!tfo->check_stop_free) {
    421		pr_err("Missing tfo->check_stop_free()\n");
    422		return -EINVAL;
    423	}
    424	/*
     425	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn(),
     426	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in the
     427	 * target_core_fabric_configfs.c WWN+TPG group context code.
    428	 */
    429	if (!tfo->fabric_make_wwn) {
    430		pr_err("Missing tfo->fabric_make_wwn()\n");
    431		return -EINVAL;
    432	}
    433	if (!tfo->fabric_drop_wwn) {
    434		pr_err("Missing tfo->fabric_drop_wwn()\n");
    435		return -EINVAL;
    436	}
    437	if (!tfo->fabric_make_tpg) {
    438		pr_err("Missing tfo->fabric_make_tpg()\n");
    439		return -EINVAL;
    440	}
    441	if (!tfo->fabric_drop_tpg) {
    442		pr_err("Missing tfo->fabric_drop_tpg()\n");
    443		return -EINVAL;
    444	}
    445
    446	return 0;
    447}
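/*
 * Summary (editor's note): a fabric module must therefore supply at
 * minimum fabric_name plus the tpg_*, session, I/O-completion and
 * WWN/TPG make/drop callbacks checked above; fabric_alias is optional
 * and is only validated for length when present.
 */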
    448
    449int target_register_template(const struct target_core_fabric_ops *fo)
    450{
    451	struct target_fabric_configfs *tf;
    452	int ret;
    453
    454	ret = target_fabric_tf_ops_check(fo);
    455	if (ret)
    456		return ret;
    457
    458	tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
    459	if (!tf) {
    460		pr_err("%s: could not allocate memory!\n", __func__);
    461		return -ENOMEM;
    462	}
    463
    464	INIT_LIST_HEAD(&tf->tf_list);
    465	atomic_set(&tf->tf_access_cnt, 0);
    466	tf->tf_ops = fo;
    467	target_fabric_setup_cits(tf);
    468
    469	mutex_lock(&g_tf_lock);
    470	list_add_tail(&tf->tf_list, &g_tf_list);
    471	mutex_unlock(&g_tf_lock);
    472
    473	return 0;
    474}
    475EXPORT_SYMBOL(target_register_template);
    476
    477void target_unregister_template(const struct target_core_fabric_ops *fo)
    478{
    479	struct target_fabric_configfs *t;
    480
    481	mutex_lock(&g_tf_lock);
    482	list_for_each_entry(t, &g_tf_list, tf_list) {
    483		if (!strcmp(t->tf_ops->fabric_name, fo->fabric_name)) {
    484			BUG_ON(atomic_read(&t->tf_access_cnt));
    485			list_del(&t->tf_list);
    486			mutex_unlock(&g_tf_lock);
    487			/*
    488			 * Wait for any outstanding fabric se_deve_entry->rcu_head
    489			 * callbacks to complete post kfree_rcu(), before allowing
    490			 * fabric driver unload of TFO->module to proceed.
    491			 */
    492			rcu_barrier();
    493			kfree(t->tf_tpg_base_cit.ct_attrs);
    494			kfree(t);
    495			return;
    496		}
    497	}
    498	mutex_unlock(&g_tf_lock);
    499}
    500EXPORT_SYMBOL(target_unregister_template);
    501
    502/*##############################################################################
    503// Stop functions called by external Target Fabrics Modules
    504//############################################################################*/
    505
    506static inline struct se_dev_attrib *to_attrib(struct config_item *item)
    507{
    508	return container_of(to_config_group(item), struct se_dev_attrib,
    509			da_group);
    510}
    511
    512/* Start functions for struct config_item_type tb_dev_attrib_cit */
    513#define DEF_CONFIGFS_ATTRIB_SHOW(_name)					\
    514static ssize_t _name##_show(struct config_item *item, char *page)	\
    515{									\
    516	return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \
    517}
    518
    519DEF_CONFIGFS_ATTRIB_SHOW(emulate_model_alias);
    520DEF_CONFIGFS_ATTRIB_SHOW(emulate_dpo);
    521DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_write);
    522DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_read);
    523DEF_CONFIGFS_ATTRIB_SHOW(emulate_write_cache);
    524DEF_CONFIGFS_ATTRIB_SHOW(emulate_ua_intlck_ctrl);
    525DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas);
    526DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu);
    527DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws);
    528DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw);
    529DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc);
    530DEF_CONFIGFS_ATTRIB_SHOW(emulate_pr);
    531DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type);
    532DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type);
    533DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify);
    534DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids);
    535DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot);
    536DEF_CONFIGFS_ATTRIB_SHOW(emulate_rest_reord);
    537DEF_CONFIGFS_ATTRIB_SHOW(force_pr_aptpl);
    538DEF_CONFIGFS_ATTRIB_SHOW(hw_block_size);
    539DEF_CONFIGFS_ATTRIB_SHOW(block_size);
    540DEF_CONFIGFS_ATTRIB_SHOW(hw_max_sectors);
    541DEF_CONFIGFS_ATTRIB_SHOW(optimal_sectors);
    542DEF_CONFIGFS_ATTRIB_SHOW(hw_queue_depth);
    543DEF_CONFIGFS_ATTRIB_SHOW(queue_depth);
    544DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count);
    545DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count);
    546DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity);
    547DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment);
    548DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
    549DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
    550
    551#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name)				\
    552static ssize_t _name##_store(struct config_item *item, const char *page,\
    553		size_t count)						\
    554{									\
    555	struct se_dev_attrib *da = to_attrib(item);			\
    556	u32 val;							\
    557	int ret;							\
    558									\
    559	ret = kstrtou32(page, 0, &val);					\
    560	if (ret < 0)							\
    561		return ret;						\
    562	da->_name = val;						\
    563	return count;							\
    564}
    565
    566DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_lba_count);
    567DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_block_desc_count);
    568DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity);
    569DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity_alignment);
    570DEF_CONFIGFS_ATTRIB_STORE_U32(max_write_same_len);
    571
    572#define DEF_CONFIGFS_ATTRIB_STORE_BOOL(_name)				\
    573static ssize_t _name##_store(struct config_item *item, const char *page,	\
    574		size_t count)						\
    575{									\
    576	struct se_dev_attrib *da = to_attrib(item);			\
    577	bool flag;							\
    578	int ret;							\
    579									\
    580	ret = strtobool(page, &flag);					\
    581	if (ret < 0)							\
    582		return ret;						\
    583	da->_name = flag;						\
    584	return count;							\
    585}
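/*
 * Usage sketch (editor's note): strtobool() accepts the usual kstrtobool
 * spellings ("0"/"1", "y"/"n"), so for a configured backstore directory
 * $STORAGE_OBJECT something like the following flips a boolean attribute:
 *
 *   echo 1 > $TARGET/core/$HBA/$STORAGE_OBJECT/attrib/emulate_caw
 */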
    586
    587DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write);
    588DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw);
    589DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc);
    590DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_pr);
    591DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids);
    592DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot);
    593
    594#define DEF_CONFIGFS_ATTRIB_STORE_STUB(_name)				\
    595static ssize_t _name##_store(struct config_item *item, const char *page,\
    596		size_t count)						\
    597{									\
    598	printk_once(KERN_WARNING					\
    599		"ignoring deprecated %s attribute\n",			\
    600		__stringify(_name));					\
    601	return count;							\
    602}
    603
    604DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_dpo);
    605DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_fua_read);
    606
    607static void dev_set_t10_wwn_model_alias(struct se_device *dev)
    608{
    609	const char *configname;
    610
    611	configname = config_item_name(&dev->dev_group.cg_item);
    612	if (strlen(configname) >= INQUIRY_MODEL_LEN) {
    613		pr_warn("dev[%p]: Backstore name '%s' is too long for "
    614			"INQUIRY_MODEL, truncating to 15 characters\n", dev,
    615			configname);
    616	}
    617	/*
    618	 * XXX We can't use sizeof(dev->t10_wwn.model) (INQUIRY_MODEL_LEN + 1)
    619	 * here without potentially breaking existing setups, so continue to
    620	 * truncate one byte shorter than what can be carried in INQUIRY.
    621	 */
    622	strlcpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN);
    623}
    624
    625static ssize_t emulate_model_alias_store(struct config_item *item,
    626		const char *page, size_t count)
    627{
    628	struct se_dev_attrib *da = to_attrib(item);
    629	struct se_device *dev = da->da_dev;
    630	bool flag;
    631	int ret;
    632
    633	if (dev->export_count) {
    634		pr_err("dev[%p]: Unable to change model alias"
    635			" while export_count is %d\n",
    636			dev, dev->export_count);
    637		return -EINVAL;
    638	}
    639
    640	ret = strtobool(page, &flag);
    641	if (ret < 0)
    642		return ret;
    643
    644	BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
    645	if (flag) {
    646		dev_set_t10_wwn_model_alias(dev);
    647	} else {
    648		strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
    649			sizeof(dev->t10_wwn.model));
    650	}
    651	da->emulate_model_alias = flag;
    652	return count;
    653}
    654
    655static ssize_t emulate_write_cache_store(struct config_item *item,
    656		const char *page, size_t count)
    657{
    658	struct se_dev_attrib *da = to_attrib(item);
    659	bool flag;
    660	int ret;
    661
    662	ret = strtobool(page, &flag);
    663	if (ret < 0)
    664		return ret;
    665
    666	if (flag && da->da_dev->transport->get_write_cache) {
    667		pr_err("emulate_write_cache not supported for this device\n");
    668		return -EINVAL;
    669	}
    670
    671	da->emulate_write_cache = flag;
    672	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
    673			da->da_dev, flag);
    674	return count;
    675}
    676
    677static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item,
    678		const char *page, size_t count)
    679{
    680	struct se_dev_attrib *da = to_attrib(item);
    681	u32 val;
    682	int ret;
    683
    684	ret = kstrtou32(page, 0, &val);
    685	if (ret < 0)
    686		return ret;
    687
    688	if (val != TARGET_UA_INTLCK_CTRL_CLEAR
    689	 && val != TARGET_UA_INTLCK_CTRL_NO_CLEAR
    690	 && val != TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
    691		pr_err("Illegal value %d\n", val);
    692		return -EINVAL;
    693	}
    694
    695	if (da->da_dev->export_count) {
    696		pr_err("dev[%p]: Unable to change SE Device"
    697			" UA_INTRLCK_CTRL while export_count is %d\n",
    698			da->da_dev, da->da_dev->export_count);
    699		return -EINVAL;
    700	}
    701	da->emulate_ua_intlck_ctrl = val;
    702	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
    703		da->da_dev, val);
    704	return count;
    705}
    706
    707static ssize_t emulate_tas_store(struct config_item *item,
    708		const char *page, size_t count)
    709{
    710	struct se_dev_attrib *da = to_attrib(item);
    711	bool flag;
    712	int ret;
    713
    714	ret = strtobool(page, &flag);
    715	if (ret < 0)
    716		return ret;
    717
    718	if (da->da_dev->export_count) {
    719		pr_err("dev[%p]: Unable to change SE Device TAS while"
    720			" export_count is %d\n",
    721			da->da_dev, da->da_dev->export_count);
    722		return -EINVAL;
    723	}
    724	da->emulate_tas = flag;
    725	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
    726		da->da_dev, flag ? "Enabled" : "Disabled");
    727
    728	return count;
    729}
    730
    731static ssize_t emulate_tpu_store(struct config_item *item,
    732		const char *page, size_t count)
    733{
    734	struct se_dev_attrib *da = to_attrib(item);
    735	bool flag;
    736	int ret;
    737
    738	ret = strtobool(page, &flag);
    739	if (ret < 0)
    740		return ret;
    741
    742	/*
    743	 * We expect this value to be non-zero when generic Block Layer
     744	 * Discard support is detected in iblock_create_virtdevice().
    745	 */
    746	if (flag && !da->max_unmap_block_desc_count) {
    747		pr_err("Generic Block Discard not supported\n");
    748		return -ENOSYS;
    749	}
    750
    751	da->emulate_tpu = flag;
    752	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
    753		da->da_dev, flag);
    754	return count;
    755}
    756
    757static ssize_t emulate_tpws_store(struct config_item *item,
    758		const char *page, size_t count)
    759{
    760	struct se_dev_attrib *da = to_attrib(item);
    761	bool flag;
    762	int ret;
    763
    764	ret = strtobool(page, &flag);
    765	if (ret < 0)
    766		return ret;
    767
    768	/*
    769	 * We expect this value to be non-zero when generic Block Layer
     770	 * Discard support is detected in iblock_create_virtdevice().
    771	 */
    772	if (flag && !da->max_unmap_block_desc_count) {
    773		pr_err("Generic Block Discard not supported\n");
    774		return -ENOSYS;
    775	}
    776
    777	da->emulate_tpws = flag;
    778	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
    779				da->da_dev, flag);
    780	return count;
    781}
    782
    783static ssize_t pi_prot_type_store(struct config_item *item,
    784		const char *page, size_t count)
    785{
    786	struct se_dev_attrib *da = to_attrib(item);
    787	int old_prot = da->pi_prot_type, ret;
    788	struct se_device *dev = da->da_dev;
    789	u32 flag;
    790
    791	ret = kstrtou32(page, 0, &flag);
    792	if (ret < 0)
    793		return ret;
    794
    795	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
    796		pr_err("Illegal value %d for pi_prot_type\n", flag);
    797		return -EINVAL;
    798	}
    799	if (flag == 2) {
    800		pr_err("DIF TYPE2 protection currently not supported\n");
    801		return -ENOSYS;
    802	}
    803	if (da->hw_pi_prot_type) {
    804		pr_warn("DIF protection enabled on underlying hardware,"
    805			" ignoring\n");
    806		return count;
    807	}
    808	if (!dev->transport->init_prot || !dev->transport->free_prot) {
    809		/* 0 is only allowed value for non-supporting backends */
    810		if (flag == 0)
    811			return count;
    812
    813		pr_err("DIF protection not supported by backend: %s\n",
    814		       dev->transport->name);
    815		return -ENOSYS;
    816	}
    817	if (!target_dev_configured(dev)) {
    818		pr_err("DIF protection requires device to be configured\n");
    819		return -ENODEV;
    820	}
    821	if (dev->export_count) {
    822		pr_err("dev[%p]: Unable to change SE Device PROT type while"
    823		       " export_count is %d\n", dev, dev->export_count);
    824		return -EINVAL;
    825	}
    826
    827	da->pi_prot_type = flag;
    828
    829	if (flag && !old_prot) {
    830		ret = dev->transport->init_prot(dev);
    831		if (ret) {
    832			da->pi_prot_type = old_prot;
    833			da->pi_prot_verify = (bool) da->pi_prot_type;
    834			return ret;
    835		}
    836
    837	} else if (!flag && old_prot) {
    838		dev->transport->free_prot(dev);
    839	}
    840
    841	da->pi_prot_verify = (bool) da->pi_prot_type;
    842	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
    843	return count;
    844}
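/*
 * Editor's note: accepted pi_prot_type values are 0 (disable), 1 and 3
 * (DIF TYPE1/TYPE3); 2 (DIF TYPE2) is rejected with -ENOSYS above, and
 * backends lacking init_prot()/free_prot() only accept 0.
 */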
    845
    846/* always zero, but attr needs to remain RW to avoid userspace breakage */
    847static ssize_t pi_prot_format_show(struct config_item *item, char *page)
    848{
    849	return snprintf(page, PAGE_SIZE, "0\n");
    850}
    851
    852static ssize_t pi_prot_format_store(struct config_item *item,
    853		const char *page, size_t count)
    854{
    855	struct se_dev_attrib *da = to_attrib(item);
    856	struct se_device *dev = da->da_dev;
    857	bool flag;
    858	int ret;
    859
    860	ret = strtobool(page, &flag);
    861	if (ret < 0)
    862		return ret;
    863
    864	if (!flag)
    865		return count;
    866
    867	if (!dev->transport->format_prot) {
    868		pr_err("DIF protection format not supported by backend %s\n",
    869		       dev->transport->name);
    870		return -ENOSYS;
    871	}
    872	if (!target_dev_configured(dev)) {
    873		pr_err("DIF protection format requires device to be configured\n");
    874		return -ENODEV;
    875	}
    876	if (dev->export_count) {
    877		pr_err("dev[%p]: Unable to format SE Device PROT type while"
    878		       " export_count is %d\n", dev, dev->export_count);
    879		return -EINVAL;
    880	}
    881
    882	ret = dev->transport->format_prot(dev);
    883	if (ret)
    884		return ret;
    885
    886	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
    887	return count;
    888}
    889
    890static ssize_t pi_prot_verify_store(struct config_item *item,
    891		const char *page, size_t count)
    892{
    893	struct se_dev_attrib *da = to_attrib(item);
    894	bool flag;
    895	int ret;
    896
    897	ret = strtobool(page, &flag);
    898	if (ret < 0)
    899		return ret;
    900
    901	if (!flag) {
    902		da->pi_prot_verify = flag;
    903		return count;
    904	}
    905	if (da->hw_pi_prot_type) {
    906		pr_warn("DIF protection enabled on underlying hardware,"
    907			" ignoring\n");
    908		return count;
    909	}
    910	if (!da->pi_prot_type) {
    911		pr_warn("DIF protection not supported by backend, ignoring\n");
    912		return count;
    913	}
    914	da->pi_prot_verify = flag;
    915
    916	return count;
    917}
    918
    919static ssize_t force_pr_aptpl_store(struct config_item *item,
    920		const char *page, size_t count)
    921{
    922	struct se_dev_attrib *da = to_attrib(item);
    923	bool flag;
    924	int ret;
    925
    926	ret = strtobool(page, &flag);
    927	if (ret < 0)
    928		return ret;
    929	if (da->da_dev->export_count) {
    930		pr_err("dev[%p]: Unable to set force_pr_aptpl while"
    931		       " export_count is %d\n",
    932		       da->da_dev, da->da_dev->export_count);
    933		return -EINVAL;
    934	}
    935
    936	da->force_pr_aptpl = flag;
    937	pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag);
    938	return count;
    939}
    940
    941static ssize_t emulate_rest_reord_store(struct config_item *item,
    942		const char *page, size_t count)
    943{
    944	struct se_dev_attrib *da = to_attrib(item);
    945	bool flag;
    946	int ret;
    947
    948	ret = strtobool(page, &flag);
    949	if (ret < 0)
    950		return ret;
    951
    952	if (flag != 0) {
     953		pr_err("dev[%p]: SE Device emulation of restricted"
     954			" reordering not implemented\n", da->da_dev);
    955		return -ENOSYS;
    956	}
    957	da->emulate_rest_reord = flag;
    958	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n",
    959		da->da_dev, flag);
    960	return count;
    961}
    962
    963static ssize_t unmap_zeroes_data_store(struct config_item *item,
    964		const char *page, size_t count)
    965{
    966	struct se_dev_attrib *da = to_attrib(item);
    967	bool flag;
    968	int ret;
    969
    970	ret = strtobool(page, &flag);
    971	if (ret < 0)
    972		return ret;
    973
    974	if (da->da_dev->export_count) {
    975		pr_err("dev[%p]: Unable to change SE Device"
    976		       " unmap_zeroes_data while export_count is %d\n",
    977		       da->da_dev, da->da_dev->export_count);
    978		return -EINVAL;
    979	}
    980	/*
    981	 * We expect this value to be non-zero when generic Block Layer
     982	 * Discard support is detected in iblock_configure_device().
    983	 */
    984	if (flag && !da->max_unmap_block_desc_count) {
    985		pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set"
    986		       " because max_unmap_block_desc_count is zero\n",
    987		       da->da_dev);
    988		return -ENOSYS;
    989	}
    990	da->unmap_zeroes_data = flag;
    991	pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
    992		 da->da_dev, flag);
    993	return count;
    994}
    995
    996/*
     997 * Note, this can only be called on an unexported SE Device Object.
    998 */
    999static ssize_t queue_depth_store(struct config_item *item,
   1000		const char *page, size_t count)
   1001{
   1002	struct se_dev_attrib *da = to_attrib(item);
   1003	struct se_device *dev = da->da_dev;
   1004	u32 val;
   1005	int ret;
   1006
   1007	ret = kstrtou32(page, 0, &val);
   1008	if (ret < 0)
   1009		return ret;
   1010
   1011	if (dev->export_count) {
   1012		pr_err("dev[%p]: Unable to change SE Device TCQ while"
   1013			" export_count is %d\n",
   1014			dev, dev->export_count);
   1015		return -EINVAL;
   1016	}
   1017	if (!val) {
   1018		pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
   1019		return -EINVAL;
   1020	}
   1021
   1022	if (val > dev->dev_attrib.queue_depth) {
   1023		if (val > dev->dev_attrib.hw_queue_depth) {
   1024			pr_err("dev[%p]: Passed queue_depth:"
   1025				" %u exceeds TCM/SE_Device MAX"
   1026				" TCQ: %u\n", dev, val,
   1027				dev->dev_attrib.hw_queue_depth);
   1028			return -EINVAL;
   1029		}
   1030	}
   1031	da->queue_depth = dev->queue_depth = val;
   1032	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val);
   1033	return count;
   1034}
   1035
   1036static ssize_t optimal_sectors_store(struct config_item *item,
   1037		const char *page, size_t count)
   1038{
   1039	struct se_dev_attrib *da = to_attrib(item);
   1040	u32 val;
   1041	int ret;
   1042
   1043	ret = kstrtou32(page, 0, &val);
   1044	if (ret < 0)
   1045		return ret;
   1046
   1047	if (da->da_dev->export_count) {
   1048		pr_err("dev[%p]: Unable to change SE Device"
   1049			" optimal_sectors while export_count is %d\n",
   1050			da->da_dev, da->da_dev->export_count);
   1051		return -EINVAL;
   1052	}
   1053	if (val > da->hw_max_sectors) {
   1054		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
   1055			" greater than hw_max_sectors: %u\n",
   1056			da->da_dev, val, da->hw_max_sectors);
   1057		return -EINVAL;
   1058	}
   1059
   1060	da->optimal_sectors = val;
   1061	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
   1062			da->da_dev, val);
   1063	return count;
   1064}
   1065
   1066static ssize_t block_size_store(struct config_item *item,
   1067		const char *page, size_t count)
   1068{
   1069	struct se_dev_attrib *da = to_attrib(item);
   1070	u32 val;
   1071	int ret;
   1072
   1073	ret = kstrtou32(page, 0, &val);
   1074	if (ret < 0)
   1075		return ret;
   1076
   1077	if (da->da_dev->export_count) {
   1078		pr_err("dev[%p]: Unable to change SE Device block_size"
   1079			" while export_count is %d\n",
   1080			da->da_dev, da->da_dev->export_count);
   1081		return -EINVAL;
   1082	}
   1083
   1084	if (val != 512 && val != 1024 && val != 2048 && val != 4096) {
    1085		pr_err("dev[%p]: Illegal value for block_size: %u"
   1086			" for SE device, must be 512, 1024, 2048 or 4096\n",
   1087			da->da_dev, val);
   1088		return -EINVAL;
   1089	}
   1090
   1091	da->block_size = val;
   1092	if (da->max_bytes_per_io)
   1093		da->hw_max_sectors = da->max_bytes_per_io / val;
   1094
   1095	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
   1096			da->da_dev, val);
   1097	return count;
   1098}
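/*
 * Usage sketch (path shown for illustration):
 *
 *   echo 4096 > $TARGET/core/$HBA/$STORAGE_OBJECT/attrib/block_size
 *
 * Only 512, 1024, 2048 and 4096 are accepted, and only while the device
 * has no active exports.
 */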
   1099
   1100static ssize_t alua_support_show(struct config_item *item, char *page)
   1101{
   1102	struct se_dev_attrib *da = to_attrib(item);
   1103	u8 flags = da->da_dev->transport_flags;
   1104
   1105	return snprintf(page, PAGE_SIZE, "%d\n",
   1106			flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1);
   1107}
   1108
   1109static ssize_t alua_support_store(struct config_item *item,
   1110		const char *page, size_t count)
   1111{
   1112	struct se_dev_attrib *da = to_attrib(item);
   1113	struct se_device *dev = da->da_dev;
   1114	bool flag, oldflag;
   1115	int ret;
   1116
   1117	ret = strtobool(page, &flag);
   1118	if (ret < 0)
   1119		return ret;
   1120
   1121	oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA);
   1122	if (flag == oldflag)
   1123		return count;
   1124
   1125	if (!(dev->transport->transport_flags_changeable &
   1126	      TRANSPORT_FLAG_PASSTHROUGH_ALUA)) {
   1127		pr_err("dev[%p]: Unable to change SE Device alua_support:"
   1128			" alua_support has fixed value\n", dev);
   1129		return -ENOSYS;
   1130	}
   1131
   1132	if (flag)
   1133		dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA;
   1134	else
   1135		dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_ALUA;
   1136	return count;
   1137}
   1138
   1139static ssize_t pgr_support_show(struct config_item *item, char *page)
   1140{
   1141	struct se_dev_attrib *da = to_attrib(item);
   1142	u8 flags = da->da_dev->transport_flags;
   1143
   1144	return snprintf(page, PAGE_SIZE, "%d\n",
   1145			flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1);
   1146}
   1147
   1148static ssize_t pgr_support_store(struct config_item *item,
   1149		const char *page, size_t count)
   1150{
   1151	struct se_dev_attrib *da = to_attrib(item);
   1152	struct se_device *dev = da->da_dev;
   1153	bool flag, oldflag;
   1154	int ret;
   1155
   1156	ret = strtobool(page, &flag);
   1157	if (ret < 0)
   1158		return ret;
   1159
   1160	oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR);
   1161	if (flag == oldflag)
   1162		return count;
   1163
   1164	if (!(dev->transport->transport_flags_changeable &
   1165	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
   1166		pr_err("dev[%p]: Unable to change SE Device pgr_support:"
   1167			" pgr_support has fixed value\n", dev);
   1168		return -ENOSYS;
   1169	}
   1170
   1171	if (flag)
   1172		dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
   1173	else
   1174		dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_PGR;
   1175	return count;
   1176}
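/*
 * Editor's note: alua_support/pgr_support read 1 when the core emulates
 * ALUA/persistent reservations (i.e. the PASSTHROUGH flag is clear), and
 * are only writable when the backend advertises the corresponding bit in
 * transport_flags_changeable.
 */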
   1177
   1178CONFIGFS_ATTR(, emulate_model_alias);
   1179CONFIGFS_ATTR(, emulate_dpo);
   1180CONFIGFS_ATTR(, emulate_fua_write);
   1181CONFIGFS_ATTR(, emulate_fua_read);
   1182CONFIGFS_ATTR(, emulate_write_cache);
   1183CONFIGFS_ATTR(, emulate_ua_intlck_ctrl);
   1184CONFIGFS_ATTR(, emulate_tas);
   1185CONFIGFS_ATTR(, emulate_tpu);
   1186CONFIGFS_ATTR(, emulate_tpws);
   1187CONFIGFS_ATTR(, emulate_caw);
   1188CONFIGFS_ATTR(, emulate_3pc);
   1189CONFIGFS_ATTR(, emulate_pr);
   1190CONFIGFS_ATTR(, pi_prot_type);
   1191CONFIGFS_ATTR_RO(, hw_pi_prot_type);
   1192CONFIGFS_ATTR(, pi_prot_format);
   1193CONFIGFS_ATTR(, pi_prot_verify);
   1194CONFIGFS_ATTR(, enforce_pr_isids);
   1195CONFIGFS_ATTR(, is_nonrot);
   1196CONFIGFS_ATTR(, emulate_rest_reord);
   1197CONFIGFS_ATTR(, force_pr_aptpl);
   1198CONFIGFS_ATTR_RO(, hw_block_size);
   1199CONFIGFS_ATTR(, block_size);
   1200CONFIGFS_ATTR_RO(, hw_max_sectors);
   1201CONFIGFS_ATTR(, optimal_sectors);
   1202CONFIGFS_ATTR_RO(, hw_queue_depth);
   1203CONFIGFS_ATTR(, queue_depth);
   1204CONFIGFS_ATTR(, max_unmap_lba_count);
   1205CONFIGFS_ATTR(, max_unmap_block_desc_count);
   1206CONFIGFS_ATTR(, unmap_granularity);
   1207CONFIGFS_ATTR(, unmap_granularity_alignment);
   1208CONFIGFS_ATTR(, unmap_zeroes_data);
   1209CONFIGFS_ATTR(, max_write_same_len);
   1210CONFIGFS_ATTR(, alua_support);
   1211CONFIGFS_ATTR(, pgr_support);
   1212
   1213/*
   1214 * dev_attrib attributes for devices using the target core SBC/SPC
   1215 * interpreter.  Any backend using spc_parse_cdb should be using
   1216 * these.
   1217 */
   1218struct configfs_attribute *sbc_attrib_attrs[] = {
   1219	&attr_emulate_model_alias,
   1220	&attr_emulate_dpo,
   1221	&attr_emulate_fua_write,
   1222	&attr_emulate_fua_read,
   1223	&attr_emulate_write_cache,
   1224	&attr_emulate_ua_intlck_ctrl,
   1225	&attr_emulate_tas,
   1226	&attr_emulate_tpu,
   1227	&attr_emulate_tpws,
   1228	&attr_emulate_caw,
   1229	&attr_emulate_3pc,
   1230	&attr_emulate_pr,
   1231	&attr_pi_prot_type,
   1232	&attr_hw_pi_prot_type,
   1233	&attr_pi_prot_format,
   1234	&attr_pi_prot_verify,
   1235	&attr_enforce_pr_isids,
   1236	&attr_is_nonrot,
   1237	&attr_emulate_rest_reord,
   1238	&attr_force_pr_aptpl,
   1239	&attr_hw_block_size,
   1240	&attr_block_size,
   1241	&attr_hw_max_sectors,
   1242	&attr_optimal_sectors,
   1243	&attr_hw_queue_depth,
   1244	&attr_queue_depth,
   1245	&attr_max_unmap_lba_count,
   1246	&attr_max_unmap_block_desc_count,
   1247	&attr_unmap_granularity,
   1248	&attr_unmap_granularity_alignment,
   1249	&attr_unmap_zeroes_data,
   1250	&attr_max_write_same_len,
   1251	&attr_alua_support,
   1252	&attr_pgr_support,
   1253	NULL,
   1254};
   1255EXPORT_SYMBOL(sbc_attrib_attrs);
   1256
   1257/*
   1258 * Minimal dev_attrib attributes for devices passing through CDBs.
   1259 * In this case we only provide a few read-only attributes for
   1260 * backwards compatibility.
   1261 */
   1262struct configfs_attribute *passthrough_attrib_attrs[] = {
   1263	&attr_hw_pi_prot_type,
   1264	&attr_hw_block_size,
   1265	&attr_hw_max_sectors,
   1266	&attr_hw_queue_depth,
   1267	&attr_emulate_pr,
   1268	&attr_alua_support,
   1269	&attr_pgr_support,
   1270	NULL,
   1271};
   1272EXPORT_SYMBOL(passthrough_attrib_attrs);
   1273
   1274/*
   1275 * pr related dev_attrib attributes for devices passing through CDBs,
   1276 * but allowing in core pr emulation.
   1277 */
   1278struct configfs_attribute *passthrough_pr_attrib_attrs[] = {
   1279	&attr_enforce_pr_isids,
   1280	&attr_force_pr_aptpl,
   1281	NULL,
   1282};
   1283EXPORT_SYMBOL(passthrough_pr_attrib_attrs);
   1284
   1285TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL);
   1286TB_CIT_SETUP_DRV(dev_action, NULL, NULL);
   1287
   1288/* End functions for struct config_item_type tb_dev_attrib_cit */
   1289
   1290/*  Start functions for struct config_item_type tb_dev_wwn_cit */
   1291
   1292static struct t10_wwn *to_t10_wwn(struct config_item *item)
   1293{
   1294	return container_of(to_config_group(item), struct t10_wwn, t10_wwn_group);
   1295}
   1296
   1297static ssize_t target_check_inquiry_data(char *buf)
   1298{
   1299	size_t len;
   1300	int i;
   1301
   1302	len = strlen(buf);
   1303
   1304	/*
   1305	 * SPC 4.3.1:
   1306	 * ASCII data fields shall contain only ASCII printable characters
   1307	 * (i.e., code values 20h to 7Eh) and may be terminated with one or
   1308	 * more ASCII null (00h) characters.
   1309	 */
   1310	for (i = 0; i < len; i++) {
   1311		if (buf[i] < 0x20 || buf[i] > 0x7E) {
   1312			pr_err("Emulated T10 Inquiry Data contains non-ASCII-printable characters\n");
   1313			return -EINVAL;
   1314		}
   1315	}
   1316
   1317	return len;
   1318}
   1319
   1320/*
   1321 * STANDARD and VPD page 0x83 T10 Vendor Identification
   1322 */
   1323static ssize_t target_wwn_vendor_id_show(struct config_item *item,
   1324		char *page)
   1325{
   1326	return sprintf(page, "%s\n", &to_t10_wwn(item)->vendor[0]);
   1327}
   1328
   1329static ssize_t target_wwn_vendor_id_store(struct config_item *item,
   1330		const char *page, size_t count)
   1331{
   1332	struct t10_wwn *t10_wwn = to_t10_wwn(item);
   1333	struct se_device *dev = t10_wwn->t10_dev;
   1334	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
   1335	unsigned char buf[INQUIRY_VENDOR_LEN + 2];
   1336	char *stripped = NULL;
   1337	size_t len;
   1338	ssize_t ret;
   1339
   1340	len = strlcpy(buf, page, sizeof(buf));
   1341	if (len < sizeof(buf)) {
   1342		/* Strip any newline added from userspace. */
   1343		stripped = strstrip(buf);
   1344		len = strlen(stripped);
   1345	}
   1346	if (len > INQUIRY_VENDOR_LEN) {
   1347		pr_err("Emulated T10 Vendor Identification exceeds"
   1348			" INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
   1349			"\n");
   1350		return -EOVERFLOW;
   1351	}
   1352
   1353	ret = target_check_inquiry_data(stripped);
   1354
   1355	if (ret < 0)
   1356		return ret;
   1357
   1358	/*
   1359	 * Check to see if any active exports exist.  If they do exist, fail
   1360	 * here as changing this information on the fly (underneath the
   1361	 * initiator side OS dependent multipath code) could cause negative
   1362	 * effects.
   1363	 */
   1364	if (dev->export_count) {
   1365		pr_err("Unable to set T10 Vendor Identification while"
   1366			" active %d exports exist\n", dev->export_count);
   1367		return -EINVAL;
   1368	}
   1369
   1370	BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1);
   1371	strlcpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor));
   1372
   1373	pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:"
   1374		 " %s\n", dev->t10_wwn.vendor);
   1375
   1376	return count;
   1377}
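/*
 * Usage sketch (value shown is illustrative):
 *
 *   echo LIO-ORG > $TARGET/core/$HBA/$STORAGE_OBJECT/wwn/vendor_id
 *
 * The value is limited to INQUIRY_VENDOR_LEN (8) printable-ASCII
 * characters and may only change while no exports exist.
 */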
   1378
   1379static ssize_t target_wwn_product_id_show(struct config_item *item,
   1380		char *page)
   1381{
   1382	return sprintf(page, "%s\n", &to_t10_wwn(item)->model[0]);
   1383}
   1384
   1385static ssize_t target_wwn_product_id_store(struct config_item *item,
   1386		const char *page, size_t count)
   1387{
   1388	struct t10_wwn *t10_wwn = to_t10_wwn(item);
   1389	struct se_device *dev = t10_wwn->t10_dev;
   1390	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
   1391	unsigned char buf[INQUIRY_MODEL_LEN + 2];
   1392	char *stripped = NULL;
   1393	size_t len;
   1394	ssize_t ret;
   1395
   1396	len = strlcpy(buf, page, sizeof(buf));
   1397	if (len < sizeof(buf)) {
   1398		/* Strip any newline added from userspace. */
   1399		stripped = strstrip(buf);
   1400		len = strlen(stripped);
   1401	}
   1402	if (len > INQUIRY_MODEL_LEN) {
    1403		pr_err("Emulated T10 Model exceeds INQUIRY_MODEL_LEN: "
   1404			 __stringify(INQUIRY_MODEL_LEN)
   1405			"\n");
   1406		return -EOVERFLOW;
   1407	}
   1408
   1409	ret = target_check_inquiry_data(stripped);
   1410
   1411	if (ret < 0)
   1412		return ret;
   1413
   1414	/*
   1415	 * Check to see if any active exports exist.  If they do exist, fail
   1416	 * here as changing this information on the fly (underneath the
   1417	 * initiator side OS dependent multipath code) could cause negative
   1418	 * effects.
   1419	 */
   1420	if (dev->export_count) {
   1421		pr_err("Unable to set T10 Model while active %d exports exist\n",
   1422			dev->export_count);
   1423		return -EINVAL;
   1424	}
   1425
   1426	BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
   1427	strlcpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model));
   1428
   1429	pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n",
   1430		 dev->t10_wwn.model);
   1431
   1432	return count;
   1433}
   1434
   1435static ssize_t target_wwn_revision_show(struct config_item *item,
   1436		char *page)
   1437{
   1438	return sprintf(page, "%s\n", &to_t10_wwn(item)->revision[0]);
   1439}
   1440
   1441static ssize_t target_wwn_revision_store(struct config_item *item,
   1442		const char *page, size_t count)
   1443{
   1444	struct t10_wwn *t10_wwn = to_t10_wwn(item);
   1445	struct se_device *dev = t10_wwn->t10_dev;
   1446	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
   1447	unsigned char buf[INQUIRY_REVISION_LEN + 2];
   1448	char *stripped = NULL;
   1449	size_t len;
   1450	ssize_t ret;
   1451
   1452	len = strlcpy(buf, page, sizeof(buf));
   1453	if (len < sizeof(buf)) {
   1454		/* Strip any newline added from userspace. */
   1455		stripped = strstrip(buf);
   1456		len = strlen(stripped);
   1457	}
   1458	if (len > INQUIRY_REVISION_LEN) {
   1459		pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: "
   1460			 __stringify(INQUIRY_REVISION_LEN)
   1461			"\n");
   1462		return -EOVERFLOW;
   1463	}
   1464
   1465	ret = target_check_inquiry_data(stripped);
   1466
   1467	if (ret < 0)
   1468		return ret;
   1469
   1470	/*
   1471	 * Check to see if any active exports exist.  If they do exist, fail
   1472	 * here as changing this information on the fly (underneath the
   1473	 * initiator side OS dependent multipath code) could cause negative
   1474	 * effects.
   1475	 */
   1476	if (dev->export_count) {
   1477		pr_err("Unable to set T10 Revision while active %d exports exist\n",
   1478			dev->export_count);
   1479		return -EINVAL;
   1480	}
   1481
   1482	BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1);
   1483	strlcpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision));
   1484
   1485	pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n",
   1486		 dev->t10_wwn.revision);
   1487
   1488	return count;
   1489}
   1490
   1491static ssize_t
   1492target_wwn_company_id_show(struct config_item *item,
   1493				char *page)
   1494{
   1495	return snprintf(page, PAGE_SIZE, "%#08x\n",
   1496			to_t10_wwn(item)->company_id);
   1497}
   1498
   1499static ssize_t
   1500target_wwn_company_id_store(struct config_item *item,
   1501				 const char *page, size_t count)
   1502{
   1503	struct t10_wwn *t10_wwn = to_t10_wwn(item);
   1504	struct se_device *dev = t10_wwn->t10_dev;
   1505	u32 val;
   1506	int ret;
   1507
   1508	/*
   1509	 * The IEEE COMPANY_ID field should contain a 24-bit canonical
   1510	 * form OUI assigned by the IEEE.
   1511	 */
   1512	ret = kstrtou32(page, 0, &val);
   1513	if (ret < 0)
   1514		return ret;
   1515
   1516	if (val >= 0x1000000)
   1517		return -EOVERFLOW;
   1518
   1519	/*
   1520	 * Check to see if any active exports exist. If they do exist, fail
   1521	 * here as changing this information on the fly (underneath the
   1522	 * initiator side OS dependent multipath code) could cause negative
   1523	 * effects.
   1524	 */
   1525	if (dev->export_count) {
   1526		pr_err("Unable to set Company ID while %u exports exist\n",
   1527		       dev->export_count);
   1528		return -EINVAL;
   1529	}
   1530
   1531	t10_wwn->company_id = val;
   1532
   1533	pr_debug("Target_Core_ConfigFS: Set IEEE Company ID: %#08x\n",
   1534		 t10_wwn->company_id);
   1535
   1536	return count;
   1537}
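/*
 * Usage sketch (value shown is illustrative): the IEEE company_id must
 * fit in 24 bits, e.g.:
 *
 *   echo 0x123456 > $TARGET/core/$HBA/$STORAGE_OBJECT/wwn/company_id
 */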
   1538
   1539/*
   1540 * VPD page 0x80 Unit serial
   1541 */
   1542static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item,
   1543		char *page)
   1544{
   1545	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
   1546		&to_t10_wwn(item)->unit_serial[0]);
   1547}
   1548
   1549static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
   1550		const char *page, size_t count)
   1551{
   1552	struct t10_wwn *t10_wwn = to_t10_wwn(item);
   1553	struct se_device *dev = t10_wwn->t10_dev;
   1554	unsigned char buf[INQUIRY_VPD_SERIAL_LEN] = { };
   1555
   1556	/*
   1557	 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
   1558	 * from the struct scsi_device level firmware, do not allow
   1559	 * VPD Unit Serial to be emulated.
   1560	 *
   1561	 * Note this struct scsi_device could also be emulating VPD
   1562	 * information from its drivers/scsi LLD.  But for now we assume
   1563	 * it is doing 'the right thing' wrt a world wide unique
   1564	 * VPD Unit Serial Number that OS dependent multipath can depend on.
   1565	 */
   1566	if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
   1567		pr_err("Underlying SCSI device firmware provided VPD"
   1568			" Unit Serial, ignoring request\n");
   1569		return -EOPNOTSUPP;
   1570	}
   1571
   1572	if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
   1573		pr_err("Emulated VPD Unit Serial exceeds"
   1574		" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
   1575		return -EOVERFLOW;
   1576	}
   1577	/*
   1578	 * Check to see if any active $FABRIC_MOD exports exist.  If they
   1579	 * do exist, fail here as changing this information on the fly
   1580	 * (underneath the initiator side OS dependent multipath code)
   1581	 * could cause negative effects.
   1582	 */
   1583	if (dev->export_count) {
   1584		pr_err("Unable to set VPD Unit Serial while"
   1585			" active %d $FABRIC_MOD exports exist\n",
   1586			dev->export_count);
   1587		return -EINVAL;
   1588	}
   1589
   1590	/*
   1591	 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
   1592	 *
   1593	 * Also, strip any newline added from the userspace
   1594	 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
   1595	 */
   1596	snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
   1597	snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
   1598			"%s", strstrip(buf));
   1599	dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;
   1600
   1601	pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
   1602			" %s\n", dev->t10_wwn.unit_serial);
   1603
   1604	return count;
   1605}
   1606
   1607/*
   1608 * VPD page 0x83 Protocol Identifier
   1609 */
   1610static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item,
   1611		char *page)
   1612{
   1613	struct t10_wwn *t10_wwn = to_t10_wwn(item);
   1614	struct t10_vpd *vpd;
   1615	unsigned char buf[VPD_TMP_BUF_SIZE] = { };
   1616	ssize_t len = 0;
   1617
   1618	spin_lock(&t10_wwn->t10_vpd_lock);
   1619	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
   1620		if (!vpd->protocol_identifier_set)
   1621			continue;
   1622
   1623		transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
   1624
   1625		if (len + strlen(buf) >= PAGE_SIZE)
   1626			break;
   1627
   1628		len += sprintf(page+len, "%s", buf);
   1629	}
   1630	spin_unlock(&t10_wwn->t10_vpd_lock);
   1631
   1632	return len;
   1633}
   1634
   1635/*
   1636 * Generic wrapper for dumping VPD identifiers by association.
   1637 */
   1638#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc)				\
   1639static ssize_t target_wwn_##_name##_show(struct config_item *item,	\
   1640		char *page)						\
   1641{									\
   1642	struct t10_wwn *t10_wwn = to_t10_wwn(item);			\
   1643	struct t10_vpd *vpd;						\
   1644	unsigned char buf[VPD_TMP_BUF_SIZE];				\
   1645	ssize_t len = 0;						\
   1646									\
   1647	spin_lock(&t10_wwn->t10_vpd_lock);				\
   1648	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {	\
   1649		if (vpd->association != _assoc)				\
   1650			continue;					\
   1651									\
   1652		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
   1653		transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);	\
   1654		if (len + strlen(buf) >= PAGE_SIZE)			\
   1655			break;						\
   1656		len += sprintf(page+len, "%s", buf);			\
   1657									\
   1658		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
   1659		transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
   1660		if (len + strlen(buf) >= PAGE_SIZE)			\
   1661			break;						\
   1662		len += sprintf(page+len, "%s", buf);			\
   1663									\
   1664		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
   1665		transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
   1666		if (len + strlen(buf) >= PAGE_SIZE)			\
   1667			break;						\
   1668		len += sprintf(page+len, "%s", buf);			\
   1669	}								\
   1670	spin_unlock(&t10_wwn->t10_vpd_lock);				\
   1671									\
   1672	return len;							\
   1673}
   1674
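/*
 * Each DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) instantiation below expands
 * to a target_wwn_##_name##_show() routine that walks t10_vpd_list and
 * dumps only the descriptors matching the given association value; the
 * CONFIGFS_ATTR_RO() calls further down expose these as read-only
 * configfs attributes.
 */
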
   1675/* VPD page 0x83 Association: Logical Unit */
   1676DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
   1677/* VPD page 0x83 Association: Target Port */
   1678DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
   1679/* VPD page 0x83 Association: SCSI Target Device */
   1680DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
   1681
   1682CONFIGFS_ATTR(target_wwn_, vendor_id);
   1683CONFIGFS_ATTR(target_wwn_, product_id);
   1684CONFIGFS_ATTR(target_wwn_, revision);
   1685CONFIGFS_ATTR(target_wwn_, company_id);
   1686CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
   1687CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
   1688CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
   1689CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port);
   1690CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);
   1691
   1692static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
   1693	&target_wwn_attr_vendor_id,
   1694	&target_wwn_attr_product_id,
   1695	&target_wwn_attr_revision,
   1696	&target_wwn_attr_company_id,
   1697	&target_wwn_attr_vpd_unit_serial,
   1698	&target_wwn_attr_vpd_protocol_identifier,
   1699	&target_wwn_attr_vpd_assoc_logical_unit,
   1700	&target_wwn_attr_vpd_assoc_target_port,
   1701	&target_wwn_attr_vpd_assoc_scsi_target_device,
   1702	NULL,
   1703};
   1704
   1705TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs);
   1706
   1707/*  End functions for struct config_item_type tb_dev_wwn_cit */
   1708
   1709/*  Start functions for struct config_item_type tb_dev_pr_cit */
   1710
   1711static struct se_device *pr_to_dev(struct config_item *item)
   1712{
   1713	return container_of(to_config_group(item), struct se_device,
   1714			dev_pr_group);
   1715}
   1716
   1717static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
   1718		char *page)
   1719{
   1720	struct se_node_acl *se_nacl;
   1721	struct t10_pr_registration *pr_reg;
   1722	char i_buf[PR_REG_ISID_ID_LEN] = { };
   1723
   1724	pr_reg = dev->dev_pr_res_holder;
   1725	if (!pr_reg)
   1726		return sprintf(page, "No SPC-3 Reservation holder\n");
   1727
   1728	se_nacl = pr_reg->pr_reg_nacl;
   1729	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
   1730
   1731	return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
   1732		se_nacl->se_tpg->se_tpg_tfo->fabric_name,
   1733		se_nacl->initiatorname, i_buf);
   1734}
   1735
   1736static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
   1737		char *page)
   1738{
   1739	struct se_session *sess = dev->reservation_holder;
   1740	struct se_node_acl *se_nacl;
   1741	ssize_t len;
   1742
   1743	if (sess) {
   1744		se_nacl = sess->se_node_acl;
   1745		len = sprintf(page,
   1746			      "SPC-2 Reservation: %s Initiator: %s\n",
   1747			      se_nacl->se_tpg->se_tpg_tfo->fabric_name,
   1748			      se_nacl->initiatorname);
   1749	} else {
   1750		len = sprintf(page, "No SPC-2 Reservation holder\n");
   1751	}
   1752	return len;
   1753}
   1754
   1755static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
   1756{
   1757	struct se_device *dev = pr_to_dev(item);
   1758	int ret;
   1759
   1760	if (!dev->dev_attrib.emulate_pr)
   1761		return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
   1762
   1763	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
   1764		return sprintf(page, "Passthrough\n");
   1765
   1766	spin_lock(&dev->dev_reservation_lock);
   1767	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
   1768		ret = target_core_dev_pr_show_spc2_res(dev, page);
   1769	else
   1770		ret = target_core_dev_pr_show_spc3_res(dev, page);
   1771	spin_unlock(&dev->dev_reservation_lock);
   1772	return ret;
   1773}
   1774
   1775static ssize_t target_pr_res_pr_all_tgt_pts_show(struct config_item *item,
   1776		char *page)
   1777{
   1778	struct se_device *dev = pr_to_dev(item);
   1779	ssize_t len = 0;
   1780
   1781	spin_lock(&dev->dev_reservation_lock);
   1782	if (!dev->dev_pr_res_holder) {
   1783		len = sprintf(page, "No SPC-3 Reservation holder\n");
   1784	} else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
   1785		len = sprintf(page, "SPC-3 Reservation: All Target"
   1786			" Ports registration\n");
   1787	} else {
   1788		len = sprintf(page, "SPC-3 Reservation: Single"
   1789			" Target Port registration\n");
   1790	}
   1791
   1792	spin_unlock(&dev->dev_reservation_lock);
   1793	return len;
   1794}
   1795
   1796static ssize_t target_pr_res_pr_generation_show(struct config_item *item,
   1797		char *page)
   1798{
   1799	return sprintf(page, "0x%08x\n", pr_to_dev(item)->t10_pr.pr_generation);
   1800}
   1801
   1802
   1803static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item,
   1804		char *page)
   1805{
   1806	struct se_device *dev = pr_to_dev(item);
   1807	struct se_node_acl *se_nacl;
   1808	struct se_portal_group *se_tpg;
   1809	struct t10_pr_registration *pr_reg;
   1810	const struct target_core_fabric_ops *tfo;
   1811	ssize_t len = 0;
   1812
   1813	spin_lock(&dev->dev_reservation_lock);
   1814	pr_reg = dev->dev_pr_res_holder;
   1815	if (!pr_reg) {
   1816		len = sprintf(page, "No SPC-3 Reservation holder\n");
   1817		goto out_unlock;
   1818	}
   1819
   1820	se_nacl = pr_reg->pr_reg_nacl;
   1821	se_tpg = se_nacl->se_tpg;
   1822	tfo = se_tpg->se_tpg_tfo;
   1823
   1824	len += sprintf(page+len, "SPC-3 Reservation: %s"
   1825		" Target Node Endpoint: %s\n", tfo->fabric_name,
   1826		tfo->tpg_get_wwn(se_tpg));
   1827	len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
   1828		" Identifier Tag: %hu %s Portal Group Tag: %hu"
   1829		" %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
   1830		tfo->fabric_name, tfo->tpg_get_tag(se_tpg),
   1831		tfo->fabric_name, pr_reg->pr_aptpl_target_lun);
   1832
   1833out_unlock:
   1834	spin_unlock(&dev->dev_reservation_lock);
   1835	return len;
   1836}
   1837
   1838
   1839static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item,
   1840		char *page)
   1841{
   1842	struct se_device *dev = pr_to_dev(item);
   1843	const struct target_core_fabric_ops *tfo;
   1844	struct t10_pr_registration *pr_reg;
   1845	unsigned char buf[384];
   1846	char i_buf[PR_REG_ISID_ID_LEN];
   1847	ssize_t len = 0;
   1848	int reg_count = 0;
   1849
   1850	len += sprintf(page+len, "SPC-3 PR Registrations:\n");
   1851
   1852	spin_lock(&dev->t10_pr.registration_lock);
   1853	list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
   1854			pr_reg_list) {
   1855
    1856		memset(buf, 0, sizeof(buf));
   1857		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
   1858		tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
   1859		core_pr_dump_initiator_port(pr_reg, i_buf,
   1860					PR_REG_ISID_ID_LEN);
   1861		sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
   1862			tfo->fabric_name,
   1863			pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
   1864			pr_reg->pr_res_generation);
   1865
   1866		if (len + strlen(buf) >= PAGE_SIZE)
   1867			break;
   1868
   1869		len += sprintf(page+len, "%s", buf);
   1870		reg_count++;
   1871	}
   1872	spin_unlock(&dev->t10_pr.registration_lock);
   1873
   1874	if (!reg_count)
   1875		len += sprintf(page+len, "None\n");
   1876
   1877	return len;
   1878}
   1879
   1880static ssize_t target_pr_res_pr_type_show(struct config_item *item, char *page)
   1881{
   1882	struct se_device *dev = pr_to_dev(item);
   1883	struct t10_pr_registration *pr_reg;
   1884	ssize_t len = 0;
   1885
   1886	spin_lock(&dev->dev_reservation_lock);
   1887	pr_reg = dev->dev_pr_res_holder;
   1888	if (pr_reg) {
   1889		len = sprintf(page, "SPC-3 Reservation Type: %s\n",
   1890			core_scsi3_pr_dump_type(pr_reg->pr_res_type));
   1891	} else {
   1892		len = sprintf(page, "No SPC-3 Reservation holder\n");
   1893	}
   1894
   1895	spin_unlock(&dev->dev_reservation_lock);
   1896	return len;
   1897}
   1898
   1899static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
   1900{
   1901	struct se_device *dev = pr_to_dev(item);
   1902
   1903	if (!dev->dev_attrib.emulate_pr)
   1904		return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
   1905	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
   1906		return sprintf(page, "SPC_PASSTHROUGH\n");
   1907	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
   1908		return sprintf(page, "SPC2_RESERVATIONS\n");
   1909
   1910	return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
   1911}
   1912
   1913static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
   1914		char *page)
   1915{
   1916	struct se_device *dev = pr_to_dev(item);
   1917
   1918	if (!dev->dev_attrib.emulate_pr ||
   1919	    (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
   1920		return 0;
   1921
   1922	return sprintf(page, "APTPL Bit Status: %s\n",
   1923		(dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
   1924}
   1925
   1926static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item,
   1927		char *page)
   1928{
   1929	struct se_device *dev = pr_to_dev(item);
   1930
   1931	if (!dev->dev_attrib.emulate_pr ||
   1932	    (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
   1933		return 0;
   1934
    1935	return sprintf(page, "Ready to process PR APTPL metadata...\n");
   1936}
   1937
   1938enum {
   1939	Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
   1940	Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
   1941	Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
   1942	Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
   1943};
   1944
   1945static match_table_t tokens = {
   1946	{Opt_initiator_fabric, "initiator_fabric=%s"},
   1947	{Opt_initiator_node, "initiator_node=%s"},
   1948	{Opt_initiator_sid, "initiator_sid=%s"},
   1949	{Opt_sa_res_key, "sa_res_key=%s"},
   1950	{Opt_res_holder, "res_holder=%d"},
   1951	{Opt_res_type, "res_type=%d"},
   1952	{Opt_res_scope, "res_scope=%d"},
   1953	{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
   1954	{Opt_mapped_lun, "mapped_lun=%u"},
   1955	{Opt_target_fabric, "target_fabric=%s"},
   1956	{Opt_target_node, "target_node=%s"},
   1957	{Opt_tpgt, "tpgt=%d"},
   1958	{Opt_port_rtpi, "port_rtpi=%d"},
   1959	{Opt_target_lun, "target_lun=%u"},
   1960	{Opt_err, NULL}
   1961};
   1962
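/*
 * target_pr_res_aptpl_metadata_store() below parses "key=value" pairs
 * separated by ',' or '\n' using the token table above; initiator_node=,
 * target_node= and a non-zero sa_res_key= are mandatory.  Illustrative
 * usage from userspace (all values hypothetical):
 *
 *   echo "initiator_fabric=iSCSI,initiator_node=iqn.1994-05.com.example:client,
 *   sa_res_key=0x1234,res_holder=1,res_type=3,mapped_lun=0,
 *   target_fabric=iSCSI,target_node=iqn.2003-01.org.example:tgt,tpgt=1,
 *   target_lun=0" > $STORAGE_OBJECT/pr/res_aptpl_metadata
 */
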
   1963static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
   1964		const char *page, size_t count)
   1965{
   1966	struct se_device *dev = pr_to_dev(item);
   1967	unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
   1968	unsigned char *t_fabric = NULL, *t_port = NULL;
   1969	char *orig, *ptr, *opts;
   1970	substring_t args[MAX_OPT_ARGS];
   1971	unsigned long long tmp_ll;
   1972	u64 sa_res_key = 0;
   1973	u64 mapped_lun = 0, target_lun = 0;
   1974	int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
   1975	u16 tpgt = 0;
   1976	u8 type = 0;
   1977
   1978	if (!dev->dev_attrib.emulate_pr ||
   1979	    (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
   1980		return count;
   1981	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
   1982		return count;
   1983
   1984	if (dev->export_count) {
   1985		pr_debug("Unable to process APTPL metadata while"
   1986			" active fabric exports exist\n");
   1987		return -EINVAL;
   1988	}
   1989
   1990	opts = kstrdup(page, GFP_KERNEL);
   1991	if (!opts)
   1992		return -ENOMEM;
   1993
   1994	orig = opts;
   1995	while ((ptr = strsep(&opts, ",\n")) != NULL) {
   1996		if (!*ptr)
   1997			continue;
   1998
   1999		token = match_token(ptr, tokens, args);
   2000		switch (token) {
   2001		case Opt_initiator_fabric:
   2002			i_fabric = match_strdup(args);
   2003			if (!i_fabric) {
   2004				ret = -ENOMEM;
   2005				goto out;
   2006			}
   2007			break;
   2008		case Opt_initiator_node:
   2009			i_port = match_strdup(args);
   2010			if (!i_port) {
   2011				ret = -ENOMEM;
   2012				goto out;
   2013			}
   2014			if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
   2015				pr_err("APTPL metadata initiator_node="
   2016					" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
   2017					PR_APTPL_MAX_IPORT_LEN);
   2018				ret = -EINVAL;
    2019				goto out;
   2020			}
   2021			break;
   2022		case Opt_initiator_sid:
   2023			isid = match_strdup(args);
   2024			if (!isid) {
   2025				ret = -ENOMEM;
   2026				goto out;
   2027			}
   2028			if (strlen(isid) >= PR_REG_ISID_LEN) {
   2029				pr_err("APTPL metadata initiator_isid"
   2030					"= exceeds PR_REG_ISID_LEN: %d\n",
   2031					PR_REG_ISID_LEN);
   2032				ret = -EINVAL;
    2033				goto out;
   2034			}
   2035			break;
   2036		case Opt_sa_res_key:
   2037			ret = match_u64(args,  &tmp_ll);
   2038			if (ret < 0) {
   2039				pr_err("kstrtoull() failed for sa_res_key=\n");
   2040				goto out;
   2041			}
   2042			sa_res_key = (u64)tmp_ll;
   2043			break;
   2044		/*
   2045		 * PR APTPL Metadata for Reservation
   2046		 */
   2047		case Opt_res_holder:
   2048			ret = match_int(args, &arg);
   2049			if (ret)
   2050				goto out;
   2051			res_holder = arg;
   2052			break;
   2053		case Opt_res_type:
   2054			ret = match_int(args, &arg);
   2055			if (ret)
   2056				goto out;
   2057			type = (u8)arg;
   2058			break;
   2059		case Opt_res_scope:
   2060			ret = match_int(args, &arg);
   2061			if (ret)
   2062				goto out;
   2063			break;
   2064		case Opt_res_all_tg_pt:
   2065			ret = match_int(args, &arg);
   2066			if (ret)
   2067				goto out;
   2068			all_tg_pt = (int)arg;
   2069			break;
   2070		case Opt_mapped_lun:
   2071			ret = match_u64(args, &tmp_ll);
   2072			if (ret)
   2073				goto out;
   2074			mapped_lun = (u64)tmp_ll;
   2075			break;
   2076		/*
   2077		 * PR APTPL Metadata for Target Port
   2078		 */
   2079		case Opt_target_fabric:
   2080			t_fabric = match_strdup(args);
   2081			if (!t_fabric) {
   2082				ret = -ENOMEM;
   2083				goto out;
   2084			}
   2085			break;
   2086		case Opt_target_node:
   2087			t_port = match_strdup(args);
   2088			if (!t_port) {
   2089				ret = -ENOMEM;
   2090				goto out;
   2091			}
   2092			if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
   2093				pr_err("APTPL metadata target_node="
   2094					" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
   2095					PR_APTPL_MAX_TPORT_LEN);
   2096				ret = -EINVAL;
    2097				goto out;
   2098			}
   2099			break;
   2100		case Opt_tpgt:
   2101			ret = match_int(args, &arg);
   2102			if (ret)
   2103				goto out;
   2104			tpgt = (u16)arg;
   2105			break;
   2106		case Opt_port_rtpi:
   2107			ret = match_int(args, &arg);
   2108			if (ret)
   2109				goto out;
   2110			break;
   2111		case Opt_target_lun:
   2112			ret = match_u64(args, &tmp_ll);
   2113			if (ret)
   2114				goto out;
   2115			target_lun = (u64)tmp_ll;
   2116			break;
   2117		default:
   2118			break;
   2119		}
   2120	}
   2121
   2122	if (!i_port || !t_port || !sa_res_key) {
   2123		pr_err("Illegal parameters for APTPL registration\n");
   2124		ret = -EINVAL;
   2125		goto out;
   2126	}
   2127
    2128	if (res_holder && !type) {
   2129		pr_err("Illegal PR type: 0x%02x for reservation"
   2130				" holder\n", type);
   2131		ret = -EINVAL;
   2132		goto out;
   2133	}
   2134
   2135	ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
   2136			i_port, isid, mapped_lun, t_port, tpgt, target_lun,
   2137			res_holder, all_tg_pt, type);
   2138out:
   2139	kfree(i_fabric);
   2140	kfree(i_port);
   2141	kfree(isid);
   2142	kfree(t_fabric);
   2143	kfree(t_port);
   2144	kfree(orig);
   2145	return (ret == 0) ? count : ret;
   2146}
   2147
   2148
   2149CONFIGFS_ATTR_RO(target_pr_, res_holder);
   2150CONFIGFS_ATTR_RO(target_pr_, res_pr_all_tgt_pts);
   2151CONFIGFS_ATTR_RO(target_pr_, res_pr_generation);
   2152CONFIGFS_ATTR_RO(target_pr_, res_pr_holder_tg_port);
   2153CONFIGFS_ATTR_RO(target_pr_, res_pr_registered_i_pts);
   2154CONFIGFS_ATTR_RO(target_pr_, res_pr_type);
   2155CONFIGFS_ATTR_RO(target_pr_, res_type);
   2156CONFIGFS_ATTR_RO(target_pr_, res_aptpl_active);
   2157CONFIGFS_ATTR(target_pr_, res_aptpl_metadata);
   2158
   2159static struct configfs_attribute *target_core_dev_pr_attrs[] = {
   2160	&target_pr_attr_res_holder,
   2161	&target_pr_attr_res_pr_all_tgt_pts,
   2162	&target_pr_attr_res_pr_generation,
   2163	&target_pr_attr_res_pr_holder_tg_port,
   2164	&target_pr_attr_res_pr_registered_i_pts,
   2165	&target_pr_attr_res_pr_type,
   2166	&target_pr_attr_res_type,
   2167	&target_pr_attr_res_aptpl_active,
   2168	&target_pr_attr_res_aptpl_metadata,
   2169	NULL,
   2170};
   2171
   2172TB_CIT_SETUP(dev_pr, NULL, NULL, target_core_dev_pr_attrs);
   2173
   2174/*  End functions for struct config_item_type tb_dev_pr_cit */
   2175
   2176/*  Start functions for struct config_item_type tb_dev_cit */
   2177
   2178static inline struct se_device *to_device(struct config_item *item)
   2179{
   2180	return container_of(to_config_group(item), struct se_device, dev_group);
   2181}
   2182
   2183static ssize_t target_dev_info_show(struct config_item *item, char *page)
   2184{
   2185	struct se_device *dev = to_device(item);
   2186	int bl = 0;
   2187	ssize_t read_bytes = 0;
   2188
   2189	transport_dump_dev_state(dev, page, &bl);
   2190	read_bytes += bl;
   2191	read_bytes += dev->transport->show_configfs_dev_params(dev,
   2192			page+read_bytes);
   2193	return read_bytes;
   2194}
   2195
   2196static ssize_t target_dev_control_store(struct config_item *item,
   2197		const char *page, size_t count)
   2198{
   2199	struct se_device *dev = to_device(item);
   2200
   2201	return dev->transport->set_configfs_dev_params(dev, page, count);
   2202}
   2203
   2204static ssize_t target_dev_alias_show(struct config_item *item, char *page)
   2205{
   2206	struct se_device *dev = to_device(item);
   2207
   2208	if (!(dev->dev_flags & DF_USING_ALIAS))
   2209		return 0;
   2210
   2211	return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
   2212}
   2213
   2214static ssize_t target_dev_alias_store(struct config_item *item,
   2215		const char *page, size_t count)
   2216{
   2217	struct se_device *dev = to_device(item);
   2218	struct se_hba *hba = dev->se_hba;
   2219	ssize_t read_bytes;
   2220
   2221	if (count > (SE_DEV_ALIAS_LEN-1)) {
   2222		pr_err("alias count: %d exceeds"
   2223			" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
   2224			SE_DEV_ALIAS_LEN-1);
   2225		return -EINVAL;
   2226	}
   2227
   2228	read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
   2229	if (!read_bytes)
   2230		return -EINVAL;
   2231	if (dev->dev_alias[read_bytes - 1] == '\n')
   2232		dev->dev_alias[read_bytes - 1] = '\0';
   2233
   2234	dev->dev_flags |= DF_USING_ALIAS;
   2235
   2236	pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
   2237		config_item_name(&hba->hba_group.cg_item),
   2238		config_item_name(&dev->dev_group.cg_item),
   2239		dev->dev_alias);
   2240
   2241	return read_bytes;
   2242}
   2243
   2244static ssize_t target_dev_udev_path_show(struct config_item *item, char *page)
   2245{
   2246	struct se_device *dev = to_device(item);
   2247
   2248	if (!(dev->dev_flags & DF_USING_UDEV_PATH))
   2249		return 0;
   2250
   2251	return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
   2252}
   2253
   2254static ssize_t target_dev_udev_path_store(struct config_item *item,
   2255		const char *page, size_t count)
   2256{
   2257	struct se_device *dev = to_device(item);
   2258	struct se_hba *hba = dev->se_hba;
   2259	ssize_t read_bytes;
   2260
   2261	if (count > (SE_UDEV_PATH_LEN-1)) {
   2262		pr_err("udev_path count: %d exceeds"
   2263			" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
   2264			SE_UDEV_PATH_LEN-1);
   2265		return -EINVAL;
   2266	}
   2267
   2268	read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
   2269			"%s", page);
   2270	if (!read_bytes)
   2271		return -EINVAL;
   2272	if (dev->udev_path[read_bytes - 1] == '\n')
   2273		dev->udev_path[read_bytes - 1] = '\0';
   2274
   2275	dev->dev_flags |= DF_USING_UDEV_PATH;
   2276
   2277	pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
   2278		config_item_name(&hba->hba_group.cg_item),
   2279		config_item_name(&dev->dev_group.cg_item),
   2280		dev->udev_path);
   2281
   2282	return read_bytes;
   2283}
   2284
   2285static ssize_t target_dev_enable_show(struct config_item *item, char *page)
   2286{
   2287	struct se_device *dev = to_device(item);
   2288
   2289	return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev));
   2290}
   2291
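/*
 * Writing "1" (the only documented value; the check below merely looks
 * for a '1' anywhere in the written string) configures the backend
 * device via target_configure_device().  Hypothetical example:
 *
 *   echo 1 > $TARGET/core/$HBA/$STORAGE_OBJECT/enable
 */
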
   2292static ssize_t target_dev_enable_store(struct config_item *item,
   2293		const char *page, size_t count)
   2294{
   2295	struct se_device *dev = to_device(item);
   2296	char *ptr;
   2297	int ret;
   2298
   2299	ptr = strstr(page, "1");
   2300	if (!ptr) {
   2301		pr_err("For dev_enable ops, only valid value"
   2302				" is \"1\"\n");
   2303		return -EINVAL;
   2304	}
   2305
   2306	ret = target_configure_device(dev);
   2307	if (ret)
   2308		return ret;
   2309	return count;
   2310}
   2311
   2312static ssize_t target_dev_alua_lu_gp_show(struct config_item *item, char *page)
   2313{
   2314	struct se_device *dev = to_device(item);
   2315	struct config_item *lu_ci;
   2316	struct t10_alua_lu_gp *lu_gp;
   2317	struct t10_alua_lu_gp_member *lu_gp_mem;
   2318	ssize_t len = 0;
   2319
   2320	lu_gp_mem = dev->dev_alua_lu_gp_mem;
   2321	if (!lu_gp_mem)
   2322		return 0;
   2323
   2324	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
   2325	lu_gp = lu_gp_mem->lu_gp;
   2326	if (lu_gp) {
   2327		lu_ci = &lu_gp->lu_gp_group.cg_item;
   2328		len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
   2329			config_item_name(lu_ci), lu_gp->lu_gp_id);
   2330	}
   2331	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
   2332
   2333	return len;
   2334}
   2335
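/*
 * Writing a logical unit group alias created under core/alua/lu_gps/
 * associates this device with that LU group; writing the literal string
 * "NULL" drops any existing association.  Hypothetical example:
 *
 *   echo some_lu_gp > $TARGET/core/$HBA/$STORAGE_OBJECT/alua_lu_gp
 */
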
   2336static ssize_t target_dev_alua_lu_gp_store(struct config_item *item,
   2337		const char *page, size_t count)
   2338{
   2339	struct se_device *dev = to_device(item);
   2340	struct se_hba *hba = dev->se_hba;
   2341	struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
   2342	struct t10_alua_lu_gp_member *lu_gp_mem;
   2343	unsigned char buf[LU_GROUP_NAME_BUF] = { };
   2344	int move = 0;
   2345
   2346	lu_gp_mem = dev->dev_alua_lu_gp_mem;
   2347	if (!lu_gp_mem)
   2348		return count;
   2349
    2350	if (count >= LU_GROUP_NAME_BUF) {
   2351		pr_err("ALUA LU Group Alias too large!\n");
   2352		return -EINVAL;
   2353	}
   2354	memcpy(buf, page, count);
   2355	/*
   2356	 * Any ALUA logical unit alias besides "NULL" means we will be
   2357	 * making a new group association.
   2358	 */
   2359	if (strcmp(strstrip(buf), "NULL")) {
   2360		/*
   2361		 * core_alua_get_lu_gp_by_name() will increment reference to
   2362		 * struct t10_alua_lu_gp.  This reference is released with
    2363		 * core_alua_put_lu_gp_from_name() below.
   2364		 */
   2365		lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
   2366		if (!lu_gp_new)
   2367			return -ENODEV;
   2368	}
   2369
   2370	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
   2371	lu_gp = lu_gp_mem->lu_gp;
   2372	if (lu_gp) {
   2373		/*
   2374		 * Clearing an existing lu_gp association, and replacing
   2375		 * with NULL
   2376		 */
   2377		if (!lu_gp_new) {
   2378			pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
   2379				" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
   2380				" %hu\n",
   2381				config_item_name(&hba->hba_group.cg_item),
   2382				config_item_name(&dev->dev_group.cg_item),
   2383				config_item_name(&lu_gp->lu_gp_group.cg_item),
   2384				lu_gp->lu_gp_id);
   2385
   2386			__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
   2387			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
   2388
   2389			return count;
   2390		}
   2391		/*
   2392		 * Removing existing association of lu_gp_mem with lu_gp
   2393		 */
   2394		__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
   2395		move = 1;
   2396	}
   2397	/*
   2398	 * Associate lu_gp_mem with lu_gp_new.
   2399	 */
   2400	__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
   2401	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
   2402
   2403	pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
   2404		" core/alua/lu_gps/%s, ID: %hu\n",
   2405		(move) ? "Moving" : "Adding",
   2406		config_item_name(&hba->hba_group.cg_item),
   2407		config_item_name(&dev->dev_group.cg_item),
   2408		config_item_name(&lu_gp_new->lu_gp_group.cg_item),
   2409		lu_gp_new->lu_gp_id);
   2410
   2411	core_alua_put_lu_gp_from_name(lu_gp_new);
   2412	return count;
   2413}
   2414
   2415static ssize_t target_dev_lba_map_show(struct config_item *item, char *page)
   2416{
   2417	struct se_device *dev = to_device(item);
   2418	struct t10_alua_lba_map *map;
   2419	struct t10_alua_lba_map_member *mem;
   2420	char *b = page;
   2421	int bl = 0;
   2422	char state;
   2423
   2424	spin_lock(&dev->t10_alua.lba_map_lock);
   2425	if (!list_empty(&dev->t10_alua.lba_map_list))
    2426		bl += sprintf(b + bl, "%u %u\n",
    2427			      dev->t10_alua.lba_map_segment_size,
    2428			      dev->t10_alua.lba_map_segment_multiplier);
   2429	list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
   2430		bl += sprintf(b + bl, "%llu %llu",
   2431			      map->lba_map_first_lba, map->lba_map_last_lba);
   2432		list_for_each_entry(mem, &map->lba_map_mem_list,
   2433				    lba_map_mem_list) {
   2434			switch (mem->lba_map_mem_alua_state) {
   2435			case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
   2436				state = 'O';
   2437				break;
   2438			case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
   2439				state = 'A';
   2440				break;
   2441			case ALUA_ACCESS_STATE_STANDBY:
   2442				state = 'S';
   2443				break;
   2444			case ALUA_ACCESS_STATE_UNAVAILABLE:
   2445				state = 'U';
   2446				break;
   2447			default:
   2448				state = '.';
   2449				break;
   2450			}
   2451			bl += sprintf(b + bl, " %d:%c",
   2452				      mem->lba_map_mem_alua_pg_id, state);
   2453		}
   2454		bl += sprintf(b + bl, "\n");
   2455	}
   2456	spin_unlock(&dev->t10_alua.lba_map_lock);
   2457	return bl;
   2458}
   2459
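/*
 * target_dev_lba_map_store() below expects the same layout that the
 * show method above emits: a "segment_size segment_mult" header line,
 * then one "start_lba end_lba" line per segment followed by pg_id:state
 * descriptors (states O/A/S/U), with every line defining the same
 * number of port groups.  Illustrative input (values hypothetical):
 *
 *   64 1
 *   0 131071 0:O 1:A
 *   131072 262143 0:A 1:O
 */
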
   2460static ssize_t target_dev_lba_map_store(struct config_item *item,
   2461		const char *page, size_t count)
   2462{
   2463	struct se_device *dev = to_device(item);
   2464	struct t10_alua_lba_map *lba_map = NULL;
   2465	struct list_head lba_list;
   2466	char *map_entries, *orig, *ptr;
   2467	char state;
   2468	int pg_num = -1, pg;
   2469	int ret = 0, num = 0, pg_id, alua_state;
   2470	unsigned long start_lba = -1, end_lba = -1;
   2471	unsigned long segment_size = -1, segment_mult = -1;
   2472
   2473	orig = map_entries = kstrdup(page, GFP_KERNEL);
   2474	if (!map_entries)
   2475		return -ENOMEM;
   2476
   2477	INIT_LIST_HEAD(&lba_list);
   2478	while ((ptr = strsep(&map_entries, "\n")) != NULL) {
   2479		if (!*ptr)
   2480			continue;
   2481
   2482		if (num == 0) {
   2483			if (sscanf(ptr, "%lu %lu\n",
   2484				   &segment_size, &segment_mult) != 2) {
   2485				pr_err("Invalid line %d\n", num);
   2486				ret = -EINVAL;
   2487				break;
   2488			}
   2489			num++;
   2490			continue;
   2491		}
   2492		if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
   2493			pr_err("Invalid line %d\n", num);
   2494			ret = -EINVAL;
   2495			break;
   2496		}
   2497		ptr = strchr(ptr, ' ');
   2498		if (!ptr) {
   2499			pr_err("Invalid line %d, missing end lba\n", num);
   2500			ret = -EINVAL;
   2501			break;
   2502		}
   2503		ptr++;
   2504		ptr = strchr(ptr, ' ');
   2505		if (!ptr) {
   2506			pr_err("Invalid line %d, missing state definitions\n",
   2507			       num);
   2508			ret = -EINVAL;
   2509			break;
   2510		}
   2511		ptr++;
   2512		lba_map = core_alua_allocate_lba_map(&lba_list,
   2513						     start_lba, end_lba);
   2514		if (IS_ERR(lba_map)) {
   2515			ret = PTR_ERR(lba_map);
   2516			break;
   2517		}
   2518		pg = 0;
   2519		while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
   2520			switch (state) {
   2521			case 'O':
   2522				alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
   2523				break;
   2524			case 'A':
   2525				alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
   2526				break;
   2527			case 'S':
   2528				alua_state = ALUA_ACCESS_STATE_STANDBY;
   2529				break;
   2530			case 'U':
   2531				alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
   2532				break;
   2533			default:
   2534				pr_err("Invalid ALUA state '%c'\n", state);
   2535				ret = -EINVAL;
   2536				goto out;
   2537			}
   2538
   2539			ret = core_alua_allocate_lba_map_mem(lba_map,
   2540							     pg_id, alua_state);
   2541			if (ret) {
   2542				pr_err("Invalid target descriptor %d:%c "
   2543				       "at line %d\n",
   2544				       pg_id, state, num);
   2545				break;
   2546			}
   2547			pg++;
   2548			ptr = strchr(ptr, ' ');
   2549			if (ptr)
   2550				ptr++;
   2551			else
   2552				break;
   2553		}
   2554		if (pg_num == -1)
    2555			pg_num = pg;
   2556		else if (pg != pg_num) {
    2557			pr_err("Only %d of %d port group definitions "
    2558			       "at line %d\n", pg, pg_num, num);
   2559			ret = -EINVAL;
   2560			break;
   2561		}
   2562		num++;
   2563	}
   2564out:
   2565	if (ret) {
   2566		core_alua_free_lba_map(&lba_list);
   2567		count = ret;
   2568	} else
   2569		core_alua_set_lba_map(dev, &lba_list,
   2570				      segment_size, segment_mult);
   2571	kfree(orig);
   2572	return count;
   2573}
   2574
   2575CONFIGFS_ATTR_RO(target_dev_, info);
   2576CONFIGFS_ATTR_WO(target_dev_, control);
   2577CONFIGFS_ATTR(target_dev_, alias);
   2578CONFIGFS_ATTR(target_dev_, udev_path);
   2579CONFIGFS_ATTR(target_dev_, enable);
   2580CONFIGFS_ATTR(target_dev_, alua_lu_gp);
   2581CONFIGFS_ATTR(target_dev_, lba_map);
   2582
   2583static struct configfs_attribute *target_core_dev_attrs[] = {
   2584	&target_dev_attr_info,
   2585	&target_dev_attr_control,
   2586	&target_dev_attr_alias,
   2587	&target_dev_attr_udev_path,
   2588	&target_dev_attr_enable,
   2589	&target_dev_attr_alua_lu_gp,
   2590	&target_dev_attr_lba_map,
   2591	NULL,
   2592};
   2593
   2594static void target_core_dev_release(struct config_item *item)
   2595{
   2596	struct config_group *dev_cg = to_config_group(item);
   2597	struct se_device *dev =
   2598		container_of(dev_cg, struct se_device, dev_group);
   2599
   2600	target_free_device(dev);
   2601}
   2602
   2603/*
   2604 * Used in target_core_fabric_configfs.c to verify valid se_device symlink
   2605 * within target_fabric_port_link()
   2606 */
   2607struct configfs_item_operations target_core_dev_item_ops = {
   2608	.release		= target_core_dev_release,
   2609};
   2610
   2611TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);
   2612
   2613/* End functions for struct config_item_type tb_dev_cit */
   2614
   2615/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
   2616
   2617static inline struct t10_alua_lu_gp *to_lu_gp(struct config_item *item)
   2618{
   2619	return container_of(to_config_group(item), struct t10_alua_lu_gp,
   2620			lu_gp_group);
   2621}
   2622
   2623static ssize_t target_lu_gp_lu_gp_id_show(struct config_item *item, char *page)
   2624{
   2625	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
   2626
   2627	if (!lu_gp->lu_gp_valid_id)
   2628		return 0;
   2629	return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
   2630}
   2631
   2632static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item,
   2633		const char *page, size_t count)
   2634{
   2635	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
   2636	struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
   2637	unsigned long lu_gp_id;
   2638	int ret;
   2639
   2640	ret = kstrtoul(page, 0, &lu_gp_id);
   2641	if (ret < 0) {
   2642		pr_err("kstrtoul() returned %d for"
   2643			" lu_gp_id\n", ret);
   2644		return ret;
   2645	}
   2646	if (lu_gp_id > 0x0000ffff) {
   2647		pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
   2648			" 0x0000ffff\n", lu_gp_id);
   2649		return -EINVAL;
   2650	}
   2651
   2652	ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
   2653	if (ret < 0)
   2654		return -EINVAL;
   2655
   2656	pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
   2657		" Group: core/alua/lu_gps/%s to ID: %hu\n",
   2658		config_item_name(&alua_lu_gp_cg->cg_item),
   2659		lu_gp->lu_gp_id);
   2660
   2661	return count;
   2662}
   2663
   2664static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
   2665{
   2666	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
   2667	struct se_device *dev;
   2668	struct se_hba *hba;
   2669	struct t10_alua_lu_gp_member *lu_gp_mem;
   2670	ssize_t len = 0, cur_len;
   2671	unsigned char buf[LU_GROUP_NAME_BUF] = { };
   2672
   2673	spin_lock(&lu_gp->lu_gp_lock);
   2674	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
   2675		dev = lu_gp_mem->lu_gp_mem_dev;
   2676		hba = dev->se_hba;
   2677
   2678		cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
   2679			config_item_name(&hba->hba_group.cg_item),
   2680			config_item_name(&dev->dev_group.cg_item));
   2681		cur_len++; /* Extra byte for NULL terminator */
   2682
   2683		if ((cur_len + len) > PAGE_SIZE) {
   2684			pr_warn("Ran out of lu_gp_show_attr"
   2685				"_members buffer\n");
   2686			break;
   2687		}
   2688		memcpy(page+len, buf, cur_len);
   2689		len += cur_len;
   2690	}
   2691	spin_unlock(&lu_gp->lu_gp_lock);
   2692
   2693	return len;
   2694}
   2695
   2696CONFIGFS_ATTR(target_lu_gp_, lu_gp_id);
   2697CONFIGFS_ATTR_RO(target_lu_gp_, members);
   2698
   2699static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
   2700	&target_lu_gp_attr_lu_gp_id,
   2701	&target_lu_gp_attr_members,
   2702	NULL,
   2703};
   2704
   2705static void target_core_alua_lu_gp_release(struct config_item *item)
   2706{
   2707	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
   2708			struct t10_alua_lu_gp, lu_gp_group);
   2709
   2710	core_alua_free_lu_gp(lu_gp);
   2711}
   2712
   2713static struct configfs_item_operations target_core_alua_lu_gp_ops = {
   2714	.release		= target_core_alua_lu_gp_release,
   2715};
   2716
   2717static const struct config_item_type target_core_alua_lu_gp_cit = {
   2718	.ct_item_ops		= &target_core_alua_lu_gp_ops,
   2719	.ct_attrs		= target_core_alua_lu_gp_attrs,
   2720	.ct_owner		= THIS_MODULE,
   2721};
   2722
   2723/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
   2724
   2725/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
   2726
   2727static struct config_group *target_core_alua_create_lu_gp(
   2728	struct config_group *group,
   2729	const char *name)
   2730{
   2731	struct t10_alua_lu_gp *lu_gp;
   2732	struct config_group *alua_lu_gp_cg = NULL;
   2733	struct config_item *alua_lu_gp_ci = NULL;
   2734
   2735	lu_gp = core_alua_allocate_lu_gp(name, 0);
   2736	if (IS_ERR(lu_gp))
   2737		return NULL;
   2738
   2739	alua_lu_gp_cg = &lu_gp->lu_gp_group;
   2740	alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;
   2741
   2742	config_group_init_type_name(alua_lu_gp_cg, name,
   2743			&target_core_alua_lu_gp_cit);
   2744
   2745	pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
   2746		" Group: core/alua/lu_gps/%s\n",
   2747		config_item_name(alua_lu_gp_ci));
   2748
   2749	return alua_lu_gp_cg;
   2750
   2751}
   2752
   2753static void target_core_alua_drop_lu_gp(
   2754	struct config_group *group,
   2755	struct config_item *item)
   2756{
   2757	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
   2758			struct t10_alua_lu_gp, lu_gp_group);
   2759
   2760	pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
   2761		" Group: core/alua/lu_gps/%s, ID: %hu\n",
   2762		config_item_name(item), lu_gp->lu_gp_id);
   2763	/*
   2764	 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
   2765	 * -> target_core_alua_lu_gp_release()
   2766	 */
   2767	config_item_put(item);
   2768}
   2769
   2770static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
   2771	.make_group		= &target_core_alua_create_lu_gp,
   2772	.drop_item		= &target_core_alua_drop_lu_gp,
   2773};
   2774
   2775static const struct config_item_type target_core_alua_lu_gps_cit = {
   2776	.ct_item_ops		= NULL,
   2777	.ct_group_ops		= &target_core_alua_lu_gps_group_ops,
   2778	.ct_owner		= THIS_MODULE,
   2779};
   2780
   2781/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
   2782
   2783/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
   2784
   2785static inline struct t10_alua_tg_pt_gp *to_tg_pt_gp(struct config_item *item)
   2786{
   2787	return container_of(to_config_group(item), struct t10_alua_tg_pt_gp,
   2788			tg_pt_gp_group);
   2789}
   2790
   2791static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
   2792		char *page)
   2793{
   2794	return sprintf(page, "%d\n",
   2795		       to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
   2796}
   2797
   2798static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
   2799		const char *page, size_t count)
   2800{
   2801	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
   2802	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
   2803	unsigned long tmp;
   2804	int new_state, ret;
   2805
   2806	if (!tg_pt_gp->tg_pt_gp_valid_id) {
   2807		pr_err("Unable to do implicit ALUA on invalid tg_pt_gp ID\n");
   2808		return -EINVAL;
   2809	}
   2810	if (!target_dev_configured(dev)) {
   2811		pr_err("Unable to set alua_access_state while device is"
   2812		       " not configured\n");
   2813		return -ENODEV;
   2814	}
   2815
   2816	ret = kstrtoul(page, 0, &tmp);
   2817	if (ret < 0) {
   2818		pr_err("Unable to extract new ALUA access state from"
   2819				" %s\n", page);
   2820		return ret;
   2821	}
   2822	new_state = (int)tmp;
   2823
   2824	if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
   2825		pr_err("Unable to process implicit configfs ALUA"
   2826			" transition while TPGS_IMPLICIT_ALUA is disabled\n");
   2827		return -EINVAL;
   2828	}
   2829	if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
   2830	    new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
   2831		/* LBA DEPENDENT is only allowed with implicit ALUA */
   2832		pr_err("Unable to process implicit configfs ALUA transition"
   2833		       " while explicit ALUA management is enabled\n");
   2834		return -EINVAL;
   2835	}
   2836
   2837	ret = core_alua_do_port_transition(tg_pt_gp, dev,
   2838					NULL, NULL, new_state, 0);
   2839	return (!ret) ? count : -EINVAL;
   2840}
   2841
   2842static ssize_t target_tg_pt_gp_alua_access_status_show(struct config_item *item,
   2843		char *page)
   2844{
   2845	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
   2846	return sprintf(page, "%s\n",
   2847		core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
   2848}
   2849
   2850static ssize_t target_tg_pt_gp_alua_access_status_store(
   2851		struct config_item *item, const char *page, size_t count)
   2852{
   2853	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
   2854	unsigned long tmp;
   2855	int new_status, ret;
   2856
   2857	if (!tg_pt_gp->tg_pt_gp_valid_id) {
   2858		pr_err("Unable to set ALUA access status on invalid tg_pt_gp ID\n");
   2859		return -EINVAL;
   2860	}
   2861
   2862	ret = kstrtoul(page, 0, &tmp);
   2863	if (ret < 0) {
   2864		pr_err("Unable to extract new ALUA access status"
   2865				" from %s\n", page);
   2866		return ret;
   2867	}
   2868	new_status = (int)tmp;
   2869
   2870	if ((new_status != ALUA_STATUS_NONE) &&
   2871	    (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
   2872	    (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
   2873		pr_err("Illegal ALUA access status: 0x%02x\n",
   2874				new_status);
   2875		return -EINVAL;
   2876	}
   2877
   2878	tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
   2879	return count;
   2880}
   2881
   2882static ssize_t target_tg_pt_gp_alua_access_type_show(struct config_item *item,
   2883		char *page)
   2884{
   2885	return core_alua_show_access_type(to_tg_pt_gp(item), page);
   2886}
   2887
   2888static ssize_t target_tg_pt_gp_alua_access_type_store(struct config_item *item,
   2889		const char *page, size_t count)
   2890{
   2891	return core_alua_store_access_type(to_tg_pt_gp(item), page, count);
   2892}
   2893
   2894#define ALUA_SUPPORTED_STATE_ATTR(_name, _bit)				\
   2895static ssize_t target_tg_pt_gp_alua_support_##_name##_show(		\
   2896		struct config_item *item, char *p)			\
   2897{									\
   2898	struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item);		\
   2899	return sprintf(p, "%d\n",					\
   2900		!!(t->tg_pt_gp_alua_supported_states & _bit));		\
   2901}									\
   2902									\
   2903static ssize_t target_tg_pt_gp_alua_support_##_name##_store(		\
   2904		struct config_item *item, const char *p, size_t c)	\
   2905{									\
   2906	struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item);		\
   2907	unsigned long tmp;						\
   2908	int ret;							\
   2909									\
   2910	if (!t->tg_pt_gp_valid_id) {					\
   2911		pr_err("Unable to set " #_name " ALUA state on invalid tg_pt_gp ID\n"); \
   2912		return -EINVAL;						\
   2913	}								\
   2914									\
   2915	ret = kstrtoul(p, 0, &tmp);					\
   2916	if (ret < 0) {							\
   2917		pr_err("Invalid value '%s', must be '0' or '1'\n", p);	\
   2918		return -EINVAL;						\
   2919	}								\
   2920	if (tmp > 1) {							\
   2921		pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
   2922		return -EINVAL;						\
   2923	}								\
   2924	if (tmp)							\
   2925		t->tg_pt_gp_alua_supported_states |= _bit;		\
   2926	else								\
   2927		t->tg_pt_gp_alua_supported_states &= ~_bit;		\
   2928									\
   2929	return c;							\
   2930}
   2931
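/*
 * Each instantiation below generates a _show/_store pair that reports or
 * toggles one bit in tg_pt_gp_alua_supported_states, e.g.
 * ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP) yields
 * target_tg_pt_gp_alua_support_standby_show()/_store().
 */
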
   2932ALUA_SUPPORTED_STATE_ATTR(transitioning, ALUA_T_SUP);
   2933ALUA_SUPPORTED_STATE_ATTR(offline, ALUA_O_SUP);
   2934ALUA_SUPPORTED_STATE_ATTR(lba_dependent, ALUA_LBD_SUP);
   2935ALUA_SUPPORTED_STATE_ATTR(unavailable, ALUA_U_SUP);
   2936ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP);
   2937ALUA_SUPPORTED_STATE_ATTR(active_optimized, ALUA_AO_SUP);
   2938ALUA_SUPPORTED_STATE_ATTR(active_nonoptimized, ALUA_AN_SUP);
   2939
   2940static ssize_t target_tg_pt_gp_alua_write_metadata_show(
   2941		struct config_item *item, char *page)
   2942{
   2943	return sprintf(page, "%d\n",
   2944		to_tg_pt_gp(item)->tg_pt_gp_write_metadata);
   2945}
   2946
   2947static ssize_t target_tg_pt_gp_alua_write_metadata_store(
   2948		struct config_item *item, const char *page, size_t count)
   2949{
   2950	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
   2951	unsigned long tmp;
   2952	int ret;
   2953
   2954	ret = kstrtoul(page, 0, &tmp);
   2955	if (ret < 0) {
   2956		pr_err("Unable to extract alua_write_metadata\n");
   2957		return ret;
   2958	}
   2959
   2960	if ((tmp != 0) && (tmp != 1)) {
   2961		pr_err("Illegal value for alua_write_metadata:"
   2962			" %lu\n", tmp);
   2963		return -EINVAL;
   2964	}
   2965	tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
   2966
   2967	return count;
   2968}
   2969
   2970static ssize_t target_tg_pt_gp_nonop_delay_msecs_show(struct config_item *item,
   2971		char *page)
   2972{
   2973	return core_alua_show_nonop_delay_msecs(to_tg_pt_gp(item), page);
   2974}
   2975
   2976static ssize_t target_tg_pt_gp_nonop_delay_msecs_store(struct config_item *item,
   2977		const char *page, size_t count)
   2978{
   2979	return core_alua_store_nonop_delay_msecs(to_tg_pt_gp(item), page,
   2980			count);
   2981}
   2982
   2983static ssize_t target_tg_pt_gp_trans_delay_msecs_show(struct config_item *item,
   2984		char *page)
   2985{
   2986	return core_alua_show_trans_delay_msecs(to_tg_pt_gp(item), page);
   2987}
   2988
   2989static ssize_t target_tg_pt_gp_trans_delay_msecs_store(struct config_item *item,
   2990		const char *page, size_t count)
   2991{
   2992	return core_alua_store_trans_delay_msecs(to_tg_pt_gp(item), page,
   2993			count);
   2994}
   2995
   2996static ssize_t target_tg_pt_gp_implicit_trans_secs_show(
   2997		struct config_item *item, char *page)
   2998{
   2999	return core_alua_show_implicit_trans_secs(to_tg_pt_gp(item), page);
   3000}
   3001
   3002static ssize_t target_tg_pt_gp_implicit_trans_secs_store(
   3003		struct config_item *item, const char *page, size_t count)
   3004{
   3005	return core_alua_store_implicit_trans_secs(to_tg_pt_gp(item), page,
   3006			count);
   3007}
   3008
   3009static ssize_t target_tg_pt_gp_preferred_show(struct config_item *item,
   3010		char *page)
   3011{
   3012	return core_alua_show_preferred_bit(to_tg_pt_gp(item), page);
   3013}
   3014
   3015static ssize_t target_tg_pt_gp_preferred_store(struct config_item *item,
   3016		const char *page, size_t count)
   3017{
   3018	return core_alua_store_preferred_bit(to_tg_pt_gp(item), page, count);
   3019}
   3020
   3021static ssize_t target_tg_pt_gp_tg_pt_gp_id_show(struct config_item *item,
   3022		char *page)
   3023{
   3024	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
   3025
   3026	if (!tg_pt_gp->tg_pt_gp_valid_id)
   3027		return 0;
   3028	return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
   3029}
   3030
   3031static ssize_t target_tg_pt_gp_tg_pt_gp_id_store(struct config_item *item,
   3032		const char *page, size_t count)
   3033{
   3034	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
   3035	struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
   3036	unsigned long tg_pt_gp_id;
   3037	int ret;
   3038
   3039	ret = kstrtoul(page, 0, &tg_pt_gp_id);
   3040	if (ret < 0) {
   3041		pr_err("ALUA tg_pt_gp_id: invalid value '%s' for tg_pt_gp_id\n",
   3042		       page);
   3043		return ret;
   3044	}
   3045	if (tg_pt_gp_id > 0x0000ffff) {
   3046		pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum: 0x0000ffff\n",
   3047		       tg_pt_gp_id);
   3048		return -EINVAL;
   3049	}
   3050
   3051	ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
   3052	if (ret < 0)
   3053		return -EINVAL;
   3054
   3055	pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
   3056		"core/alua/tg_pt_gps/%s to ID: %hu\n",
   3057		config_item_name(&alua_tg_pt_gp_cg->cg_item),
   3058		tg_pt_gp->tg_pt_gp_id);
   3059
   3060	return count;
   3061}
   3062
   3063static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
   3064		char *page)
   3065{
   3066	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
   3067	struct se_lun *lun;
   3068	ssize_t len = 0, cur_len;
   3069	unsigned char buf[TG_PT_GROUP_NAME_BUF] = { };
   3070
   3071	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
   3072	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
   3073			lun_tg_pt_gp_link) {
   3074		struct se_portal_group *tpg = lun->lun_tpg;
   3075
   3076		cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
   3077			"/%s\n", tpg->se_tpg_tfo->fabric_name,
   3078			tpg->se_tpg_tfo->tpg_get_wwn(tpg),
   3079			tpg->se_tpg_tfo->tpg_get_tag(tpg),
   3080			config_item_name(&lun->lun_group.cg_item));
   3081		cur_len++; /* Extra byte for NULL terminator */
   3082
   3083		if ((cur_len + len) > PAGE_SIZE) {
    3084			pr_warn("Ran out of tg_pt_gp_show_attr"
    3085				"_members buffer\n");
   3086			break;
   3087		}
   3088		memcpy(page+len, buf, cur_len);
   3089		len += cur_len;
   3090	}
   3091	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
   3092
   3093	return len;
   3094}
   3095
   3096CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_state);
   3097CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_status);
   3098CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_type);
   3099CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_transitioning);
   3100CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_offline);
   3101CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_lba_dependent);
   3102CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_unavailable);
   3103CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_standby);
   3104CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_optimized);
   3105CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_nonoptimized);
   3106CONFIGFS_ATTR(target_tg_pt_gp_, alua_write_metadata);
   3107CONFIGFS_ATTR(target_tg_pt_gp_, nonop_delay_msecs);
   3108CONFIGFS_ATTR(target_tg_pt_gp_, trans_delay_msecs);
   3109CONFIGFS_ATTR(target_tg_pt_gp_, implicit_trans_secs);
   3110CONFIGFS_ATTR(target_tg_pt_gp_, preferred);
   3111CONFIGFS_ATTR(target_tg_pt_gp_, tg_pt_gp_id);
   3112CONFIGFS_ATTR_RO(target_tg_pt_gp_, members);
   3113
   3114static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
   3115	&target_tg_pt_gp_attr_alua_access_state,
   3116	&target_tg_pt_gp_attr_alua_access_status,
   3117	&target_tg_pt_gp_attr_alua_access_type,
   3118	&target_tg_pt_gp_attr_alua_support_transitioning,
   3119	&target_tg_pt_gp_attr_alua_support_offline,
   3120	&target_tg_pt_gp_attr_alua_support_lba_dependent,
   3121	&target_tg_pt_gp_attr_alua_support_unavailable,
   3122	&target_tg_pt_gp_attr_alua_support_standby,
   3123	&target_tg_pt_gp_attr_alua_support_active_nonoptimized,
   3124	&target_tg_pt_gp_attr_alua_support_active_optimized,
   3125	&target_tg_pt_gp_attr_alua_write_metadata,
   3126	&target_tg_pt_gp_attr_nonop_delay_msecs,
   3127	&target_tg_pt_gp_attr_trans_delay_msecs,
   3128	&target_tg_pt_gp_attr_implicit_trans_secs,
   3129	&target_tg_pt_gp_attr_preferred,
   3130	&target_tg_pt_gp_attr_tg_pt_gp_id,
   3131	&target_tg_pt_gp_attr_members,
   3132	NULL,
   3133};
   3134
   3135static void target_core_alua_tg_pt_gp_release(struct config_item *item)
   3136{
   3137	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
   3138			struct t10_alua_tg_pt_gp, tg_pt_gp_group);
   3139
   3140	core_alua_free_tg_pt_gp(tg_pt_gp);
   3141}
   3142
   3143static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
   3144	.release		= target_core_alua_tg_pt_gp_release,
   3145};
   3146
   3147static const struct config_item_type target_core_alua_tg_pt_gp_cit = {
   3148	.ct_item_ops		= &target_core_alua_tg_pt_gp_ops,
   3149	.ct_attrs		= target_core_alua_tg_pt_gp_attrs,
   3150	.ct_owner		= THIS_MODULE,
   3151};
   3152
   3153/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
   3154
   3155/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */
   3156
   3157static struct config_group *target_core_alua_create_tg_pt_gp(
   3158	struct config_group *group,
   3159	const char *name)
   3160{
   3161	struct t10_alua *alua = container_of(group, struct t10_alua,
   3162					alua_tg_pt_gps_group);
   3163	struct t10_alua_tg_pt_gp *tg_pt_gp;
   3164	struct config_group *alua_tg_pt_gp_cg = NULL;
   3165	struct config_item *alua_tg_pt_gp_ci = NULL;
   3166
   3167	tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
   3168	if (!tg_pt_gp)
   3169		return NULL;
   3170
   3171	alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
   3172	alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
   3173
   3174	config_group_init_type_name(alua_tg_pt_gp_cg, name,
   3175			&target_core_alua_tg_pt_gp_cit);
   3176
   3177	pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
   3178		" Group: alua/tg_pt_gps/%s\n",
   3179		config_item_name(alua_tg_pt_gp_ci));
   3180
   3181	return alua_tg_pt_gp_cg;
   3182}
   3183
   3184static void target_core_alua_drop_tg_pt_gp(
   3185	struct config_group *group,
   3186	struct config_item *item)
   3187{
   3188	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
   3189			struct t10_alua_tg_pt_gp, tg_pt_gp_group);
   3190
   3191	pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
   3192		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
   3193		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
   3194	/*
   3195	 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
   3196	 * -> target_core_alua_tg_pt_gp_release().
   3197	 */
   3198	config_item_put(item);
   3199}
   3200
   3201static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
   3202	.make_group		= &target_core_alua_create_tg_pt_gp,
   3203	.drop_item		= &target_core_alua_drop_tg_pt_gp,
   3204};
   3205
   3206TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);
   3207
   3208/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
   3209
   3210/* Start functions for struct config_item_type target_core_alua_cit */
   3211
   3212/*
   3213 * target_core_alua_cit is a ConfigFS group that lives under
   3214 * /sys/kernel/config/target/core/alua.  There are default groups
   3215 * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
   3216 * target_core_alua_cit in target_core_init_configfs() below.
   3217 */
   3218static const struct config_item_type target_core_alua_cit = {
   3219	.ct_item_ops		= NULL,
   3220	.ct_attrs		= NULL,
   3221	.ct_owner		= THIS_MODULE,
   3222};
   3223
   3224/* End functions for struct config_item_type target_core_alua_cit */
   3225
   3226/* Start functions for struct config_item_type tb_dev_stat_cit */
   3227
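/*
 * The per-device statistics directory carries only fixed default groups
 * (populated via target_stat_setup_dev_default_groups() from
 * target_core_make_subdev() below), so user-driven mkdir/rmdir is
 * rejected outright.
 */
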
   3228static struct config_group *target_core_stat_mkdir(
   3229	struct config_group *group,
   3230	const char *name)
   3231{
   3232	return ERR_PTR(-ENOSYS);
   3233}
   3234
   3235static void target_core_stat_rmdir(
   3236	struct config_group *group,
   3237	struct config_item *item)
   3238{
   3239	return;
   3240}
   3241
   3242static struct configfs_group_operations target_core_stat_group_ops = {
   3243	.make_group		= &target_core_stat_mkdir,
   3244	.drop_item		= &target_core_stat_rmdir,
   3245};
   3246
   3247TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);
   3248
   3249/* End functions for struct config_item_type tb_dev_stat_cit */
   3250
   3251/* Start functions for struct config_item_type target_core_hba_cit */
   3252
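/*
 * target_core_make_subdev() assembles the core/$HBA/$DEV layout: the
 * default groups "action", "attrib", "pr", "wwn", "alua" and
 * "statistics" hang off the device group, with alua/default_tg_pt_gp
 * created underneath "alua".
 */
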
   3253static struct config_group *target_core_make_subdev(
   3254	struct config_group *group,
   3255	const char *name)
   3256{
   3257	struct t10_alua_tg_pt_gp *tg_pt_gp;
   3258	struct config_item *hba_ci = &group->cg_item;
   3259	struct se_hba *hba = item_to_hba(hba_ci);
   3260	struct target_backend *tb = hba->backend;
   3261	struct se_device *dev;
   3262	int errno = -ENOMEM, ret;
   3263
   3264	ret = mutex_lock_interruptible(&hba->hba_access_mutex);
   3265	if (ret)
   3266		return ERR_PTR(ret);
   3267
   3268	dev = target_alloc_device(hba, name);
   3269	if (!dev)
   3270		goto out_unlock;
   3271
   3272	config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit);
   3273
   3274	config_group_init_type_name(&dev->dev_action_group, "action",
   3275			&tb->tb_dev_action_cit);
   3276	configfs_add_default_group(&dev->dev_action_group, &dev->dev_group);
   3277
   3278	config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
   3279			&tb->tb_dev_attrib_cit);
   3280	configfs_add_default_group(&dev->dev_attrib.da_group, &dev->dev_group);
   3281
   3282	config_group_init_type_name(&dev->dev_pr_group, "pr",
   3283			&tb->tb_dev_pr_cit);
   3284	configfs_add_default_group(&dev->dev_pr_group, &dev->dev_group);
   3285
   3286	config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
   3287			&tb->tb_dev_wwn_cit);
   3288	configfs_add_default_group(&dev->t10_wwn.t10_wwn_group,
   3289			&dev->dev_group);
   3290
   3291	config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
   3292			"alua", &tb->tb_dev_alua_tg_pt_gps_cit);
   3293	configfs_add_default_group(&dev->t10_alua.alua_tg_pt_gps_group,
   3294			&dev->dev_group);
   3295
   3296	config_group_init_type_name(&dev->dev_stat_grps.stat_group,
   3297			"statistics", &tb->tb_dev_stat_cit);
   3298	configfs_add_default_group(&dev->dev_stat_grps.stat_group,
   3299			&dev->dev_group);
   3300
   3301	/*
   3302	 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
   3303	 */
   3304	tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
   3305	if (!tg_pt_gp)
   3306		goto out_free_device;
   3307	dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
   3308
   3309	config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
   3310			"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
   3311	configfs_add_default_group(&tg_pt_gp->tg_pt_gp_group,
   3312			&dev->t10_alua.alua_tg_pt_gps_group);
   3313
   3314	/*
   3315	 * Add core/$HBA/$DEV/statistics/ default groups
   3316	 */
   3317	target_stat_setup_dev_default_groups(dev);
   3318
   3319	mutex_lock(&target_devices_lock);
   3320	target_devices++;
   3321	mutex_unlock(&target_devices_lock);
   3322
   3323	mutex_unlock(&hba->hba_access_mutex);
   3324	return &dev->dev_group;
   3325
   3326out_free_device:
   3327	target_free_device(dev);
   3328out_unlock:
   3329	mutex_unlock(&hba->hba_access_mutex);
   3330	return ERR_PTR(errno);
   3331}
   3332
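/*
 * Illustration (not from the source): after a successful
 * `mkdir /sys/kernel/config/target/core/$HBA/$DEV`, the default groups set
 * up above appear automatically, roughly:
 *
 *   $DEV/
 *     action/
 *     attrib/
 *     pr/
 *     wwn/
 *     alua/default_tg_pt_gp/
 *     statistics/
 */
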
   3333static void target_core_drop_subdev(
   3334	struct config_group *group,
   3335	struct config_item *item)
   3336{
   3337	struct config_group *dev_cg = to_config_group(item);
   3338	struct se_device *dev =
   3339		container_of(dev_cg, struct se_device, dev_group);
   3340	struct se_hba *hba;
   3341
   3342	hba = item_to_hba(&dev->se_hba->hba_group.cg_item);
   3343
   3344	mutex_lock(&hba->hba_access_mutex);
   3345
   3346	configfs_remove_default_groups(&dev->dev_stat_grps.stat_group);
   3347	configfs_remove_default_groups(&dev->t10_alua.alua_tg_pt_gps_group);
   3348
	/*
	 * Clear ->default_tg_pt_gp here; the group itself is freed via
	 * core_alua_free_tg_pt_gp() when its config item is released in
	 * target_core_alua_tg_pt_gp_release().
	 */
   3353	dev->t10_alua.default_tg_pt_gp = NULL;
   3354
   3355	configfs_remove_default_groups(dev_cg);
   3356
   3357	/*
   3358	 * se_dev is released from target_core_dev_item_ops->release()
   3359	 */
   3360	config_item_put(item);
   3361
   3362	mutex_lock(&target_devices_lock);
   3363	target_devices--;
   3364	mutex_unlock(&target_devices_lock);
   3365
   3366	mutex_unlock(&hba->hba_access_mutex);
   3367}
   3368
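/*
 * mkdir(2)/rmdir(2) on /sys/kernel/config/target/core/$HBA/$DEV are routed
 * by configfs to ->make_group/->drop_item below.
 */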
   3369static struct configfs_group_operations target_core_hba_group_ops = {
   3370	.make_group		= target_core_make_subdev,
   3371	.drop_item		= target_core_drop_subdev,
   3372};
   3373
   3375static inline struct se_hba *to_hba(struct config_item *item)
   3376{
   3377	return container_of(to_config_group(item), struct se_hba, hba_group);
   3378}
   3379
   3380static ssize_t target_hba_info_show(struct config_item *item, char *page)
   3381{
   3382	struct se_hba *hba = to_hba(item);
   3383
   3384	return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
   3385			hba->hba_id, hba->backend->ops->name,
   3386			TARGET_CORE_VERSION);
   3387}
   3388
   3389static ssize_t target_hba_mode_show(struct config_item *item, char *page)
   3390{
   3391	struct se_hba *hba = to_hba(item);
   3392	int hba_mode = 0;
   3393
   3394	if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
   3395		hba_mode = 1;
   3396
   3397	return sprintf(page, "%d\n", hba_mode);
   3398}
   3399
   3400static ssize_t target_hba_mode_store(struct config_item *item,
   3401		const char *page, size_t count)
   3402{
   3403	struct se_hba *hba = to_hba(item);
   3404	unsigned long mode_flag;
   3405	int ret;
   3406
	if (!hba->backend->ops->pmode_enable_hba)
		return -EINVAL;
   3409
   3410	ret = kstrtoul(page, 0, &mode_flag);
   3411	if (ret < 0) {
   3412		pr_err("Unable to extract hba mode flag: %d\n", ret);
   3413		return ret;
   3414	}
   3415
   3416	if (hba->dev_count) {
   3417		pr_err("Unable to set hba_mode with active devices\n");
   3418		return -EINVAL;
   3419	}
   3420
   3421	ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
   3422	if (ret < 0)
   3423		return -EINVAL;
   3424	if (ret > 0)
   3425		hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
   3426	else if (ret == 0)
   3427		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
   3428
   3429	return count;
   3430}
   3431
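/*
 * Illustrative usage (not from the source):
 *   echo 1 > /sys/kernel/config/target/core/$HBA/hba_mode
 * reaches target_hba_mode_store() above; it only succeeds for backends that
 * implement ->pmode_enable_hba and while hba->dev_count is zero. The
 * CONFIGFS_ATTR* macros below emit `target_attr_hba_info` (read-only) and
 * `target_attr_hba_mode` (read/write), which are wired into
 * target_core_hba_attrs[].
 */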
   3432CONFIGFS_ATTR_RO(target_, hba_info);
   3433CONFIGFS_ATTR(target_, hba_mode);
   3434
   3435static void target_core_hba_release(struct config_item *item)
   3436{
   3437	struct se_hba *hba = container_of(to_config_group(item),
   3438				struct se_hba, hba_group);
   3439	core_delete_hba(hba);
   3440}
   3441
   3442static struct configfs_attribute *target_core_hba_attrs[] = {
   3443	&target_attr_hba_info,
   3444	&target_attr_hba_mode,
   3445	NULL,
   3446};
   3447
   3448static struct configfs_item_operations target_core_hba_item_ops = {
   3449	.release		= target_core_hba_release,
   3450};
   3451
   3452static const struct config_item_type target_core_hba_cit = {
   3453	.ct_item_ops		= &target_core_hba_item_ops,
   3454	.ct_group_ops		= &target_core_hba_group_ops,
   3455	.ct_attrs		= target_core_hba_attrs,
   3456	.ct_owner		= THIS_MODULE,
   3457};
   3458
   3459static struct config_group *target_core_call_addhbatotarget(
   3460	struct config_group *group,
   3461	const char *name)
   3462{
   3463	char *se_plugin_str, *str, *str2;
   3464	struct se_hba *hba;
   3465	char buf[TARGET_CORE_NAME_MAX_LEN] = { };
   3466	unsigned long plugin_dep_id = 0;
   3467	int ret;
   3468
   3469	if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
		pr_err("Passed *name strlen(): %d exceeds TARGET_CORE_NAME_MAX_LEN: %d\n",
			(int)strlen(name), TARGET_CORE_NAME_MAX_LEN);
   3473		return ERR_PTR(-ENAMETOOLONG);
   3474	}
   3475	snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
   3476
   3477	str = strstr(buf, "_");
   3478	if (!str) {
   3479		pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
   3480		return ERR_PTR(-EINVAL);
   3481	}
   3482	se_plugin_str = buf;
	/*
	 * Special case for subsystem plugins that have "_" in their names,
	 * namely rd_direct and rd_mcp.
	 */
   3487	str2 = strstr(str+1, "_");
   3488	if (str2) {
   3489		*str2 = '\0'; /* Terminate for *se_plugin_str */
   3490		str2++; /* Skip to start of plugin dependent ID */
   3491		str = str2;
   3492	} else {
   3493		*str = '\0'; /* Terminate for *se_plugin_str */
   3494		str++; /* Skip to start of plugin dependent ID */
   3495	}
   3496
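	/*
	 * Example: a mkdir of "iblock_0" yields se_plugin_str = "iblock" with
	 * str = "0", while "rd_mcp_1" takes the second-underscore path above,
	 * yielding se_plugin_str = "rd_mcp" with str = "1".
	 */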
   3497	ret = kstrtoul(str, 0, &plugin_dep_id);
   3498	if (ret < 0) {
		pr_err("kstrtoul() returned %d for plugin_dep_id\n", ret);
   3501		return ERR_PTR(ret);
   3502	}
   3503	/*
   3504	 * Load up TCM subsystem plugins if they have not already been loaded.
   3505	 */
   3506	transport_subsystem_check_init();
   3507
   3508	hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
   3509	if (IS_ERR(hba))
   3510		return ERR_CAST(hba);
   3511
   3512	config_group_init_type_name(&hba->hba_group, name,
   3513			&target_core_hba_cit);
   3514
   3515	return &hba->hba_group;
   3516}
   3517
   3518static void target_core_call_delhbafromtarget(
   3519	struct config_group *group,
   3520	struct config_item *item)
   3521{
   3522	/*
   3523	 * core_delete_hba() is called from target_core_hba_item_ops->release()
   3524	 * -> target_core_hba_release()
   3525	 */
   3526	config_item_put(item);
   3527}
   3528
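/*
 * mkdir(2)/rmdir(2) directly under /sys/kernel/config/target/core/ create
 * and drop HBAs through the two helpers above.
 */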
   3529static struct configfs_group_operations target_core_group_ops = {
   3530	.make_group	= target_core_call_addhbatotarget,
   3531	.drop_item	= target_core_call_delhbafromtarget,
   3532};
   3533
   3534static const struct config_item_type target_core_cit = {
   3535	.ct_item_ops	= NULL,
   3536	.ct_group_ops	= &target_core_group_ops,
   3537	.ct_attrs	= NULL,
   3538	.ct_owner	= THIS_MODULE,
   3539};
   3540
/* End functions for struct config_item_type target_core_hba_cit */
   3542
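/*
 * Called when a backend registers (via transport_backend_register()) so the
 * per-backend config_item_types used by target_core_make_subdev() are
 * initialized before devices can be created under an HBA of that type.
 */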
   3543void target_setup_backend_cits(struct target_backend *tb)
   3544{
   3545	target_core_setup_dev_cit(tb);
   3546	target_core_setup_dev_action_cit(tb);
   3547	target_core_setup_dev_attrib_cit(tb);
   3548	target_core_setup_dev_pr_cit(tb);
   3549	target_core_setup_dev_wwn_cit(tb);
   3550	target_core_setup_dev_alua_tg_pt_gps_cit(tb);
   3551	target_core_setup_dev_stat_cit(tb);
   3552}
   3553
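/*
 * Best-effort probe of the preferred database root: if it cannot be opened
 * or is not a directory, db_root is left at its built-in default.
 */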
   3554static void target_init_dbroot(void)
   3555{
   3556	struct file *fp;
   3557
   3558	snprintf(db_root_stage, DB_ROOT_LEN, DB_ROOT_PREFERRED);
   3559	fp = filp_open(db_root_stage, O_RDONLY, 0);
   3560	if (IS_ERR(fp)) {
   3561		pr_err("db_root: cannot open: %s\n", db_root_stage);
   3562		return;
   3563	}
   3564	if (!S_ISDIR(file_inode(fp)->i_mode)) {
   3565		filp_close(fp, NULL);
   3566		pr_err("db_root: not a valid directory: %s\n", db_root_stage);
   3567		return;
   3568	}
   3569	filp_close(fp, NULL);
   3570
   3571	strncpy(db_root, db_root_stage, DB_ROOT_LEN);
   3572	pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
   3573}
   3574
   3575static int __init target_core_init_configfs(void)
   3576{
   3577	struct configfs_subsystem *subsys = &target_core_fabrics;
   3578	struct t10_alua_lu_gp *lu_gp;
   3579	int ret;
   3580
	pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage Engine: %s on %s/%s on " UTS_RELEASE "\n",
		TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
   3584
   3585	config_group_init(&subsys->su_group);
   3586	mutex_init(&subsys->su_mutex);
   3587
   3588	ret = init_se_kmem_caches();
   3589	if (ret < 0)
   3590		return ret;
   3591	/*
   3592	 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
   3593	 * and ALUA Logical Unit Group and Target Port Group infrastructure.
   3594	 */
   3595	config_group_init_type_name(&target_core_hbagroup, "core",
   3596			&target_core_cit);
   3597	configfs_add_default_group(&target_core_hbagroup, &subsys->su_group);
   3598
   3599	/*
   3600	 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
   3601	 */
   3602	config_group_init_type_name(&alua_group, "alua", &target_core_alua_cit);
   3603	configfs_add_default_group(&alua_group, &target_core_hbagroup);
   3604
   3605	/*
   3606	 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
   3607	 * groups under /sys/kernel/config/target/core/alua/
   3608	 */
   3609	config_group_init_type_name(&alua_lu_gps_group, "lu_gps",
   3610			&target_core_alua_lu_gps_cit);
   3611	configfs_add_default_group(&alua_lu_gps_group, &alua_group);
   3612
   3613	/*
   3614	 * Add core/alua/lu_gps/default_lu_gp
   3615	 */
   3616	lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
   3617	if (IS_ERR(lu_gp)) {
   3618		ret = -ENOMEM;
   3619		goto out_global;
   3620	}
   3621
   3622	config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
   3623				&target_core_alua_lu_gp_cit);
   3624	configfs_add_default_group(&lu_gp->lu_gp_group, &alua_lu_gps_group);
   3625
   3626	default_lu_gp = lu_gp;
   3627
   3628	/*
   3629	 * Register the target_core_mod subsystem with configfs.
   3630	 */
   3631	ret = configfs_register_subsystem(subsys);
   3632	if (ret < 0) {
   3633		pr_err("Error %d while registering subsystem %s\n",
   3634			ret, subsys->su_group.cg_item.ci_namebuf);
   3635		goto out_global;
   3636	}
	pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric Infrastructure: " TARGET_CORE_VERSION " on %s/%s on " UTS_RELEASE "\n",
		utsname()->sysname, utsname()->machine);
   3640	/*
   3641	 * Register built-in RAMDISK subsystem logic for virtual LUN 0
   3642	 */
   3643	ret = rd_module_init();
   3644	if (ret < 0)
   3645		goto out;
   3646
   3647	ret = core_dev_setup_virtual_lun0();
   3648	if (ret < 0)
   3649		goto out;
   3650
   3651	ret = target_xcopy_setup_pt();
   3652	if (ret < 0)
   3653		goto out;
   3654
   3655	target_init_dbroot();
   3656
   3657	return 0;
   3658
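	/*
	 * Unwind in reverse order of setup: failures after subsystem
	 * registration also tear down virtual LUN 0 and the ramdisk module;
	 * earlier failures only release the default LU group and kmem caches.
	 */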
   3659out:
   3660	configfs_unregister_subsystem(subsys);
   3661	core_dev_release_virtual_lun0();
   3662	rd_module_exit();
   3663out_global:
   3664	if (default_lu_gp) {
   3665		core_alua_free_lu_gp(default_lu_gp);
   3666		default_lu_gp = NULL;
   3667	}
   3668	release_se_kmem_caches();
   3669	return ret;
   3670}
   3671
   3672static void __exit target_core_exit_configfs(void)
   3673{
   3674	configfs_remove_default_groups(&alua_lu_gps_group);
   3675	configfs_remove_default_groups(&alua_group);
   3676	configfs_remove_default_groups(&target_core_hbagroup);
   3677
	/*
	 * We expect subsys->su_group.default_groups to be released by the
	 * configfs subsystem provider logic.
	 */
   3682	configfs_unregister_subsystem(&target_core_fabrics);
   3683
   3684	core_alua_free_lu_gp(default_lu_gp);
   3685	default_lu_gp = NULL;
   3686
	pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric Infrastructure\n");
   3689
   3690	core_dev_release_virtual_lun0();
   3691	rd_module_exit();
   3692	target_xcopy_release_pt();
   3693	release_se_kmem_caches();
   3694}
   3695
   3696MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
   3697MODULE_AUTHOR("nab@Linux-iSCSI.org");
   3698MODULE_LICENSE("GPL");
   3699
   3700module_init(target_core_init_configfs);
   3701module_exit(target_core_exit_configfs);