cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

target_core_device.c (31685B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*******************************************************************************
      3 * Filename:  target_core_device.c (based on iscsi_target_device.c)
      4 *
      5 * This file contains the TCM Virtual Device and Disk Transport
      6 * agnostic related functions.
      7 *
      8 * (c) Copyright 2003-2013 Datera, Inc.
      9 *
     10 * Nicholas A. Bellinger <nab@kernel.org>
     11 *
     12 ******************************************************************************/
     13
     14#include <linux/net.h>
     15#include <linux/string.h>
     16#include <linux/delay.h>
     17#include <linux/timer.h>
     18#include <linux/slab.h>
     19#include <linux/spinlock.h>
     20#include <linux/kthread.h>
     21#include <linux/in.h>
     22#include <linux/export.h>
     23#include <linux/t10-pi.h>
     24#include <asm/unaligned.h>
     25#include <net/sock.h>
     26#include <net/tcp.h>
     27#include <scsi/scsi_common.h>
     28#include <scsi/scsi_proto.h>
     29
     30#include <target/target_core_base.h>
     31#include <target/target_core_backend.h>
     32#include <target/target_core_fabric.h>
     33
     34#include "target_core_internal.h"
     35#include "target_core_alua.h"
     36#include "target_core_pr.h"
     37#include "target_core_ua.h"
     38
     39static DEFINE_MUTEX(device_mutex);
     40static LIST_HEAD(device_list);
     41static DEFINE_IDR(devices_idr);
     42
     43static struct se_hba *lun0_hba;
     44/* not static, needed by tpg.c */
     45struct se_device *g_lun0_dev;
     46
     47sense_reason_t
     48transport_lookup_cmd_lun(struct se_cmd *se_cmd)
     49{
     50	struct se_lun *se_lun = NULL;
     51	struct se_session *se_sess = se_cmd->se_sess;
     52	struct se_node_acl *nacl = se_sess->se_node_acl;
     53	struct se_dev_entry *deve;
     54	sense_reason_t ret = TCM_NO_SENSE;
     55
     56	rcu_read_lock();
     57	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
     58	if (deve) {
     59		atomic_long_inc(&deve->total_cmds);
     60
     61		if (se_cmd->data_direction == DMA_TO_DEVICE)
     62			atomic_long_add(se_cmd->data_length,
     63					&deve->write_bytes);
     64		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
     65			atomic_long_add(se_cmd->data_length,
     66					&deve->read_bytes);
     67
     68		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
     69		    deve->lun_access_ro) {
     70			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
     71				" Access for 0x%08llx\n",
     72				se_cmd->se_tfo->fabric_name,
     73				se_cmd->orig_fe_lun);
     74			rcu_read_unlock();
     75			return TCM_WRITE_PROTECTED;
     76		}
     77
     78		se_lun = rcu_dereference(deve->se_lun);
     79
     80		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
     81			se_lun = NULL;
     82			goto out_unlock;
     83		}
     84
     85		se_cmd->se_lun = se_lun;
     86		se_cmd->pr_res_key = deve->pr_res_key;
     87		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
     88		se_cmd->lun_ref_active = true;
     89	}
     90out_unlock:
     91	rcu_read_unlock();
     92
     93	if (!se_lun) {
     94		/*
     95		 * Use the se_portal_group->tpg_virt_lun0 to allow for
     96		 * REPORT_LUNS, et al to be returned when no active
     97		 * MappedLUN=0 exists for this Initiator Port.
     98		 */
     99		if (se_cmd->orig_fe_lun != 0) {
    100			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
    101				" Access for 0x%08llx from %s\n",
    102				se_cmd->se_tfo->fabric_name,
    103				se_cmd->orig_fe_lun,
    104				nacl->initiatorname);
    105			return TCM_NON_EXISTENT_LUN;
    106		}
    107
    108		/*
    109		 * Force WRITE PROTECT for virtual LUN 0
    110		 */
    111		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
    112		    (se_cmd->data_direction != DMA_NONE))
    113			return TCM_WRITE_PROTECTED;
    114
    115		se_lun = se_sess->se_tpg->tpg_virt_lun0;
    116		if (!percpu_ref_tryget_live(&se_lun->lun_ref))
    117			return TCM_NON_EXISTENT_LUN;
    118
    119		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
    120		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
    121		se_cmd->lun_ref_active = true;
    122	}
    123	/*
    124	 * RCU reference protected by percpu se_lun->lun_ref taken above that
    125	 * must drop to zero (including initial reference) before this se_lun
    126	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
    127	 * target_core_fabric_configfs.c:target_fabric_port_release
    128	 */
    129	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
    130	atomic_long_inc(&se_cmd->se_dev->num_cmds);
    131
    132	if (se_cmd->data_direction == DMA_TO_DEVICE)
    133		atomic_long_add(se_cmd->data_length,
    134				&se_cmd->se_dev->write_bytes);
    135	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
    136		atomic_long_add(se_cmd->data_length,
    137				&se_cmd->se_dev->read_bytes);
    138
    139	return ret;
    140}
    141EXPORT_SYMBOL(transport_lookup_cmd_lun);
    142
    143int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
    144{
    145	struct se_dev_entry *deve;
    146	struct se_lun *se_lun = NULL;
    147	struct se_session *se_sess = se_cmd->se_sess;
    148	struct se_node_acl *nacl = se_sess->se_node_acl;
    149	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
    150	unsigned long flags;
    151
    152	rcu_read_lock();
    153	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
    154	if (deve) {
    155		se_lun = rcu_dereference(deve->se_lun);
    156
    157		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
    158			se_lun = NULL;
    159			goto out_unlock;
    160		}
    161
    162		se_cmd->se_lun = se_lun;
    163		se_cmd->pr_res_key = deve->pr_res_key;
    164		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
    165		se_cmd->lun_ref_active = true;
    166	}
    167out_unlock:
    168	rcu_read_unlock();
    169
    170	if (!se_lun) {
    171		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
    172			" Access for 0x%08llx for %s\n",
    173			se_cmd->se_tfo->fabric_name,
    174			se_cmd->orig_fe_lun,
    175			nacl->initiatorname);
    176		return -ENODEV;
    177	}
    178	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
    179	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
    180
    181	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
    182	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
    183	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
    184
    185	return 0;
    186}
    187EXPORT_SYMBOL(transport_lookup_tmr_lun);
    188
    189bool target_lun_is_rdonly(struct se_cmd *cmd)
    190{
    191	struct se_session *se_sess = cmd->se_sess;
    192	struct se_dev_entry *deve;
    193	bool ret;
    194
    195	rcu_read_lock();
    196	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
    197	ret = deve && deve->lun_access_ro;
    198	rcu_read_unlock();
    199
    200	return ret;
    201}
    202EXPORT_SYMBOL(target_lun_is_rdonly);
    203
    204/*
    205 * This function is called from core_scsi3_emulate_pro_register_and_move()
    206 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
    207 * when a matching rtpi is found.
    208 */
    209struct se_dev_entry *core_get_se_deve_from_rtpi(
    210	struct se_node_acl *nacl,
    211	u16 rtpi)
    212{
    213	struct se_dev_entry *deve;
    214	struct se_lun *lun;
    215	struct se_portal_group *tpg = nacl->se_tpg;
    216
    217	rcu_read_lock();
    218	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
    219		lun = rcu_dereference(deve->se_lun);
    220		if (!lun) {
    221			pr_err("%s device entries device pointer is"
    222				" NULL, but Initiator has access.\n",
    223				tpg->se_tpg_tfo->fabric_name);
    224			continue;
    225		}
    226		if (lun->lun_rtpi != rtpi)
    227			continue;
    228
    229		kref_get(&deve->pr_kref);
    230		rcu_read_unlock();
    231
    232		return deve;
    233	}
    234	rcu_read_unlock();
    235
    236	return NULL;
    237}
    238
    239void core_free_device_list_for_node(
    240	struct se_node_acl *nacl,
    241	struct se_portal_group *tpg)
    242{
    243	struct se_dev_entry *deve;
    244
    245	mutex_lock(&nacl->lun_entry_mutex);
    246	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
    247		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
    248					lockdep_is_held(&nacl->lun_entry_mutex));
    249		core_disable_device_list_for_node(lun, deve, nacl, tpg);
    250	}
    251	mutex_unlock(&nacl->lun_entry_mutex);
    252}
    253
    254void core_update_device_list_access(
    255	u64 mapped_lun,
    256	bool lun_access_ro,
    257	struct se_node_acl *nacl)
    258{
    259	struct se_dev_entry *deve;
    260
    261	mutex_lock(&nacl->lun_entry_mutex);
    262	deve = target_nacl_find_deve(nacl, mapped_lun);
    263	if (deve)
    264		deve->lun_access_ro = lun_access_ro;
    265	mutex_unlock(&nacl->lun_entry_mutex);
    266}
    267
    268/*
    269 * Called with rcu_read_lock or nacl->device_list_lock held.
    270 */
    271struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
    272{
    273	struct se_dev_entry *deve;
    274
    275	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
    276		if (deve->mapped_lun == mapped_lun)
    277			return deve;
    278
    279	return NULL;
    280}
    281EXPORT_SYMBOL(target_nacl_find_deve);
    282
    283void target_pr_kref_release(struct kref *kref)
    284{
    285	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
    286						 pr_kref);
    287	complete(&deve->pr_comp);
    288}
    289
    290static void
    291target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
    292			     bool skip_new)
    293{
    294	struct se_dev_entry *tmp;
    295
    296	rcu_read_lock();
    297	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
    298		if (skip_new && tmp == new)
    299			continue;
    300		core_scsi3_ua_allocate(tmp, 0x3F,
    301				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
    302	}
    303	rcu_read_unlock();
    304}
    305
    306int core_enable_device_list_for_node(
    307	struct se_lun *lun,
    308	struct se_lun_acl *lun_acl,
    309	u64 mapped_lun,
    310	bool lun_access_ro,
    311	struct se_node_acl *nacl,
    312	struct se_portal_group *tpg)
    313{
    314	struct se_dev_entry *orig, *new;
    315
    316	new = kzalloc(sizeof(*new), GFP_KERNEL);
    317	if (!new) {
    318		pr_err("Unable to allocate se_dev_entry memory\n");
    319		return -ENOMEM;
    320	}
    321
    322	spin_lock_init(&new->ua_lock);
    323	INIT_LIST_HEAD(&new->ua_list);
    324	INIT_LIST_HEAD(&new->lun_link);
    325
    326	new->mapped_lun = mapped_lun;
    327	kref_init(&new->pr_kref);
    328	init_completion(&new->pr_comp);
    329
    330	new->lun_access_ro = lun_access_ro;
    331	new->creation_time = get_jiffies_64();
    332	new->attach_count++;
    333
    334	mutex_lock(&nacl->lun_entry_mutex);
    335	orig = target_nacl_find_deve(nacl, mapped_lun);
    336	if (orig && orig->se_lun) {
    337		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
    338					lockdep_is_held(&nacl->lun_entry_mutex));
    339
    340		if (orig_lun != lun) {
    341			pr_err("Existing orig->se_lun doesn't match new lun"
    342			       " for dynamic -> explicit NodeACL conversion:"
    343				" %s\n", nacl->initiatorname);
    344			mutex_unlock(&nacl->lun_entry_mutex);
    345			kfree(new);
    346			return -EINVAL;
    347		}
    348		if (orig->se_lun_acl != NULL) {
    349			pr_warn_ratelimited("Detected existing explicit"
    350				" se_lun_acl->se_lun_group reference for %s"
    351				" mapped_lun: %llu, failing\n",
    352				 nacl->initiatorname, mapped_lun);
    353			mutex_unlock(&nacl->lun_entry_mutex);
    354			kfree(new);
    355			return -EINVAL;
    356		}
    357
    358		rcu_assign_pointer(new->se_lun, lun);
    359		rcu_assign_pointer(new->se_lun_acl, lun_acl);
    360		hlist_del_rcu(&orig->link);
    361		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
    362		mutex_unlock(&nacl->lun_entry_mutex);
    363
    364		spin_lock(&lun->lun_deve_lock);
    365		list_del(&orig->lun_link);
    366		list_add_tail(&new->lun_link, &lun->lun_deve_list);
    367		spin_unlock(&lun->lun_deve_lock);
    368
    369		kref_put(&orig->pr_kref, target_pr_kref_release);
    370		wait_for_completion(&orig->pr_comp);
    371
    372		target_luns_data_has_changed(nacl, new, true);
    373		kfree_rcu(orig, rcu_head);
    374		return 0;
    375	}
    376
    377	rcu_assign_pointer(new->se_lun, lun);
    378	rcu_assign_pointer(new->se_lun_acl, lun_acl);
    379	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
    380	mutex_unlock(&nacl->lun_entry_mutex);
    381
    382	spin_lock(&lun->lun_deve_lock);
    383	list_add_tail(&new->lun_link, &lun->lun_deve_list);
    384	spin_unlock(&lun->lun_deve_lock);
    385
    386	target_luns_data_has_changed(nacl, new, true);
    387	return 0;
    388}
    389
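/*
 * The dynamic -> explicit conversion above follows the usual RCU
 * replacement idiom: unlink the old entry, publish the new one, and only
 * free the old entry after a grace period (here additionally gated on the
 * pr_kref completion). A generic, hypothetical sketch of that idiom, not
 * code used by this driver:
 */
struct example_mapping {
	struct hlist_node link;
	struct rcu_head rcu_head;
	u64 key;
};

static inline void example_replace_mapping(struct hlist_head *head,
					   struct example_mapping *old,
					   struct example_mapping *new)
{
	hlist_del_rcu(&old->link);		/* lookups stop returning old */
	hlist_add_head_rcu(&new->link, head);	/* new entry becomes visible */
	kfree_rcu(old, rcu_head);		/* freed only after a grace period */
}
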
    390void core_disable_device_list_for_node(
    391	struct se_lun *lun,
    392	struct se_dev_entry *orig,
    393	struct se_node_acl *nacl,
    394	struct se_portal_group *tpg)
    395{
    396	/*
    397	 * rcu_dereference_raw protected by se_lun->lun_group symlink
    398	 * reference to se_device->dev_group.
    399	 */
    400	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
    401
    402	lockdep_assert_held(&nacl->lun_entry_mutex);
    403
    404	/*
    405	 * If the MappedLUN entry is being disabled, the entry in
    406	 * lun->lun_deve_list must be removed now before clearing the
    407	 * struct se_dev_entry pointers below as logic in
    408	 * core_alua_do_transition_tg_pt() depends on these being present.
    409	 *
    410	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
    411	 * that have not been explicitly converted to MappedLUNs ->
    412	 * struct se_lun_acl, but we remove deve->lun_link from
    413	 * lun->lun_deve_list. This also means that active UAs and
    414	 * NodeACL context specific PR metadata for demo-mode
    415	 * MappedLUN *deve will be released below..
    416	 */
    417	spin_lock(&lun->lun_deve_lock);
    418	list_del(&orig->lun_link);
    419	spin_unlock(&lun->lun_deve_lock);
    420	/*
    421	 * Disable struct se_dev_entry LUN ACL mapping
    422	 */
    423	core_scsi3_ua_release_all(orig);
    424
    425	hlist_del_rcu(&orig->link);
    426	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
    427	orig->lun_access_ro = false;
    428	orig->creation_time = 0;
    429	orig->attach_count--;
    430	/*
    431	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
    432	 * or REGISTER_AND_MOVE PR operation to complete.
    433	 */
    434	kref_put(&orig->pr_kref, target_pr_kref_release);
    435	wait_for_completion(&orig->pr_comp);
    436
    437	rcu_assign_pointer(orig->se_lun, NULL);
    438	rcu_assign_pointer(orig->se_lun_acl, NULL);
    439
    440	kfree_rcu(orig, rcu_head);
    441
    442	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
    443	target_luns_data_has_changed(nacl, NULL, false);
    444}
    445
    446/*      core_clear_lun_from_tpg():
    447 *
    448 *
    449 */
    450void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
    451{
    452	struct se_node_acl *nacl;
    453	struct se_dev_entry *deve;
    454
    455	mutex_lock(&tpg->acl_node_mutex);
    456	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
    457
    458		mutex_lock(&nacl->lun_entry_mutex);
    459		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
    460			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
    461					lockdep_is_held(&nacl->lun_entry_mutex));
    462
    463			if (lun != tmp_lun)
    464				continue;
    465
    466			core_disable_device_list_for_node(lun, deve, nacl, tpg);
    467		}
    468		mutex_unlock(&nacl->lun_entry_mutex);
    469	}
    470	mutex_unlock(&tpg->acl_node_mutex);
    471}
    472
    473int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
    474{
    475	struct se_lun *tmp;
    476
    477	spin_lock(&dev->se_port_lock);
    478	if (dev->export_count == 0x0000ffff) {
     479		pr_warn("Reached dev->export_count =="
    480				" 0x0000ffff\n");
    481		spin_unlock(&dev->se_port_lock);
    482		return -ENOSPC;
    483	}
    484again:
    485	/*
    486	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
    487	 * Here is the table from spc4r17 section 7.7.3.8.
    488	 *
    489	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
    490	 *
    491	 * Code      Description
    492	 * 0h        Reserved
    493	 * 1h        Relative port 1, historically known as port A
    494	 * 2h        Relative port 2, historically known as port B
    495	 * 3h to FFFFh    Relative port 3 through 65 535
    496	 */
    497	lun->lun_rtpi = dev->dev_rpti_counter++;
    498	if (!lun->lun_rtpi)
    499		goto again;
    500
    501	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
    502		/*
    503		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
    504		 * for 16-bit wrap..
    505		 */
    506		if (lun->lun_rtpi == tmp->lun_rtpi)
    507			goto again;
    508	}
    509	spin_unlock(&dev->se_port_lock);
    510
    511	return 0;
    512}
    513
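/*
 * The allocation rule above, restated in isolation (hypothetical helper,
 * not used by this file): advance a 16-bit counter, skip the reserved
 * value 0h after wrap-around, and retry until the candidate is not
 * already used by one of the device's exported ports in @in_use[0..n-1].
 */
static inline u16 next_rtpi_example(u16 *counter, const u16 *in_use, int n)
{
	int i;

	for (;;) {
		u16 cand = ++(*counter);	/* wraps naturally at 16 bits */

		if (!cand)			/* 0h is reserved (spc4r17) */
			continue;
		for (i = 0; i < n; i++)
			if (in_use[i] == cand)
				break;
		if (i == n)
			return cand;		/* unique, non-zero RTPI */
	}
}
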
    514static void se_release_vpd_for_dev(struct se_device *dev)
    515{
    516	struct t10_vpd *vpd, *vpd_tmp;
    517
    518	spin_lock(&dev->t10_wwn.t10_vpd_lock);
    519	list_for_each_entry_safe(vpd, vpd_tmp,
    520			&dev->t10_wwn.t10_vpd_list, vpd_list) {
    521		list_del(&vpd->vpd_list);
    522		kfree(vpd);
    523	}
    524	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
    525}
    526
    527static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
    528{
    529	u32 aligned_max_sectors;
    530	u32 alignment;
    531	/*
    532	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
    533	 * transport_allocate_data_tasks() operation.
    534	 */
    535	alignment = max(1ul, PAGE_SIZE / block_size);
    536	aligned_max_sectors = rounddown(max_sectors, alignment);
    537
    538	if (max_sectors != aligned_max_sectors)
    539		pr_info("Rounding down aligned max_sectors from %u to %u\n",
    540			max_sectors, aligned_max_sectors);
    541
    542	return aligned_max_sectors;
    543}
    544
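/*
 * The rounding above as plain arithmetic (hypothetical helper, not used by
 * this file): alignment = max(1, PAGE_SIZE / block_size), then round
 * max_sectors down to a multiple of it. With 4 KiB pages and 512-byte
 * blocks the alignment is 8 sectors, so e.g. 1023 sectors become 1016.
 */
static inline u32 align_max_sectors_example(u32 max_sectors, u32 block_size,
					    u32 page_size)
{
	u32 alignment = page_size / block_size ? page_size / block_size : 1;

	return max_sectors - (max_sectors % alignment);
}
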
    545int core_dev_add_lun(
    546	struct se_portal_group *tpg,
    547	struct se_device *dev,
    548	struct se_lun *lun)
    549{
    550	int rc;
    551
    552	rc = core_tpg_add_lun(tpg, lun, false, dev);
    553	if (rc < 0)
    554		return rc;
    555
    556	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
    557		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
    558		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
    559		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
    560	/*
    561	 * Update LUN maps for dynamically added initiators when
    562	 * generate_node_acl is enabled.
    563	 */
    564	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
    565		struct se_node_acl *acl;
    566
    567		mutex_lock(&tpg->acl_node_mutex);
    568		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
    569			if (acl->dynamic_node_acl &&
    570			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
    571			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
    572				core_tpg_add_node_to_devs(acl, tpg, lun);
    573			}
    574		}
    575		mutex_unlock(&tpg->acl_node_mutex);
    576	}
    577
    578	return 0;
    579}
    580
    581/*      core_dev_del_lun():
    582 *
    583 *
    584 */
    585void core_dev_del_lun(
    586	struct se_portal_group *tpg,
    587	struct se_lun *lun)
    588{
    589	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
    590		" device object\n", tpg->se_tpg_tfo->fabric_name,
    591		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
    592		tpg->se_tpg_tfo->fabric_name);
    593
    594	core_tpg_remove_lun(tpg, lun);
    595}
    596
    597struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
    598	struct se_portal_group *tpg,
    599	struct se_node_acl *nacl,
    600	u64 mapped_lun,
    601	int *ret)
    602{
    603	struct se_lun_acl *lacl;
    604
    605	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
    606		pr_err("%s InitiatorName exceeds maximum size.\n",
    607			tpg->se_tpg_tfo->fabric_name);
    608		*ret = -EOVERFLOW;
    609		return NULL;
    610	}
    611	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
    612	if (!lacl) {
    613		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
    614		*ret = -ENOMEM;
    615		return NULL;
    616	}
    617
    618	lacl->mapped_lun = mapped_lun;
    619	lacl->se_lun_nacl = nacl;
    620
    621	return lacl;
    622}
    623
    624int core_dev_add_initiator_node_lun_acl(
    625	struct se_portal_group *tpg,
    626	struct se_lun_acl *lacl,
    627	struct se_lun *lun,
    628	bool lun_access_ro)
    629{
    630	struct se_node_acl *nacl = lacl->se_lun_nacl;
    631	/*
    632	 * rcu_dereference_raw protected by se_lun->lun_group symlink
    633	 * reference to se_device->dev_group.
    634	 */
    635	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
    636
    637	if (!nacl)
    638		return -EINVAL;
    639
    640	if (lun->lun_access_ro)
    641		lun_access_ro = true;
    642
    643	lacl->se_lun = lun;
    644
    645	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
    646			lun_access_ro, nacl, tpg) < 0)
    647		return -EINVAL;
    648
    649	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
    650		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
    651		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
    652		lun_access_ro ? "RO" : "RW",
    653		nacl->initiatorname);
    654	/*
    655	 * Check to see if there are any existing persistent reservation APTPL
    656	 * pre-registrations that need to be enabled for this LUN ACL..
    657	 */
    658	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
    659					    lacl->mapped_lun);
    660	return 0;
    661}
    662
    663int core_dev_del_initiator_node_lun_acl(
    664	struct se_lun *lun,
    665	struct se_lun_acl *lacl)
    666{
    667	struct se_portal_group *tpg = lun->lun_tpg;
    668	struct se_node_acl *nacl;
    669	struct se_dev_entry *deve;
    670
    671	nacl = lacl->se_lun_nacl;
    672	if (!nacl)
    673		return -EINVAL;
    674
    675	mutex_lock(&nacl->lun_entry_mutex);
    676	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
    677	if (deve)
    678		core_disable_device_list_for_node(lun, deve, nacl, tpg);
    679	mutex_unlock(&nacl->lun_entry_mutex);
    680
    681	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
    682		" InitiatorNode: %s Mapped LUN: %llu\n",
    683		tpg->se_tpg_tfo->fabric_name,
    684		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
    685		nacl->initiatorname, lacl->mapped_lun);
    686
    687	return 0;
    688}
    689
    690void core_dev_free_initiator_node_lun_acl(
    691	struct se_portal_group *tpg,
    692	struct se_lun_acl *lacl)
    693{
    694	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
    695		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
    696		tpg->se_tpg_tfo->tpg_get_tag(tpg),
    697		tpg->se_tpg_tfo->fabric_name,
    698		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);
    699
    700	kfree(lacl);
    701}
    702
    703static void scsi_dump_inquiry(struct se_device *dev)
    704{
    705	struct t10_wwn *wwn = &dev->t10_wwn;
    706	int device_type = dev->transport->get_device_type(dev);
    707
    708	/*
    709	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
    710	 */
    711	pr_debug("  Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
    712		wwn->vendor);
    713	pr_debug("  Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
    714		wwn->model);
    715	pr_debug("  Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
    716		wwn->revision);
    717	pr_debug("  Type:   %s ", scsi_device_type(device_type));
    718}
    719
    720struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
    721{
    722	struct se_device *dev;
    723	struct se_lun *xcopy_lun;
    724	int i;
    725
    726	dev = hba->backend->ops->alloc_device(hba, name);
    727	if (!dev)
    728		return NULL;
    729
    730	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
    731	if (!dev->queues) {
    732		dev->transport->free_device(dev);
    733		return NULL;
    734	}
    735
    736	dev->queue_cnt = nr_cpu_ids;
    737	for (i = 0; i < dev->queue_cnt; i++) {
    738		struct se_device_queue *q;
    739
    740		q = &dev->queues[i];
    741		INIT_LIST_HEAD(&q->state_list);
    742		spin_lock_init(&q->lock);
    743
    744		init_llist_head(&q->sq.cmd_list);
    745		INIT_WORK(&q->sq.work, target_queued_submit_work);
    746	}
    747
    748	dev->se_hba = hba;
    749	dev->transport = hba->backend->ops;
    750	dev->transport_flags = dev->transport->transport_flags_default;
    751	dev->prot_length = sizeof(struct t10_pi_tuple);
    752	dev->hba_index = hba->hba_index;
    753
    754	INIT_LIST_HEAD(&dev->dev_sep_list);
    755	INIT_LIST_HEAD(&dev->dev_tmr_list);
    756	INIT_LIST_HEAD(&dev->delayed_cmd_list);
    757	INIT_LIST_HEAD(&dev->qf_cmd_list);
    758	spin_lock_init(&dev->delayed_cmd_lock);
    759	spin_lock_init(&dev->dev_reservation_lock);
    760	spin_lock_init(&dev->se_port_lock);
    761	spin_lock_init(&dev->se_tmr_lock);
    762	spin_lock_init(&dev->qf_cmd_lock);
    763	sema_init(&dev->caw_sem, 1);
    764	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
    765	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
    766	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
    767	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
    768	spin_lock_init(&dev->t10_pr.registration_lock);
    769	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
    770	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
    771	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
    772	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
    773	spin_lock_init(&dev->t10_alua.lba_map_lock);
    774
    775	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
    776
    777	dev->t10_wwn.t10_dev = dev;
    778	/*
    779	 * Use OpenFabrics IEEE Company ID: 00 14 05
    780	 */
    781	dev->t10_wwn.company_id = 0x001405;
    782
    783	dev->t10_alua.t10_dev = dev;
    784
    785	dev->dev_attrib.da_dev = dev;
    786	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
    787	dev->dev_attrib.emulate_dpo = 1;
    788	dev->dev_attrib.emulate_fua_write = 1;
    789	dev->dev_attrib.emulate_fua_read = 1;
    790	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
    791	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
    792	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
    793	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
    794	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
    795	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
    796	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
    797	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
    798	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
    799	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
    800	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
    801	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
    802	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
    803	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
    804	dev->dev_attrib.max_unmap_block_desc_count =
    805		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
    806	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
    807	dev->dev_attrib.unmap_granularity_alignment =
    808				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
    809	dev->dev_attrib.unmap_zeroes_data =
    810				DA_UNMAP_ZEROES_DATA_DEFAULT;
    811	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
    812
    813	xcopy_lun = &dev->xcopy_lun;
    814	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
    815	init_completion(&xcopy_lun->lun_shutdown_comp);
    816	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
    817	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
    818	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
    819	xcopy_lun->lun_tpg = &xcopy_pt_tpg;
    820
    821	/* Preload the default INQUIRY const values */
    822	strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
    823	strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
    824		sizeof(dev->t10_wwn.model));
    825	strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
    826		sizeof(dev->t10_wwn.revision));
    827
    828	return dev;
    829}
    830
    831/*
    832 * Check if the underlying struct block_device supports discard and if yes
    833 * configure the UNMAP parameters.
    834 */
    835bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
    836				       struct block_device *bdev)
    837{
    838	int block_size = bdev_logical_block_size(bdev);
    839
    840	if (!bdev_max_discard_sectors(bdev))
    841		return false;
    842
    843	attrib->max_unmap_lba_count =
    844		bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
    845	/*
    846	 * Currently hardcoded to 1 in Linux/SCSI code..
    847	 */
    848	attrib->max_unmap_block_desc_count = 1;
    849	attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
    850	attrib->unmap_granularity_alignment =
    851		bdev_discard_alignment(bdev) / block_size;
    852	return true;
    853}
    854EXPORT_SYMBOL(target_configure_unmap_from_queue);
    855
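/*
 * bdev_max_discard_sectors() reports 512-byte sectors, so the shift above,
 * ilog2(block_size) - 9, equals log2(block_size / 512). Hypothetical
 * helper restating the conversion: with a 4096-byte block size the shift
 * is 3, so 16777216 discard sectors (8 GiB) become 2097152 LBAs.
 */
static inline sector_t discard_sectors_to_lbas_example(sector_t sectors,
						       u32 block_size)
{
	return sectors >> (ilog2(block_size) - 9);
}
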
    856/*
    857 * Convert from blocksize advertised to the initiator to the 512 byte
    858 * units unconditionally used by the Linux block layer.
    859 */
    860sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
    861{
    862	switch (dev->dev_attrib.block_size) {
    863	case 4096:
    864		return lb << 3;
    865	case 2048:
    866		return lb << 2;
    867	case 1024:
    868		return lb << 1;
    869	default:
    870		return lb;
    871	}
    872}
    873EXPORT_SYMBOL(target_to_linux_sector);
    874
    875struct devices_idr_iter {
    876	struct config_item *prev_item;
    877	int (*fn)(struct se_device *dev, void *data);
    878	void *data;
    879};
    880
    881static int target_devices_idr_iter(int id, void *p, void *data)
    882	 __must_hold(&device_mutex)
    883{
    884	struct devices_idr_iter *iter = data;
    885	struct se_device *dev = p;
    886	int ret;
    887
    888	config_item_put(iter->prev_item);
    889	iter->prev_item = NULL;
    890
    891	/*
    892	 * We add the device early to the idr, so it can be used
    893	 * by backend modules during configuration. We do not want
    894	 * to allow other callers to access partially setup devices,
    895	 * so we skip them here.
    896	 */
    897	if (!target_dev_configured(dev))
    898		return 0;
    899
    900	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
    901	if (!iter->prev_item)
    902		return 0;
    903	mutex_unlock(&device_mutex);
    904
    905	ret = iter->fn(dev, iter->data);
    906
    907	mutex_lock(&device_mutex);
    908	return ret;
    909}
    910
    911/**
    912 * target_for_each_device - iterate over configured devices
    913 * @fn: iterator function
    914 * @data: pointer to data that will be passed to fn
    915 *
    916 * fn must return 0 to continue looping over devices. non-zero will break
    917 * from the loop and return that value to the caller.
    918 */
    919int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
    920			   void *data)
    921{
    922	struct devices_idr_iter iter = { .fn = fn, .data = data };
    923	int ret;
    924
    925	mutex_lock(&device_mutex);
    926	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
    927	mutex_unlock(&device_mutex);
    928	config_item_put(iter.prev_item);
    929	return ret;
    930}
    931
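/*
 * A minimal usage sketch of the iterator above (the callback and its
 * caller are hypothetical, not part of this file). Returning non-zero
 * from the callback stops the walk and propagates that value.
 */
static int example_count_dev(struct se_device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;		/* keep iterating */
}

static inline unsigned int example_count_devices(void)
{
	unsigned int count = 0;

	target_for_each_device(example_count_dev, &count);
	return count;
}
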
    932int target_configure_device(struct se_device *dev)
    933{
    934	struct se_hba *hba = dev->se_hba;
    935	int ret, id;
    936
    937	if (target_dev_configured(dev)) {
    938		pr_err("se_dev->se_dev_ptr already set for storage"
    939				" object\n");
    940		return -EEXIST;
    941	}
    942
    943	/*
    944	 * Add early so modules like tcmu can use during its
    945	 * configuration.
    946	 */
    947	mutex_lock(&device_mutex);
    948	/*
    949	 * Use cyclic to try and avoid collisions with devices
    950	 * that were recently removed.
    951	 */
    952	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
    953	mutex_unlock(&device_mutex);
    954	if (id < 0) {
    955		ret = -ENOMEM;
    956		goto out;
    957	}
    958	dev->dev_index = id;
    959
    960	ret = dev->transport->configure_device(dev);
    961	if (ret)
    962		goto out_free_index;
    963	/*
    964	 * XXX: there is not much point to have two different values here..
    965	 */
    966	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
    967	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
    968
    969	/*
    970	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
    971	 */
    972	dev->dev_attrib.hw_max_sectors =
    973		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
    974					 dev->dev_attrib.hw_block_size);
    975	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
    976
    977	dev->creation_time = get_jiffies_64();
    978
    979	ret = core_setup_alua(dev);
    980	if (ret)
    981		goto out_destroy_device;
    982
    983	/*
    984	 * Setup work_queue for QUEUE_FULL
    985	 */
    986	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
    987
    988	scsi_dump_inquiry(dev);
    989
    990	spin_lock(&hba->device_lock);
    991	hba->dev_count++;
    992	spin_unlock(&hba->device_lock);
    993
    994	dev->dev_flags |= DF_CONFIGURED;
    995
    996	return 0;
    997
    998out_destroy_device:
    999	dev->transport->destroy_device(dev);
   1000out_free_index:
   1001	mutex_lock(&device_mutex);
   1002	idr_remove(&devices_idr, dev->dev_index);
   1003	mutex_unlock(&device_mutex);
   1004out:
   1005	se_release_vpd_for_dev(dev);
   1006	return ret;
   1007}
   1008
   1009void target_free_device(struct se_device *dev)
   1010{
   1011	struct se_hba *hba = dev->se_hba;
   1012
   1013	WARN_ON(!list_empty(&dev->dev_sep_list));
   1014
   1015	if (target_dev_configured(dev)) {
   1016		dev->transport->destroy_device(dev);
   1017
   1018		mutex_lock(&device_mutex);
   1019		idr_remove(&devices_idr, dev->dev_index);
   1020		mutex_unlock(&device_mutex);
   1021
   1022		spin_lock(&hba->device_lock);
   1023		hba->dev_count--;
   1024		spin_unlock(&hba->device_lock);
   1025	}
   1026
   1027	core_alua_free_lu_gp_mem(dev);
   1028	core_alua_set_lba_map(dev, NULL, 0, 0);
   1029	core_scsi3_free_all_registrations(dev);
   1030	se_release_vpd_for_dev(dev);
   1031
   1032	if (dev->transport->free_prot)
   1033		dev->transport->free_prot(dev);
   1034
   1035	kfree(dev->queues);
   1036	dev->transport->free_device(dev);
   1037}
   1038
   1039int core_dev_setup_virtual_lun0(void)
   1040{
   1041	struct se_hba *hba;
   1042	struct se_device *dev;
   1043	char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
   1044	int ret;
   1045
   1046	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
   1047	if (IS_ERR(hba))
   1048		return PTR_ERR(hba);
   1049
   1050	dev = target_alloc_device(hba, "virt_lun0");
   1051	if (!dev) {
   1052		ret = -ENOMEM;
   1053		goto out_free_hba;
   1054	}
   1055
   1056	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));
   1057
   1058	ret = target_configure_device(dev);
   1059	if (ret)
   1060		goto out_free_se_dev;
   1061
   1062	lun0_hba = hba;
   1063	g_lun0_dev = dev;
   1064	return 0;
   1065
   1066out_free_se_dev:
   1067	target_free_device(dev);
   1068out_free_hba:
   1069	core_delete_hba(hba);
   1070	return ret;
   1071}
   1072
   1073
   1074void core_dev_release_virtual_lun0(void)
   1075{
   1076	struct se_hba *hba = lun0_hba;
   1077
   1078	if (!hba)
   1079		return;
   1080
   1081	if (g_lun0_dev)
   1082		target_free_device(g_lun0_dev);
   1083	core_delete_hba(hba);
   1084}
   1085
   1086/*
   1087 * Common CDB parsing for kernel and user passthrough.
   1088 */
   1089sense_reason_t
   1090passthrough_parse_cdb(struct se_cmd *cmd,
   1091	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
   1092{
   1093	unsigned char *cdb = cmd->t_task_cdb;
   1094	struct se_device *dev = cmd->se_dev;
   1095	unsigned int size;
   1096
   1097	/*
   1098	 * For REPORT LUNS we always need to emulate the response, for everything
   1099	 * else, pass it up.
   1100	 */
   1101	if (cdb[0] == REPORT_LUNS) {
   1102		cmd->execute_cmd = spc_emulate_report_luns;
   1103		return TCM_NO_SENSE;
   1104	}
   1105
   1106	/*
   1107	 * With emulate_pr disabled, all reservation requests should fail,
   1108	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
   1109	 */
   1110	if (!dev->dev_attrib.emulate_pr &&
   1111	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
   1112	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
   1113	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
   1114	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
   1115		return TCM_UNSUPPORTED_SCSI_OPCODE;
   1116	}
   1117
   1118	/*
   1119	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
   1120	 * emulate the response, since tcmu does not have the information
   1121	 * required to process these commands.
   1122	 */
   1123	if (!(dev->transport_flags &
   1124	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
   1125		if (cdb[0] == PERSISTENT_RESERVE_IN) {
   1126			cmd->execute_cmd = target_scsi3_emulate_pr_in;
   1127			size = get_unaligned_be16(&cdb[7]);
   1128			return target_cmd_size_check(cmd, size);
   1129		}
   1130		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
   1131			cmd->execute_cmd = target_scsi3_emulate_pr_out;
   1132			size = get_unaligned_be32(&cdb[5]);
   1133			return target_cmd_size_check(cmd, size);
   1134		}
   1135
   1136		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
   1137			cmd->execute_cmd = target_scsi2_reservation_release;
   1138			if (cdb[0] == RELEASE_10)
   1139				size = get_unaligned_be16(&cdb[7]);
   1140			else
   1141				size = cmd->data_length;
   1142			return target_cmd_size_check(cmd, size);
   1143		}
   1144		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
   1145			cmd->execute_cmd = target_scsi2_reservation_reserve;
   1146			if (cdb[0] == RESERVE_10)
   1147				size = get_unaligned_be16(&cdb[7]);
   1148			else
   1149				size = cmd->data_length;
   1150			return target_cmd_size_check(cmd, size);
   1151		}
   1152	}
   1153
   1154	/* Set DATA_CDB flag for ops that should have it */
   1155	switch (cdb[0]) {
   1156	case READ_6:
   1157	case READ_10:
   1158	case READ_12:
   1159	case READ_16:
   1160	case WRITE_6:
   1161	case WRITE_10:
   1162	case WRITE_12:
   1163	case WRITE_16:
   1164	case WRITE_VERIFY:
   1165	case WRITE_VERIFY_12:
   1166	case WRITE_VERIFY_16:
   1167	case COMPARE_AND_WRITE:
   1168	case XDWRITEREAD_10:
   1169		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
   1170		break;
   1171	case VARIABLE_LENGTH_CMD:
   1172		switch (get_unaligned_be16(&cdb[8])) {
   1173		case READ_32:
   1174		case WRITE_32:
   1175		case WRITE_VERIFY_32:
   1176		case XDWRITEREAD_32:
   1177			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
   1178			break;
   1179		}
   1180	}
   1181
   1182	cmd->execute_cmd = exec_cmd;
   1183
   1184	return TCM_NO_SENSE;
   1185}
   1186EXPORT_SYMBOL(passthrough_parse_cdb);
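
/*
 * A sketch of how a passthrough backend would plug into the helper above
 * from its parse_cdb hook; the backend and its execute callback here are
 * hypothetical.
 */
static sense_reason_t example_backend_execute_cmd(struct se_cmd *cmd)
{
	/* Hand the raw CDB in cmd->t_task_cdb to the underlying device,
	 * then complete the command, e.g. via target_complete_cmd(). */
	return TCM_NO_SENSE;
}

static inline sense_reason_t example_backend_parse_cdb(struct se_cmd *cmd)
{
	return passthrough_parse_cdb(cmd, example_backend_execute_cmd);
}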