cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

scsi.c (65510B)


      1// SPDX-License-Identifier: GPL-2.0+
      2/*******************************************************************************
      3 * Vhost kernel TCM fabric driver for virtio SCSI initiators
      4 *
      5 * (C) Copyright 2010-2013 Datera, Inc.
      6 * (C) Copyright 2010-2012 IBM Corp.
      7 *
      8 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
      9 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
     10 ****************************************************************************/
     11
     12#include <linux/module.h>
     13#include <linux/moduleparam.h>
     14#include <generated/utsrelease.h>
     15#include <linux/utsname.h>
     16#include <linux/init.h>
     17#include <linux/slab.h>
     18#include <linux/kthread.h>
     19#include <linux/types.h>
     20#include <linux/string.h>
     21#include <linux/configfs.h>
     22#include <linux/ctype.h>
     23#include <linux/compat.h>
     24#include <linux/eventfd.h>
     25#include <linux/fs.h>
     26#include <linux/vmalloc.h>
     27#include <linux/miscdevice.h>
     28#include <asm/unaligned.h>
     29#include <scsi/scsi_common.h>
     30#include <scsi/scsi_proto.h>
     31#include <target/target_core_base.h>
     32#include <target/target_core_fabric.h>
     33#include <linux/vhost.h>
     34#include <linux/virtio_scsi.h>
     35#include <linux/llist.h>
     36#include <linux/bitmap.h>
     37
     38#include "vhost.h"
     39
     40#define VHOST_SCSI_VERSION  "v0.1"
     41#define VHOST_SCSI_NAMELEN 256
     42#define VHOST_SCSI_MAX_CDB_SIZE 32
     43#define VHOST_SCSI_PREALLOC_SGLS 2048
     44#define VHOST_SCSI_PREALLOC_UPAGES 2048
     45#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
     46
     47/* Max number of requests before requeueing the job.
     48 * Using this limit prevents one virtqueue from starving others with
      49 * requests.
     50 */
     51#define VHOST_SCSI_WEIGHT 256
     52
     53struct vhost_scsi_inflight {
     54	/* Wait for the flush operation to finish */
     55	struct completion comp;
     56	/* Refcount for the inflight reqs */
     57	struct kref kref;
     58};
     59
     60struct vhost_scsi_cmd {
     61	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
     62	int tvc_vq_desc;
     63	/* virtio-scsi initiator task attribute */
     64	int tvc_task_attr;
     65	/* virtio-scsi response incoming iovecs */
     66	int tvc_in_iovs;
     67	/* virtio-scsi initiator data direction */
     68	enum dma_data_direction tvc_data_direction;
     69	/* Expected data transfer length from virtio-scsi header */
     70	u32 tvc_exp_data_len;
     71	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
     72	u64 tvc_tag;
     73	/* The number of scatterlists associated with this cmd */
     74	u32 tvc_sgl_count;
     75	u32 tvc_prot_sgl_count;
     76	/* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
     77	u32 tvc_lun;
     78	/* Pointer to the SGL formatted memory from virtio-scsi */
     79	struct scatterlist *tvc_sgl;
     80	struct scatterlist *tvc_prot_sgl;
     81	struct page **tvc_upages;
     82	/* Pointer to response header iovec */
     83	struct iovec tvc_resp_iov;
     84	/* Pointer to vhost_scsi for our device */
     85	struct vhost_scsi *tvc_vhost;
     86	/* Pointer to vhost_virtqueue for the cmd */
     87	struct vhost_virtqueue *tvc_vq;
     88	/* Pointer to vhost nexus memory */
     89	struct vhost_scsi_nexus *tvc_nexus;
     90	/* The TCM I/O descriptor that is accessed via container_of() */
     91	struct se_cmd tvc_se_cmd;
     92	/* Copy of the incoming SCSI command descriptor block (CDB) */
     93	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
     94	/* Sense buffer that will be mapped into outgoing status */
     95	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
     96	/* Completed commands list, serviced from vhost worker thread */
     97	struct llist_node tvc_completion_list;
     98	/* Used to track inflight cmd */
     99	struct vhost_scsi_inflight *inflight;
    100};
    101
    102struct vhost_scsi_nexus {
    103	/* Pointer to TCM session for I_T Nexus */
    104	struct se_session *tvn_se_sess;
    105};
    106
    107struct vhost_scsi_tpg {
    108	/* Vhost port target portal group tag for TCM */
    109	u16 tport_tpgt;
     110	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
    111	int tv_tpg_port_count;
    112	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
    113	int tv_tpg_vhost_count;
    114	/* Used for enabling T10-PI with legacy devices */
    115	int tv_fabric_prot_type;
    116	/* list for vhost_scsi_list */
    117	struct list_head tv_tpg_list;
    118	/* Used to protect access for tpg_nexus */
    119	struct mutex tv_tpg_mutex;
    120	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
    121	struct vhost_scsi_nexus *tpg_nexus;
    122	/* Pointer back to vhost_scsi_tport */
    123	struct vhost_scsi_tport *tport;
    124	/* Returned by vhost_scsi_make_tpg() */
    125	struct se_portal_group se_tpg;
    126	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
    127	struct vhost_scsi *vhost_scsi;
    128	struct list_head tmf_queue;
    129};
    130
    131struct vhost_scsi_tport {
    132	/* SCSI protocol the tport is providing */
    133	u8 tport_proto_id;
    134	/* Binary World Wide unique Port Name for Vhost Target port */
    135	u64 tport_wwpn;
    136	/* ASCII formatted WWPN for Vhost Target port */
    137	char tport_name[VHOST_SCSI_NAMELEN];
    138	/* Returned by vhost_scsi_make_tport() */
    139	struct se_wwn tport_wwn;
    140};
    141
    142struct vhost_scsi_evt {
    143	/* event to be sent to guest */
    144	struct virtio_scsi_event event;
    145	/* event list, serviced from vhost worker thread */
    146	struct llist_node list;
    147};
    148
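        /*
         * Fixed virtqueue indices: in the virtio-scsi device layout, queue 0
         * is the control queue, queue 1 the event queue, and queues 2 and up
         * are request queues.
         */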
    149enum {
    150	VHOST_SCSI_VQ_CTL = 0,
    151	VHOST_SCSI_VQ_EVT = 1,
    152	VHOST_SCSI_VQ_IO = 2,
    153};
    154
    155/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
    156enum {
    157	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
    158					       (1ULL << VIRTIO_SCSI_F_T10_PI)
    159};
    160
    161#define VHOST_SCSI_MAX_TARGET	256
    162#define VHOST_SCSI_MAX_VQ	128
    163#define VHOST_SCSI_MAX_EVENT	128
    164
    165struct vhost_scsi_virtqueue {
    166	struct vhost_virtqueue vq;
    167	/*
     168	 * Reference counting for inflight reqs, used for flush operation. At
     169	 * any time, one reference tracks new commands submitted, while we
     170	 * wait for the other one to reach 0.
    171	 */
    172	struct vhost_scsi_inflight inflights[2];
    173	/*
    174	 * Indicate current inflight in use, protected by vq->mutex.
    175	 * Writers must also take dev mutex and flush under it.
    176	 */
    177	int inflight_idx;
    178	struct vhost_scsi_cmd *scsi_cmds;
    179	struct sbitmap scsi_tags;
    180	int max_cmds;
    181};
    182
    183struct vhost_scsi {
    184	/* Protected by vhost_scsi->dev.mutex */
    185	struct vhost_scsi_tpg **vs_tpg;
    186	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
    187
    188	struct vhost_dev dev;
    189	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
    190
    191	struct vhost_work vs_completion_work; /* cmd completion work item */
    192	struct llist_head vs_completion_list; /* cmd completion queue */
    193
    194	struct vhost_work vs_event_work; /* evt injection work item */
    195	struct llist_head vs_event_list; /* evt injection queue */
    196
    197	bool vs_events_missed; /* any missed events, protected by vq->mutex */
    198	int vs_events_nr; /* num of pending events, protected by vq->mutex */
    199};
    200
    201struct vhost_scsi_tmf {
    202	struct vhost_work vwork;
    203	struct vhost_scsi_tpg *tpg;
    204	struct vhost_scsi *vhost;
    205	struct vhost_scsi_virtqueue *svq;
    206	struct list_head queue_entry;
    207
    208	struct se_cmd se_cmd;
    209	u8 scsi_resp;
    210	struct vhost_scsi_inflight *inflight;
    211	struct iovec resp_iov;
    212	int in_iovs;
    213	int vq_desc;
    214};
    215
    216/*
    217 * Context for processing request and control queue operations.
    218 */
    219struct vhost_scsi_ctx {
    220	int head;
    221	unsigned int out, in;
    222	size_t req_size, rsp_size;
    223	size_t out_size, in_size;
    224	u8 *target, *lunp;
    225	void *req;
    226	struct iov_iter out_iter;
    227};
    228
     229/* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
    230static DEFINE_MUTEX(vhost_scsi_mutex);
    231static LIST_HEAD(vhost_scsi_list);
    232
    233static void vhost_scsi_done_inflight(struct kref *kref)
    234{
    235	struct vhost_scsi_inflight *inflight;
    236
    237	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
    238	complete(&inflight->comp);
    239}
    240
    241static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
    242				    struct vhost_scsi_inflight *old_inflight[])
    243{
    244	struct vhost_scsi_inflight *new_inflight;
    245	struct vhost_virtqueue *vq;
    246	int idx, i;
    247
    248	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
    249		vq = &vs->vqs[i].vq;
    250
    251		mutex_lock(&vq->mutex);
    252
     253		/* store old inflight */
    254		idx = vs->vqs[i].inflight_idx;
    255		if (old_inflight)
    256			old_inflight[i] = &vs->vqs[i].inflights[idx];
    257
     258		/* set up new inflight */
    259		vs->vqs[i].inflight_idx = idx ^ 1;
    260		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
    261		kref_init(&new_inflight->kref);
    262		init_completion(&new_inflight->comp);
    263
    264		mutex_unlock(&vq->mutex);
    265	}
    266}
    267
    268static struct vhost_scsi_inflight *
    269vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
    270{
    271	struct vhost_scsi_inflight *inflight;
    272	struct vhost_scsi_virtqueue *svq;
    273
    274	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
    275	inflight = &svq->inflights[svq->inflight_idx];
    276	kref_get(&inflight->kref);
    277
    278	return inflight;
    279}
    280
    281static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
    282{
    283	kref_put(&inflight->kref, vhost_scsi_done_inflight);
    284}
    285
    286static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
    287{
    288	return 1;
    289}
    290
    291static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
    292{
    293	return 0;
    294}
    295
    296static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
    297{
    298	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
    299				struct vhost_scsi_tpg, se_tpg);
    300	struct vhost_scsi_tport *tport = tpg->tport;
    301
    302	return &tport->tport_name[0];
    303}
    304
    305static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
    306{
    307	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
    308				struct vhost_scsi_tpg, se_tpg);
    309	return tpg->tport_tpgt;
    310}
    311
    312static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
    313{
    314	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
    315				struct vhost_scsi_tpg, se_tpg);
    316
    317	return tpg->tv_fabric_prot_type;
    318}
    319
    320static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
    321{
    322	return 1;
    323}
    324
    325static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
    326{
    327	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
    328				struct vhost_scsi_cmd, tvc_se_cmd);
    329	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
    330				struct vhost_scsi_virtqueue, vq);
    331	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
    332	int i;
    333
    334	if (tv_cmd->tvc_sgl_count) {
    335		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
    336			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
    337	}
    338	if (tv_cmd->tvc_prot_sgl_count) {
    339		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
    340			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
    341	}
    342
    343	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
    344	vhost_scsi_put_inflight(inflight);
    345}
    346
    347static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
    348{
    349	struct vhost_scsi_tpg *tpg = tmf->tpg;
    350	struct vhost_scsi_inflight *inflight = tmf->inflight;
    351
    352	mutex_lock(&tpg->tv_tpg_mutex);
    353	list_add_tail(&tpg->tmf_queue, &tmf->queue_entry);
    354	mutex_unlock(&tpg->tv_tpg_mutex);
    355	vhost_scsi_put_inflight(inflight);
    356}
    357
    358static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
    359{
    360	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
    361		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
    362					struct vhost_scsi_tmf, se_cmd);
    363
    364		vhost_work_queue(&tmf->vhost->dev, &tmf->vwork);
    365	} else {
    366		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
    367					struct vhost_scsi_cmd, tvc_se_cmd);
    368		struct vhost_scsi *vs = cmd->tvc_vhost;
    369
    370		llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
    371		vhost_work_queue(&vs->dev, &vs->vs_completion_work);
    372	}
    373}
    374
    375static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
    376{
    377	return 0;
    378}
    379
    380static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
    381{
    382	/* Go ahead and process the write immediately */
    383	target_execute_cmd(se_cmd);
    384	return 0;
    385}
    386
    387static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
    388{
    389	return;
    390}
    391
    392static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
    393{
    394	return 0;
    395}
    396
    397static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
    398{
    399	transport_generic_free_cmd(se_cmd, 0);
    400	return 0;
    401}
    402
    403static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
    404{
    405	transport_generic_free_cmd(se_cmd, 0);
    406	return 0;
    407}
    408
    409static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
    410{
    411	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
    412						  se_cmd);
    413
    414	tmf->scsi_resp = se_cmd->se_tmr_req->response;
    415	transport_generic_free_cmd(&tmf->se_cmd, 0);
    416}
    417
    418static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
    419{
    420	return;
    421}
    422
    423static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
    424{
    425	vs->vs_events_nr--;
    426	kfree(evt);
    427}
    428
    429static struct vhost_scsi_evt *
    430vhost_scsi_allocate_evt(struct vhost_scsi *vs,
    431		       u32 event, u32 reason)
    432{
    433	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
    434	struct vhost_scsi_evt *evt;
    435
    436	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
    437		vs->vs_events_missed = true;
    438		return NULL;
    439	}
    440
    441	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
    442	if (!evt) {
    443		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
    444		vs->vs_events_missed = true;
    445		return NULL;
    446	}
    447
    448	evt->event.event = cpu_to_vhost32(vq, event);
    449	evt->event.reason = cpu_to_vhost32(vq, reason);
    450	vs->vs_events_nr++;
    451
    452	return evt;
    453}
    454
    455static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
    456{
    457	return target_put_sess_cmd(se_cmd);
    458}
    459
    460static void
    461vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
    462{
    463	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
    464	struct virtio_scsi_event *event = &evt->event;
    465	struct virtio_scsi_event __user *eventp;
    466	unsigned out, in;
    467	int head, ret;
    468
    469	if (!vhost_vq_get_backend(vq)) {
    470		vs->vs_events_missed = true;
    471		return;
    472	}
    473
    474again:
    475	vhost_disable_notify(&vs->dev, vq);
    476	head = vhost_get_vq_desc(vq, vq->iov,
    477			ARRAY_SIZE(vq->iov), &out, &in,
    478			NULL, NULL);
    479	if (head < 0) {
    480		vs->vs_events_missed = true;
    481		return;
    482	}
    483	if (head == vq->num) {
    484		if (vhost_enable_notify(&vs->dev, vq))
    485			goto again;
    486		vs->vs_events_missed = true;
    487		return;
    488	}
    489
    490	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
    491		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
    492				vq->iov[out].iov_len);
    493		vs->vs_events_missed = true;
    494		return;
    495	}
    496
    497	if (vs->vs_events_missed) {
    498		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
    499		vs->vs_events_missed = false;
    500	}
    501
    502	eventp = vq->iov[out].iov_base;
    503	ret = __copy_to_user(eventp, event, sizeof(*event));
    504	if (!ret)
    505		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
    506	else
    507		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
    508}
    509
    510static void vhost_scsi_evt_work(struct vhost_work *work)
    511{
    512	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
    513					vs_event_work);
    514	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
    515	struct vhost_scsi_evt *evt, *t;
    516	struct llist_node *llnode;
    517
    518	mutex_lock(&vq->mutex);
    519	llnode = llist_del_all(&vs->vs_event_list);
    520	llist_for_each_entry_safe(evt, t, llnode, list) {
    521		vhost_scsi_do_evt_work(vs, evt);
    522		vhost_scsi_free_evt(vs, evt);
    523	}
    524	mutex_unlock(&vq->mutex);
    525}
    526
    527/* Fill in status and signal that we are done processing this command
    528 *
    529 * This is scheduled in the vhost work queue so we are called with the owner
    530 * process mm and can access the vring.
    531 */
    532static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
    533{
    534	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
    535					vs_completion_work);
    536	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
    537	struct virtio_scsi_cmd_resp v_rsp;
    538	struct vhost_scsi_cmd *cmd, *t;
    539	struct llist_node *llnode;
    540	struct se_cmd *se_cmd;
    541	struct iov_iter iov_iter;
    542	int ret, vq;
    543
    544	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
    545	llnode = llist_del_all(&vs->vs_completion_list);
    546	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
    547		se_cmd = &cmd->tvc_se_cmd;
    548
    549		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
    550			cmd, se_cmd->residual_count, se_cmd->scsi_status);
    551
    552		memset(&v_rsp, 0, sizeof(v_rsp));
    553		v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
    554		/* TODO is status_qualifier field needed? */
    555		v_rsp.status = se_cmd->scsi_status;
    556		v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
    557						 se_cmd->scsi_sense_length);
    558		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
    559		       se_cmd->scsi_sense_length);
    560
    561		iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
    562			      cmd->tvc_in_iovs, sizeof(v_rsp));
    563		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
    564		if (likely(ret == sizeof(v_rsp))) {
    565			struct vhost_scsi_virtqueue *q;
    566			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
    567			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
    568			vq = q - vs->vqs;
    569			__set_bit(vq, signal);
    570		} else
    571			pr_err("Faulted on virtio_scsi_cmd_resp\n");
    572
    573		vhost_scsi_release_cmd_res(se_cmd);
    574	}
    575
    576	vq = -1;
    577	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
    578		< VHOST_SCSI_MAX_VQ)
    579		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
    580}
    581
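        /*
         * Commands come from the per-virtqueue pool allocated in
         * vhost_scsi_setup_vq_cmds(); an sbitmap tag picks a free slot, and
         * the preallocated sgl/prot_sgl/upages arrays are saved across the
         * memset below so only the per-request fields are reinitialized.
         */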
    582static struct vhost_scsi_cmd *
    583vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
    584		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
    585		   u32 exp_data_len, int data_direction)
    586{
    587	struct vhost_scsi_virtqueue *svq = container_of(vq,
    588					struct vhost_scsi_virtqueue, vq);
    589	struct vhost_scsi_cmd *cmd;
    590	struct vhost_scsi_nexus *tv_nexus;
    591	struct scatterlist *sg, *prot_sg;
    592	struct page **pages;
    593	int tag;
    594
    595	tv_nexus = tpg->tpg_nexus;
    596	if (!tv_nexus) {
    597		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
    598		return ERR_PTR(-EIO);
    599	}
    600
    601	tag = sbitmap_get(&svq->scsi_tags);
    602	if (tag < 0) {
    603		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
    604		return ERR_PTR(-ENOMEM);
    605	}
    606
    607	cmd = &svq->scsi_cmds[tag];
    608	sg = cmd->tvc_sgl;
    609	prot_sg = cmd->tvc_prot_sgl;
    610	pages = cmd->tvc_upages;
    611	memset(cmd, 0, sizeof(*cmd));
    612	cmd->tvc_sgl = sg;
    613	cmd->tvc_prot_sgl = prot_sg;
    614	cmd->tvc_upages = pages;
    615	cmd->tvc_se_cmd.map_tag = tag;
    616	cmd->tvc_tag = scsi_tag;
    617	cmd->tvc_lun = lun;
    618	cmd->tvc_task_attr = task_attr;
    619	cmd->tvc_exp_data_len = exp_data_len;
    620	cmd->tvc_data_direction = data_direction;
    621	cmd->tvc_nexus = tv_nexus;
    622	cmd->inflight = vhost_scsi_get_inflight(vq);
    623
    624	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
    625
    626	return cmd;
    627}
    628
    629/*
    630 * Map a user memory range into a scatterlist
    631 *
    632 * Returns the number of scatterlist entries used or -errno on error.
    633 */
    634static int
    635vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
    636		      struct iov_iter *iter,
    637		      struct scatterlist *sgl,
    638		      bool write)
    639{
    640	struct page **pages = cmd->tvc_upages;
    641	struct scatterlist *sg = sgl;
    642	ssize_t bytes;
    643	size_t offset;
    644	unsigned int npages = 0;
    645
    646	bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
    647				VHOST_SCSI_PREALLOC_UPAGES, &offset);
    648	/* No pages were pinned */
    649	if (bytes <= 0)
    650		return bytes < 0 ? bytes : -EFAULT;
    651
    652	iov_iter_advance(iter, bytes);
    653
    654	while (bytes) {
    655		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
    656		sg_set_page(sg++, pages[npages++], n, offset);
    657		bytes -= n;
    658		offset = 0;
    659	}
    660	return npages;
    661}
    662
    663static int
    664vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
    665{
    666	int sgl_count = 0;
    667
    668	if (!iter || !iter->iov) {
    669		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
    670		       " present\n", __func__, bytes);
    671		return -EINVAL;
    672	}
    673
    674	sgl_count = iov_iter_npages(iter, 0xffff);
    675	if (sgl_count > max_sgls) {
    676		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
    677		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
    678		return -EINVAL;
    679	}
    680	return sgl_count;
    681}
    682
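        /*
         * Walk the iov_iter until it is drained: each vhost_scsi_map_to_sgl()
         * call pins at most VHOST_SCSI_PREALLOC_UPAGES pages, so large
         * payloads need several passes. On failure, drop the references to
         * any pages already mapped into the scatterlist.
         */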
    683static int
    684vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
    685		      struct iov_iter *iter,
    686		      struct scatterlist *sg, int sg_count)
    687{
    688	struct scatterlist *p = sg;
    689	int ret;
    690
    691	while (iov_iter_count(iter)) {
    692		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
    693		if (ret < 0) {
    694			while (p < sg) {
    695				struct page *page = sg_page(p++);
    696				if (page)
    697					put_page(page);
    698			}
    699			return ret;
    700		}
    701		sg += ret;
    702	}
    703	return 0;
    704}
    705
    706static int
    707vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
    708		 size_t prot_bytes, struct iov_iter *prot_iter,
    709		 size_t data_bytes, struct iov_iter *data_iter)
    710{
    711	int sgl_count, ret;
    712	bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
    713
    714	if (prot_bytes) {
    715		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
    716						 VHOST_SCSI_PREALLOC_PROT_SGLS);
    717		if (sgl_count < 0)
    718			return sgl_count;
    719
    720		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
    721		cmd->tvc_prot_sgl_count = sgl_count;
    722		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
    723			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
    724
    725		ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
    726					    cmd->tvc_prot_sgl,
    727					    cmd->tvc_prot_sgl_count);
    728		if (ret < 0) {
    729			cmd->tvc_prot_sgl_count = 0;
    730			return ret;
    731		}
    732	}
    733	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
    734					 VHOST_SCSI_PREALLOC_SGLS);
    735	if (sgl_count < 0)
    736		return sgl_count;
    737
    738	sg_init_table(cmd->tvc_sgl, sgl_count);
    739	cmd->tvc_sgl_count = sgl_count;
    740	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
    741		  cmd->tvc_sgl, cmd->tvc_sgl_count);
    742
    743	ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
    744				    cmd->tvc_sgl, cmd->tvc_sgl_count);
    745	if (ret < 0) {
    746		cmd->tvc_sgl_count = 0;
    747		return ret;
    748	}
    749	return 0;
    750}
    751
    752static int vhost_scsi_to_tcm_attr(int attr)
    753{
    754	switch (attr) {
    755	case VIRTIO_SCSI_S_SIMPLE:
    756		return TCM_SIMPLE_TAG;
    757	case VIRTIO_SCSI_S_ORDERED:
    758		return TCM_ORDERED_TAG;
    759	case VIRTIO_SCSI_S_HEAD:
    760		return TCM_HEAD_TAG;
    761	case VIRTIO_SCSI_S_ACA:
    762		return TCM_ACA_TAG;
    763	default:
    764		break;
    765	}
    766	return TCM_SIMPLE_TAG;
    767}
    768
    769static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
    770{
    771	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
    772	struct vhost_scsi_nexus *tv_nexus;
    773	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
    774
    775	/* FIXME: BIDI operation */
    776	if (cmd->tvc_sgl_count) {
    777		sg_ptr = cmd->tvc_sgl;
    778
    779		if (cmd->tvc_prot_sgl_count)
    780			sg_prot_ptr = cmd->tvc_prot_sgl;
    781		else
    782			se_cmd->prot_pto = true;
    783	} else {
    784		sg_ptr = NULL;
    785	}
    786	tv_nexus = cmd->tvc_nexus;
    787
    788	se_cmd->tag = 0;
    789	target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
    790			cmd->tvc_lun, cmd->tvc_exp_data_len,
    791			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
    792			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF);
    793
    794	if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr,
    795			       cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
    796			       cmd->tvc_prot_sgl_count, GFP_KERNEL))
    797		return;
    798
    799	target_queue_submission(se_cmd);
    800}
    801
    802static void
    803vhost_scsi_send_bad_target(struct vhost_scsi *vs,
    804			   struct vhost_virtqueue *vq,
    805			   int head, unsigned out)
    806{
    807	struct virtio_scsi_cmd_resp __user *resp;
    808	struct virtio_scsi_cmd_resp rsp;
    809	int ret;
    810
    811	memset(&rsp, 0, sizeof(rsp));
    812	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
    813	resp = vq->iov[out].iov_base;
    814	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
    815	if (!ret)
    816		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
    817	else
    818		pr_err("Faulted on virtio_scsi_cmd_resp\n");
    819}
    820
    821static int
    822vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
    823		    struct vhost_scsi_ctx *vc)
    824{
    825	int ret = -ENXIO;
    826
    827	vc->head = vhost_get_vq_desc(vq, vq->iov,
    828				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
    829				     NULL, NULL);
    830
    831	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
    832		 vc->head, vc->out, vc->in);
    833
    834	/* On error, stop handling until the next kick. */
    835	if (unlikely(vc->head < 0))
    836		goto done;
    837
    838	/* Nothing new?  Wait for eventfd to tell us they refilled. */
    839	if (vc->head == vq->num) {
    840		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
    841			vhost_disable_notify(&vs->dev, vq);
    842			ret = -EAGAIN;
    843		}
    844		goto done;
    845	}
    846
    847	/*
    848	 * Get the size of request and response buffers.
    849	 * FIXME: Not correct for BIDI operation
    850	 */
    851	vc->out_size = iov_length(vq->iov, vc->out);
    852	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
    853
    854	/*
     855	 * Copy over the virtio-scsi request header, which for an
    856	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
    857	 * single iovec may contain both the header + outgoing
    858	 * WRITE payloads.
    859	 *
    860	 * copy_from_iter() will advance out_iter, so that it will
    861	 * point at the start of the outgoing WRITE payload, if
    862	 * DMA_TO_DEVICE is set.
    863	 */
    864	iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
    865	ret = 0;
    866
    867done:
    868	return ret;
    869}
    870
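        /*
         * Sanity-check the buffer sizes against the expected request and
         * response headers. The distinct return codes feed the error handling
         * in the vq handlers: -EINVAL drops the request, -EIO answers with a
         * VIRTIO_SCSI_S_BAD_TARGET response.
         */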
    871static int
    872vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
    873{
    874	if (unlikely(vc->in_size < vc->rsp_size)) {
    875		vq_err(vq,
    876		       "Response buf too small, need min %zu bytes got %zu",
    877		       vc->rsp_size, vc->in_size);
    878		return -EINVAL;
    879	} else if (unlikely(vc->out_size < vc->req_size)) {
    880		vq_err(vq,
    881		       "Request buf too small, need min %zu bytes got %zu",
    882		       vc->req_size, vc->out_size);
    883		return -EIO;
    884	}
    885
    886	return 0;
    887}
    888
    889static int
    890vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
    891		   struct vhost_scsi_tpg **tpgp)
    892{
    893	int ret = -EIO;
    894
    895	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
    896					  &vc->out_iter))) {
    897		vq_err(vq, "Faulted on copy_from_iter_full\n");
    898	} else if (unlikely(*vc->lunp != 1)) {
    899		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
    900		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
    901	} else {
    902		struct vhost_scsi_tpg **vs_tpg, *tpg;
    903
    904		vs_tpg = vhost_vq_get_backend(vq);	/* validated at handler entry */
    905
    906		tpg = READ_ONCE(vs_tpg[*vc->target]);
    907		if (unlikely(!tpg)) {
    908			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
    909		} else {
    910			if (tpgp)
    911				*tpgp = tpg;
    912			ret = 0;
    913		}
    914	}
    915
    916	return ret;
    917}
    918
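        /*
         * The virtio-scsi LUN field is 8 bytes: byte 0 is 1, byte 1 is the
         * target, and bytes 2-3 carry the LUN in SAM flat addressing format
         * (0x4000 | lun). Masking with 0x3FFF strips the address-method bits
         * to recover the 14-bit LUN.
         */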
    919static u16 vhost_buf_to_lun(u8 *lun_buf)
    920{
    921	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
    922}
    923
    924static void
    925vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
    926{
    927	struct vhost_scsi_tpg **vs_tpg, *tpg;
    928	struct virtio_scsi_cmd_req v_req;
    929	struct virtio_scsi_cmd_req_pi v_req_pi;
    930	struct vhost_scsi_ctx vc;
    931	struct vhost_scsi_cmd *cmd;
    932	struct iov_iter in_iter, prot_iter, data_iter;
    933	u64 tag;
    934	u32 exp_data_len, data_direction;
    935	int ret, prot_bytes, c = 0;
    936	u16 lun;
    937	u8 task_attr;
    938	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
    939	void *cdb;
    940
    941	mutex_lock(&vq->mutex);
    942	/*
    943	 * We can handle the vq only after the endpoint is setup by calling the
    944	 * VHOST_SCSI_SET_ENDPOINT ioctl.
    945	 */
    946	vs_tpg = vhost_vq_get_backend(vq);
    947	if (!vs_tpg)
    948		goto out;
    949
    950	memset(&vc, 0, sizeof(vc));
    951	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
    952
    953	vhost_disable_notify(&vs->dev, vq);
    954
    955	do {
    956		ret = vhost_scsi_get_desc(vs, vq, &vc);
    957		if (ret)
    958			goto err;
    959
    960		/*
    961		 * Setup pointers and values based upon different virtio-scsi
    962		 * request header if T10_PI is enabled in KVM guest.
    963		 */
    964		if (t10_pi) {
    965			vc.req = &v_req_pi;
    966			vc.req_size = sizeof(v_req_pi);
    967			vc.lunp = &v_req_pi.lun[0];
    968			vc.target = &v_req_pi.lun[1];
    969		} else {
    970			vc.req = &v_req;
    971			vc.req_size = sizeof(v_req);
    972			vc.lunp = &v_req.lun[0];
    973			vc.target = &v_req.lun[1];
    974		}
    975
    976		/*
    977		 * Validate the size of request and response buffers.
    978		 * Check for a sane response buffer so we can report
    979		 * early errors back to the guest.
    980		 */
    981		ret = vhost_scsi_chk_size(vq, &vc);
    982		if (ret)
    983			goto err;
    984
    985		ret = vhost_scsi_get_req(vq, &vc, &tpg);
    986		if (ret)
    987			goto err;
    988
    989		ret = -EIO;	/* bad target on any error from here on */
    990
    991		/*
    992		 * Determine data_direction by calculating the total outgoing
    993		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
    994		 * response headers respectively.
    995		 *
    996		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
    997		 * to the right place.
    998		 *
    999		 * For DMA_FROM_DEVICE, the iovec will be just past the end
   1000		 * of the virtio-scsi response header in either the same
   1001		 * or immediately following iovec.
   1002		 *
   1003		 * Any associated T10_PI bytes for the outgoing / incoming
   1004		 * payloads are included in calculation of exp_data_len here.
   1005		 */
   1006		prot_bytes = 0;
   1007
   1008		if (vc.out_size > vc.req_size) {
   1009			data_direction = DMA_TO_DEVICE;
   1010			exp_data_len = vc.out_size - vc.req_size;
   1011			data_iter = vc.out_iter;
   1012		} else if (vc.in_size > vc.rsp_size) {
   1013			data_direction = DMA_FROM_DEVICE;
   1014			exp_data_len = vc.in_size - vc.rsp_size;
   1015
   1016			iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in,
   1017				      vc.rsp_size + exp_data_len);
   1018			iov_iter_advance(&in_iter, vc.rsp_size);
   1019			data_iter = in_iter;
   1020		} else {
   1021			data_direction = DMA_NONE;
   1022			exp_data_len = 0;
   1023		}
   1024		/*
   1025		 * If T10_PI header + payload is present, setup prot_iter values
   1026		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
   1027		 * host scatterlists via get_user_pages_fast().
   1028		 */
   1029		if (t10_pi) {
   1030			if (v_req_pi.pi_bytesout) {
   1031				if (data_direction != DMA_TO_DEVICE) {
   1032					vq_err(vq, "Received non zero pi_bytesout,"
   1033						" but wrong data_direction\n");
   1034					goto err;
   1035				}
   1036				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
   1037			} else if (v_req_pi.pi_bytesin) {
   1038				if (data_direction != DMA_FROM_DEVICE) {
   1039					vq_err(vq, "Received non zero pi_bytesin,"
   1040						" but wrong data_direction\n");
   1041					goto err;
   1042				}
   1043				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
   1044			}
   1045			/*
   1046			 * Set prot_iter to data_iter and truncate it to
   1047			 * prot_bytes, and advance data_iter past any
    1048			 * preceding prot_bytes that may be present.
   1049			 *
   1050			 * Also fix up the exp_data_len to reflect only the
   1051			 * actual data payload length.
   1052			 */
   1053			if (prot_bytes) {
   1054				exp_data_len -= prot_bytes;
   1055				prot_iter = data_iter;
   1056				iov_iter_truncate(&prot_iter, prot_bytes);
   1057				iov_iter_advance(&data_iter, prot_bytes);
   1058			}
   1059			tag = vhost64_to_cpu(vq, v_req_pi.tag);
   1060			task_attr = v_req_pi.task_attr;
   1061			cdb = &v_req_pi.cdb[0];
   1062			lun = vhost_buf_to_lun(v_req_pi.lun);
   1063		} else {
   1064			tag = vhost64_to_cpu(vq, v_req.tag);
   1065			task_attr = v_req.task_attr;
   1066			cdb = &v_req.cdb[0];
   1067			lun = vhost_buf_to_lun(v_req.lun);
   1068		}
   1069		/*
    1070		 * Check that the received CDB size does not exceed our
   1071		 * hardcoded max for vhost-scsi, then get a pre-allocated
   1072		 * cmd descriptor for the new virtio-scsi tag.
   1073		 *
   1074		 * TODO what if cdb was too small for varlen cdb header?
   1075		 */
   1076		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
   1077			vq_err(vq, "Received SCSI CDB with command_size: %d that"
   1078				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
   1079				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
    1080			goto err;
   1081		}
   1082		cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
   1083					 exp_data_len + prot_bytes,
   1084					 data_direction);
   1085		if (IS_ERR(cmd)) {
   1086			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
   1087			       PTR_ERR(cmd));
   1088			goto err;
   1089		}
   1090		cmd->tvc_vhost = vs;
   1091		cmd->tvc_vq = vq;
   1092		cmd->tvc_resp_iov = vq->iov[vc.out];
   1093		cmd->tvc_in_iovs = vc.in;
   1094
   1095		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
   1096			 cmd->tvc_cdb[0], cmd->tvc_lun);
   1097		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
   1098			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
   1099
   1100		if (data_direction != DMA_NONE) {
   1101			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
   1102						      &prot_iter, exp_data_len,
   1103						      &data_iter))) {
   1104				vq_err(vq, "Failed to map iov to sgl\n");
   1105				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
   1106				goto err;
   1107			}
   1108		}
   1109		/*
   1110		 * Save the descriptor from vhost_get_vq_desc() to be used to
   1111		 * complete the virtio-scsi request in TCM callback context via
   1112		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
   1113		 */
   1114		cmd->tvc_vq_desc = vc.head;
   1115		vhost_scsi_target_queue_cmd(cmd);
   1116		ret = 0;
   1117err:
   1118		/*
   1119		 * ENXIO:  No more requests, or read error, wait for next kick
   1120		 * EINVAL: Invalid response buffer, drop the request
   1121		 * EIO:    Respond with bad target
   1122		 * EAGAIN: Pending request
   1123		 */
   1124		if (ret == -ENXIO)
   1125			break;
   1126		else if (ret == -EIO)
   1127			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
   1128	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
   1129out:
   1130	mutex_unlock(&vq->mutex);
   1131}
   1132
   1133static void
   1134vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
   1135			 int in_iovs, int vq_desc, struct iovec *resp_iov,
   1136			 int tmf_resp_code)
   1137{
   1138	struct virtio_scsi_ctrl_tmf_resp rsp;
   1139	struct iov_iter iov_iter;
   1140	int ret;
   1141
   1142	pr_debug("%s\n", __func__);
   1143	memset(&rsp, 0, sizeof(rsp));
   1144	rsp.response = tmf_resp_code;
   1145
   1146	iov_iter_init(&iov_iter, READ, resp_iov, in_iovs, sizeof(rsp));
   1147
   1148	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
   1149	if (likely(ret == sizeof(rsp)))
   1150		vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
   1151	else
   1152		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
   1153}
   1154
   1155static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
   1156{
   1157	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
   1158						  vwork);
   1159	int resp_code;
   1160
   1161	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
   1162		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
   1163	else
   1164		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
   1165
   1166	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
   1167				 tmf->vq_desc, &tmf->resp_iov, resp_code);
   1168	vhost_scsi_release_tmf_res(tmf);
   1169}
   1170
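        /*
         * TMF requests use a reserved descriptor taken from the tpg's
         * tmf_queue; if none is queued the request is rejected, and
         * vhost_scsi_release_tmf_res() puts the descriptor back on the list
         * once the response has been sent.
         */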
   1171static void
   1172vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
   1173		      struct vhost_virtqueue *vq,
   1174		      struct virtio_scsi_ctrl_tmf_req *vtmf,
   1175		      struct vhost_scsi_ctx *vc)
   1176{
   1177	struct vhost_scsi_virtqueue *svq = container_of(vq,
   1178					struct vhost_scsi_virtqueue, vq);
   1179	struct vhost_scsi_tmf *tmf;
   1180
   1181	if (vhost32_to_cpu(vq, vtmf->subtype) !=
   1182	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
   1183		goto send_reject;
   1184
   1185	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
   1186		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
   1187		goto send_reject;
   1188	}
   1189
   1190	mutex_lock(&tpg->tv_tpg_mutex);
   1191	if (list_empty(&tpg->tmf_queue)) {
   1192		pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
   1193		mutex_unlock(&tpg->tv_tpg_mutex);
   1194		goto send_reject;
   1195	}
   1196
   1197	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
   1198			       queue_entry);
   1199	list_del_init(&tmf->queue_entry);
   1200	mutex_unlock(&tpg->tv_tpg_mutex);
   1201
   1202	tmf->tpg = tpg;
   1203	tmf->vhost = vs;
   1204	tmf->svq = svq;
   1205	tmf->resp_iov = vq->iov[vc->out];
   1206	tmf->vq_desc = vc->head;
   1207	tmf->in_iovs = vc->in;
   1208	tmf->inflight = vhost_scsi_get_inflight(vq);
   1209
   1210	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
   1211			      vhost_buf_to_lun(vtmf->lun), NULL,
   1212			      TMR_LUN_RESET, GFP_KERNEL, 0,
   1213			      TARGET_SCF_ACK_KREF) < 0) {
   1214		vhost_scsi_release_tmf_res(tmf);
   1215		goto send_reject;
   1216	}
   1217
   1218	return;
   1219
   1220send_reject:
   1221	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
   1222				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
   1223}
   1224
   1225static void
   1226vhost_scsi_send_an_resp(struct vhost_scsi *vs,
   1227			struct vhost_virtqueue *vq,
   1228			struct vhost_scsi_ctx *vc)
   1229{
   1230	struct virtio_scsi_ctrl_an_resp rsp;
   1231	struct iov_iter iov_iter;
   1232	int ret;
   1233
   1234	pr_debug("%s\n", __func__);
   1235	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
   1236	rsp.response = VIRTIO_SCSI_S_OK;
   1237
   1238	iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
   1239
   1240	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
   1241	if (likely(ret == sizeof(rsp)))
   1242		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
   1243	else
   1244		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
   1245}
   1246
   1247static void
   1248vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
   1249{
   1250	struct vhost_scsi_tpg *tpg;
   1251	union {
   1252		__virtio32 type;
   1253		struct virtio_scsi_ctrl_an_req an;
   1254		struct virtio_scsi_ctrl_tmf_req tmf;
   1255	} v_req;
   1256	struct vhost_scsi_ctx vc;
   1257	size_t typ_size;
   1258	int ret, c = 0;
   1259
   1260	mutex_lock(&vq->mutex);
   1261	/*
   1262	 * We can handle the vq only after the endpoint is setup by calling the
   1263	 * VHOST_SCSI_SET_ENDPOINT ioctl.
   1264	 */
   1265	if (!vhost_vq_get_backend(vq))
   1266		goto out;
   1267
   1268	memset(&vc, 0, sizeof(vc));
   1269
   1270	vhost_disable_notify(&vs->dev, vq);
   1271
   1272	do {
   1273		ret = vhost_scsi_get_desc(vs, vq, &vc);
   1274		if (ret)
   1275			goto err;
   1276
   1277		/*
   1278		 * Get the request type first in order to setup
   1279		 * other parameters dependent on the type.
   1280		 */
   1281		vc.req = &v_req.type;
   1282		typ_size = sizeof(v_req.type);
   1283
   1284		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
   1285						  &vc.out_iter))) {
   1286			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
   1287			/*
   1288			 * The size of the response buffer depends on the
   1289			 * request type and must be validated against it.
   1290			 * Since the request type is not known, don't send
   1291			 * a response.
   1292			 */
   1293			continue;
   1294		}
   1295
   1296		switch (vhost32_to_cpu(vq, v_req.type)) {
   1297		case VIRTIO_SCSI_T_TMF:
   1298			vc.req = &v_req.tmf;
   1299			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
   1300			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
   1301			vc.lunp = &v_req.tmf.lun[0];
   1302			vc.target = &v_req.tmf.lun[1];
   1303			break;
   1304		case VIRTIO_SCSI_T_AN_QUERY:
   1305		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
   1306			vc.req = &v_req.an;
   1307			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
   1308			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
   1309			vc.lunp = &v_req.an.lun[0];
   1310			vc.target = NULL;
   1311			break;
   1312		default:
   1313			vq_err(vq, "Unknown control request %d", v_req.type);
   1314			continue;
   1315		}
   1316
   1317		/*
   1318		 * Validate the size of request and response buffers.
   1319		 * Check for a sane response buffer so we can report
   1320		 * early errors back to the guest.
   1321		 */
   1322		ret = vhost_scsi_chk_size(vq, &vc);
   1323		if (ret)
   1324			goto err;
   1325
   1326		/*
   1327		 * Get the rest of the request now that its size is known.
   1328		 */
   1329		vc.req += typ_size;
   1330		vc.req_size -= typ_size;
   1331
   1332		ret = vhost_scsi_get_req(vq, &vc, &tpg);
   1333		if (ret)
   1334			goto err;
   1335
   1336		if (v_req.type == VIRTIO_SCSI_T_TMF)
   1337			vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
   1338		else
   1339			vhost_scsi_send_an_resp(vs, vq, &vc);
   1340err:
   1341		/*
   1342		 * ENXIO:  No more requests, or read error, wait for next kick
   1343		 * EINVAL: Invalid response buffer, drop the request
   1344		 * EIO:    Respond with bad target
   1345		 * EAGAIN: Pending request
   1346		 */
   1347		if (ret == -ENXIO)
   1348			break;
   1349		else if (ret == -EIO)
   1350			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
   1351	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
   1352out:
   1353	mutex_unlock(&vq->mutex);
   1354}
   1355
   1356static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
   1357{
   1358	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
   1359						poll.work);
   1360	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
   1361
   1362	pr_debug("%s: The handling func for control queue.\n", __func__);
   1363	vhost_scsi_ctl_handle_vq(vs, vq);
   1364}
   1365
   1366static void
   1367vhost_scsi_send_evt(struct vhost_scsi *vs,
   1368		   struct vhost_scsi_tpg *tpg,
   1369		   struct se_lun *lun,
   1370		   u32 event,
   1371		   u32 reason)
   1372{
   1373	struct vhost_scsi_evt *evt;
   1374
   1375	evt = vhost_scsi_allocate_evt(vs, event, reason);
   1376	if (!evt)
   1377		return;
   1378
   1379	if (tpg && lun) {
   1380		/* TODO: share lun setup code with virtio-scsi.ko */
   1381		/*
   1382		 * Note: evt->event is zeroed when we allocate it and
   1383		 * lun[4-7] need to be zero according to virtio-scsi spec.
   1384		 */
   1385		evt->event.lun[0] = 0x01;
   1386		evt->event.lun[1] = tpg->tport_tpgt;
   1387		if (lun->unpacked_lun >= 256)
    1388			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
   1389		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
   1390	}
   1391
   1392	llist_add(&evt->list, &vs->vs_event_list);
   1393	vhost_work_queue(&vs->dev, &vs->vs_event_work);
   1394}
   1395
   1396static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
   1397{
   1398	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
   1399						poll.work);
   1400	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
   1401
   1402	mutex_lock(&vq->mutex);
   1403	if (!vhost_vq_get_backend(vq))
   1404		goto out;
   1405
   1406	if (vs->vs_events_missed)
   1407		vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
   1408out:
   1409	mutex_unlock(&vq->mutex);
   1410}
   1411
   1412static void vhost_scsi_handle_kick(struct vhost_work *work)
   1413{
   1414	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
   1415						poll.work);
   1416	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
   1417
   1418	vhost_scsi_handle_vq(vs, vq);
   1419}
   1420
   1421/* Callers must hold dev mutex */
   1422static void vhost_scsi_flush(struct vhost_scsi *vs)
   1423{
   1424	struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
   1425	int i;
   1426
   1427	/* Init new inflight and remember the old inflight */
   1428	vhost_scsi_init_inflight(vs, old_inflight);
   1429
   1430	/*
   1431	 * The inflight->kref was initialized to 1. We decrement it here to
   1432	 * indicate the start of the flush operation so that it will reach 0
   1433	 * when all the reqs are finished.
   1434	 */
   1435	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
   1436		kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
   1437
   1438	/* Flush both the vhost poll and vhost work */
   1439	vhost_dev_flush(&vs->dev);
   1440
   1441	/* Wait for all reqs issued before the flush to be finished */
   1442	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
   1443		wait_for_completion(&old_inflight[i]->comp);
   1444}
   1445
   1446static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
   1447{
   1448	struct vhost_scsi_virtqueue *svq = container_of(vq,
   1449					struct vhost_scsi_virtqueue, vq);
   1450	struct vhost_scsi_cmd *tv_cmd;
   1451	unsigned int i;
   1452
   1453	if (!svq->scsi_cmds)
   1454		return;
   1455
   1456	for (i = 0; i < svq->max_cmds; i++) {
   1457		tv_cmd = &svq->scsi_cmds[i];
   1458
   1459		kfree(tv_cmd->tvc_sgl);
   1460		kfree(tv_cmd->tvc_prot_sgl);
   1461		kfree(tv_cmd->tvc_upages);
   1462	}
   1463
   1464	sbitmap_free(&svq->scsi_tags);
   1465	kfree(svq->scsi_cmds);
   1466	svq->scsi_cmds = NULL;
   1467}
   1468
   1469static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
   1470{
   1471	struct vhost_scsi_virtqueue *svq = container_of(vq,
   1472					struct vhost_scsi_virtqueue, vq);
   1473	struct vhost_scsi_cmd *tv_cmd;
   1474	unsigned int i;
   1475
   1476	if (svq->scsi_cmds)
   1477		return 0;
   1478
   1479	if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
   1480			      NUMA_NO_NODE, false, true))
   1481		return -ENOMEM;
   1482	svq->max_cmds = max_cmds;
   1483
   1484	svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
   1485	if (!svq->scsi_cmds) {
   1486		sbitmap_free(&svq->scsi_tags);
   1487		return -ENOMEM;
   1488	}
   1489
   1490	for (i = 0; i < max_cmds; i++) {
   1491		tv_cmd = &svq->scsi_cmds[i];
   1492
   1493		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
   1494					  sizeof(struct scatterlist),
   1495					  GFP_KERNEL);
   1496		if (!tv_cmd->tvc_sgl) {
   1497			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
   1498			goto out;
   1499		}
   1500
   1501		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
   1502					     sizeof(struct page *),
   1503					     GFP_KERNEL);
   1504		if (!tv_cmd->tvc_upages) {
   1505			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
   1506			goto out;
   1507		}
   1508
   1509		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
   1510					       sizeof(struct scatterlist),
   1511					       GFP_KERNEL);
   1512		if (!tv_cmd->tvc_prot_sgl) {
   1513			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
   1514			goto out;
   1515		}
   1516	}
   1517	return 0;
   1518out:
   1519	vhost_scsi_destroy_vq_cmds(vq);
   1520	return -ENOMEM;
   1521}
   1522
   1523/*
   1524 * Called from vhost_scsi_ioctl() context to walk the list of available
   1525 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
   1526 *
   1527 *  The lock nesting rule is:
   1528 *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
   1529 */
   1530static int
   1531vhost_scsi_set_endpoint(struct vhost_scsi *vs,
   1532			struct vhost_scsi_target *t)
   1533{
   1534	struct se_portal_group *se_tpg;
   1535	struct vhost_scsi_tport *tv_tport;
   1536	struct vhost_scsi_tpg *tpg;
   1537	struct vhost_scsi_tpg **vs_tpg;
   1538	struct vhost_virtqueue *vq;
   1539	int index, ret, i, len;
   1540	bool match = false;
   1541
   1542	mutex_lock(&vhost_scsi_mutex);
   1543	mutex_lock(&vs->dev.mutex);
   1544
   1545	/* Verify that ring has been setup correctly. */
   1546	for (index = 0; index < vs->dev.nvqs; ++index) {
   1548		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
   1549			ret = -EFAULT;
   1550			goto out;
   1551		}
   1552	}
   1553
   1554	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
   1555	vs_tpg = kzalloc(len, GFP_KERNEL);
   1556	if (!vs_tpg) {
   1557		ret = -ENOMEM;
   1558		goto out;
   1559	}
   1560	if (vs->vs_tpg)
   1561		memcpy(vs_tpg, vs->vs_tpg, len);
   1562
   1563	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
   1564		mutex_lock(&tpg->tv_tpg_mutex);
   1565		if (!tpg->tpg_nexus) {
   1566			mutex_unlock(&tpg->tv_tpg_mutex);
   1567			continue;
   1568		}
   1569		if (tpg->tv_tpg_vhost_count != 0) {
   1570			mutex_unlock(&tpg->tv_tpg_mutex);
   1571			continue;
   1572		}
   1573		tv_tport = tpg->tport;
   1574
   1575		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
   1576			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
   1577				mutex_unlock(&tpg->tv_tpg_mutex);
   1578				ret = -EEXIST;
   1579				goto undepend;
   1580			}
   1581			/*
   1582			 * In order to ensure individual vhost-scsi configfs
   1583			 * groups cannot be removed while in use by vhost ioctl,
   1584			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
   1585			 * dependency now.
   1586			 */
   1587			se_tpg = &tpg->se_tpg;
   1588			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
   1589			if (ret) {
   1590				pr_warn("target_depend_item() failed: %d\n", ret);
   1591				mutex_unlock(&tpg->tv_tpg_mutex);
   1592				goto undepend;
   1593			}
   1594			tpg->tv_tpg_vhost_count++;
   1595			tpg->vhost_scsi = vs;
   1596			vs_tpg[tpg->tport_tpgt] = tpg;
   1597			match = true;
   1598		}
   1599		mutex_unlock(&tpg->tv_tpg_mutex);
   1600	}
   1601
   1602	if (match) {
   1603		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
   1604		       sizeof(vs->vs_vhost_wwpn));
   1605
   1606		for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
   1607			vq = &vs->vqs[i].vq;
   1608			if (!vhost_vq_is_setup(vq))
   1609				continue;
   1610
   1611			ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
   1612			if (ret)
   1613				goto destroy_vq_cmds;
   1614		}
   1615
   1616		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
   1617			vq = &vs->vqs[i].vq;
   1618			mutex_lock(&vq->mutex);
   1619			vhost_vq_set_backend(vq, vs_tpg);
   1620			vhost_vq_init_access(vq);
   1621			mutex_unlock(&vq->mutex);
   1622		}
   1623		ret = 0;
   1624	} else {
   1625		ret = -EEXIST;
   1626	}
   1627
   1628	/*
   1629	 * Act as synchronize_rcu to make sure access to
   1630	 * old vs->vs_tpg is finished.
   1631	 */
   1632	vhost_scsi_flush(vs);
   1633	kfree(vs->vs_tpg);
   1634	vs->vs_tpg = vs_tpg;
   1635	goto out;
   1636
   1637destroy_vq_cmds:
   1638	for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
   1639		if (!vhost_vq_get_backend(&vs->vqs[i].vq))
   1640			vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
   1641	}
   1642undepend:
   1643	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
   1644		tpg = vs_tpg[i];
   1645		if (tpg) {
   1646			tpg->tv_tpg_vhost_count--;
   1647			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
   1648		}
   1649	}
   1650	kfree(vs_tpg);
   1651out:
   1652	mutex_unlock(&vs->dev.mutex);
   1653	mutex_unlock(&vhost_scsi_mutex);
   1654	return ret;
   1655}
   1656
   1657static int
   1658vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
   1659			  struct vhost_scsi_target *t)
   1660{
   1661	struct se_portal_group *se_tpg;
   1662	struct vhost_scsi_tport *tv_tport;
   1663	struct vhost_scsi_tpg *tpg;
   1664	struct vhost_virtqueue *vq;
   1665	bool match = false;
   1666	int index, ret, i;
   1667	u8 target;
   1668
   1669	mutex_lock(&vhost_scsi_mutex);
   1670	mutex_lock(&vs->dev.mutex);
   1671	/* Verify that ring has been setup correctly. */
   1672	for (index = 0; index < vs->dev.nvqs; ++index) {
   1673		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
   1674			ret = -EFAULT;
   1675			goto err_dev;
   1676		}
   1677	}
   1678
   1679	if (!vs->vs_tpg) {
   1680		ret = 0;
   1681		goto err_dev;
   1682	}
   1683
   1684	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
   1685		target = i;
   1686		tpg = vs->vs_tpg[target];
   1687		if (!tpg)
   1688			continue;
   1689
   1690		mutex_lock(&tpg->tv_tpg_mutex);
   1691		tv_tport = tpg->tport;
   1692		if (!tv_tport) {
   1693			ret = -ENODEV;
   1694			goto err_tpg;
   1695		}
   1696
   1697		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
   1698			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
   1699				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
   1700				tv_tport->tport_name, tpg->tport_tpgt,
   1701				t->vhost_wwpn, t->vhost_tpgt);
   1702			ret = -EINVAL;
   1703			goto err_tpg;
   1704		}
   1705		tpg->tv_tpg_vhost_count--;
   1706		tpg->vhost_scsi = NULL;
   1707		vs->vs_tpg[target] = NULL;
   1708		match = true;
   1709		mutex_unlock(&tpg->tv_tpg_mutex);
   1710		/*
   1711		 * Release se_tpg->tpg_group.cg_item configfs dependency now
   1712		 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
   1713		 */
   1714		se_tpg = &tpg->se_tpg;
   1715		target_undepend_item(&se_tpg->tpg_group.cg_item);
   1716	}
   1717	if (match) {
   1718		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
   1719			vq = &vs->vqs[i].vq;
   1720			mutex_lock(&vq->mutex);
   1721			vhost_vq_set_backend(vq, NULL);
   1722			mutex_unlock(&vq->mutex);
   1723		}
   1724		/* Make sure cmds are not running before tearing them down. */
   1725		vhost_scsi_flush(vs);
   1726
   1727		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
   1728			vq = &vs->vqs[i].vq;
   1729			vhost_scsi_destroy_vq_cmds(vq);
   1730		}
   1731	}
   1732	/*
   1733	 * Act as a synchronize_rcu() equivalent to make sure all access to
   1734	 * the old vs->vs_tpg has finished.
   1735	 */
   1736	vhost_scsi_flush(vs);
   1737	kfree(vs->vs_tpg);
   1738	vs->vs_tpg = NULL;
   1739	WARN_ON(vs->vs_events_nr);
   1740	mutex_unlock(&vs->dev.mutex);
   1741	mutex_unlock(&vhost_scsi_mutex);
   1742	return 0;
   1743
   1744err_tpg:
   1745	mutex_unlock(&tpg->tv_tpg_mutex);
   1746err_dev:
   1747	mutex_unlock(&vs->dev.mutex);
   1748	mutex_unlock(&vhost_scsi_mutex);
   1749	return ret;
   1750}
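
/*
 * Hedged summary of the teardown above: VHOST_SCSI_CLEAR_ENDPOINT must name
 * the same WWPN that was set; matching TPGs are unhooked from vs->vs_tpg and
 * their configfs dependency is dropped via target_undepend_item().  The
 * backend pointer is cleared on every virtqueue before vhost_scsi_flush()
 * runs, so in-flight commands drain before the per-vq command pools and the
 * old vs_tpg array are freed.
 */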
   1751
   1752static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
   1753{
   1754	struct vhost_virtqueue *vq;
   1755	int i;
   1756
   1757	if (features & ~VHOST_SCSI_FEATURES)
   1758		return -EOPNOTSUPP;
   1759
   1760	mutex_lock(&vs->dev.mutex);
   1761	if ((features & (1 << VHOST_F_LOG_ALL)) &&
   1762	    !vhost_log_access_ok(&vs->dev)) {
   1763		mutex_unlock(&vs->dev.mutex);
   1764		return -EFAULT;
   1765	}
   1766
   1767	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
   1768		vq = &vs->vqs[i].vq;
   1769		mutex_lock(&vq->mutex);
   1770		vq->acked_features = features;
   1771		mutex_unlock(&vq->mutex);
   1772	}
   1773	mutex_unlock(&vs->dev.mutex);
   1774	return 0;
   1775}
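
/*
 * Feature negotiation sketch (assumption: the generic vhost pattern applies
 * here; 'wanted' is a caller-side mask that is not part of this driver):
 *
 *	__u64 features;
 *
 *	ioctl(vhost_fd, VHOST_GET_FEATURES, &features);	// device offer (VHOST_SCSI_FEATURES)
 *	features &= wanted;				// keep only what the caller understands
 *	ioctl(vhost_fd, VHOST_SET_FEATURES, &features);	// handled by vhost_scsi_set_features()
 *
 * Bits outside VHOST_SCSI_FEATURES are rejected with -EOPNOTSUPP, and
 * VHOST_F_LOG_ALL additionally requires a valid log region.
 */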
   1776
   1777static int vhost_scsi_open(struct inode *inode, struct file *f)
   1778{
   1779	struct vhost_scsi *vs;
   1780	struct vhost_virtqueue **vqs;
   1781	int r = -ENOMEM, i;
   1782
   1783	vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
   1784	if (!vs)
   1785		goto err_vs;
   1786
   1787	vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
   1788	if (!vqs)
   1789		goto err_vqs;
   1790
   1791	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
   1792	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
   1793
   1794	vs->vs_events_nr = 0;
   1795	vs->vs_events_missed = false;
   1796
   1797	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
   1798	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
   1799	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
   1800	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
   1801	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
   1802		vqs[i] = &vs->vqs[i].vq;
   1803		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
   1804	}
   1805	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
   1806		       VHOST_SCSI_WEIGHT, 0, true, NULL);
   1807
   1808	vhost_scsi_init_inflight(vs, NULL);
   1809
   1810	f->private_data = vs;
   1811	return 0;
   1812
   1813err_vqs:
   1814	kvfree(vs);
   1815err_vs:
   1816	return r;
   1817}
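
/*
 * Hedged note: every open() of the character device creates an independent
 * vhost_scsi instance.  VHOST_SCSI_VQ_CTL is the control queue,
 * VHOST_SCSI_VQ_EVT the event queue, and VHOST_SCSI_VQ_IO onwards are the
 * request queues sharing vhost_scsi_handle_kick; inflight tracking is
 * initialised before the fd is returned, so a later flush is well defined
 * even if no endpoint was ever set.
 */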
   1818
   1819static int vhost_scsi_release(struct inode *inode, struct file *f)
   1820{
   1821	struct vhost_scsi *vs = f->private_data;
   1822	struct vhost_scsi_target t;
   1823
   1824	mutex_lock(&vs->dev.mutex);
   1825	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
   1826	mutex_unlock(&vs->dev.mutex);
   1827	vhost_scsi_clear_endpoint(vs, &t);
   1828	vhost_dev_stop(&vs->dev);
   1829	vhost_dev_cleanup(&vs->dev);
   1830	kfree(vs->dev.vqs);
   1831	kvfree(vs);
   1832	return 0;
   1833}
   1834
   1835static long
   1836vhost_scsi_ioctl(struct file *f,
   1837		 unsigned int ioctl,
   1838		 unsigned long arg)
   1839{
   1840	struct vhost_scsi *vs = f->private_data;
   1841	struct vhost_scsi_target backend;
   1842	void __user *argp = (void __user *)arg;
   1843	u64 __user *featurep = argp;
   1844	u32 __user *eventsp = argp;
   1845	u32 events_missed;
   1846	u64 features;
   1847	int r, abi_version = VHOST_SCSI_ABI_VERSION;
   1848	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
   1849
   1850	switch (ioctl) {
   1851	case VHOST_SCSI_SET_ENDPOINT:
   1852		if (copy_from_user(&backend, argp, sizeof backend))
   1853			return -EFAULT;
   1854		if (backend.reserved != 0)
   1855			return -EOPNOTSUPP;
   1856
   1857		return vhost_scsi_set_endpoint(vs, &backend);
   1858	case VHOST_SCSI_CLEAR_ENDPOINT:
   1859		if (copy_from_user(&backend, argp, sizeof backend))
   1860			return -EFAULT;
   1861		if (backend.reserved != 0)
   1862			return -EOPNOTSUPP;
   1863
   1864		return vhost_scsi_clear_endpoint(vs, &backend);
   1865	case VHOST_SCSI_GET_ABI_VERSION:
   1866		if (copy_to_user(argp, &abi_version, sizeof abi_version))
   1867			return -EFAULT;
   1868		return 0;
   1869	case VHOST_SCSI_SET_EVENTS_MISSED:
   1870		if (get_user(events_missed, eventsp))
   1871			return -EFAULT;
   1872		mutex_lock(&vq->mutex);
   1873		vs->vs_events_missed = events_missed;
   1874		mutex_unlock(&vq->mutex);
   1875		return 0;
   1876	case VHOST_SCSI_GET_EVENTS_MISSED:
   1877		mutex_lock(&vq->mutex);
   1878		events_missed = vs->vs_events_missed;
   1879		mutex_unlock(&vq->mutex);
   1880		if (put_user(events_missed, eventsp))
   1881			return -EFAULT;
   1882		return 0;
   1883	case VHOST_GET_FEATURES:
   1884		features = VHOST_SCSI_FEATURES;
   1885		if (copy_to_user(featurep, &features, sizeof features))
   1886			return -EFAULT;
   1887		return 0;
   1888	case VHOST_SET_FEATURES:
   1889		if (copy_from_user(&features, featurep, sizeof features))
   1890			return -EFAULT;
   1891		return vhost_scsi_set_features(vs, features);
   1892	default:
   1893		mutex_lock(&vs->dev.mutex);
   1894		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
   1895		/* TODO: flush backend after dev ioctl. */
   1896		if (r == -ENOIOCTLCMD)
   1897			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
   1898		mutex_unlock(&vs->dev.mutex);
   1899		return r;
   1900	}
   1901}
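
/*
 * Rough userspace bring-up sketch (not driver code; the WWPN is hypothetical,
 * error handling and the generic vring/memory setup are elided):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vhost.h>
 *
 *	int vhost_fd = open("/dev/vhost-scsi", O_RDWR);
 *	struct vhost_scsi_target backend;
 *
 *	memset(&backend, 0, sizeof(backend));
 *	strncpy(backend.vhost_wwpn, "naa.500140512345678",
 *		sizeof(backend.vhost_wwpn) - 1);
 *
 *	ioctl(vhost_fd, VHOST_SET_OWNER, NULL);
 *	// ... VHOST_SET_MEM_TABLE / VHOST_SET_VRING_* setup goes here ...
 *	ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &backend);
 *	// ... run the guest ...
 *	ioctl(vhost_fd, VHOST_SCSI_CLEAR_ENDPOINT, &backend);
 */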
   1902
   1903static const struct file_operations vhost_scsi_fops = {
   1904	.owner          = THIS_MODULE,
   1905	.release        = vhost_scsi_release,
   1906	.unlocked_ioctl = vhost_scsi_ioctl,
   1907	.compat_ioctl	= compat_ptr_ioctl,
   1908	.open           = vhost_scsi_open,
   1909	.llseek		= noop_llseek,
   1910};
   1911
   1912static struct miscdevice vhost_scsi_misc = {
   1913	MISC_DYNAMIC_MINOR,
   1914	"vhost-scsi",
   1915	&vhost_scsi_fops,
   1916};
   1917
   1918static int __init vhost_scsi_register(void)
   1919{
   1920	return misc_register(&vhost_scsi_misc);
   1921}
   1922
   1923static void vhost_scsi_deregister(void)
   1924{
   1925	misc_deregister(&vhost_scsi_misc);
   1926}
   1927
   1928static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
   1929{
   1930	switch (tport->tport_proto_id) {
   1931	case SCSI_PROTOCOL_SAS:
   1932		return "SAS";
   1933	case SCSI_PROTOCOL_FCP:
   1934		return "FCP";
   1935	case SCSI_PROTOCOL_ISCSI:
   1936		return "iSCSI";
   1937	default:
   1938		break;
   1939	}
   1940
   1941	return "Unknown";
   1942}
   1943
   1944static void
   1945vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
   1946		  struct se_lun *lun, bool plug)
   1947{
   1948
   1949	struct vhost_scsi *vs = tpg->vhost_scsi;
   1950	struct vhost_virtqueue *vq;
   1951	u32 reason;
   1952
   1953	if (!vs)
   1954		return;
   1955
   1956	mutex_lock(&vs->dev.mutex);
   1957
   1958	if (plug)
   1959		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
   1960	else
   1961		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
   1962
   1963	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
   1964	mutex_lock(&vq->mutex);
   1965	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
   1966		vhost_scsi_send_evt(vs, tpg, lun,
   1967				   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
   1968	mutex_unlock(&vq->mutex);
   1969	mutex_unlock(&vs->dev.mutex);
   1970}
   1971
   1972static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
   1973{
   1974	vhost_scsi_do_plug(tpg, lun, true);
   1975}
   1976
   1977static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
   1978{
   1979	vhost_scsi_do_plug(tpg, lun, false);
   1980}
   1981
   1982static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
   1983			       struct se_lun *lun)
   1984{
   1985	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
   1986				struct vhost_scsi_tpg, se_tpg);
   1987	struct vhost_scsi_tmf *tmf;
   1988
   1989	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
   1990	if (!tmf)
   1991		return -ENOMEM;
   1992	INIT_LIST_HEAD(&tmf->queue_entry);
   1993	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
   1994
   1995	mutex_lock(&vhost_scsi_mutex);
   1996
   1997	mutex_lock(&tpg->tv_tpg_mutex);
   1998	tpg->tv_tpg_port_count++;
   1999	list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
   2000	mutex_unlock(&tpg->tv_tpg_mutex);
   2001
   2002	vhost_scsi_hotplug(tpg, lun);
   2003
   2004	mutex_unlock(&vhost_scsi_mutex);
   2005
   2006	return 0;
   2007}
   2008
   2009static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
   2010				  struct se_lun *lun)
   2011{
   2012	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
   2013				struct vhost_scsi_tpg, se_tpg);
   2014	struct vhost_scsi_tmf *tmf;
   2015
   2016	mutex_lock(&vhost_scsi_mutex);
   2017
   2018	mutex_lock(&tpg->tv_tpg_mutex);
   2019	tpg->tv_tpg_port_count--;
   2020	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
   2021			       queue_entry);
   2022	list_del(&tmf->queue_entry);
   2023	kfree(tmf);
   2024	mutex_unlock(&tpg->tv_tpg_mutex);
   2025
   2026	vhost_scsi_hotunplug(tpg, lun);
   2027
   2028	mutex_unlock(&vhost_scsi_mutex);
   2029}
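
/*
 * Illustrative configfs usage for the link/unlink hooks above (paths assume
 * the standard target layout; the WWPN, backstore and link name are
 * hypothetical):
 *
 *	mkdir /sys/kernel/config/target/vhost/naa.500140512345678/tpgt_1/lun/lun_0
 *	ln -s /sys/kernel/config/target/core/iblock_0/disk0 \
 *	      /sys/kernel/config/target/vhost/naa.500140512345678/tpgt_1/lun/lun_0/disk0
 *
 * If the guest negotiated VIRTIO_SCSI_F_HOTPLUG, the link/unlink is reported
 * on the event queue as VIRTIO_SCSI_T_TRANSPORT_RESET with a RESCAN or
 * REMOVED reason.  A vhost_scsi_tmf is also allocated per linked port for
 * later task-management responses.
 */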
   2030
   2031static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
   2032		struct config_item *item, const char *page, size_t count)
   2033{
   2034	struct se_portal_group *se_tpg = attrib_to_tpg(item);
   2035	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
   2036				struct vhost_scsi_tpg, se_tpg);
   2037	unsigned long val;
   2038	int ret = kstrtoul(page, 0, &val);
   2039
   2040	if (ret) {
   2041		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
   2042		return ret;
   2043	}
   2044	if (val != 0 && val != 1 && val != 3) {
   2045		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
   2046		return -EINVAL;
   2047	}
   2048	tpg->tv_fabric_prot_type = val;
   2049
   2050	return count;
   2051}
   2052
   2053static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
   2054		struct config_item *item, char *page)
   2055{
   2056	struct se_portal_group *se_tpg = attrib_to_tpg(item);
   2057	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
   2058				struct vhost_scsi_tpg, se_tpg);
   2059
   2060	return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
   2061}
   2062
   2063CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
   2064
   2065static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
   2066	&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
   2067	NULL,
   2068};
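
/*
 * Illustrative example: this attribute appears in the TPG's attrib group,
 * e.g. (hypothetical WWPN):
 *
 *	echo 1 > /sys/kernel/config/target/vhost/naa.500140512345678/tpgt_1/attrib/fabric_prot_type
 *
 * Only the values 0, 1 and 3 accepted by the store handler above are valid.
 */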
   2069
   2070static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
   2071				const char *name)
   2072{
   2073	struct vhost_scsi_nexus *tv_nexus;
   2074
   2075	mutex_lock(&tpg->tv_tpg_mutex);
   2076	if (tpg->tpg_nexus) {
   2077		mutex_unlock(&tpg->tv_tpg_mutex);
   2078		pr_debug("tpg->tpg_nexus already exists\n");
   2079		return -EEXIST;
   2080	}
   2081
   2082	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
   2083	if (!tv_nexus) {
   2084		mutex_unlock(&tpg->tv_tpg_mutex);
   2085		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
   2086		return -ENOMEM;
   2087	}
   2088	/*
   2089	 * Since we are running in 'demo mode' this call will generate a
   2090	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
   2091	 * the SCSI Initiator port name of the passed configfs group 'name'.
   2092	 */
   2093	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
   2094					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
   2095					(unsigned char *)name, tv_nexus, NULL);
   2096	if (IS_ERR(tv_nexus->tvn_se_sess)) {
   2097		mutex_unlock(&tpg->tv_tpg_mutex);
   2098		kfree(tv_nexus);
   2099		return -ENOMEM;
   2100	}
   2101	tpg->tpg_nexus = tv_nexus;
   2102
   2103	mutex_unlock(&tpg->tv_tpg_mutex);
   2104	return 0;
   2105}
   2106
   2107static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
   2108{
   2109	struct se_session *se_sess;
   2110	struct vhost_scsi_nexus *tv_nexus;
   2111
   2112	mutex_lock(&tpg->tv_tpg_mutex);
   2113	tv_nexus = tpg->tpg_nexus;
   2114	if (!tv_nexus) {
   2115		mutex_unlock(&tpg->tv_tpg_mutex);
   2116		return -ENODEV;
   2117	}
   2118
   2119	se_sess = tv_nexus->tvn_se_sess;
   2120	if (!se_sess) {
   2121		mutex_unlock(&tpg->tv_tpg_mutex);
   2122		return -ENODEV;
   2123	}
   2124
   2125	if (tpg->tv_tpg_port_count != 0) {
   2126		mutex_unlock(&tpg->tv_tpg_mutex);
   2127		pr_err("Unable to remove TCM_vhost I_T Nexus with"
   2128			" active TPG port count: %d\n",
   2129			tpg->tv_tpg_port_count);
   2130		return -EBUSY;
   2131	}
   2132
   2133	if (tpg->tv_tpg_vhost_count != 0) {
   2134		mutex_unlock(&tpg->tv_tpg_mutex);
   2135		pr_err("Unable to remove TCM_vhost I_T Nexus with"
   2136			" active TPG vhost count: %d\n",
   2137			tpg->tv_tpg_vhost_count);
   2138		return -EBUSY;
   2139	}
   2140
   2141	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
   2142		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
   2143		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
   2144
   2145	/*
   2146	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
   2147	 */
   2148	target_remove_session(se_sess);
   2149	tpg->tpg_nexus = NULL;
   2150	mutex_unlock(&tpg->tv_tpg_mutex);
   2151
   2152	kfree(tv_nexus);
   2153	return 0;
   2154}
   2155
   2156static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
   2157{
   2158	struct se_portal_group *se_tpg = to_tpg(item);
   2159	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
   2160				struct vhost_scsi_tpg, se_tpg);
   2161	struct vhost_scsi_nexus *tv_nexus;
   2162	ssize_t ret;
   2163
   2164	mutex_lock(&tpg->tv_tpg_mutex);
   2165	tv_nexus = tpg->tpg_nexus;
   2166	if (!tv_nexus) {
   2167		mutex_unlock(&tpg->tv_tpg_mutex);
   2168		return -ENODEV;
   2169	}
   2170	ret = snprintf(page, PAGE_SIZE, "%s\n",
   2171			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
   2172	mutex_unlock(&tpg->tv_tpg_mutex);
   2173
   2174	return ret;
   2175}
   2176
   2177static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
   2178		const char *page, size_t count)
   2179{
   2180	struct se_portal_group *se_tpg = to_tpg(item);
   2181	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
   2182				struct vhost_scsi_tpg, se_tpg);
   2183	struct vhost_scsi_tport *tport_wwn = tpg->tport;
   2184	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
   2185	int ret;
   2186	/*
   2187	 * Shut down the active I_T nexus if 'NULL' is passed.
   2188	 */
   2189	if (!strncmp(page, "NULL", 4)) {
   2190		ret = vhost_scsi_drop_nexus(tpg);
   2191		return (!ret) ? count : ret;
   2192	}
   2193	/*
   2194	 * Otherwise make sure the passed virtual Initiator port WWN matches
   2195	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
   2196	 * vhost_scsi_make_nexus().
   2197	 */
   2198	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
   2199		pr_err("Emulated NAA SAS Address: %s, exceeds"
   2200				" max: %d\n", page, VHOST_SCSI_NAMELEN);
   2201		return -EINVAL;
   2202	}
   2203	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
   2204
   2205	ptr = strstr(i_port, "naa.");
   2206	if (ptr) {
   2207		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
   2208			pr_err("Passed SAS Initiator Port %s does not"
   2209				" match target port protoid: %s\n", i_port,
   2210				vhost_scsi_dump_proto_id(tport_wwn));
   2211			return -EINVAL;
   2212		}
   2213		port_ptr = &i_port[0];
   2214		goto check_newline;
   2215	}
   2216	ptr = strstr(i_port, "fc.");
   2217	if (ptr) {
   2218		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
   2219			pr_err("Passed FCP Initiator Port %s does not"
   2220				" match target port protoid: %s\n", i_port,
   2221				vhost_scsi_dump_proto_id(tport_wwn));
   2222			return -EINVAL;
   2223		}
   2224		port_ptr = &i_port[3]; /* Skip over "fc." */
   2225		goto check_newline;
   2226	}
   2227	ptr = strstr(i_port, "iqn.");
   2228	if (ptr) {
   2229		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
   2230			pr_err("Passed iSCSI Initiator Port %s does not"
   2231				" match target port protoid: %s\n", i_port,
   2232				vhost_scsi_dump_proto_id(tport_wwn));
   2233			return -EINVAL;
   2234		}
   2235		port_ptr = &i_port[0];
   2236		goto check_newline;
   2237	}
   2238	pr_err("Unable to locate prefix for emulated Initiator Port:"
   2239			" %s\n", i_port);
   2240	return -EINVAL;
   2241	/*
   2242	 * Clear any trailing newline for the NAA WWN
   2243	 */
   2244check_newline:
   2245	if (i_port[strlen(i_port)-1] == '\n')
   2246		i_port[strlen(i_port)-1] = '\0';
   2247
   2248	ret = vhost_scsi_make_nexus(tpg, port_ptr);
   2249	if (ret < 0)
   2250		return ret;
   2251
   2252	return count;
   2253}
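
/*
 * Usage sketch for the nexus attribute (both WWNs hypothetical; the initiator
 * prefix must match the tport protocol as checked above):
 *
 *	# create the I_T nexus
 *	echo naa.60014055cc1234a0 > \
 *		/sys/kernel/config/target/vhost/naa.500140512345678/tpgt_1/nexus
 *
 *	# shut it down again
 *	echo NULL > /sys/kernel/config/target/vhost/naa.500140512345678/tpgt_1/nexus
 */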
   2254
   2255CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
   2256
   2257static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
   2258	&vhost_scsi_tpg_attr_nexus,
   2259	NULL,
   2260};
   2261
   2262static struct se_portal_group *
   2263vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
   2264{
   2265	struct vhost_scsi_tport *tport = container_of(wwn,
   2266			struct vhost_scsi_tport, tport_wwn);
   2267
   2268	struct vhost_scsi_tpg *tpg;
   2269	u16 tpgt;
   2270	int ret;
   2271
   2272	if (strstr(name, "tpgt_") != name)
   2273		return ERR_PTR(-EINVAL);
   2274	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
   2275		return ERR_PTR(-EINVAL);
   2276
   2277	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
   2278	if (!tpg) {
   2279		pr_err("Unable to allocate struct vhost_scsi_tpg");
   2280		return ERR_PTR(-ENOMEM);
   2281	}
   2282	mutex_init(&tpg->tv_tpg_mutex);
   2283	INIT_LIST_HEAD(&tpg->tv_tpg_list);
   2284	INIT_LIST_HEAD(&tpg->tmf_queue);
   2285	tpg->tport = tport;
   2286	tpg->tport_tpgt = tpgt;
   2287
   2288	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
   2289	if (ret < 0) {
   2290		kfree(tpg);
   2291		return NULL;
   2292	}
   2293	mutex_lock(&vhost_scsi_mutex);
   2294	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
   2295	mutex_unlock(&vhost_scsi_mutex);
   2296
   2297	return &tpg->se_tpg;
   2298}
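
/*
 * Usage sketch: TPG directories are created by userspace under an existing
 * tport and must be named "tpgt_<n>" with n below VHOST_SCSI_MAX_TARGET, e.g.
 * (hypothetical WWPN):
 *
 *	mkdir /sys/kernel/config/target/vhost/naa.500140512345678/tpgt_1
 */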
   2299
   2300static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
   2301{
   2302	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
   2303				struct vhost_scsi_tpg, se_tpg);
   2304
   2305	mutex_lock(&vhost_scsi_mutex);
   2306	list_del(&tpg->tv_tpg_list);
   2307	mutex_unlock(&vhost_scsi_mutex);
   2308	/*
   2309	 * Release the virtual I_T Nexus for this vhost TPG
   2310	 */
   2311	vhost_scsi_drop_nexus(tpg);
   2312	/*
   2313	 * Deregister the se_tpg from TCM.
   2314	 */
   2315	core_tpg_deregister(se_tpg);
   2316	kfree(tpg);
   2317}
   2318
   2319static struct se_wwn *
   2320vhost_scsi_make_tport(struct target_fabric_configfs *tf,
   2321		     struct config_group *group,
   2322		     const char *name)
   2323{
   2324	struct vhost_scsi_tport *tport;
   2325	char *ptr;
   2326	u64 wwpn = 0;
   2327	int off = 0;
   2328
   2329	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
   2330		return ERR_PTR(-EINVAL); */
   2331
   2332	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
   2333	if (!tport) {
   2334		pr_err("Unable to allocate struct vhost_scsi_tport");
   2335		return ERR_PTR(-ENOMEM);
   2336	}
   2337	tport->tport_wwpn = wwpn;
   2338	/*
   2339	 * Determine the emulated Protocol Identifier and Target Port Name
   2340	 * based on the incoming configfs directory name.
   2341	 */
   2342	ptr = strstr(name, "naa.");
   2343	if (ptr) {
   2344		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
   2345		goto check_len;
   2346	}
   2347	ptr = strstr(name, "fc.");
   2348	if (ptr) {
   2349		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
   2350		off = 3; /* Skip over "fc." */
   2351		goto check_len;
   2352	}
   2353	ptr = strstr(name, "iqn.");
   2354	if (ptr) {
   2355		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
   2356		goto check_len;
   2357	}
   2358
   2359	pr_err("Unable to locate prefix for emulated Target Port:"
   2360			" %s\n", name);
   2361	kfree(tport);
   2362	return ERR_PTR(-EINVAL);
   2363
   2364check_len:
   2365	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
   2366		pr_err("Emulated %s Address: %s, exceeds"
   2367			" max: %d\n", vhost_scsi_dump_proto_id(tport), name,
   2368			VHOST_SCSI_NAMELEN);
   2369		kfree(tport);
   2370		return ERR_PTR(-EINVAL);
   2371	}
   2372	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
   2373
   2374	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
   2375		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
   2376
   2377	return &tport->tport_wwn;
   2378}
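
/*
 * Usage sketch: the tport comes from creating the WWN directory under the
 * fabric root registered as "vhost"; the name prefix selects the protocol
 * exactly as checked above (naa. -> SAS, fc. -> FCP with the prefix stripped,
 * iqn. -> iSCSI).  With a hypothetical SAS WWPN:
 *
 *	mkdir -p /sys/kernel/config/target/vhost/naa.500140512345678
 */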
   2379
   2380static void vhost_scsi_drop_tport(struct se_wwn *wwn)
   2381{
   2382	struct vhost_scsi_tport *tport = container_of(wwn,
   2383				struct vhost_scsi_tport, tport_wwn);
   2384
   2385	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
   2386		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
   2387		tport->tport_name);
   2388
   2389	kfree(tport);
   2390}
   2391
   2392static ssize_t
   2393vhost_scsi_wwn_version_show(struct config_item *item, char *page)
   2394{
   2395	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
   2396		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
   2397		utsname()->machine);
   2398}
   2399
   2400CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
   2401
   2402static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
   2403	&vhost_scsi_wwn_attr_version,
   2404	NULL,
   2405};
   2406
   2407static const struct target_core_fabric_ops vhost_scsi_ops = {
   2408	.module				= THIS_MODULE,
   2409	.fabric_name			= "vhost",
   2410	.max_data_sg_nents		= VHOST_SCSI_PREALLOC_SGLS,
   2411	.tpg_get_wwn			= vhost_scsi_get_fabric_wwn,
   2412	.tpg_get_tag			= vhost_scsi_get_tpgt,
   2413	.tpg_check_demo_mode		= vhost_scsi_check_true,
   2414	.tpg_check_demo_mode_cache	= vhost_scsi_check_true,
   2415	.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
   2416	.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
   2417	.tpg_check_prot_fabric_only	= vhost_scsi_check_prot_fabric_only,
   2418	.tpg_get_inst_index		= vhost_scsi_tpg_get_inst_index,
   2419	.release_cmd			= vhost_scsi_release_cmd,
   2420	.check_stop_free		= vhost_scsi_check_stop_free,
   2421	.sess_get_index			= vhost_scsi_sess_get_index,
   2422	.sess_get_initiator_sid		= NULL,
   2423	.write_pending			= vhost_scsi_write_pending,
   2424	.set_default_node_attributes	= vhost_scsi_set_default_node_attrs,
   2425	.get_cmd_state			= vhost_scsi_get_cmd_state,
   2426	.queue_data_in			= vhost_scsi_queue_data_in,
   2427	.queue_status			= vhost_scsi_queue_status,
   2428	.queue_tm_rsp			= vhost_scsi_queue_tm_rsp,
   2429	.aborted_task			= vhost_scsi_aborted_task,
   2430	/*
   2431	 * Setup callers for generic logic in target_core_fabric_configfs.c
   2432	 */
   2433	.fabric_make_wwn		= vhost_scsi_make_tport,
   2434	.fabric_drop_wwn		= vhost_scsi_drop_tport,
   2435	.fabric_make_tpg		= vhost_scsi_make_tpg,
   2436	.fabric_drop_tpg		= vhost_scsi_drop_tpg,
   2437	.fabric_post_link		= vhost_scsi_port_link,
   2438	.fabric_pre_unlink		= vhost_scsi_port_unlink,
   2439
   2440	.tfc_wwn_attrs			= vhost_scsi_wwn_attrs,
   2441	.tfc_tpg_base_attrs		= vhost_scsi_tpg_attrs,
   2442	.tfc_tpg_attrib_attrs		= vhost_scsi_tpg_attrib_attrs,
   2443};
   2444
   2445static int __init vhost_scsi_init(void)
   2446{
   2447	int ret = -ENOMEM;
   2448
   2449	pr_debug("TCM_VHOST fabric module %s on %s/%s"
   2450		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
   2451		utsname()->machine);
   2452
   2453	ret = vhost_scsi_register();
   2454	if (ret < 0)
   2455		goto out;
   2456
   2457	ret = target_register_template(&vhost_scsi_ops);
   2458	if (ret < 0)
   2459		goto out_vhost_scsi_deregister;
   2460
   2461	return 0;
   2462
   2463out_vhost_scsi_deregister:
   2464	vhost_scsi_deregister();
   2465out:
   2466	return ret;
   2467}
   2468
   2469static void vhost_scsi_exit(void)
   2470{
   2471	target_unregister_template(&vhost_scsi_ops);
   2472	vhost_scsi_deregister();
   2473}
   2474
   2475MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
   2476MODULE_ALIAS("tcm_vhost");
   2477MODULE_LICENSE("GPL");
   2478module_init(vhost_scsi_init);
   2479module_exit(vhost_scsi_exit);