cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xen-scsiback.c (49542B)


/*
 * Xen SCSI backend driver
 *
 * Copyright (c) 2008, FUJITSU Limited
 *
 * Based on the blkback driver code.
 * Adaptation to kernel target core infrastructure taken from vhost/scsi.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen-pvscsi: " fmt

#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>

#include <generated/utsrelease.h>

#include <scsi/scsi_host.h> /* SG_ALL */

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include <asm/hypervisor.h>

#include <xen/xen.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/page.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/vscsiif.h>

#define VSCSI_VERSION	"v0.1"
#define VSCSI_NAMELEN	32

struct ids_tuple {
	unsigned int hst;		/* host    */
	unsigned int chn;		/* channel */
	unsigned int tgt;		/* target  */
	unsigned int lun;		/* LUN     */
};

struct v2p_entry {
	struct ids_tuple v;		/* translate from */
	struct scsiback_tpg *tpg;	/* translate to   */
	unsigned int lun;
	struct kref kref;
	struct list_head l;
};

struct vscsibk_info {
	struct xenbus_device *dev;

	domid_t domid;
	unsigned int irq;

	struct vscsiif_back_ring ring;

	spinlock_t ring_lock;
	atomic_t nr_unreplied_reqs;

	spinlock_t v2p_lock;
	struct list_head v2p_entry_lists;

	wait_queue_head_t waiting_to_free;

	struct gnttab_page_cache free_pages;
};

/* theoretical maximum of grants for one request */
#define VSCSI_MAX_GRANTS	(SG_ALL + VSCSIIF_SG_TABLESIZE)
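/*
 * SG_ALL bounds the number of data pages per request; up to
 * VSCSIIF_SG_TABLESIZE further grants are needed for the pages holding
 * the indirect segment lists when the frontend uses VSCSIIF_SG_GRANT.
 */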

/*
 * VSCSI_GRANT_BATCH is the maximum number of grants to be processed in one
 * call to map/unmap grants. Don't choose it too large, as there are arrays
 * with VSCSI_GRANT_BATCH elements allocated on the stack.
 */
#define VSCSI_GRANT_BATCH	16

struct vscsibk_pend {
	uint16_t rqid;

	uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
	uint8_t cmd_len;

	uint8_t sc_data_direction;
	uint16_t n_sg;		/* real length of SG list */
	uint16_t n_grants;	/* SG pages and potentially SG list */
	uint32_t data_len;
	uint32_t result;

	struct vscsibk_info *info;
	struct v2p_entry *v2p;
	struct scatterlist *sgl;

	uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];

	grant_handle_t grant_handles[VSCSI_MAX_GRANTS];
	struct page *pages[VSCSI_MAX_GRANTS];

	struct se_cmd se_cmd;

	struct completion tmr_done;
};

#define VSCSI_DEFAULT_SESSION_TAGS	128

struct scsiback_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};

struct scsiback_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for pvscsi Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for pvscsi Target port */
	char tport_name[VSCSI_NAMELEN];
	/* Returned by scsiback_make_tport() */
	struct se_wwn tport_wwn;
};

struct scsiback_tpg {
	/* scsiback port target portal group tag for TCM */
	u16 tport_tpgt;
	/* track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* xen-pvscsi references to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_fe_count;
	/* list for scsiback_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM pvscsi I_T Nexus for this TPG endpoint */
	struct scsiback_nexus *tpg_nexus;
	/* Pointer back to scsiback_tport */
	struct scsiback_tport *tport;
	/* Returned by scsiback_make_tpg() */
	struct se_portal_group se_tpg;
	/* alias used in xenstore */
	char param_alias[VSCSI_NAMELEN];
	/* list of info structures related to this target portal group */
	struct list_head info_list;
};

#define SCSIBACK_INVALID_HANDLE (~0)

static bool log_print_stat;
module_param(log_print_stat, bool, 0644);

static int scsiback_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, scsiback_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in backend buffer");

/* Global mutex to protect scsiback TPG list */
static DEFINE_MUTEX(scsiback_mutex);
static LIST_HEAD(scsiback_list);

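/*
 * Each request in flight holds a reference on the backend; the final
 * scsiback_put() wakes scsiback_disconnect(), which waits for
 * nr_unreplied_reqs to drain before tearing down the ring.
 */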
static void scsiback_get(struct vscsibk_info *info)
{
	atomic_inc(&info->nr_unreplied_reqs);
}

static void scsiback_put(struct vscsibk_info *info)
{
	if (atomic_dec_and_test(&info->nr_unreplied_reqs))
		wake_up(&info->waiting_to_free);
}

static unsigned long vaddr_page(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	return (unsigned long)pfn_to_kaddr(pfn);
}

static unsigned long vaddr(struct vscsibk_pend *req, int seg)
{
	return vaddr_page(req->pages[seg]);
}

static void scsiback_print_status(char *sense_buffer, int errors,
					struct vscsibk_pend *pending_req)
{
	struct scsiback_tpg *tpg = pending_req->v2p->tpg;

	pr_err("[%s:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x\n",
	       tpg->tport->tport_name, pending_req->v2p->lun,
	       pending_req->cmnd[0], errors & 0xff, COMMAND_COMPLETE,
	       host_byte(errors));
}

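/*
 * Unmap all grants of a request and return the backing pages to the
 * per-backend page cache. Unmaps are issued in batches of
 * VSCSI_GRANT_BATCH to keep the on-stack arrays small.
 */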
static void scsiback_fast_flush_area(struct vscsibk_pend *req)
{
	struct gnttab_unmap_grant_ref unmap[VSCSI_GRANT_BATCH];
	struct page *pages[VSCSI_GRANT_BATCH];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int err;

	kfree(req->sgl);
	req->sgl = NULL;
	req->n_sg = 0;

	if (!req->n_grants)
		return;

	for (i = 0; i < req->n_grants; i++) {
		handle = req->grant_handles[i];
		if (handle == SCSIBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
		pages[invcount] = req->pages[i];
		put_page(pages[invcount]);
		invcount++;
		if (invcount < VSCSI_GRANT_BATCH)
			continue;
		err = gnttab_unmap_refs(unmap, NULL, pages, invcount);
		BUG_ON(err);
		invcount = 0;
	}

	if (invcount) {
		err = gnttab_unmap_refs(unmap, NULL, pages, invcount);
		BUG_ON(err);
	}

	gnttab_page_cache_put(&req->info->free_pages, req->pages,
			      req->n_grants);
	req->n_grants = 0;
}

static void scsiback_free_translation_entry(struct kref *kref)
{
	struct v2p_entry *entry = container_of(kref, struct v2p_entry, kref);
	struct scsiback_tpg *tpg = entry->tpg;

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_fe_count--;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(entry);
}

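/*
 * Translate the Linux host byte of a SCSI result into the stable
 * XEN_VSCSIIF_RSLT_HOST_* encoding shared with the frontend: the host
 * status lands in bits 16-23, the low 16 bits pass through unchanged.
 */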
static int32_t scsiback_result(int32_t result)
{
	int32_t host_status;

	switch (XEN_VSCSIIF_RSLT_HOST(result)) {
	case DID_OK:
		host_status = XEN_VSCSIIF_RSLT_HOST_OK;
		break;
	case DID_NO_CONNECT:
		host_status = XEN_VSCSIIF_RSLT_HOST_NO_CONNECT;
		break;
	case DID_BUS_BUSY:
		host_status = XEN_VSCSIIF_RSLT_HOST_BUS_BUSY;
		break;
	case DID_TIME_OUT:
		host_status = XEN_VSCSIIF_RSLT_HOST_TIME_OUT;
		break;
	case DID_BAD_TARGET:
		host_status = XEN_VSCSIIF_RSLT_HOST_BAD_TARGET;
		break;
	case DID_ABORT:
		host_status = XEN_VSCSIIF_RSLT_HOST_ABORT;
		break;
	case DID_PARITY:
		host_status = XEN_VSCSIIF_RSLT_HOST_PARITY;
		break;
	case DID_ERROR:
		host_status = XEN_VSCSIIF_RSLT_HOST_ERROR;
		break;
	case DID_RESET:
		host_status = XEN_VSCSIIF_RSLT_HOST_RESET;
		break;
	case DID_BAD_INTR:
		host_status = XEN_VSCSIIF_RSLT_HOST_BAD_INTR;
		break;
	case DID_PASSTHROUGH:
		host_status = XEN_VSCSIIF_RSLT_HOST_PASSTHROUGH;
		break;
	case DID_SOFT_ERROR:
		host_status = XEN_VSCSIIF_RSLT_HOST_SOFT_ERROR;
		break;
	case DID_IMM_RETRY:
		host_status = XEN_VSCSIIF_RSLT_HOST_IMM_RETRY;
		break;
	case DID_REQUEUE:
		host_status = XEN_VSCSIIF_RSLT_HOST_REQUEUE;
		break;
	case DID_TRANSPORT_DISRUPTED:
		host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_DISRUPTED;
		break;
	case DID_TRANSPORT_FAILFAST:
		host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST;
		break;
	case DID_TARGET_FAILURE:
		host_status = XEN_VSCSIIF_RSLT_HOST_TARGET_FAILURE;
		break;
	case DID_NEXUS_FAILURE:
		host_status = XEN_VSCSIIF_RSLT_HOST_NEXUS_FAILURE;
		break;
	case DID_ALLOC_FAILURE:
		host_status = XEN_VSCSIIF_RSLT_HOST_ALLOC_FAILURE;
		break;
	case DID_MEDIUM_ERROR:
		host_status = XEN_VSCSIIF_RSLT_HOST_MEDIUM_ERROR;
		break;
	case DID_TRANSPORT_MARGINAL:
		host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL;
		break;
	default:
		host_status = XEN_VSCSIIF_RSLT_HOST_ERROR;
		break;
	}

	return (host_status << 16) | (result & 0x00ffff);
}

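/*
 * Queue a response on the shared ring under ring_lock, copying back any
 * valid sense data, and kick the frontend's event channel if the ring
 * macros say a notification is required.
 */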
static void scsiback_send_response(struct vscsibk_info *info,
			char *sense_buffer, int32_t result, uint32_t resid,
			uint16_t rqid)
{
	struct vscsiif_response *ring_res;
	int notify;
	struct scsi_sense_hdr sshdr;
	unsigned long flags;
	unsigned len;

	spin_lock_irqsave(&info->ring_lock, flags);

	ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
	info->ring.rsp_prod_pvt++;

	ring_res->rslt   = scsiback_result(result);
	ring_res->rqid   = rqid;

	if (sense_buffer != NULL &&
	    scsi_normalize_sense(sense_buffer, VSCSIIF_SENSE_BUFFERSIZE,
				 &sshdr)) {
		len = min_t(unsigned, 8 + sense_buffer[7],
			    VSCSIIF_SENSE_BUFFERSIZE);
		memcpy(ring_res->sense_buffer, sense_buffer, len);
		ring_res->sense_len = len;
	} else {
		ring_res->sense_len = 0;
	}

	ring_res->residual_len = resid;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
	spin_unlock_irqrestore(&info->ring_lock, flags);

	if (notify)
		notify_remote_via_irq(info->irq);
}

static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
			uint32_t resid, struct vscsibk_pend *pending_req)
{
	scsiback_send_response(pending_req->info, sense_buffer, result,
			       resid, pending_req->rqid);

	if (pending_req->v2p)
		kref_put(&pending_req->v2p->kref,
			 scsiback_free_translation_entry);
}

static void scsiback_cmd_done(struct vscsibk_pend *pending_req)
{
	struct vscsibk_info *info = pending_req->info;
	unsigned char *sense_buffer;
	unsigned int resid;
	int errors;

	sense_buffer = pending_req->sense_buffer;
	resid        = pending_req->se_cmd.residual_count;
	errors       = pending_req->result;

	if (errors && log_print_stat)
		scsiback_print_status(sense_buffer, errors, pending_req);

	scsiback_fast_flush_area(pending_req);
	scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
	scsiback_put(info);
	/*
	 * Drop the extra KREF_ACK reference taken via TARGET_SCF_ACK_KREF
	 * in scsiback_cmd_exec() ahead of scsiback_check_stop_free() ->
	 * transport_generic_free_cmd() dropping the final se_cmd->cmd_kref.
	 */
	target_put_sess_cmd(&pending_req->se_cmd);
}

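/*
 * Hand the command over to the target core. TARGET_SCF_ACK_KREF makes
 * the core take an extra se_cmd reference, which scsiback_cmd_done()
 * drops via target_put_sess_cmd() once the response has been sent.
 */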
static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
{
	struct se_cmd *se_cmd = &pending_req->se_cmd;
	struct se_session *sess = pending_req->v2p->tpg->tpg_nexus->tvn_se_sess;

	scsiback_get(pending_req->info);
	se_cmd->tag = pending_req->rqid;
	target_init_cmd(se_cmd, sess, pending_req->sense_buffer,
			pending_req->v2p->lun, pending_req->data_len, 0,
			pending_req->sc_data_direction, TARGET_SCF_ACK_KREF);

	if (target_submit_prep(se_cmd, pending_req->cmnd, pending_req->sgl,
			       pending_req->n_sg, NULL, 0, NULL, 0, GFP_KERNEL))
		return;

	target_submit(se_cmd);
}

static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
	struct page **pg, grant_handle_t *grant, int cnt)
{
	int err, i;

	if (!cnt)
		return 0;

	err = gnttab_map_refs(map, NULL, pg, cnt);
	for (i = 0; i < cnt; i++) {
		if (unlikely(map[i].status != GNTST_okay)) {
			pr_err("invalid buffer -- could not remap it\n");
			map[i].handle = SCSIBACK_INVALID_HANDLE;
			if (!err)
				err = -ENOMEM;
		} else {
			get_page(pg[i]);
		}
		grant[i] = map[i].handle;
	}
	return err;
}

static int scsiback_gnttab_data_map_list(struct vscsibk_pend *pending_req,
			struct scsiif_request_segment *seg, struct page **pg,
			grant_handle_t *grant, int cnt, u32 flags)
{
	int mapcount = 0, i, err = 0;
	struct gnttab_map_grant_ref map[VSCSI_GRANT_BATCH];
	struct vscsibk_info *info = pending_req->info;

	for (i = 0; i < cnt; i++) {
		if (gnttab_page_cache_get(&info->free_pages, pg + mapcount)) {
			gnttab_page_cache_put(&info->free_pages, pg, mapcount);
			pr_err("no grant page\n");
			return -ENOMEM;
		}
		gnttab_set_map_op(&map[mapcount], vaddr_page(pg[mapcount]),
				  flags, seg[i].gref, info->domid);
		mapcount++;
		if (mapcount < VSCSI_GRANT_BATCH)
			continue;
		err = scsiback_gnttab_data_map_batch(map, pg, grant, mapcount);
		pg += mapcount;
		grant += mapcount;
		pending_req->n_grants += mapcount;
		if (err)
			return err;
		mapcount = 0;
	}
	err = scsiback_gnttab_data_map_batch(map, pg, grant, mapcount);
	pending_req->n_grants += mapcount;
	return err;
}

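/*
 * Map the data pages granted by the frontend and build the scatterlist
 * for the target core. Segments either sit directly in the ring request
 * or, if VSCSIIF_SG_GRANT is set, in separately granted pages that each
 * hold an array of struct scsiif_request_segment.
 */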
static int scsiback_gnttab_data_map(struct vscsiif_request *ring_req,
					struct vscsibk_pend *pending_req)
{
	u32 flags;
	int i, err, n_segs, i_seg = 0;
	struct page **pg;
	struct scsiif_request_segment *seg;
	unsigned long end_seg = 0;
	unsigned int nr_segments = (unsigned int)ring_req->nr_segments;
	unsigned int nr_sgl = 0;
	struct scatterlist *sg;
	grant_handle_t *grant;

	pending_req->n_sg = 0;
	pending_req->n_grants = 0;
	pending_req->data_len = 0;

	nr_segments &= ~VSCSIIF_SG_GRANT;
	if (!nr_segments)
		return 0;

	if (nr_segments > VSCSIIF_SG_TABLESIZE) {
		pr_debug("invalid parameter nr_seg = %d\n",
			ring_req->nr_segments);
		return -EINVAL;
	}

	if (ring_req->nr_segments & VSCSIIF_SG_GRANT) {
		err = scsiback_gnttab_data_map_list(pending_req, ring_req->seg,
			pending_req->pages, pending_req->grant_handles,
			nr_segments, GNTMAP_host_map | GNTMAP_readonly);
		if (err)
			return err;
		nr_sgl = nr_segments;
		nr_segments = 0;
		for (i = 0; i < nr_sgl; i++) {
			n_segs = ring_req->seg[i].length /
				 sizeof(struct scsiif_request_segment);
			if ((unsigned)ring_req->seg[i].offset +
			    (unsigned)ring_req->seg[i].length > PAGE_SIZE ||
			    n_segs * sizeof(struct scsiif_request_segment) !=
			    ring_req->seg[i].length)
				return -EINVAL;
			nr_segments += n_segs;
		}
		if (nr_segments > SG_ALL) {
			pr_debug("invalid nr_seg = %d\n", nr_segments);
			return -EINVAL;
		}
	}

	/* free of (sgl) in fast_flush_area() */
	pending_req->sgl = kmalloc_array(nr_segments,
					sizeof(struct scatterlist), GFP_KERNEL);
	if (!pending_req->sgl)
		return -ENOMEM;

	sg_init_table(pending_req->sgl, nr_segments);
	pending_req->n_sg = nr_segments;

	flags = GNTMAP_host_map;
	if (pending_req->sc_data_direction == DMA_TO_DEVICE)
		flags |= GNTMAP_readonly;

	pg = pending_req->pages + nr_sgl;
	grant = pending_req->grant_handles + nr_sgl;
	if (!nr_sgl) {
		seg = ring_req->seg;
		err = scsiback_gnttab_data_map_list(pending_req, seg,
			pg, grant, nr_segments, flags);
		if (err)
			return err;
	} else {
		for (i = 0; i < nr_sgl; i++) {
			seg = (struct scsiif_request_segment *)(
			      vaddr(pending_req, i) + ring_req->seg[i].offset);
			n_segs = ring_req->seg[i].length /
				 sizeof(struct scsiif_request_segment);
			err = scsiback_gnttab_data_map_list(pending_req, seg,
				pg, grant, n_segs, flags);
			if (err)
				return err;
			pg += n_segs;
			grant += n_segs;
		}
		end_seg = vaddr(pending_req, 0) + ring_req->seg[0].offset;
		seg = (struct scsiif_request_segment *)end_seg;
		end_seg += ring_req->seg[0].length;
		pg = pending_req->pages + nr_sgl;
	}

	for_each_sg(pending_req->sgl, sg, nr_segments, i) {
		sg_set_page(sg, pg[i], seg->length, seg->offset);
		pending_req->data_len += seg->length;
		seg++;
		if (nr_sgl && (unsigned long)seg >= end_seg) {
			i_seg++;
			end_seg = vaddr(pending_req, i_seg) +
				  ring_req->seg[i_seg].offset;
			seg = (struct scsiif_request_segment *)end_seg;
			end_seg += ring_req->seg[i_seg].length;
		}
		if (sg->offset >= PAGE_SIZE ||
		    sg->length > PAGE_SIZE ||
		    sg->offset + sg->length > PAGE_SIZE)
			return -EINVAL;
	}

	return 0;
}

static void scsiback_disconnect(struct vscsibk_info *info)
{
	wait_event(info->waiting_to_free,
		atomic_read(&info->nr_unreplied_reqs) == 0);

	unbind_from_irqhandler(info->irq, info);
	info->irq = 0;
	xenbus_unmap_ring_vfree(info->dev, info->ring.sring);
}

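/*
 * Run a task management request (abort / LUN reset) through the target
 * core and wait for scsiback_queue_tm_rsp() to complete tmr_done before
 * reporting success or failure to the frontend.
 */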
static void scsiback_device_action(struct vscsibk_pend *pending_req,
	enum tcm_tmreq_table act, int tag)
{
	struct scsiback_tpg *tpg = pending_req->v2p->tpg;
	struct scsiback_nexus *nexus = tpg->tpg_nexus;
	struct se_cmd *se_cmd = &pending_req->se_cmd;
	u64 unpacked_lun = pending_req->v2p->lun;
	int rc, err = XEN_VSCSIIF_RSLT_RESET_FAILED;

	init_completion(&pending_req->tmr_done);

	rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess,
			       &pending_req->sense_buffer[0],
			       unpacked_lun, NULL, act, GFP_KERNEL,
			       tag, TARGET_SCF_ACK_KREF);
	if (rc)
		goto err;

	wait_for_completion(&pending_req->tmr_done);

	err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
		XEN_VSCSIIF_RSLT_RESET_SUCCESS : XEN_VSCSIIF_RSLT_RESET_FAILED;

	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
	transport_generic_free_cmd(&pending_req->se_cmd, 0);
	return;

err:
	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
}

/*
  Perform virtual to physical translation
*/
static struct v2p_entry *scsiback_do_translation(struct vscsibk_info *info,
			struct ids_tuple *v)
{
	struct v2p_entry *entry;
	struct list_head *head = &(info->v2p_entry_lists);
	unsigned long flags;

	spin_lock_irqsave(&info->v2p_lock, flags);
	list_for_each_entry(entry, head, l) {
		if ((entry->v.chn == v->chn) &&
		    (entry->v.tgt == v->tgt) &&
		    (entry->v.lun == v->lun)) {
			kref_get(&entry->kref);
			goto out;
		}
	}
	entry = NULL;

out:
	spin_unlock_irqrestore(&info->v2p_lock, flags);
	return entry;
}

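/*
 * Allocate a pending request from the session's preallocated tag pool;
 * the tag doubles as the index into sess_cmd_map set up by
 * target_setup_session() in scsiback_make_nexus().
 */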
static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring,
				struct v2p_entry *v2p)
{
	struct scsiback_tpg *tpg = v2p->tpg;
	struct scsiback_nexus *nexus = tpg->tpg_nexus;
	struct se_session *se_sess = nexus->tvn_se_sess;
	struct vscsibk_pend *req;
	int tag, cpu, i;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0) {
		pr_err("Unable to obtain tag for vscsiif_request\n");
		return ERR_PTR(-ENOMEM);
	}

	req = &((struct vscsibk_pend *)se_sess->sess_cmd_map)[tag];
	memset(req, 0, sizeof(*req));
	req->se_cmd.map_tag = tag;
	req->se_cmd.map_cpu = cpu;

	for (i = 0; i < VSCSI_MAX_GRANTS; i++)
		req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;

	return req;
}

static struct vscsibk_pend *prepare_pending_reqs(struct vscsibk_info *info,
				struct vscsiif_back_ring *ring,
				struct vscsiif_request *ring_req)
{
	struct vscsibk_pend *pending_req;
	struct v2p_entry *v2p;
	struct ids_tuple vir;

	/* request range check from frontend */
	if ((ring_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
		(ring_req->sc_data_direction != DMA_TO_DEVICE) &&
		(ring_req->sc_data_direction != DMA_FROM_DEVICE) &&
		(ring_req->sc_data_direction != DMA_NONE)) {
		pr_debug("invalid parameter data_dir = %d\n",
			ring_req->sc_data_direction);
		return ERR_PTR(-EINVAL);
	}
	if (ring_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
		pr_debug("invalid parameter cmd_len = %d\n",
			ring_req->cmd_len);
		return ERR_PTR(-EINVAL);
	}

	vir.chn = ring_req->channel;
	vir.tgt = ring_req->id;
	vir.lun = ring_req->lun;

	v2p = scsiback_do_translation(info, &vir);
	if (!v2p) {
		pr_debug("the v2p of (chn:%d, tgt:%d, lun:%d) doesn't exist.\n",
			 vir.chn, vir.tgt, vir.lun);
		return ERR_PTR(-ENODEV);
	}

	pending_req = scsiback_get_pend_req(ring, v2p);
	if (IS_ERR(pending_req)) {
		kref_put(&v2p->kref, scsiback_free_translation_entry);
		return ERR_PTR(-ENOMEM);
	}
	pending_req->rqid = ring_req->rqid;
	pending_req->info = info;
	pending_req->v2p = v2p;
	pending_req->sc_data_direction = ring_req->sc_data_direction;
	pending_req->cmd_len = ring_req->cmd_len;
	memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);

	return pending_req;
}

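/*
 * Consume requests from the shared ring. Returns > 0 if more work may
 * be pending, 0 when the ring is empty, and < 0 on a fatal ring error
 * (the caller then leaves the event channel masked).
 */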
static int scsiback_do_cmd_fn(struct vscsibk_info *info,
			      unsigned int *eoi_flags)
{
	struct vscsiif_back_ring *ring = &info->ring;
	struct vscsiif_request ring_req;
	struct vscsibk_pend *pending_req;
	RING_IDX rc, rp;
	int more_to_do;
	uint32_t result;

	rc = ring->req_cons;
	rp = ring->sring->req_prod;
	rmb();	/* guest system is accessing ring, too */

	if (RING_REQUEST_PROD_OVERFLOW(ring, rp)) {
		rc = ring->rsp_prod_pvt;
		pr_warn("Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n",
			   info->domid, rp, rc, rp - rc);
		return -EINVAL;
	}

	while (rc != rp) {
		*eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;

		if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
			break;

		RING_COPY_REQUEST(ring, rc, &ring_req);
		ring->req_cons = ++rc;

		pending_req = prepare_pending_reqs(info, ring, &ring_req);
		if (IS_ERR(pending_req)) {
			switch (PTR_ERR(pending_req)) {
			case -ENODEV:
				result = DID_NO_CONNECT;
				break;
			default:
				result = DID_ERROR;
				break;
			}
			scsiback_send_response(info, NULL, result << 16, 0,
					       ring_req.rqid);
			return 1;
		}

		switch (ring_req.act) {
		case VSCSIIF_ACT_SCSI_CDB:
			if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
				scsiback_fast_flush_area(pending_req);
				scsiback_do_resp_with_sense(NULL,
						DID_ERROR << 16, 0, pending_req);
				transport_generic_free_cmd(&pending_req->se_cmd, 0);
			} else {
				scsiback_cmd_exec(pending_req);
			}
			break;
		case VSCSIIF_ACT_SCSI_ABORT:
			scsiback_device_action(pending_req, TMR_ABORT_TASK,
				ring_req.ref_rqid);
			break;
		case VSCSIIF_ACT_SCSI_RESET:
			scsiback_device_action(pending_req, TMR_LUN_RESET, 0);
			break;
		default:
			pr_err_ratelimited("invalid request\n");
			scsiback_do_resp_with_sense(NULL, DID_ERROR << 16, 0,
						    pending_req);
			transport_generic_free_cmd(&pending_req->se_cmd, 0);
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}

	gnttab_page_cache_shrink(&info->free_pages, scsiback_max_buffer_pages);

	RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
	return more_to_do;
}

static irqreturn_t scsiback_irq_fn(int irq, void *dev_id)
{
	struct vscsibk_info *info = dev_id;
	int rc;
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	while ((rc = scsiback_do_cmd_fn(info, &eoi_flags)) > 0)
		cond_resched();

	/* In case of a ring error we keep the event channel masked. */
	if (!rc)
		xen_irq_lateeoi(irq, eoi_flags);

	return IRQ_HANDLED;
}

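/*
 * Map the frontend's ring page and bind the interdomain event channel
 * as a lateeoi irq: the EOI is only signalled from scsiback_irq_fn()
 * after ring processing, which throttles misbehaving frontends.
 */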
static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref,
			evtchn_port_t evtchn)
{
	void *area;
	struct vscsiif_sring *sring;
	int err;

	if (info->irq)
		return -1;

	err = xenbus_map_ring_valloc(info->dev, &ring_ref, 1, &area);
	if (err)
		return err;

	sring = (struct vscsiif_sring *)area;
	BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = bind_interdomain_evtchn_to_irq_lateeoi(info->dev, evtchn);
	if (err < 0)
		goto unmap_page;

	info->irq = err;

	err = request_threaded_irq(info->irq, NULL, scsiback_irq_fn,
				   IRQF_ONESHOT, "vscsiif-backend", info);
	if (err)
		goto free_irq;

	return 0;

free_irq:
	unbind_from_irqhandler(info->irq, info);
	info->irq = 0;
unmap_page:
	xenbus_unmap_ring_vfree(info->dev, area);

	return err;
}

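/*
 * Read ring-ref and event-channel from the frontend's xenbus directory
 * and connect the ring.
 */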
static int scsiback_map(struct vscsibk_info *info)
{
	struct xenbus_device *dev = info->dev;
	unsigned int ring_ref;
	evtchn_port_t evtchn;
	int err;

	err = xenbus_gather(XBT_NIL, dev->otherend,
			"ring-ref", "%u", &ring_ref,
			"event-channel", "%u", &evtchn, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err, "reading %s ring", dev->otherend);
		return err;
	}

	return scsiback_init_sring(info, ring_ref, evtchn);
}

/*
  Check for a translation entry being present
*/
static struct v2p_entry *scsiback_chk_translation_entry(
	struct vscsibk_info *info, struct ids_tuple *v)
{
	struct list_head *head = &(info->v2p_entry_lists);
	struct v2p_entry *entry;

	list_for_each_entry(entry, head, l)
		if ((entry->v.chn == v->chn) &&
		    (entry->v.tgt == v->tgt) &&
		    (entry->v.lun == v->lun))
			return entry;

	return NULL;
}

/*
  Add a new translation entry
*/
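/*
 * The physical device string "phy" has the form "<tport-name-or-alias>:<lun>";
 * a hypothetical "naa.60014055f1e34b93:0", for example, would select LUN 0
 * of that target port.
 */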
static int scsiback_add_translation_entry(struct vscsibk_info *info,
					  char *phy, struct ids_tuple *v)
{
	int err = 0;
	struct v2p_entry *new;
	unsigned long flags;
	char *lunp;
	unsigned long long unpacked_lun;
	struct se_lun *se_lun;
	struct scsiback_tpg *tpg_entry, *tpg = NULL;
	char *error = "doesn't exist";

	lunp = strrchr(phy, ':');
	if (!lunp) {
		pr_err("illegal format of physical device %s\n", phy);
		return -EINVAL;
	}
	*lunp = 0;
	lunp++;
	err = kstrtoull(lunp, 10, &unpacked_lun);
	if (err < 0) {
		pr_err("lun number not valid: %s\n", lunp);
		return err;
	}

	mutex_lock(&scsiback_mutex);
	list_for_each_entry(tpg_entry, &scsiback_list, tv_tpg_list) {
		if (!strcmp(phy, tpg_entry->tport->tport_name) ||
		    !strcmp(phy, tpg_entry->param_alias)) {
			mutex_lock(&tpg_entry->se_tpg.tpg_lun_mutex);
			hlist_for_each_entry(se_lun, &tpg_entry->se_tpg.tpg_lun_hlist, link) {
				if (se_lun->unpacked_lun == unpacked_lun) {
					if (!tpg_entry->tpg_nexus)
						error = "nexus undefined";
					else
						tpg = tpg_entry;
					break;
				}
			}
			mutex_unlock(&tpg_entry->se_tpg.tpg_lun_mutex);
			break;
		}
	}
	if (tpg) {
		mutex_lock(&tpg->tv_tpg_mutex);
		tpg->tv_tpg_fe_count++;
		mutex_unlock(&tpg->tv_tpg_mutex);
	}
	mutex_unlock(&scsiback_mutex);

	if (!tpg) {
		pr_err("%s:%llu %s\n", phy, unpacked_lun, error);
		return -ENODEV;
	}

	new = kmalloc(sizeof(struct v2p_entry), GFP_KERNEL);
	if (new == NULL) {
		err = -ENOMEM;
		goto out_free;
	}

	spin_lock_irqsave(&info->v2p_lock, flags);

	/* Check double assignment to identical virtual ID */
	if (scsiback_chk_translation_entry(info, v)) {
		pr_warn("Virtual ID is already used. Assignment was not performed.\n");
		err = -EEXIST;
		goto out;
	}

	/* Create a new translation entry and add to the list */
	kref_init(&new->kref);
	new->v = *v;
	new->tpg = tpg;
	new->lun = unpacked_lun;
	list_add_tail(&new->l, &info->v2p_entry_lists);

out:
	spin_unlock_irqrestore(&info->v2p_lock, flags);

out_free:
	if (err) {
		mutex_lock(&tpg->tv_tpg_mutex);
		tpg->tv_tpg_fe_count--;
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(new);
	}

	return err;
}

static void __scsiback_del_translation_entry(struct v2p_entry *entry)
{
	list_del(&entry->l);
	kref_put(&entry->kref, scsiback_free_translation_entry);
}

/*
  Delete the translation entry specified
*/
static int scsiback_del_translation_entry(struct vscsibk_info *info,
					  struct ids_tuple *v)
{
	struct v2p_entry *entry;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&info->v2p_lock, flags);
	/* Find out the translation entry specified */
	entry = scsiback_chk_translation_entry(info, v);
	if (entry)
		__scsiback_del_translation_entry(entry);
	else
		ret = -ENOENT;

	spin_unlock_irqrestore(&info->v2p_lock, flags);
	return ret;
}

static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
				char *phy, struct ids_tuple *vir, int try)
{
	struct v2p_entry *entry;
	unsigned long flags;
	int err;

	if (try) {
		spin_lock_irqsave(&info->v2p_lock, flags);
		entry = scsiback_chk_translation_entry(info, vir);
		spin_unlock_irqrestore(&info->v2p_lock, flags);
		if (entry)
			return;
	}
	if (!scsiback_add_translation_entry(info, phy, vir)) {
		if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
				  "%d", XenbusStateInitialised)) {
			pr_err("xenbus_printf error %s\n", state);
			scsiback_del_translation_entry(info, vir);
		}
	} else if (!try) {
		err = xenbus_printf(XBT_NIL, info->dev->nodename, state,
			      "%d", XenbusStateClosed);
		if (err)
			xenbus_dev_error(info->dev, err,
				"%s: writing %s", __func__, state);
	}
}

static void scsiback_do_del_lun(struct vscsibk_info *info, const char *state,
				struct ids_tuple *vir)
{
	if (!scsiback_del_translation_entry(info, vir)) {
		if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
				  "%d", XenbusStateClosed))
			pr_err("xenbus_printf error %s\n", state);
	}
}

#define VSCSIBACK_OP_ADD_OR_DEL_LUN	1
#define VSCSIBACK_OP_UPDATEDEV_STATE	2

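/*
 * Each LUN is described by a xenstore subtree written by the toolstack;
 * illustratively (key names per the reads below, values hypothetical):
 *   vscsi-devs/dev-0/p-dev  = "naa.60014055f1e34b93:0"
 *   vscsi-devs/dev-0/v-dev  = "0:0:0:0"	(host:channel:target:lun)
 *   vscsi-devs/dev-0/state  = "1"		(XenbusStateInitialising)
 * The backend answers by updating the same state node.
 */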
static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
				     char *ent)
{
	int err;
	struct ids_tuple vir;
	char *val;
	int device_state;
	char phy[VSCSI_NAMELEN];
	char str[64];
	char state[64];
	struct xenbus_device *dev = info->dev;

	/* read status */
	snprintf(state, sizeof(state), "vscsi-devs/%s/state", ent);
	err = xenbus_scanf(XBT_NIL, dev->nodename, state, "%u", &device_state);
	if (XENBUS_EXIST_ERR(err))
		return;

	/* physical SCSI device */
	snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent);
	val = xenbus_read(XBT_NIL, dev->nodename, str, NULL);
	if (IS_ERR(val)) {
		err = xenbus_printf(XBT_NIL, dev->nodename, state,
			      "%d", XenbusStateClosed);
		if (err)
			xenbus_dev_error(info->dev, err,
				"%s: writing %s", __func__, state);
		return;
	}
	strlcpy(phy, val, VSCSI_NAMELEN);
	kfree(val);

	/* virtual SCSI device */
	snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", ent);
	err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u",
			   &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
	if (XENBUS_EXIST_ERR(err)) {
		err = xenbus_printf(XBT_NIL, dev->nodename, state,
			      "%d", XenbusStateClosed);
		if (err)
			xenbus_dev_error(info->dev, err,
				"%s: writing %s", __func__, state);
		return;
	}

	switch (op) {
	case VSCSIBACK_OP_ADD_OR_DEL_LUN:
		switch (device_state) {
		case XenbusStateInitialising:
			scsiback_do_add_lun(info, state, phy, &vir, 0);
			break;
		case XenbusStateConnected:
			scsiback_do_add_lun(info, state, phy, &vir, 1);
			break;
		case XenbusStateClosing:
			scsiback_do_del_lun(info, state, &vir);
			break;
		default:
			break;
		}
		break;

	case VSCSIBACK_OP_UPDATEDEV_STATE:
		if (device_state == XenbusStateInitialised) {
			/* modify vscsi-devs/dev-x/state */
			if (xenbus_printf(XBT_NIL, dev->nodename, state,
					  "%d", XenbusStateConnected)) {
				pr_err("xenbus_printf error %s\n", state);
				scsiback_del_translation_entry(info, &vir);
				xenbus_printf(XBT_NIL, dev->nodename, state,
					      "%d", XenbusStateClosed);
			}
		}
		break;
	/* Further cases can be handled here when necessary. */
	default:
		break;
	}
}

static void scsiback_do_lun_hotplug(struct vscsibk_info *info, int op)
{
	int i;
	char **dir;
	unsigned int ndir = 0;

	dir = xenbus_directory(XBT_NIL, info->dev->nodename, "vscsi-devs",
			       &ndir);
	if (IS_ERR(dir))
		return;

	for (i = 0; i < ndir; i++)
		scsiback_do_1lun_hotplug(info, op, dir[i]);

	kfree(dir);
}

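/*
 * Xenbus handshake: when the frontend reaches Initialised we map the
 * ring and scan vscsi-devs for LUNs; Connected re-scans to confirm the
 * device states; Closing tears the ring down; Closed/Unknown finally
 * unregisters the device unless it stays online.
 */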
static void scsiback_frontend_changed(struct xenbus_device *dev,
					enum xenbus_state frontend_state)
{
	struct vscsibk_info *info = dev_get_drvdata(&dev->dev);

	switch (frontend_state) {
	case XenbusStateInitialising:
		break;

	case XenbusStateInitialised:
		if (scsiback_map(info))
			break;

		scsiback_do_lun_hotplug(info, VSCSIBACK_OP_ADD_OR_DEL_LUN);
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		scsiback_do_lun_hotplug(info, VSCSIBACK_OP_UPDATEDEV_STATE);

		if (dev->state == XenbusStateConnected)
			break;

		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		if (info->irq)
			scsiback_disconnect(info);

		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		fallthrough;	/* if not online */
	case XenbusStateUnknown:
		device_unregister(&dev->dev);
		break;

	case XenbusStateReconfiguring:
		scsiback_do_lun_hotplug(info, VSCSIBACK_OP_ADD_OR_DEL_LUN);
		xenbus_switch_state(dev, XenbusStateReconfigured);

		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
					frontend_state);
		break;
	}
}

/*
  Release the translation entry specified
*/
static void scsiback_release_translation_entry(struct vscsibk_info *info)
{
	struct v2p_entry *entry, *tmp;
	struct list_head *head = &(info->v2p_entry_lists);
	unsigned long flags;

	spin_lock_irqsave(&info->v2p_lock, flags);

	list_for_each_entry_safe(entry, tmp, head, l)
		__scsiback_del_translation_entry(entry);

	spin_unlock_irqrestore(&info->v2p_lock, flags);
}

static int scsiback_remove(struct xenbus_device *dev)
{
	struct vscsibk_info *info = dev_get_drvdata(&dev->dev);

	if (info->irq)
		scsiback_disconnect(info);

	scsiback_release_translation_entry(info);

	gnttab_page_cache_shrink(&info->free_pages, 0);

	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static int scsiback_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	int err;

	struct vscsibk_info *info = kzalloc(sizeof(struct vscsibk_info),
					    GFP_KERNEL);

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating backend structure");
		return -ENOMEM;
	}
	info->dev = dev;
	dev_set_drvdata(&dev->dev, info);

	info->domid = dev->otherend_id;
	spin_lock_init(&info->ring_lock);
	atomic_set(&info->nr_unreplied_reqs, 0);
	init_waitqueue_head(&info->waiting_to_free);
	info->irq = 0;
	INIT_LIST_HEAD(&info->v2p_entry_lists);
	spin_lock_init(&info->v2p_lock);
	gnttab_page_cache_init(&info->free_pages);

	err = xenbus_printf(XBT_NIL, dev->nodename, "feature-sg-grant", "%u",
			    SG_ALL);
	if (err)
		xenbus_dev_error(dev, err, "writing feature-sg-grant");

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	pr_warn("%s failed\n", __func__);
	scsiback_remove(dev);

	return err;
}

static char *scsiback_dump_proto_id(struct scsiback_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

static char *scsiback_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);
	struct scsiback_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 scsiback_get_tag(struct se_portal_group *se_tpg)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static struct se_wwn *
scsiback_make_tport(struct target_fabric_configfs *tf,
		     struct config_group *group,
		     const char *name)
{
	struct scsiback_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	tport = kzalloc(sizeof(struct scsiback_tport), GFP_KERNEL);
	if (!tport)
		return ERR_PTR(-ENOMEM);

	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port: %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= VSCSI_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds max: %d\n",
			scsiback_dump_proto_id(tport), name, VSCSI_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], VSCSI_NAMELEN, "%s", &name[off]);

	pr_debug("Allocated emulated Target %s Address: %s\n",
		 scsiback_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}

static void scsiback_drop_tport(struct se_wwn *wwn)
{
	struct scsiback_tport *tport = container_of(wwn,
				struct scsiback_tport, tport_wwn);

	pr_debug("Deallocating emulated Target %s Address: %s\n",
		 scsiback_dump_proto_id(tport), tport->tport_name);

	kfree(tport);
}

static u32 scsiback_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static int scsiback_check_stop_free(struct se_cmd *se_cmd)
{
	return transport_generic_free_cmd(se_cmd, 0);
}

static void scsiback_release_cmd(struct se_cmd *se_cmd)
{
	target_free_tag(se_cmd->se_sess, se_cmd);
}

static u32 scsiback_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int scsiback_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);

	return 0;
}

static void scsiback_set_default_node_attrs(struct se_node_acl *nacl)
{
}

static int scsiback_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int scsiback_queue_data_in(struct se_cmd *se_cmd)
{
	struct vscsibk_pend *pending_req = container_of(se_cmd,
				struct vscsibk_pend, se_cmd);

	pending_req->result = SAM_STAT_GOOD;
	scsiback_cmd_done(pending_req);
	return 0;
}

static int scsiback_queue_status(struct se_cmd *se_cmd)
{
	struct vscsibk_pend *pending_req = container_of(se_cmd,
				struct vscsibk_pend, se_cmd);

	if (se_cmd->sense_buffer &&
	    ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE)))
		pending_req->result = SAM_STAT_CHECK_CONDITION;
	else
		pending_req->result = se_cmd->scsi_status;

	scsiback_cmd_done(pending_req);
	return 0;
}

static void scsiback_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct vscsibk_pend *pending_req = container_of(se_cmd,
				struct vscsibk_pend, se_cmd);

	complete(&pending_req->tmr_done);
}

static void scsiback_aborted_task(struct se_cmd *se_cmd)
{
}

static ssize_t scsiback_tpg_param_alias_show(struct config_item *item,
					     char *page)
{
	struct se_portal_group *se_tpg = param_to_tpg(item);
	struct scsiback_tpg *tpg = container_of(se_tpg, struct scsiback_tpg,
						se_tpg);
	ssize_t rb;

	mutex_lock(&tpg->tv_tpg_mutex);
	rb = snprintf(page, PAGE_SIZE, "%s\n", tpg->param_alias);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return rb;
}

static ssize_t scsiback_tpg_param_alias_store(struct config_item *item,
					      const char *page, size_t count)
{
	struct se_portal_group *se_tpg = param_to_tpg(item);
	struct scsiback_tpg *tpg = container_of(se_tpg, struct scsiback_tpg,
						se_tpg);
	int len;

	if (strlen(page) >= VSCSI_NAMELEN) {
		pr_err("param alias: %s, exceeds max: %d\n", page,
			VSCSI_NAMELEN);
		return -EINVAL;
	}

	mutex_lock(&tpg->tv_tpg_mutex);
	len = snprintf(tpg->param_alias, VSCSI_NAMELEN, "%s", page);
	if (tpg->param_alias[len - 1] == '\n')
		tpg->param_alias[len - 1] = '\0';
	mutex_unlock(&tpg->tv_tpg_mutex);

	return count;
}

CONFIGFS_ATTR(scsiback_tpg_param_, alias);

static struct configfs_attribute *scsiback_param_attrs[] = {
	&scsiback_tpg_param_attr_alias,
	NULL,
};

static int scsiback_alloc_sess_cb(struct se_portal_group *se_tpg,
				  struct se_session *se_sess, void *p)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);

	tpg->tpg_nexus = p;
	return 0;
}

static int scsiback_make_nexus(struct scsiback_tpg *tpg,
				const char *name)
{
	struct scsiback_nexus *tv_nexus;
	int ret = 0;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		pr_debug("tpg->tpg_nexus already exists\n");
		ret = -EEXIST;
		goto out_unlock;
	}

	tv_nexus = kzalloc(sizeof(struct scsiback_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
						     VSCSI_DEFAULT_SESSION_TAGS,
						     sizeof(struct vscsibk_pend),
						     TARGET_PROT_NORMAL, name,
						     tv_nexus, scsiback_alloc_sess_cb);
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		kfree(tv_nexus);
		ret = -ENOMEM;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&tpg->tv_tpg_mutex);
	return ret;
}

static int scsiback_drop_nexus(struct scsiback_tpg *tpg)
{
	struct se_session *se_sess;
	struct scsiback_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove xen-pvscsi I_T Nexus with active TPG port count: %d\n",
			tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_fe_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove xen-pvscsi I_T Nexus with active TPG frontend count: %d\n",
			tpg->tv_tpg_fe_count);
		return -EBUSY;
	}

	pr_debug("Removing I_T Nexus to emulated %s Initiator Port: %s\n",
		scsiback_dump_proto_id(tpg->tport),
		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);

	/*
	 * Release the SCSI I_T Nexus to the emulated xen-pvscsi Target Port
	 */
	target_remove_session(se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}

static ssize_t scsiback_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);
	struct scsiback_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = snprintf(page, PAGE_SIZE, "%s\n",
			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return ret;
}

static ssize_t scsiback_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);
	struct scsiback_tport *tport_wwn = tpg->tport;
	unsigned char i_port[VSCSI_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed.
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = scsiback_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in scsiback_make_tport(), and call
	 * scsiback_make_nexus().
	 */
	if (strlen(page) >= VSCSI_NAMELEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds max: %d\n",
			page, VSCSI_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], VSCSI_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
				i_port, scsiback_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
				i_port, scsiback_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
				i_port, scsiback_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
		i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port) - 1] == '\n')
		i_port[strlen(i_port) - 1] = '\0';

	ret = scsiback_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

CONFIGFS_ATTR(scsiback_tpg_, nexus);

static struct configfs_attribute *scsiback_tpg_attrs[] = {
	&scsiback_tpg_attr_nexus,
	NULL,
};

static ssize_t
scsiback_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "xen-pvscsi fabric module %s on %s/%s on "
		UTS_RELEASE"\n",
		VSCSI_VERSION, utsname()->sysname, utsname()->machine);
}

CONFIGFS_ATTR_RO(scsiback_wwn_, version);

static struct configfs_attribute *scsiback_wwn_attrs[] = {
	&scsiback_wwn_attr_version,
	NULL,
};

static int scsiback_port_link(struct se_portal_group *se_tpg,
			       struct se_lun *lun)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count++;
	mutex_unlock(&tpg->tv_tpg_mutex);

	return 0;
}

static void scsiback_port_unlink(struct se_portal_group *se_tpg,
				  struct se_lun *lun)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count--;
	mutex_unlock(&tpg->tv_tpg_mutex);
}

static struct se_portal_group *
scsiback_make_tpg(struct se_wwn *wwn, const char *name)
{
	struct scsiback_tport *tport = container_of(wwn,
			struct scsiback_tport, tport_wwn);

	struct scsiback_tpg *tpg;
	u16 tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	ret = kstrtou16(name + 5, 10, &tpgt);
	if (ret)
		return ERR_PTR(ret);

	tpg = kzalloc(sizeof(struct scsiback_tpg), GFP_KERNEL);
	if (!tpg)
		return ERR_PTR(-ENOMEM);

	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	INIT_LIST_HEAD(&tpg->info_list);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	mutex_lock(&scsiback_mutex);
	list_add_tail(&tpg->tv_tpg_list, &scsiback_list);
	mutex_unlock(&scsiback_mutex);

	return &tpg->se_tpg;
}

static void scsiback_drop_tpg(struct se_portal_group *se_tpg)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);

	mutex_lock(&scsiback_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&scsiback_mutex);
	/*
	 * Release the virtual I_T Nexus for this xen-pvscsi TPG
	 */
	scsiback_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM.
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}

static int scsiback_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int scsiback_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

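/*
 * TCM fabric callbacks: note that check_stop_free immediately hands
 * commands to transport_generic_free_cmd() and release_cmd returns the
 * tag to the session pool, matching the pre-allocated tag scheme above.
 */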
static const struct target_core_fabric_ops scsiback_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "xen-pvscsi",
	.tpg_get_wwn			= scsiback_get_fabric_wwn,
	.tpg_get_tag			= scsiback_get_tag,
	.tpg_check_demo_mode		= scsiback_check_true,
	.tpg_check_demo_mode_cache	= scsiback_check_true,
	.tpg_check_demo_mode_write_protect = scsiback_check_false,
	.tpg_check_prod_mode_write_protect = scsiback_check_false,
	.tpg_get_inst_index		= scsiback_tpg_get_inst_index,
	.check_stop_free		= scsiback_check_stop_free,
	.release_cmd			= scsiback_release_cmd,
	.sess_get_index			= scsiback_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= scsiback_write_pending,
	.set_default_node_attributes	= scsiback_set_default_node_attrs,
	.get_cmd_state			= scsiback_get_cmd_state,
	.queue_data_in			= scsiback_queue_data_in,
	.queue_status			= scsiback_queue_status,
	.queue_tm_rsp			= scsiback_queue_tm_rsp,
	.aborted_task			= scsiback_aborted_task,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= scsiback_make_tport,
	.fabric_drop_wwn		= scsiback_drop_tport,
	.fabric_make_tpg		= scsiback_make_tpg,
	.fabric_drop_tpg		= scsiback_drop_tpg,
	.fabric_post_link		= scsiback_port_link,
	.fabric_pre_unlink		= scsiback_port_unlink,

	.tfc_wwn_attrs			= scsiback_wwn_attrs,
	.tfc_tpg_base_attrs		= scsiback_tpg_attrs,
	.tfc_tpg_param_attrs		= scsiback_param_attrs,
};

static const struct xenbus_device_id scsiback_ids[] = {
	{ "vscsi" },
	{ "" }
};

static struct xenbus_driver scsiback_driver = {
	.ids			= scsiback_ids,
	.probe			= scsiback_probe,
	.remove			= scsiback_remove,
	.otherend_changed	= scsiback_frontend_changed
};

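/*
 * Module init: register the xenbus backend first, then the TCM fabric
 * template; unwind the xenbus registration if the latter fails.
 */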
static int __init scsiback_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n",
		 VSCSI_VERSION, utsname()->sysname, utsname()->machine);

	ret = xenbus_register_backend(&scsiback_driver);
	if (ret)
		goto out;

	ret = target_register_template(&scsiback_ops);
	if (ret)
		goto out_unregister_xenbus;

	return 0;

out_unregister_xenbus:
	xenbus_unregister_driver(&scsiback_driver);
out:
	pr_err("%s: error %d\n", __func__, ret);
	return ret;
}

static void __exit scsiback_exit(void)
{
	target_unregister_template(&scsiback_ops);
	xenbus_unregister_driver(&scsiback_driver);
}

module_init(scsiback_init);
module_exit(scsiback_exit);

MODULE_DESCRIPTION("Xen SCSI backend driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vscsi");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");