cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

efct_lio.c (45866B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
      4 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
      5 */
      6
      7#include <target/target_core_base.h>
      8#include <target/target_core_fabric.h>
      9#include "efct_driver.h"
     10#include "efct_lio.h"
     11
/*
 * lio_wq is used to call into the LIO backend during creation or deletion of
 * sessions. This brings serialization to session management, as we create a
 * single-threaded work queue.
 */
     17static struct workqueue_struct *lio_wq;
     18
     19static int
     20efct_format_wwn(char *str, size_t len, const char *pre, u64 wwn)
     21{
     22	u8 a[8];
     23
     24	put_unaligned_be64(wwn, a);
     25	return snprintf(str, len, "%s%8phC", pre, a);
     26}
     27
     28static int
     29efct_lio_parse_wwn(const char *name, u64 *wwp, u8 npiv)
     30{
     31	int num;
     32	u8 b[8];
     33
     34	if (npiv) {
     35		num = sscanf(name,
     36			     "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx",
     37			     &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6],
     38			     &b[7]);
     39	} else {
     40		num = sscanf(name,
     41		      "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
     42			     &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6],
     43			     &b[7]);
     44	}
     45
     46	if (num != 8)
     47		return -EINVAL;
     48
     49	*wwp = get_unaligned_be64(b);
     50	return 0;
     51}
     52
     53static int
     54efct_lio_parse_npiv_wwn(const char *name, size_t size, u64 *wwpn, u64 *wwnn)
     55{
     56	unsigned int cnt = size;
     57	int rc;
     58
     59	*wwpn = *wwnn = 0;
     60	if (name[cnt - 1] == '\n' || name[cnt - 1] == 0)
     61		cnt--;
     62
     63	/* validate we have enough characters for WWPN */
     64	if ((cnt != (16 + 1 + 16)) || (name[16] != ':'))
     65		return -EINVAL;
     66
     67	rc = efct_lio_parse_wwn(&name[0], wwpn, 1);
     68	if (rc)
     69		return rc;
     70
     71	rc = efct_lio_parse_wwn(&name[17], wwnn, 1);
     72	if (rc)
     73		return rc;
     74
     75	return 0;
     76}
     77
     78static ssize_t
     79efct_lio_tpg_enable_show(struct config_item *item, char *page)
     80{
     81	struct se_portal_group *se_tpg = to_tpg(item);
     82	struct efct_lio_tpg *tpg =
     83		container_of(se_tpg, struct efct_lio_tpg, tpg);
     84
     85	return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
     86}
     87
     88static ssize_t
     89efct_lio_tpg_enable_store(struct config_item *item, const char *page,
     90			  size_t count)
     91{
     92	struct se_portal_group *se_tpg = to_tpg(item);
     93	struct efct_lio_tpg *tpg =
     94		container_of(se_tpg, struct efct_lio_tpg, tpg);
     95	struct efct *efct;
     96	struct efc *efc;
     97	unsigned long op;
     98
     99	if (!tpg->nport || !tpg->nport->efct) {
    100		pr_err("%s: Unable to find EFCT device\n", __func__);
    101		return -EINVAL;
    102	}
    103
    104	efct = tpg->nport->efct;
    105	efc = efct->efcport;
    106
    107	if (kstrtoul(page, 0, &op) < 0)
    108		return -EINVAL;
    109
    110	if (op == 1) {
    111		int ret;
    112
    113		tpg->enabled = true;
    114		efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt);
    115
    116		ret = efct_xport_control(efct->xport, EFCT_XPORT_PORT_ONLINE);
    117		if (ret) {
    118			efct->tgt_efct.lio_nport = NULL;
    119			efc_log_debug(efct, "cannot bring port online\n");
    120			return ret;
    121		}
    122	} else if (op == 0) {
    123		efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt);
    124
    125		if (efc->domain && efc->domain->nport)
    126			efct_scsi_tgt_del_nport(efc, efc->domain->nport);
    127
    128		tpg->enabled = false;
    129	} else {
    130		return -EINVAL;
    131	}
    132
    133	return count;
    134}
    135
    136static ssize_t
    137efct_lio_npiv_tpg_enable_show(struct config_item *item, char *page)
    138{
    139	struct se_portal_group *se_tpg = to_tpg(item);
    140	struct efct_lio_tpg *tpg =
    141		container_of(se_tpg, struct efct_lio_tpg, tpg);
    142
    143	return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
    144}
    145
    146static ssize_t
    147efct_lio_npiv_tpg_enable_store(struct config_item *item, const char *page,
    148			       size_t count)
    149{
    150	struct se_portal_group *se_tpg = to_tpg(item);
    151	struct efct_lio_tpg *tpg =
    152		container_of(se_tpg, struct efct_lio_tpg, tpg);
    153	struct efct_lio_vport *lio_vport = tpg->vport;
    154	struct efct *efct;
    155	struct efc *efc;
    156	unsigned long op;
    157
    158	if (kstrtoul(page, 0, &op) < 0)
    159		return -EINVAL;
    160
    161	if (!lio_vport) {
    162		pr_err("Unable to find vport\n");
    163		return -EINVAL;
    164	}
    165
    166	efct = lio_vport->efct;
    167	efc = efct->efcport;
    168
    169	if (op == 1) {
    170		tpg->enabled = true;
    171		efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt);
    172
    173		if (efc->domain) {
    174			int ret;
    175
    176			ret = efc_nport_vport_new(efc->domain,
    177						  lio_vport->npiv_wwpn,
    178						  lio_vport->npiv_wwnn,
    179						  U32_MAX, false, true,
    180						  NULL, NULL);
    181			if (ret != 0) {
    182				efc_log_err(efct, "Failed to create Vport\n");
    183				return ret;
    184			}
    185			return count;
    186		}
    187
    188		if (!(efc_vport_create_spec(efc, lio_vport->npiv_wwnn,
    189					    lio_vport->npiv_wwpn, U32_MAX,
    190					    false, true, NULL, NULL)))
    191			return -ENOMEM;
    192
    193	} else if (op == 0) {
    194		efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt);
    195
    196		tpg->enabled = false;
    197		/* only physical nport should exist, free lio_nport
    198		 * allocated in efct_lio_make_nport
    199		 */
    200		if (efc->domain) {
    201			efc_nport_vport_del(efct->efcport, efc->domain,
    202					    lio_vport->npiv_wwpn,
    203					    lio_vport->npiv_wwnn);
    204			return count;
    205		}
    206	} else {
    207		return -EINVAL;
    208	}
    209	return count;
    210}
    211
    212static char *efct_lio_get_fabric_wwn(struct se_portal_group *se_tpg)
    213{
    214	struct efct_lio_tpg *tpg =
    215		container_of(se_tpg, struct efct_lio_tpg, tpg);
    216
    217	return tpg->nport->wwpn_str;
    218}
    219
    220static char *efct_lio_get_npiv_fabric_wwn(struct se_portal_group *se_tpg)
    221{
    222	struct efct_lio_tpg *tpg =
    223		container_of(se_tpg, struct efct_lio_tpg, tpg);
    224
    225	return tpg->vport->wwpn_str;
    226}
    227
    228static u16 efct_lio_get_tag(struct se_portal_group *se_tpg)
    229{
    230	struct efct_lio_tpg *tpg =
    231		container_of(se_tpg, struct efct_lio_tpg, tpg);
    232
    233	return tpg->tpgt;
    234}
    235
    236static u16 efct_lio_get_npiv_tag(struct se_portal_group *se_tpg)
    237{
    238	struct efct_lio_tpg *tpg =
    239		container_of(se_tpg, struct efct_lio_tpg, tpg);
    240
    241	return tpg->tpgt;
    242}
    243
/* Demo mode (dynamic ACLs) is always allowed for this fabric. */
static int efct_lio_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}
    248
/* Caching of dynamically generated ACLs is always enabled. */
static int efct_lio_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 1;
}
    253
    254static int efct_lio_check_demo_write_protect(struct se_portal_group *se_tpg)
    255{
    256	struct efct_lio_tpg *tpg =
    257		container_of(se_tpg, struct efct_lio_tpg, tpg);
    258
    259	return tpg->tpg_attrib.demo_mode_write_protect;
    260}
    261
    262static int
    263efct_lio_npiv_check_demo_write_protect(struct se_portal_group *se_tpg)
    264{
    265	struct efct_lio_tpg *tpg =
    266		container_of(se_tpg, struct efct_lio_tpg, tpg);
    267
    268	return tpg->tpg_attrib.demo_mode_write_protect;
    269}
    270
    271static int efct_lio_check_prod_write_protect(struct se_portal_group *se_tpg)
    272{
    273	struct efct_lio_tpg *tpg =
    274		container_of(se_tpg, struct efct_lio_tpg, tpg);
    275
    276	return tpg->tpg_attrib.prod_mode_write_protect;
    277}
    278
    279static int
    280efct_lio_npiv_check_prod_write_protect(struct se_portal_group *se_tpg)
    281{
    282	struct efct_lio_tpg *tpg =
    283		container_of(se_tpg, struct efct_lio_tpg, tpg);
    284
    285	return tpg->tpg_attrib.prod_mode_write_protect;
    286}
    287
/* A single instance per TPG; always index 1. */
static u32 efct_lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}
    292
    293static int efct_lio_check_stop_free(struct se_cmd *se_cmd)
    294{
    295	struct efct_scsi_tgt_io *ocp =
    296		container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
    297	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
    298
    299	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_CHK_STOP_FREE);
    300	return target_put_sess_cmd(se_cmd);
    301}
    302
/*
 * Completion callback for efct_scsi_tgt_abort_io(): just log the abort
 * status. Always returns 0.
 */
static int
efct_lio_abort_tgt_cb(struct efct_io *io,
		      enum efct_scsi_io_status scsi_status,
		      u32 flags, void *arg)
{
	efct_lio_io_printf(io, "Abort done, status:%d\n", scsi_status);
	return 0;
}
    311
/*
 * TFO aborted_task: LIO tells us the command was aborted. If the response
 * has already gone out there is nothing to do; otherwise mark the IO as
 * aborting and terminate the exchange on the wire.
 */
static void
efct_lio_aborted_task(struct se_cmd *se_cmd)
{
	struct efct_scsi_tgt_io *ocp =
		container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_ABORTED_TASK);

	/* response already sent; the normal completion path will clean up */
	if (ocp->rsp_sent)
		return;

	/* command has been aborted, cleanup here */
	ocp->aborting = true;
	ocp->err = EFCT_SCSI_STATUS_ABORTED;
	/* terminate the exchange */
	efct_scsi_tgt_abort_io(io, efct_lio_abort_tgt_cb, NULL);
}
    330
    331static void efct_lio_release_cmd(struct se_cmd *se_cmd)
    332{
    333	struct efct_scsi_tgt_io *ocp =
    334		container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
    335	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
    336	struct efct *efct = io->efct;
    337
    338	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_RELEASE_CMD);
    339	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_CMPL_CMD);
    340	efct_scsi_io_complete(io);
    341	atomic_sub_return(1, &efct->tgt_efct.ios_in_use);
    342}
    343
    344static void efct_lio_close_session(struct se_session *se_sess)
    345{
    346	struct efc_node *node = se_sess->fabric_sess_ptr;
    347
    348	pr_debug("se_sess=%p node=%p", se_sess, node);
    349
    350	if (!node) {
    351		pr_debug("node is NULL");
    352		return;
    353	}
    354
    355	efc_node_post_shutdown(node, NULL);
    356}
    357
/* Session indexes are not used by this fabric; always 0. */
static u32 efct_lio_sess_get_index(struct se_session *se_sess)
{
	return 0;
}
    362
/* No per-node default attributes to set for this fabric. */
static void efct_lio_set_default_node_attrs(struct se_node_acl *nacl)
{
}
    366
    367static int efct_lio_get_cmd_state(struct se_cmd *cmd)
    368{
    369	struct efct_scsi_tgt_io *ocp =
    370		container_of(cmd, struct efct_scsi_tgt_io, cmd);
    371	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
    372
    373	return io->tgt_io.state;
    374}
    375
    376static int
    377efct_lio_sg_map(struct efct_io *io)
    378{
    379	struct efct_scsi_tgt_io *ocp = &io->tgt_io;
    380	struct se_cmd *cmd = &ocp->cmd;
    381
    382	ocp->seg_map_cnt = dma_map_sg(&io->efct->pci->dev, cmd->t_data_sg,
    383				      cmd->t_data_nents, cmd->data_direction);
    384	if (ocp->seg_map_cnt == 0)
    385		return -EFAULT;
    386	return 0;
    387}
    388
    389static void
    390efct_lio_sg_unmap(struct efct_io *io)
    391{
    392	struct efct_scsi_tgt_io *ocp = &io->tgt_io;
    393	struct se_cmd *cmd = &ocp->cmd;
    394
    395	if (WARN_ON(!ocp->seg_map_cnt || !cmd->t_data_sg))
    396		return;
    397
    398	dma_unmap_sg(&io->efct->pci->dev, cmd->t_data_sg,
    399		     ocp->seg_map_cnt, cmd->data_direction);
    400	ocp->seg_map_cnt = 0;
    401}
    402
    403static int
    404efct_lio_status_done(struct efct_io *io,
    405		     enum efct_scsi_io_status scsi_status,
    406		     u32 flags, void *arg)
    407{
    408	struct efct_scsi_tgt_io *ocp = &io->tgt_io;
    409
    410	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RSP_DONE);
    411	if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
    412		efct_lio_io_printf(io, "callback completed with error=%d\n",
    413				   scsi_status);
    414		ocp->err = scsi_status;
    415	}
    416	if (ocp->seg_map_cnt)
    417		efct_lio_sg_unmap(io);
    418
    419	efct_lio_io_printf(io, "status=%d, err=%d flags=0x%x, dir=%d\n",
    420			   scsi_status, ocp->err, flags, ocp->ddir);
    421
    422	efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
    423	transport_generic_free_cmd(&io->tgt_io.cmd, 0);
    424	return 0;
    425}
    426
    427static int
    428efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
    429		       u32 flags, void *arg);
    430
/*
 * TFO write_pending: start (or continue) fetching write data from the
 * initiator. Data is transferred in stages of at most io->sgl_allocated
 * segments; efct_lio_datamove_done() re-invokes this function until
 * cur_seg reaches seg_cnt. Returns 0 on success or a negative errno.
 */
static int
efct_lio_write_pending(struct se_cmd *cmd)
{
	struct efct_scsi_tgt_io *ocp =
		container_of(cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
	struct efct_scsi_sgl *sgl = io->sgl;
	struct scatterlist *sg;
	u32 flags = 0, cnt, curcnt;
	u64 length = 0;

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_WRITE_PENDING);
	efct_lio_io_printf(io, "trans_state=0x%x se_cmd_flags=0x%x\n",
			   cmd->transport_state, cmd->se_cmd_flags);

	/* first stage: DMA-map the whole scatterlist */
	if (ocp->seg_cnt == 0) {
		ocp->seg_cnt = cmd->t_data_nents;
		ocp->cur_seg = 0;
		if (efct_lio_sg_map(io)) {
			efct_lio_io_printf(io, "efct_lio_sg_map failed\n");
			return -EFAULT;
		}
	}
	/* number of segments to transfer this round, capped by the SGL size */
	curcnt = (ocp->seg_map_cnt - ocp->cur_seg);
	curcnt = (curcnt < io->sgl_allocated) ? curcnt : io->sgl_allocated;
	/* find current sg */
	for (cnt = 0, sg = cmd->t_data_sg; cnt < ocp->cur_seg; cnt++,
	     sg = sg_next(sg))
		;/* do nothing */

	/* build the hardware SGL from the next curcnt mapped segments */
	for (cnt = 0; cnt < curcnt; cnt++, sg = sg_next(sg)) {
		sgl[cnt].addr = sg_dma_address(sg);
		sgl[cnt].dif_addr = 0;
		sgl[cnt].len = sg_dma_len(sg);
		length += sgl[cnt].len;
		ocp->cur_seg++;
	}

	if (ocp->cur_seg == ocp->seg_cnt)
		flags = EFCT_SCSI_LAST_DATAPHASE;

	return efct_scsi_recv_wr_data(io, flags, sgl, curcnt, length,
				    efct_lio_datamove_done, NULL);
}
    475
/*
 * TFO queue_data_in: send read data to the initiator. Like write_pending,
 * this proceeds in stages of up to io->sgl_allocated segments and is
 * re-invoked from efct_lio_datamove_done() until the full data_length has
 * been transferred. A zero-length command short-circuits straight to a
 * status response. Returns 0 on success or a negative errno.
 */
static int
efct_lio_queue_data_in(struct se_cmd *cmd)
{
	struct efct_scsi_tgt_io *ocp =
		container_of(cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
	struct efct_scsi_sgl *sgl = io->sgl;
	struct scatterlist *sg = NULL;
	uint flags = 0, cnt = 0, curcnt = 0;
	u64 length = 0;

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_DATA_IN);

	if (ocp->seg_cnt == 0) {
		if (cmd->data_length) {
			ocp->seg_cnt = cmd->t_data_nents;
			ocp->cur_seg = 0;
			if (efct_lio_sg_map(io)) {
				efct_lio_io_printf(io,
						   "efct_lio_sg_map failed\n");
				return -EAGAIN;
			}
		} else {
			/* If command length is 0, send the response status */
			struct efct_scsi_cmd_resp rsp;

			memset(&rsp, 0, sizeof(rsp));
			efct_lio_io_printf(io,
					   "cmd : %p length 0, send status\n",
					   cmd);
			return efct_scsi_send_resp(io, 0, &rsp,
						   efct_lio_status_done, NULL);
		}
	}
	curcnt = min(ocp->seg_map_cnt - ocp->cur_seg, io->sgl_allocated);

	/* build the hardware SGL, clamping the final segment so the total
	 * never exceeds the command's data_length
	 */
	while (cnt < curcnt) {
		sg = &cmd->t_data_sg[ocp->cur_seg];
		sgl[cnt].addr = sg_dma_address(sg);
		sgl[cnt].dif_addr = 0;
		if (ocp->transferred_len + sg_dma_len(sg) >= cmd->data_length)
			sgl[cnt].len = cmd->data_length - ocp->transferred_len;
		else
			sgl[cnt].len = sg_dma_len(sg);

		ocp->transferred_len += sgl[cnt].len;
		length += sgl[cnt].len;
		ocp->cur_seg++;
		cnt++;
		if (ocp->transferred_len == cmd->data_length)
			break;
	}

	if (ocp->transferred_len == cmd->data_length) {
		flags = EFCT_SCSI_LAST_DATAPHASE;
		ocp->seg_cnt = ocp->cur_seg;
	}

	/* If there is residual, disable Auto Good Response */
	if (cmd->residual_count)
		flags |= EFCT_SCSI_NO_AUTO_RESPONSE;

	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RD_DATA);

	return efct_scsi_send_rd_data(io, flags, sgl, curcnt, length,
				    efct_lio_datamove_done, NULL);
}
    543
/*
 * Send the SCSI response for a completed data phase. If the hardware
 * already sent the response (EFCT_SCSI_IO_CMPL_RSP_SENT), just free the
 * command; otherwise build a response (including sense data and residual)
 * and send it, falling back to freeing the command if the send fails.
 */
static void
efct_lio_send_resp(struct efct_io *io, enum efct_scsi_io_status scsi_status,
		   u32 flags)
{
	struct efct_scsi_cmd_resp rsp;
	struct efct_scsi_tgt_io *ocp = &io->tgt_io;
	struct se_cmd *cmd = &io->tgt_io.cmd;
	int rc;

	if (flags & EFCT_SCSI_IO_CMPL_RSP_SENT) {
		ocp->rsp_sent = true;
		efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
		transport_generic_free_cmd(&io->tgt_io.cmd, 0);
		return;
	}

	/* send check condition if an error occurred */
	memset(&rsp, 0, sizeof(rsp));
	rsp.scsi_status = cmd->scsi_status;
	rsp.sense_data = (uint8_t *)io->tgt_io.sense_buffer;
	rsp.sense_data_length = cmd->scsi_sense_length;

	/* Check for residual underrun or overrun */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
		rsp.residual = -cmd->residual_count;
	else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
		rsp.residual = cmd->residual_count;

	rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL);
	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP);
	if (rc != 0) {
		efct_lio_io_printf(io, "Read done, send rsp failed %d\n", rc);
		efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
		transport_generic_free_cmd(&io->tgt_io.cmd, 0);
	} else {
		ocp->rsp_sent = true;
	}
}
    582
/*
 * Completion callback for a data phase (read or write). If more segments
 * remain and no error occurred, kick off the next stage via write_pending /
 * queue_data_in; otherwise unmap the scatterlist and finish: for writes,
 * hand execution back to the target core (or fail the request), for reads,
 * send the response. Always returns 0.
 */
static int
efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
		       u32 flags, void *arg)
{
	struct efct_scsi_tgt_io *ocp = &io->tgt_io;

	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_DATA_DONE);
	if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
		efct_lio_io_printf(io, "callback completed with error=%d\n",
				   scsi_status);
		ocp->err = scsi_status;
	}
	efct_lio_io_printf(io, "seg_map_cnt=%d\n", ocp->seg_map_cnt);
	if (ocp->seg_map_cnt) {
		/* continue a staged transfer if segments remain */
		if (ocp->err == EFCT_SCSI_STATUS_GOOD &&
		    ocp->cur_seg < ocp->seg_cnt) {
			int rc;

			efct_lio_io_printf(io, "continuing cmd at segm=%d\n",
					   ocp->cur_seg);
			if (ocp->ddir == DMA_TO_DEVICE)
				rc = efct_lio_write_pending(&ocp->cmd);
			else
				rc = efct_lio_queue_data_in(&ocp->cmd);
			if (!rc)
				return 0;

			ocp->err = EFCT_SCSI_STATUS_ERROR;
			efct_lio_io_printf(io, "could not continue command\n");
		}
		efct_lio_sg_unmap(io);
	}

	/* aborted IOs are cleaned up elsewhere (efct_lio_aborted_task) */
	if (io->tgt_io.aborting) {
		efct_lio_io_printf(io, "IO done aborted\n");
		return 0;
	}

	if (ocp->ddir == DMA_TO_DEVICE) {
		efct_lio_io_printf(io, "Write done, trans_state=0x%x\n",
				   io->tgt_io.cmd.transport_state);
		if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
			transport_generic_request_failure(&io->tgt_io.cmd,
					TCM_CHECK_CONDITION_ABORT_CMD);
			efct_set_lio_io_state(io,
				EFCT_LIO_STATE_TGT_GENERIC_REQ_FAILURE);
		} else {
			efct_set_lio_io_state(io,
						EFCT_LIO_STATE_TGT_EXECUTE_CMD);
			target_execute_cmd(&io->tgt_io.cmd);
		}
	} else {
		efct_lio_send_resp(io, scsi_status, flags);
	}
	return 0;
}
    639
/*
 * Completion callback for a sent TMF response: log it and free the se_cmd.
 * Always returns 0.
 */
static int
efct_lio_tmf_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
		  u32 flags, void *arg)
{
	efct_lio_tmfio_printf(io, "cmd=%p status=%d, flags=0x%x\n",
			      &io->tgt_io.cmd, scsi_status, flags);

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
	transport_generic_free_cmd(&io->tgt_io.cmd, 0);
	return 0;
}
    651
/*
 * Completion callback for a TMF response sent without an associated se_cmd
 * (e.g. the command could not be submitted to LIO): release only the
 * driver-side efct_io. Always returns 0.
 */
static int
efct_lio_null_tmf_done(struct efct_io *tmfio,
		       enum efct_scsi_io_status scsi_status,
		      u32 flags, void *arg)
{
	efct_lio_tmfio_printf(tmfio, "cmd=%p status=%d, flags=0x%x\n",
			      &tmfio->tgt_io.cmd, scsi_status, flags);

	/* free struct efct_io only, no active se_cmd */
	efct_scsi_io_complete(tmfio);
	return 0;
}
    664
/*
 * TFO queue_status: build and send the SCSI status (plus sense data and any
 * residual) for a command. Returns the efct_scsi_send_resp() result.
 */
static int
efct_lio_queue_status(struct se_cmd *cmd)
{
	struct efct_scsi_cmd_resp rsp;
	struct efct_scsi_tgt_io *ocp =
		container_of(cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
	int rc = 0;

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_STATUS);
	efct_lio_io_printf(io,
		"status=0x%x trans_state=0x%x se_cmd_flags=0x%x sns_len=%d\n",
		cmd->scsi_status, cmd->transport_state, cmd->se_cmd_flags,
		cmd->scsi_sense_length);

	memset(&rsp, 0, sizeof(rsp));
	rsp.scsi_status = cmd->scsi_status;
	rsp.sense_data = (u8 *)io->tgt_io.sense_buffer;
	rsp.sense_data_length = cmd->scsi_sense_length;

	/* Check for residual underrun or overrun, mark negative value for
	 * underrun to recognize in HW
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
		rsp.residual = -cmd->residual_count;
	else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
		rsp.residual = cmd->residual_count;

	rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL);
	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP);
	if (rc == 0)
		ocp->rsp_sent = true;
	return rc;
}
    699
    700static void efct_lio_queue_tm_rsp(struct se_cmd *cmd)
    701{
    702	struct efct_scsi_tgt_io *ocp =
    703		container_of(cmd, struct efct_scsi_tgt_io, cmd);
    704	struct efct_io *tmfio = container_of(ocp, struct efct_io, tgt_io);
    705	struct se_tmr_req *se_tmr = cmd->se_tmr_req;
    706	u8 rspcode;
    707
    708	efct_lio_tmfio_printf(tmfio, "cmd=%p function=0x%x tmr->response=%d\n",
    709			      cmd, se_tmr->function, se_tmr->response);
    710	switch (se_tmr->response) {
    711	case TMR_FUNCTION_COMPLETE:
    712		rspcode = EFCT_SCSI_TMF_FUNCTION_COMPLETE;
    713		break;
    714	case TMR_TASK_DOES_NOT_EXIST:
    715		rspcode = EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND;
    716		break;
    717	case TMR_LUN_DOES_NOT_EXIST:
    718		rspcode = EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER;
    719		break;
    720	case TMR_FUNCTION_REJECTED:
    721	default:
    722		rspcode = EFCT_SCSI_TMF_FUNCTION_REJECTED;
    723		break;
    724	}
    725	efct_scsi_send_tmf_resp(tmfio, rspcode, NULL, efct_lio_tmf_done, NULL);
    726}
    727
    728static struct efct *efct_find_wwpn(u64 wwpn)
    729{
    730	struct efct *efct;
    731
    732	 /* Search for the HBA that has this WWPN */
    733	list_for_each_entry(efct, &efct_devices, list_entry) {
    734
    735		if (wwpn == efct_get_wwpn(&efct->hw))
    736			return efct;
    737	}
    738
    739	return NULL;
    740}
    741
    742static struct se_wwn *
    743efct_lio_make_nport(struct target_fabric_configfs *tf,
    744		    struct config_group *group, const char *name)
    745{
    746	struct efct_lio_nport *lio_nport;
    747	struct efct *efct;
    748	int ret;
    749	u64 wwpn;
    750
    751	ret = efct_lio_parse_wwn(name, &wwpn, 0);
    752	if (ret)
    753		return ERR_PTR(ret);
    754
    755	efct = efct_find_wwpn(wwpn);
    756	if (!efct) {
    757		pr_err("cannot find EFCT for base wwpn %s\n", name);
    758		return ERR_PTR(-ENXIO);
    759	}
    760
    761	lio_nport = kzalloc(sizeof(*lio_nport), GFP_KERNEL);
    762	if (!lio_nport)
    763		return ERR_PTR(-ENOMEM);
    764
    765	lio_nport->efct = efct;
    766	lio_nport->wwpn = wwpn;
    767	efct_format_wwn(lio_nport->wwpn_str, sizeof(lio_nport->wwpn_str),
    768			"naa.", wwpn);
    769	efct->tgt_efct.lio_nport = lio_nport;
    770
    771	return &lio_nport->nport_wwn;
    772}
    773
/*
 * configfs fabric_make_wwn for the NPIV fabric. The name has the form
 * "<physical wwpn>@<npiv wwpn>:<npiv wwnn>". Parses the three WWNs, locates
 * the owning HBA, allocates the lio_vport bookkeeping, creates the
 * fc_vport, and links the vport into the HBA's vport list. Returns the
 * embedded se_wwn or an ERR_PTR on failure.
 */
static struct se_wwn *
efct_lio_npiv_make_nport(struct target_fabric_configfs *tf,
			 struct config_group *group, const char *name)
{
	struct efct_lio_vport *lio_vport;
	struct efct *efct;
	int ret;
	u64 p_wwpn, npiv_wwpn, npiv_wwnn;
	char *p, *pbuf, tmp[128];
	struct efct_lio_vport_list_t *vport_list;
	struct fc_vport *new_fc_vport;
	struct fc_vport_identifiers vport_id;
	unsigned long flags = 0;

	/* take a local copy since strsep() modifies its argument */
	snprintf(tmp, sizeof(tmp), "%s", name);
	pbuf = &tmp[0];

	p = strsep(&pbuf, "@");

	if (!p || !pbuf) {
		pr_err("Unable to find separator operator(@)\n");
		return ERR_PTR(-EINVAL);
	}

	ret = efct_lio_parse_wwn(p, &p_wwpn, 0);
	if (ret)
		return ERR_PTR(ret);

	ret = efct_lio_parse_npiv_wwn(pbuf, strlen(pbuf), &npiv_wwpn,
				      &npiv_wwnn);
	if (ret)
		return ERR_PTR(ret);

	efct = efct_find_wwpn(p_wwpn);
	if (!efct) {
		pr_err("cannot find EFCT for base wwpn %s\n", name);
		return ERR_PTR(-ENXIO);
	}

	lio_vport = kzalloc(sizeof(*lio_vport), GFP_KERNEL);
	if (!lio_vport)
		return ERR_PTR(-ENOMEM);

	lio_vport->efct = efct;
	lio_vport->wwpn = p_wwpn;
	lio_vport->npiv_wwpn = npiv_wwpn;
	lio_vport->npiv_wwnn = npiv_wwnn;

	efct_format_wwn(lio_vport->wwpn_str, sizeof(lio_vport->wwpn_str),
			"naa.", npiv_wwpn);

	vport_list = kzalloc(sizeof(*vport_list), GFP_KERNEL);
	if (!vport_list) {
		kfree(lio_vport);
		return ERR_PTR(-ENOMEM);
	}

	vport_list->lio_vport = lio_vport;

	/* describe the virtual port to the FC transport layer */
	memset(&vport_id, 0, sizeof(vport_id));
	vport_id.port_name = npiv_wwpn;
	vport_id.node_name = npiv_wwnn;
	vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
	vport_id.vport_type = FC_PORTTYPE_NPIV;
	vport_id.disable = false;

	new_fc_vport = fc_vport_create(efct->shost, 0, &vport_id);
	if (!new_fc_vport) {
		efc_log_err(efct, "fc_vport_create failed\n");
		kfree(lio_vport);
		kfree(vport_list);
		return ERR_PTR(-ENOMEM);
	}

	lio_vport->fc_vport = new_fc_vport;
	/* publish the vport on the HBA list under the lio lock */
	spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
	INIT_LIST_HEAD(&vport_list->list_entry);
	list_add_tail(&vport_list->list_entry, &efct->tgt_efct.vport_list);
	spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);

	return &lio_vport->vport_wwn;
}
    856
    857static void
    858efct_lio_drop_nport(struct se_wwn *wwn)
    859{
    860	struct efct_lio_nport *lio_nport =
    861		container_of(wwn, struct efct_lio_nport, nport_wwn);
    862	struct efct *efct = lio_nport->efct;
    863
    864	/* only physical nport should exist, free lio_nport allocated
    865	 * in efct_lio_make_nport.
    866	 */
    867	kfree(efct->tgt_efct.lio_nport);
    868	efct->tgt_efct.lio_nport = NULL;
    869}
    870
/*
 * configfs fabric_drop_wwn for the NPIV fabric: terminate the FC vport,
 * then unlink and free the matching entry (and the lio_vport itself) from
 * the HBA's vport list under the lio lock.
 */
static void
efct_lio_npiv_drop_nport(struct se_wwn *wwn)
{
	struct efct_lio_vport *lio_vport =
		container_of(wwn, struct efct_lio_vport, vport_wwn);
	struct efct_lio_vport_list_t *vport, *next_vport;
	struct efct *efct = lio_vport->efct;
	unsigned long flags = 0;

	/* tear down the transport-layer vport before touching the list */
	if (lio_vport->fc_vport)
		fc_vport_terminate(lio_vport->fc_vport);

	spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);

	list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list,
				 list_entry) {
		if (vport->lio_vport == lio_vport) {
			list_del(&vport->list_entry);
			kfree(vport->lio_vport);
			kfree(vport);
			break;
		}
	}
	spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
}
    896
    897static struct se_portal_group *
    898efct_lio_make_tpg(struct se_wwn *wwn, const char *name)
    899{
    900	struct efct_lio_nport *lio_nport =
    901		container_of(wwn, struct efct_lio_nport, nport_wwn);
    902	struct efct_lio_tpg *tpg;
    903	struct efct *efct;
    904	unsigned long n;
    905	int ret;
    906
    907	if (strstr(name, "tpgt_") != name)
    908		return ERR_PTR(-EINVAL);
    909	if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX)
    910		return ERR_PTR(-EINVAL);
    911
    912	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
    913	if (!tpg)
    914		return ERR_PTR(-ENOMEM);
    915
    916	tpg->nport = lio_nport;
    917	tpg->tpgt = n;
    918	tpg->enabled = false;
    919
    920	tpg->tpg_attrib.generate_node_acls = 1;
    921	tpg->tpg_attrib.demo_mode_write_protect = 1;
    922	tpg->tpg_attrib.cache_dynamic_acls = 1;
    923	tpg->tpg_attrib.demo_mode_login_only = 1;
    924	tpg->tpg_attrib.session_deletion_wait = 1;
    925
    926	ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP);
    927	if (ret < 0) {
    928		kfree(tpg);
    929		return NULL;
    930	}
    931	efct = lio_nport->efct;
    932	efct->tgt_efct.tpg = tpg;
    933	efc_log_debug(efct, "create portal group %d\n", tpg->tpgt);
    934
    935	xa_init(&efct->lookup);
    936	return &tpg->tpg;
    937}
    938
    939static void
    940efct_lio_drop_tpg(struct se_portal_group *se_tpg)
    941{
    942	struct efct_lio_tpg *tpg =
    943		container_of(se_tpg, struct efct_lio_tpg, tpg);
    944
    945	struct efct *efct = tpg->nport->efct;
    946
    947	efc_log_debug(efct, "drop portal group %d\n", tpg->tpgt);
    948	tpg->nport->efct->tgt_efct.tpg = NULL;
    949	core_tpg_deregister(se_tpg);
    950	xa_destroy(&efct->lookup);
    951	kfree(tpg);
    952}
    953
    954static struct se_portal_group *
    955efct_lio_npiv_make_tpg(struct se_wwn *wwn, const char *name)
    956{
    957	struct efct_lio_vport *lio_vport =
    958		container_of(wwn, struct efct_lio_vport, vport_wwn);
    959	struct efct_lio_tpg *tpg;
    960	struct efct *efct;
    961	unsigned long n;
    962	int ret;
    963
    964	efct = lio_vport->efct;
    965	if (strstr(name, "tpgt_") != name)
    966		return ERR_PTR(-EINVAL);
    967	if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX)
    968		return ERR_PTR(-EINVAL);
    969
    970	if (n != 1) {
    971		efc_log_err(efct, "Invalid tpgt index: %ld provided\n", n);
    972		return ERR_PTR(-EINVAL);
    973	}
    974
    975	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
    976	if (!tpg)
    977		return ERR_PTR(-ENOMEM);
    978
    979	tpg->vport = lio_vport;
    980	tpg->tpgt = n;
    981	tpg->enabled = false;
    982
    983	tpg->tpg_attrib.generate_node_acls = 1;
    984	tpg->tpg_attrib.demo_mode_write_protect = 1;
    985	tpg->tpg_attrib.cache_dynamic_acls = 1;
    986	tpg->tpg_attrib.demo_mode_login_only = 1;
    987	tpg->tpg_attrib.session_deletion_wait = 1;
    988
    989	ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP);
    990
    991	if (ret < 0) {
    992		kfree(tpg);
    993		return NULL;
    994	}
    995	lio_vport->tpg = tpg;
    996	efc_log_debug(efct, "create vport portal group %d\n", tpg->tpgt);
    997
    998	return &tpg->tpg;
    999}
   1000
   1001static void
   1002efct_lio_npiv_drop_tpg(struct se_portal_group *se_tpg)
   1003{
   1004	struct efct_lio_tpg *tpg =
   1005		container_of(se_tpg, struct efct_lio_tpg, tpg);
   1006
   1007	efc_log_debug(tpg->vport->efct, "drop npiv portal group %d\n",
   1008		       tpg->tpgt);
   1009	core_tpg_deregister(se_tpg);
   1010	kfree(tpg);
   1011}
   1012
   1013static int
   1014efct_lio_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
   1015{
   1016	struct efct_lio_nacl *nacl;
   1017	u64 wwnn;
   1018
   1019	if (efct_lio_parse_wwn(name, &wwnn, 0) < 0)
   1020		return -EINVAL;
   1021
   1022	nacl = container_of(se_nacl, struct efct_lio_nacl, se_node_acl);
   1023	nacl->nport_wwnn = wwnn;
   1024
   1025	efct_format_wwn(nacl->nport_name, sizeof(nacl->nport_name), "", wwnn);
   1026	return 0;
   1027}
   1028
   1029static int efct_lio_check_demo_mode_login_only(struct se_portal_group *stpg)
   1030{
   1031	struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg);
   1032
   1033	return tpg->tpg_attrib.demo_mode_login_only;
   1034}
   1035
   1036static int
   1037efct_lio_npiv_check_demo_mode_login_only(struct se_portal_group *stpg)
   1038{
   1039	struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg);
   1040
   1041	return tpg->tpg_attrib.demo_mode_login_only;
   1042}
   1043
   1044static struct efct_lio_tpg *
   1045efct_get_vport_tpg(struct efc_node *node)
   1046{
   1047	struct efct *efct;
   1048	u64 wwpn = node->nport->wwpn;
   1049	struct efct_lio_vport_list_t *vport, *next;
   1050	struct efct_lio_vport *lio_vport = NULL;
   1051	struct efct_lio_tpg *tpg = NULL;
   1052	unsigned long flags = 0;
   1053
   1054	efct = node->efc->base;
   1055	spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
   1056	list_for_each_entry_safe(vport, next, &efct->tgt_efct.vport_list,
   1057				 list_entry) {
   1058		lio_vport = vport->lio_vport;
   1059		if (wwpn && lio_vport && lio_vport->npiv_wwpn == wwpn) {
   1060			efc_log_debug(efct, "found tpg on vport\n");
   1061			tpg = lio_vport->tpg;
   1062			break;
   1063		}
   1064	}
   1065	spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
   1066	return tpg;
   1067}
   1068
   1069static void
   1070_efct_tgt_node_free(struct kref *arg)
   1071{
   1072	struct efct_node *tgt_node = container_of(arg, struct efct_node, ref);
   1073	struct efc_node *node = tgt_node->node;
   1074
   1075	efc_scsi_del_initiator_complete(node->efc, node);
   1076	kfree(tgt_node);
   1077}
   1078
   1079static int efct_session_cb(struct se_portal_group *se_tpg,
   1080			   struct se_session *se_sess, void *private)
   1081{
   1082	struct efc_node *node = private;
   1083	struct efct_node *tgt_node;
   1084	struct efct *efct = node->efc->base;
   1085
   1086	tgt_node = kzalloc(sizeof(*tgt_node), GFP_KERNEL);
   1087	if (!tgt_node)
   1088		return -ENOMEM;
   1089
   1090	kref_init(&tgt_node->ref);
   1091	tgt_node->release = _efct_tgt_node_free;
   1092
   1093	tgt_node->session = se_sess;
   1094	node->tgt_node = tgt_node;
   1095	tgt_node->efct = efct;
   1096
   1097	tgt_node->node = node;
   1098
   1099	tgt_node->node_fc_id = node->rnode.fc_id;
   1100	tgt_node->port_fc_id = node->nport->fc_id;
   1101	tgt_node->vpi = node->nport->indicator;
   1102	tgt_node->rpi = node->rnode.indicator;
   1103
   1104	spin_lock_init(&tgt_node->active_ios_lock);
   1105	INIT_LIST_HEAD(&tgt_node->active_ios);
   1106
   1107	return 0;
   1108}
   1109
   1110int efct_scsi_tgt_new_device(struct efct *efct)
   1111{
   1112	u32 total_ios;
   1113
   1114	/* Get the max settings */
   1115	efct->tgt_efct.max_sge = sli_get_max_sge(&efct->hw.sli);
   1116	efct->tgt_efct.max_sgl = sli_get_max_sgl(&efct->hw.sli);
   1117
   1118	/* initialize IO watermark fields */
   1119	atomic_set(&efct->tgt_efct.ios_in_use, 0);
   1120	total_ios = efct->hw.config.n_io;
   1121	efc_log_debug(efct, "total_ios=%d\n", total_ios);
   1122	efct->tgt_efct.watermark_min =
   1123			(total_ios * EFCT_WATERMARK_LOW_PCT) / 100;
   1124	efct->tgt_efct.watermark_max =
   1125			(total_ios * EFCT_WATERMARK_HIGH_PCT) / 100;
   1126	atomic_set(&efct->tgt_efct.io_high_watermark,
   1127		   efct->tgt_efct.watermark_max);
   1128	atomic_set(&efct->tgt_efct.watermark_hit, 0);
   1129	atomic_set(&efct->tgt_efct.initiator_count, 0);
   1130
   1131	lio_wq = create_singlethread_workqueue("efct_lio_worker");
   1132	if (!lio_wq) {
   1133		efc_log_err(efct, "workqueue create failed\n");
   1134		return -EIO;
   1135	}
   1136
   1137	spin_lock_init(&efct->tgt_efct.efct_lio_lock);
   1138	INIT_LIST_HEAD(&efct->tgt_efct.vport_list);
   1139
   1140	return 0;
   1141}
   1142
/*
 * Per-adapter target teardown: drain any queued session setup/remove
 * work so no worker touches the departing adapter.
 * NOTE(review): lio_wq is only flushed here, never
 * destroy_workqueue()'d anywhere visible — looks like the workqueue
 * is leaked on final teardown; confirm against the rest of the driver.
 */
int efct_scsi_tgt_del_device(struct efct *efct)
{
	flush_workqueue(lio_wq);

	return 0;
}
   1149
/*
 * Backend notification of a new physical nport; nothing to set up
 * here beyond logging the binding to the LIO WWPN.
 */
int
efct_scsi_tgt_new_nport(struct efc *efc, struct efc_nport *nport)
{
	struct efct *efct = nport->efc->base;

	efc_log_debug(efct, "New SPORT: %s bound to %s\n", nport->display_name,
		       efct->tgt_efct.lio_nport->wwpn_str);

	return 0;
}
   1160
/*
 * Backend notification that an nport is being removed; logging only,
 * the TPG/session cleanup happens through the configfs/drop paths.
 */
void
efct_scsi_tgt_del_nport(struct efc *efc, struct efc_nport *nport)
{
	efc_log_debug(efc, "Del SPORT: %s\n", nport->display_name);
}
   1166
/*
 * Workqueue handler (runs on the single-threaded lio_wq) that
 * registers a LIO session for a newly arrived initiator node: pick
 * the owning portal group (vport first, then physical port), create
 * the se_session, publish the node in efct->lookup keyed by the
 * combined port/node FC IDs, complete the registration back to libefc
 * and recompute the IO high watermark for the new initiator count.
 */
static void efct_lio_setup_session(struct work_struct *work)
{
	struct efct_lio_wq_data *wq_data =
		container_of(work, struct efct_lio_wq_data, work);
	struct efct *efct = wq_data->efct;
	struct efc_node *node = wq_data->ptr;
	char wwpn[WWN_NAME_LEN];
	struct efct_lio_tpg *tpg;
	struct efct_node *tgt_node;
	struct se_portal_group *se_tpg;
	struct se_session *se_sess;
	int watermark;
	int ini_count;
	u64 id;

	/* Check to see if it's belongs to vport,
	 * if not get physical port
	 */
	tpg = efct_get_vport_tpg(node);
	if (tpg) {
		se_tpg = &tpg->tpg;
	} else if (efct->tgt_efct.tpg) {
		tpg = efct->tgt_efct.tpg;
		se_tpg = &tpg->tpg;
	} else {
		/* no TPG configured yet; drop the request */
		efc_log_err(efct, "failed to init session\n");
		return;
	}

	/*
	 * Format the FCP Initiator port_name into colon
	 * separated values to match the format by our explicit
	 * ConfigFS NodeACLs.
	 */
	efct_format_wwn(wwpn, sizeof(wwpn), "",	efc_node_get_wwpn(node));

	/* efct_session_cb() attaches the efct_node to 'node' on success */
	se_sess = target_setup_session(se_tpg, 0, 0, TARGET_PROT_NORMAL, wwpn,
				       node, efct_session_cb);
	if (IS_ERR(se_sess)) {
		efc_log_err(efct, "failed to setup session\n");
		kfree(wq_data);
		efc_scsi_sess_reg_complete(node, -EIO);
		return;
	}

	tgt_node = node->tgt_node;
	/* lookup key: port FC ID in the high word, node FC ID in the low */
	id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id;

	efc_log_debug(efct, "new initiator sess=%p node=%p id: %llx\n",
		      se_sess, node, id);

	if (xa_err(xa_store(&efct->lookup, id, tgt_node, GFP_KERNEL)))
		efc_log_err(efct, "Node lookup store failed\n");

	efc_scsi_sess_reg_complete(node, 0);

	/* update IO watermark: increment initiator count */
	ini_count = atomic_add_return(1, &efct->tgt_efct.initiator_count);
	watermark = efct->tgt_efct.watermark_max -
		    ini_count * EFCT_IO_WATERMARK_PER_INITIATOR;
	watermark = (efct->tgt_efct.watermark_min > watermark) ?
			efct->tgt_efct.watermark_min : watermark;
	atomic_set(&efct->tgt_efct.io_high_watermark, watermark);

	kfree(wq_data);
}
   1233
   1234int efct_scsi_new_initiator(struct efc *efc, struct efc_node *node)
   1235{
   1236	struct efct *efct = node->efc->base;
   1237	struct efct_lio_wq_data *wq_data;
   1238
   1239	/*
   1240	 * Since LIO only supports initiator validation at thread level,
   1241	 * we are open minded and accept all callers.
   1242	 */
   1243	wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
   1244	if (!wq_data)
   1245		return -ENOMEM;
   1246
   1247	wq_data->ptr = node;
   1248	wq_data->efct = efct;
   1249	INIT_WORK(&wq_data->work, efct_lio_setup_session);
   1250	queue_work(lio_wq, &wq_data->work);
   1251	return EFC_SCSI_CALL_ASYNC;
   1252}
   1253
   1254static void efct_lio_remove_session(struct work_struct *work)
   1255{
   1256	struct efct_lio_wq_data *wq_data =
   1257		container_of(work, struct efct_lio_wq_data, work);
   1258	struct efct *efct = wq_data->efct;
   1259	struct efc_node *node = wq_data->ptr;
   1260	struct efct_node *tgt_node;
   1261	struct se_session *se_sess;
   1262
   1263	tgt_node = node->tgt_node;
   1264	if (!tgt_node) {
   1265		/* base driver has sent back-to-back requests
   1266		 * to unreg session with no intervening
   1267		 * register
   1268		 */
   1269		efc_log_err(efct, "unreg session for NULL session\n");
   1270		efc_scsi_del_initiator_complete(node->efc, node);
   1271		return;
   1272	}
   1273
   1274	se_sess = tgt_node->session;
   1275	efc_log_debug(efct, "unreg session se_sess=%p node=%p\n",
   1276		       se_sess, node);
   1277
   1278	/* first flag all session commands to complete */
   1279	target_stop_session(se_sess);
   1280
   1281	/* now wait for session commands to complete */
   1282	target_wait_for_sess_cmds(se_sess);
   1283	target_remove_session(se_sess);
   1284	tgt_node->session = NULL;
   1285	node->tgt_node = NULL;
   1286	kref_put(&tgt_node->ref, tgt_node->release);
   1287
   1288	kfree(wq_data);
   1289}
   1290
/*
 * libefc callback for a departing initiator. Removes the node from
 * the FC-ID lookup table immediately, queues the session teardown on
 * lio_wq and lowers the IO high watermark for the reduced initiator
 * count. Returns EFC_SCSI_CALL_ASYNC when teardown was queued,
 * EFC_SCSI_CALL_COMPLETE when there is nothing to do.
 */
int efct_scsi_del_initiator(struct efc *efc, struct efc_node *node, int reason)
{
	struct efct *efct = node->efc->base;
	struct efct_node *tgt_node = node->tgt_node;
	struct efct_lio_wq_data *wq_data;
	int watermark;
	int ini_count;
	u64 id;

	/* a merely-missing initiator has no session to tear down */
	if (reason == EFCT_SCSI_INITIATOR_MISSING)
		return EFC_SCSI_CALL_COMPLETE;

	if (!tgt_node) {
		efc_log_err(efct, "tgt_node is NULL\n");
		return -EIO;
	}

	/* may be invoked from atomic context */
	wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
	if (!wq_data)
		return -ENOMEM;

	/* unpublish from the lookup table before teardown is queued */
	id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id;
	xa_erase(&efct->lookup, id);

	wq_data->ptr = node;
	wq_data->efct = efct;
	INIT_WORK(&wq_data->work, efct_lio_remove_session);
	queue_work(lio_wq, &wq_data->work);

	/*
	 * update IO watermark: decrement initiator count
	 */
	ini_count = atomic_sub_return(1, &efct->tgt_efct.initiator_count);

	watermark = efct->tgt_efct.watermark_max -
		    ini_count * EFCT_IO_WATERMARK_PER_INITIATOR;
	watermark = (efct->tgt_efct.watermark_min > watermark) ?
			efct->tgt_efct.watermark_min : watermark;
	atomic_set(&efct->tgt_efct.io_high_watermark, watermark);

	return EFC_SCSI_CALL_ASYNC;
}
   1333
/*
 * Receive path for a new SCSI command from the hardware. Translates
 * the FC task attributes and data direction into TCM terms and
 * submits the command to the target core on the node's se_session.
 * On any failure the IO is freed and the command silently dropped.
 */
void efct_scsi_recv_cmd(struct efct_io *io, uint64_t lun, u8 *cdb,
		       u32 cdb_len, u32 flags)
{
	struct efct_scsi_tgt_io *ocp = &io->tgt_io;
	struct se_cmd *se_cmd = &io->tgt_io.cmd;
	struct efct *efct = io->efct;
	char *ddir;
	struct efct_node *tgt_node;
	struct se_session *se_sess;
	int rc = 0;

	memset(ocp, 0, sizeof(struct efct_scsi_tgt_io));
	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RECV_CMD);
	atomic_add_return(1, &efct->tgt_efct.ios_in_use);

	/* set target timeout */
	io->timeout = efct->target_io_timer_sec;

	/* map FC task attribute flags onto TCM task tags */
	if (flags & EFCT_SCSI_CMD_SIMPLE)
		ocp->task_attr = TCM_SIMPLE_TAG;
	else if (flags & EFCT_SCSI_CMD_HEAD_OF_QUEUE)
		ocp->task_attr = TCM_HEAD_TAG;
	else if (flags & EFCT_SCSI_CMD_ORDERED)
		ocp->task_attr = TCM_ORDERED_TAG;
	else if (flags & EFCT_SCSI_CMD_ACA)
		ocp->task_attr = TCM_ACA_TAG;

	/* DMA direction is expressed from the target's point of view */
	switch (flags & (EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT)) {
	case EFCT_SCSI_CMD_DIR_IN:
		ddir = "FROM_INITIATOR";
		ocp->ddir = DMA_TO_DEVICE;
		break;
	case EFCT_SCSI_CMD_DIR_OUT:
		ddir = "TO_INITIATOR";
		ocp->ddir = DMA_FROM_DEVICE;
		break;
	case EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT:
		ddir = "BIDIR";
		ocp->ddir = DMA_BIDIRECTIONAL;
		break;
	default:
		ddir = "NONE";
		ocp->ddir = DMA_NONE;
		break;
	}

	ocp->lun = lun;
	efct_lio_io_printf(io, "new cmd=0x%x ddir=%s dl=%u\n",
			   cdb[0], ddir, io->exp_xfer_len);

	tgt_node = io->node;
	se_sess = tgt_node->session;
	if (!se_sess) {
		efc_log_err(efct, "No session found to submit IO se_cmd: %p\n",
			    &ocp->cmd);
		efct_scsi_io_free(io);
		return;
	}

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_SUBMIT_CMD);
	/* TARGET_SCF_ACK_KREF: core holds a reference until ack */
	rc = target_init_cmd(se_cmd, se_sess, &io->tgt_io.sense_buffer[0],
			     ocp->lun, io->exp_xfer_len, ocp->task_attr,
			     ocp->ddir, TARGET_SCF_ACK_KREF);
	if (rc) {
		efc_log_err(efct, "failed to init cmd se_cmd: %p\n", se_cmd);
		efct_scsi_io_free(io);
		return;
	}

	/* on prep failure the core owns cleanup; nothing more to do here */
	if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0,
				NULL, 0, GFP_ATOMIC))
		return;

	target_submit(se_cmd);
}
   1409
/*
 * Receive path for a task management function. Maps the driver TMF
 * code onto a TCM TMR function and submits it on the node's session;
 * unsupported TMFs (and submit failures) are answered with
 * FUNCTION REJECTED. Always returns 0.
 */
int
efct_scsi_recv_tmf(struct efct_io *tmfio, u32 lun, enum efct_scsi_tmf_cmd cmd,
		   struct efct_io *io_to_abort, u32 flags)
{
	unsigned char tmr_func;
	struct efct *efct = tmfio->efct;
	struct efct_scsi_tgt_io *ocp = &tmfio->tgt_io;
	struct efct_node *tgt_node;
	struct se_session *se_sess;
	int rc;

	memset(ocp, 0, sizeof(struct efct_scsi_tgt_io));
	efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_SCSI_RECV_TMF);
	atomic_add_return(1, &efct->tgt_efct.ios_in_use);
	efct_lio_tmfio_printf(tmfio, "%s: new tmf %x lun=%u\n",
			      tmfio->display_name, cmd, lun);

	/* translate the driver TMF code into a TCM TMR function */
	switch (cmd) {
	case EFCT_SCSI_TMF_ABORT_TASK:
		tmr_func = TMR_ABORT_TASK;
		break;
	case EFCT_SCSI_TMF_ABORT_TASK_SET:
		tmr_func = TMR_ABORT_TASK_SET;
		break;
	case EFCT_SCSI_TMF_CLEAR_TASK_SET:
		tmr_func = TMR_CLEAR_TASK_SET;
		break;
	case EFCT_SCSI_TMF_LOGICAL_UNIT_RESET:
		tmr_func = TMR_LUN_RESET;
		break;
	case EFCT_SCSI_TMF_CLEAR_ACA:
		tmr_func = TMR_CLEAR_ACA;
		break;
	case EFCT_SCSI_TMF_TARGET_RESET:
		tmr_func = TMR_TARGET_WARM_RESET;
		break;
	case EFCT_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT:
	case EFCT_SCSI_TMF_QUERY_TASK_SET:
	default:
		/* unsupported TMF: reject it */
		goto tmf_fail;
	}

	tmfio->tgt_io.tmf = tmr_func;
	tmfio->tgt_io.lun = lun;
	tmfio->tgt_io.io_to_abort = io_to_abort;

	tgt_node = tmfio->node;

	se_sess = tgt_node->session;
	/* NOTE(review): the no-session path returns without freeing or
	 * rejecting tmfio — confirm the caller reclaims the IO here
	 */
	if (!se_sess)
		return 0;

	rc = target_submit_tmr(&ocp->cmd, se_sess, NULL, lun, ocp, tmr_func,
			GFP_ATOMIC, tmfio->init_task_tag, TARGET_SCF_ACK_KREF);

	efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_TGT_SUBMIT_TMR);
	if (rc)
		goto tmf_fail;

	return 0;

tmf_fail:
	efct_scsi_send_tmf_resp(tmfio, EFCT_SCSI_TMF_FUNCTION_REJECTED,
				NULL, efct_lio_null_tmf_done, NULL);
	return 0;
}
   1476
/* Start items for efct_lio_tpg_attrib_cit */

/*
 * Generate a configfs show/store pair for one boolean field of
 * struct efct_lio_tpg_attrib on the physical-port TPG. The store
 * side accepts only 0 or 1 and rejects anything else with -EINVAL.
 */
#define DEF_EFCT_TPG_ATTRIB(name)					  \
									  \
static ssize_t efct_lio_tpg_attrib_##name##_show(			  \
		struct config_item *item, char *page)			  \
{									  \
	struct se_portal_group *se_tpg = to_tpg(item);			  \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			  \
			struct efct_lio_tpg, tpg);			  \
									  \
	return sprintf(page, "%u\n", tpg->tpg_attrib.name);		  \
}									  \
									  \
static ssize_t efct_lio_tpg_attrib_##name##_store(			  \
		struct config_item *item, const char *page, size_t count) \
{									  \
	struct se_portal_group *se_tpg = to_tpg(item);			  \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			  \
					struct efct_lio_tpg, tpg);	  \
	struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib;		  \
	unsigned long val;						  \
	int ret;							  \
									  \
	ret = kstrtoul(page, 0, &val);					  \
	if (ret < 0) {							  \
		pr_err("kstrtoul() failed with ret: %d\n", ret);	  \
		return ret;						  \
	}								  \
									  \
	if (val != 0 && val != 1) {					  \
		pr_err("Illegal boolean value %lu\n", val);		  \
		return -EINVAL;						  \
	}								  \
									  \
	a->name = val;							  \
									  \
	return count;							  \
}									  \
CONFIGFS_ATTR(efct_lio_tpg_, name)

DEF_EFCT_TPG_ATTRIB(generate_node_acls);
DEF_EFCT_TPG_ATTRIB(cache_dynamic_acls);
DEF_EFCT_TPG_ATTRIB(demo_mode_write_protect);
DEF_EFCT_TPG_ATTRIB(prod_mode_write_protect);
DEF_EFCT_TPG_ATTRIB(demo_mode_login_only);
DEF_EFCT_TPG_ATTRIB(session_deletion_wait);

/* attributes exposed under the physical-port TPG "attrib" group */
static struct configfs_attribute *efct_lio_tpg_attrib_attrs[] = {
	&efct_lio_tpg_attrib_attr_generate_node_acls,
	&efct_lio_tpg_attrib_attr_cache_dynamic_acls,
	&efct_lio_tpg_attrib_attr_demo_mode_write_protect,
	&efct_lio_tpg_attrib_attr_prod_mode_write_protect,
	&efct_lio_tpg_attrib_attr_demo_mode_login_only,
	&efct_lio_tpg_attrib_attr_session_deletion_wait,
	NULL,
};
   1534
/*
 * NPIV counterpart of DEF_EFCT_TPG_ATTRIB: generate a configfs
 * show/store pair for one boolean efct_lio_tpg_attrib field; only
 * values 0 and 1 are accepted by the store side.
 */
#define DEF_EFCT_NPIV_TPG_ATTRIB(name)					   \
									   \
static ssize_t efct_lio_npiv_tpg_attrib_##name##_show(			   \
		struct config_item *item, char *page)			   \
{									   \
	struct se_portal_group *se_tpg = to_tpg(item);			   \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			   \
			struct efct_lio_tpg, tpg);			   \
									   \
	return sprintf(page, "%u\n", tpg->tpg_attrib.name);		   \
}									   \
									   \
static ssize_t efct_lio_npiv_tpg_attrib_##name##_store(			   \
		struct config_item *item, const char *page, size_t count)  \
{									   \
	struct se_portal_group *se_tpg = to_tpg(item);			   \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			   \
			struct efct_lio_tpg, tpg);			   \
	struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib;		   \
	unsigned long val;						   \
	int ret;							   \
									   \
	ret = kstrtoul(page, 0, &val);					   \
	if (ret < 0) {							   \
		pr_err("kstrtoul() failed with ret: %d\n", ret);	   \
		return ret;						   \
	}								   \
									   \
	if (val != 0 && val != 1) {					   \
		pr_err("Illegal boolean value %lu\n", val);		   \
		return -EINVAL;						   \
	}								   \
									   \
	a->name = val;							   \
									   \
	return count;							   \
}									   \
CONFIGFS_ATTR(efct_lio_npiv_tpg_attrib_, name)

DEF_EFCT_NPIV_TPG_ATTRIB(generate_node_acls);
DEF_EFCT_NPIV_TPG_ATTRIB(cache_dynamic_acls);
DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_write_protect);
DEF_EFCT_NPIV_TPG_ATTRIB(prod_mode_write_protect);
DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_login_only);
DEF_EFCT_NPIV_TPG_ATTRIB(session_deletion_wait);

/* attributes exposed under the NPIV TPG "attrib" group */
static struct configfs_attribute *efct_lio_npiv_tpg_attrib_attrs[] = {
	&efct_lio_npiv_tpg_attrib_attr_generate_node_acls,
	&efct_lio_npiv_tpg_attrib_attr_cache_dynamic_acls,
	&efct_lio_npiv_tpg_attrib_attr_demo_mode_write_protect,
	&efct_lio_npiv_tpg_attrib_attr_prod_mode_write_protect,
	&efct_lio_npiv_tpg_attrib_attr_demo_mode_login_only,
	&efct_lio_npiv_tpg_attrib_attr_session_deletion_wait,
	NULL,
};
   1590
/* per-TPG "enable" attribute; show/store are defined earlier in
 * this file for both the physical-port and NPIV fabrics
 */
CONFIGFS_ATTR(efct_lio_tpg_, enable);
static struct configfs_attribute *efct_lio_tpg_attrs[] = {
				&efct_lio_tpg_attr_enable, NULL };
CONFIGFS_ATTR(efct_lio_npiv_tpg_, enable);
static struct configfs_attribute *efct_lio_npiv_tpg_attrs[] = {
				&efct_lio_npiv_tpg_attr_enable, NULL };
   1597
/* TCM fabric template for the physical port ("efct" fabric) */
static const struct target_core_fabric_ops efct_lio_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "efct",
	.node_acl_size			= sizeof(struct efct_lio_nacl),
	.max_data_sg_nents		= 65535,
	.tpg_get_wwn			= efct_lio_get_fabric_wwn,
	.tpg_get_tag			= efct_lio_get_tag,
	.fabric_init_nodeacl		= efct_lio_init_nodeacl,
	.tpg_check_demo_mode		= efct_lio_check_demo_mode,
	.tpg_check_demo_mode_cache      = efct_lio_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect = efct_lio_check_demo_write_protect,
	.tpg_check_prod_mode_write_protect = efct_lio_check_prod_write_protect,
	.tpg_get_inst_index		= efct_lio_tpg_get_inst_index,
	.check_stop_free		= efct_lio_check_stop_free,
	.aborted_task			= efct_lio_aborted_task,
	.release_cmd			= efct_lio_release_cmd,
	.close_session			= efct_lio_close_session,
	.sess_get_index			= efct_lio_sess_get_index,
	.write_pending			= efct_lio_write_pending,
	.set_default_node_attributes	= efct_lio_set_default_node_attrs,
	.get_cmd_state			= efct_lio_get_cmd_state,
	.queue_data_in			= efct_lio_queue_data_in,
	.queue_status			= efct_lio_queue_status,
	.queue_tm_rsp			= efct_lio_queue_tm_rsp,
	.fabric_make_wwn		= efct_lio_make_nport,
	.fabric_drop_wwn		= efct_lio_drop_nport,
	.fabric_make_tpg		= efct_lio_make_tpg,
	.fabric_drop_tpg		= efct_lio_drop_tpg,
	.tpg_check_demo_mode_login_only = efct_lio_check_demo_mode_login_only,
	.tpg_check_prot_fabric_only	= NULL,
	.sess_get_initiator_sid		= NULL,
	.tfc_tpg_base_attrs		= efct_lio_tpg_attrs,
	.tfc_tpg_attrib_attrs           = efct_lio_tpg_attrib_attrs,
};
   1632
/* TCM fabric template for NPIV virtual ports ("efct_npiv" fabric) */
static const struct target_core_fabric_ops efct_lio_npiv_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "efct_npiv",
	.node_acl_size			= sizeof(struct efct_lio_nacl),
	.max_data_sg_nents		= 65535,
	.tpg_get_wwn			= efct_lio_get_npiv_fabric_wwn,
	.tpg_get_tag			= efct_lio_get_npiv_tag,
	.fabric_init_nodeacl		= efct_lio_init_nodeacl,
	.tpg_check_demo_mode		= efct_lio_check_demo_mode,
	.tpg_check_demo_mode_cache      = efct_lio_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
					efct_lio_npiv_check_demo_write_protect,
	.tpg_check_prod_mode_write_protect =
					efct_lio_npiv_check_prod_write_protect,
	.tpg_get_inst_index		= efct_lio_tpg_get_inst_index,
	.check_stop_free		= efct_lio_check_stop_free,
	.aborted_task			= efct_lio_aborted_task,
	.release_cmd			= efct_lio_release_cmd,
	.close_session			= efct_lio_close_session,
	.sess_get_index			= efct_lio_sess_get_index,
	.write_pending			= efct_lio_write_pending,
	.set_default_node_attributes	= efct_lio_set_default_node_attrs,
	.get_cmd_state			= efct_lio_get_cmd_state,
	.queue_data_in			= efct_lio_queue_data_in,
	.queue_status			= efct_lio_queue_status,
	.queue_tm_rsp			= efct_lio_queue_tm_rsp,
	.fabric_make_wwn		= efct_lio_npiv_make_nport,
	.fabric_drop_wwn		= efct_lio_npiv_drop_nport,
	.fabric_make_tpg		= efct_lio_npiv_make_tpg,
	.fabric_drop_tpg		= efct_lio_npiv_drop_tpg,
	.tpg_check_demo_mode_login_only =
				efct_lio_npiv_check_demo_mode_login_only,
	.tpg_check_prot_fabric_only	= NULL,
	.sess_get_initiator_sid		= NULL,
	.tfc_tpg_base_attrs		= efct_lio_npiv_tpg_attrs,
	.tfc_tpg_attrib_attrs		= efct_lio_npiv_tpg_attrib_attrs,
};
   1670
   1671int efct_scsi_tgt_driver_init(void)
   1672{
   1673	int rc;
   1674
   1675	/* Register the top level struct config_item_type with TCM core */
   1676	rc = target_register_template(&efct_lio_ops);
   1677	if (rc < 0) {
   1678		pr_err("target_fabric_configfs_register failed with %d\n", rc);
   1679		return rc;
   1680	}
   1681	rc = target_register_template(&efct_lio_npiv_ops);
   1682	if (rc < 0) {
   1683		pr_err("target_fabric_configfs_register failed with %d\n", rc);
   1684		target_unregister_template(&efct_lio_ops);
   1685		return rc;
   1686	}
   1687	return 0;
   1688}
   1689
/* Module unload: unregister both fabric templates from the TCM core. */
int efct_scsi_tgt_driver_exit(void)
{
	target_unregister_template(&efct_lio_ops);
	target_unregister_template(&efct_lio_npiv_ops);
	return 0;
}