cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

csio_scsi.c (68959B)


/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <asm/page.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
#include "csio_scsi.h"
#include "csio_init.h"

int csio_scsi_eqsize = 65536;
int csio_scsi_iqlen = 128;
int csio_scsi_ioreqs = 2048;
uint32_t csio_max_scan_tmo;
uint32_t csio_delta_scan_tmo = 5;
int csio_lun_qdepth = 32;

static int csio_ddp_descs = 128;

static int csio_do_abrt_cls(struct csio_hw *,
			    struct csio_ioreq *, bool);

static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev);

/*
 * csio_scsi_match_io - Match an ioreq with the given SCSI level data.
 * @ioreq: The I/O request
 * @sld: Level information
 *
 * Should be called with lock held.
 */
static bool
csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld)
{
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq);

	switch (sld->level) {
	case CSIO_LEV_LUN:
		if (scmnd == NULL)
			return false;

		return ((ioreq->lnode == sld->lnode) &&
			(ioreq->rnode == sld->rnode) &&
			((uint64_t)scmnd->device->lun == sld->oslun));

	case CSIO_LEV_RNODE:
		return ((ioreq->lnode == sld->lnode) &&
				(ioreq->rnode == sld->rnode));
	case CSIO_LEV_LNODE:
		return (ioreq->lnode == sld->lnode);
	case CSIO_LEV_ALL:
		return true;
	default:
		return false;
	}
}

/*
 * csio_scsi_gather_active_ios - Gather active I/Os based on level
 * @scm: SCSI module
 * @sld: Level information
 * @dest: The queue where these I/Os have to be gathered.
 *
 * Should be called with lock held.
 */
static void
csio_scsi_gather_active_ios(struct csio_scsim *scm,
			    struct csio_scsi_level_data *sld,
			    struct list_head *dest)
{
	struct list_head *tmp, *next;

	if (list_empty(&scm->active_q))
		return;

	/* Just splice the entire active_q into dest */
	if (sld->level == CSIO_LEV_ALL) {
		list_splice_tail_init(&scm->active_q, dest);
		return;
	}

	list_for_each_safe(tmp, next, &scm->active_q) {
		if (csio_scsi_match_io((struct csio_ioreq *)tmp, sld)) {
			list_del_init(tmp);
			list_add_tail(tmp, dest);
		}
	}
}

static inline bool
csio_scsi_itnexus_loss_error(uint16_t error)
{
	switch (error) {
	case FW_ERR_LINK_DOWN:
	case FW_RDEV_NOT_READY:
	case FW_ERR_RDEV_LOST:
	case FW_ERR_RDEV_LOGO:
	case FW_ERR_RDEV_IMPL_LOGO:
		return true;
	}
	return false;
}

/*
 * csio_scsi_fcp_cmnd - Frame the SCSI FCP command payload.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 *
 * This routine is shared between FCP_WRITE, FCP_READ and FCP_CMD requests.
 */
static inline void
csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
{
	struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	/* Check for Task Management */
	if (likely(csio_priv(scmnd)->fc_tm_flags == 0)) {
		int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
		fcp_cmnd->fc_tm_flags = 0;
		fcp_cmnd->fc_cmdref = 0;

		memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
		fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
		fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));

		if (req->nsge) {
			if (req->datadir == DMA_TO_DEVICE)
				fcp_cmnd->fc_flags = FCP_CFL_WRDATA;
			else
				fcp_cmnd->fc_flags = FCP_CFL_RDDATA;
		} else {
			fcp_cmnd->fc_flags = 0;
		}
	} else {
		memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));
		int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
		fcp_cmnd->fc_tm_flags = csio_priv(scmnd)->fc_tm_flags;
	}
}

/*
 * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry)
 *
 * Wrapper for populating fw_scsi_cmd_wr.
 */
static inline void
csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr;
	struct csio_dma_buf *dma_buf;
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) |
					  FW_SCSI_CMD_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
					    FW_WR_LEN16_V(
						DIV_ROUND_UP(size, 16)));

	wr->cookie = (uintptr_t) req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t) req->tmo;
	wr->r3 = 0;
	memset(&wr->r5, 0, 8);

	/* Get RSP DMA buffer */
	dma_buf = &req->dma_buf;

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r6 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r4_lo[0] = 0;
	wr->u.fcoe.r4_lo[1] = 0;

	/* Frame a FCP command */
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr +
				    sizeof(struct fw_scsi_cmd_wr)));
}

#define CSIO_SCSI_CMD_WR_SZ(_imm)					\
	(sizeof(struct fw_scsi_cmd_wr) +		/* WR size */	\
	 ALIGN((_imm), 16))				/* Immed data */

#define CSIO_SCSI_CMD_WR_SZ_16(_imm)					\
			(ALIGN(CSIO_SCSI_CMD_WR_SZ((_imm)), 16))
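
/*
 * Note: FW_WR_LEN16_V() above expresses the WR length in 16-byte units
 * (DIV_ROUND_UP(size, 16)), which is why both the immediate FCP_CMND
 * payload and the overall WR size are padded to 16 bytes by these macros
 * before the WR is built.
 */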

/*
 * csio_scsi_cmd - Create a SCSI CMD WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with the
 * SCSI CMD WR.
 */
static inline void
csio_scsi_cmd(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (unlikely(req->drv_status != 0))
		return;

	if (wrp.size1 >= size) {
		/* Initialize WR in one shot */
		csio_scsi_init_cmd_wr(req, wrp.addr1, size);
	} else {
		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);

		/*
		 * Make a temporary copy of the WR and write back
		 * the copy into the WR pair.
		 */
		csio_scsi_init_cmd_wr(req, (void *)tmpwr, size);
		memcpy(wrp.addr1, tmpwr, wrp.size1);
		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
	}
}

/*
 * csio_scsi_init_ultptx_dsgl - Fill in a ULP_TX_SC_DSGL
 * @hw: HW module
 * @req: IO request
 * @sgl: ULP TX SGL pointer.
 */
static inline void
csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
			   struct ulptx_sgl *sgl)
{
	struct ulptx_sge_pair *sge_pair = NULL;
	struct scatterlist *sgel;
	uint32_t i = 0;
	uint32_t xfer_len;
	struct list_head *tmp;
	struct csio_dma_buf *dma_buf;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F |
				     ULPTX_NSGE_V(req->nsge));
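	/*
	 * ULPTX DSGL layout: the first SGE lives inline in the header
	 * (addr0/len0); every subsequent SGE is packed two at a time into
	 * ulptx_sge_pair slots, which is why the loops below alternate on
	 * (i - 1) & 0x1.
	 */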
	/* Now add the data SGLs */
	if (likely(!req->dcopy)) {
		scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
			if (i == 0) {
				sgl->addr0 = cpu_to_be64(sg_dma_address(sgel));
				sgl->len0 = cpu_to_be32(sg_dma_len(sgel));
				sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
				continue;
			}
			if ((i - 1) & 0x1) {
				sge_pair->addr[1] = cpu_to_be64(
							sg_dma_address(sgel));
				sge_pair->len[1] = cpu_to_be32(
							sg_dma_len(sgel));
				sge_pair++;
			} else {
				sge_pair->addr[0] = cpu_to_be64(
							sg_dma_address(sgel));
				sge_pair->len[0] = cpu_to_be32(
							sg_dma_len(sgel));
			}
		}
	} else {
		/* Program sg elements with driver's DDP buffer */
		xfer_len = scsi_bufflen(scmnd);
		list_for_each(tmp, &req->gen_list) {
			dma_buf = (struct csio_dma_buf *)tmp;
			if (i == 0) {
				sgl->addr0 = cpu_to_be64(dma_buf->paddr);
				sgl->len0 = cpu_to_be32(
						min(xfer_len, dma_buf->len));
				sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
			} else if ((i - 1) & 0x1) {
				sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr);
				sge_pair->len[1] = cpu_to_be32(
						min(xfer_len, dma_buf->len));
				sge_pair++;
			} else {
				sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr);
				sge_pair->len[0] = cpu_to_be32(
						min(xfer_len, dma_buf->len));
			}
			xfer_len -= min(xfer_len, dma_buf->len);
			i++;
		}
	}
}

/*
 * csio_scsi_init_read_wr - Initialize the READ SCSI WR.
 * @req: IO req structure.
 * @wrp: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL)
 *
 * Wrapper for populating fw_scsi_read_wr.
 */
static inline void
csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp;
	struct ulptx_sgl *sgl;
	struct csio_dma_buf *dma_buf;
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_READ_WR) |
				     FW_SCSI_READ_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
				       FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
	wr->cookie = (uintptr_t)req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t)(req->tmo);
	wr->use_xfer_cnt = 1;
	wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	/* Get RSP DMA buffer */
	dma_buf = &req->dma_buf;

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r4 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r3_lo[0] = 0;
	wr->u.fcoe.r3_lo[1] = 0;
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
					sizeof(struct fw_scsi_read_wr)));

	/* Move WR pointer past command and immediate data */
	sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
			      sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16));

	/* Fill in the DSGL */
	csio_scsi_init_ultptx_dsgl(hw, req, sgl);
}

/*
 * csio_scsi_init_write_wr - Initialize the WRITE SCSI WR.
 * @req: IO req structure.
 * @wrp: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL)
 *
 * Wrapper for populating fw_scsi_write_wr.
 */
static inline void
csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp;
	struct ulptx_sgl *sgl;
	struct csio_dma_buf *dma_buf;
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_WRITE_WR) |
				     FW_SCSI_WRITE_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
				       FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
	wr->cookie = (uintptr_t)req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t)(req->tmo);
	wr->use_xfer_cnt = 1;
	wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	/* Get RSP DMA buffer */
	dma_buf = &req->dma_buf;

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r4 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r3_lo[0] = 0;
	wr->u.fcoe.r3_lo[1] = 0;
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
					sizeof(struct fw_scsi_write_wr)));

	/* Move WR pointer past command and immediate data */
	sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
			      sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16));

	/* Fill in the DSGL */
	csio_scsi_init_ultptx_dsgl(hw, req, sgl);
}

/* Calculate WR size needed for fw_scsi_read_wr/fw_scsi_write_wr */
#define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm)				       \
do {									       \
	(sz) = sizeof(struct fw_scsi_##oper##_wr) +	/* WR size */          \
	       ALIGN((imm), 16) +			/* Immed data */       \
	       sizeof(struct ulptx_sgl);		/* ulptx_sgl */	       \
									       \
	if (unlikely((req)->nsge > 1))				               \
		(sz) += (sizeof(struct ulptx_sge_pair) *		       \
				(ALIGN(((req)->nsge - 1), 2) / 2));            \
							/* Data SGE */	       \
} while (0)
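
/*
 * Example: for a request with nsge == 5, the macro above adds
 * ALIGN(4, 2) / 2 == 2 ulptx_sge_pair entries on top of the base
 * ulptx_sgl (which already carries the first SGE inline).
 */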

/*
 * csio_scsi_read - Create a SCSI READ WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with the
 * SCSI READ WR.
 */
static inline void
csio_scsi_read(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	uint32_t size;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len);
	size = ALIGN(size, 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (likely(req->drv_status == 0)) {
		if (likely(wrp.size1 >= size)) {
			/* Initialize WR in one shot */
			csio_scsi_init_read_wr(req, wrp.addr1, size);
		} else {
			uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
			/*
			 * Make a temporary copy of the WR and write back
			 * the copy into the WR pair.
			 */
			csio_scsi_init_read_wr(req, (void *)tmpwr, size);
			memcpy(wrp.addr1, tmpwr, wrp.size1);
			memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
		}
	}
}

/*
 * csio_scsi_write - Create a SCSI WRITE WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with the
 * SCSI WRITE WR.
 */
static inline void
csio_scsi_write(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	uint32_t size;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len);
	size = ALIGN(size, 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (likely(req->drv_status == 0)) {
		if (likely(wrp.size1 >= size)) {
			/* Initialize WR in one shot */
			csio_scsi_init_write_wr(req, wrp.addr1, size);
		} else {
			uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
			/*
			 * Make a temporary copy of the WR and write back
			 * the copy into the WR pair.
			 */
			csio_scsi_init_write_wr(req, (void *)tmpwr, size);
			memcpy(wrp.addr1, tmpwr, wrp.size1);
			memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
		}
	}
}

/*
 * csio_setup_ddp - Set up DDP buffers for a read request.
 * @req: IO req structure.
 *
 * Checks whether the SGLs/data buffers are virtually contiguous, as
 * required for DDP. If they are, the driver posts the SGLs in the WR;
 * otherwise it posts internal DDP buffers for the request.
 */
static inline void
csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req)
{
#ifdef __CSIO_DEBUG__
	struct csio_hw *hw = req->lnode->hwp;
#endif
	struct scatterlist *sgel = NULL;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
	uint64_t sg_addr = 0;
	uint32_t ddp_pagesz = 4096;
	uint32_t buf_off;
	struct csio_dma_buf *dma_buf = NULL;
	uint32_t alloc_len = 0;
	uint32_t xfer_len = 0;
	uint32_t sg_len = 0;
	uint32_t i;

	scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
		sg_addr = sg_dma_address(sgel);
		sg_len = sg_dma_len(sgel);

		buf_off = sg_addr & (ddp_pagesz - 1);

		/* Except for the 1st buffer, all buffer addresses must be
		 * page aligned.
		 */
		if (i != 0 && buf_off) {
			csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n",
				 sg_addr, sg_len);
			goto unaligned;
		}

		/* Except for the last buffer, every buffer must end on a
		 * page boundary.
		 */
		if ((i != (req->nsge - 1)) &&
			((buf_off + sg_len) & (ddp_pagesz - 1))) {
			csio_dbg(hw,
				 "SGL addr not ending on page boundary"
				 " (%llx:%d)\n", sg_addr, sg_len);
			goto unaligned;
		}
	}

	/* SGLs are virtually contiguous. HW will DDP to the SGLs */
	req->dcopy = 0;
	csio_scsi_read(req);

	return;

unaligned:
	CSIO_INC_STATS(scsim, n_unaligned);
	/*
	 * For unaligned SGLs, the driver allocates internal DDP buffers.
	 * Once the command completes, data from the DDP buffers is copied
	 * back to the SGLs.
	 */
	req->dcopy = 1;

	/* Use gen_list to store the DDP buffers */
	INIT_LIST_HEAD(&req->gen_list);
	xfer_len = scsi_bufflen(scmnd);

	i = 0;
	/* Allocate DDP buffers for this request */
	while (alloc_len < xfer_len) {
		dma_buf = csio_get_scsi_ddp(scsim);
		if (dma_buf == NULL || i > scsim->max_sge) {
			req->drv_status = -EBUSY;
			break;
		}
		alloc_len += dma_buf->len;
		/* Added to IO req */
		list_add_tail(&dma_buf->list, &req->gen_list);
		i++;
	}

	if (!req->drv_status) {
		/* set number of DDP bufs used */
		req->nsge = i;
		csio_scsi_read(req);
		return;
	}

	/* release dma descs */
	if (i > 0)
		csio_put_scsi_ddp_list(scsim, &req->gen_list, i);
}
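
/*
 * When req->dcopy is set above, the completion path copies the DDP'ed
 * data back into the command's SGL via csio_scsi_copy_to_sgl().
 */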

/*
 * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 * @size: Size of WR
 * @abort: abort OR close
 *
 * Wrapper for populating fw_scsi_abrt_cls_wr.
 */
static inline void
csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size,
			   bool abort)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_ABRT_CLS_WR));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
					    FW_WR_LEN16_V(
						DIV_ROUND_UP(size, 16)));

	wr->cookie = (uintptr_t) req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t) req->tmo;
	/* 0 for CHK_ALL_IO tells FW to look up t_cookie */
	wr->sub_opcode_to_chk_all_io =
				(FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort) |
				 FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0));
	wr->r3[0] = 0;
	wr->r3[1] = 0;
	wr->r3[2] = 0;
	wr->r3[3] = 0;
	/* Since we re-use the same ioreq for abort as well */
	wr->t_cookie = (uintptr_t) req;
}

static inline void
csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort)
{
	struct csio_wr_pair wrp;
	struct csio_hw *hw = req->lnode->hwp;
	uint32_t size = ALIGN(sizeof(struct fw_scsi_abrt_cls_wr), 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (req->drv_status != 0)
		return;

	if (wrp.size1 >= size) {
		/* Initialize WR in one shot */
		csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort);
	} else {
		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
		/*
		 * Make a temporary copy of the WR and write back
		 * the copy into the WR pair.
		 */
		csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort);
		memcpy(wrp.addr1, tmpwr, wrp.size1);
		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
	}
}

/*****************************************************************************/
/* START: SCSI SM                                                            */
/*****************************************************************************/
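/*
 * The I/O state machine below moves an ioreq between csio_scsis_uninit,
 * csio_scsis_io_active/csio_scsis_tm_active, csio_scsis_aborting,
 * csio_scsis_closing and csio_scsis_shost_cmpl_await, driven by the
 * csio_scsi_ev events each handler switches on.
 */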
static void
csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_START_IO:

		if (req->nsge) {
			if (req->datadir == DMA_TO_DEVICE) {
				req->dcopy = 0;
				csio_scsi_write(req);
			} else
				csio_setup_ddp(scsim, req);
		} else {
			csio_scsi_cmd(req);
		}

		if (likely(req->drv_status == 0)) {
			/* change state and enqueue on active_q */
			csio_set_state(&req->sm, csio_scsis_io_active);
			list_add_tail(&req->sm.sm_list, &scsim->active_q);
			csio_wr_issue(hw, req->eq_idx, false);
			CSIO_INC_STATS(scsim, n_active);

			return;
		}
		break;

	case CSIO_SCSIE_START_TM:
		csio_scsi_cmd(req);
		if (req->drv_status == 0) {
			/*
			 * NOTE: We collect the affected I/Os prior to issuing
			 * the LUN reset, and not after it. This is to prevent
			 * aborting I/Os that get issued after the LUN reset,
			 * but prior to LUN reset completion (in the event that
			 * the host stack has not blocked I/Os to a LUN that is
			 * being reset).
			 */
			csio_set_state(&req->sm, csio_scsis_tm_active);
			list_add_tail(&req->sm.sm_list, &scsim->active_q);
			csio_wr_issue(hw, req->eq_idx, false);
			CSIO_INC_STATS(scsim, n_tm_active);
		}
		return;

	case CSIO_SCSIE_ABORT:
	case CSIO_SCSIE_CLOSE:
		/*
		 * NOTE:
		 * We could get here due to:
		 * - a window in the cleanup path of the SCSI module
		 *   (csio_scsi_abort_io()). Please see the NOTE in that
		 *   function.
		 * - a window between the time we tried to issue an abort/close
		 *   of a request to FW, and the FW completing the request
		 *   itself.
		 * Print a message for now, and return INVAL either way.
		 */
		req->drv_status = -EINVAL;
		csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);
	struct csio_rnode *rn;

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		CSIO_DEC_STATS(scm, n_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		/*
		 * In MSIX mode, with multiple queues, the SCSI completions
		 * could reach us sooner than the FW events sent to indicate
		 * I-T nexus loss (link down, remote device logo etc.). We
		 * don't want to return such I/Os to the upper layer
		 * immediately, since we wouldn't have reported the I-T nexus
		 * loss itself. This forces us to serialize such completions
		 * with the reporting of the I-T nexus loss. Therefore, we
		 * internally queue up such completions in the rnode.
		 * The reporting of I-T nexus loss to the upper layer is then
		 * followed by the returning of I/Os in this internal queue.
		 * Having another state along with another queue helps us take
		 * actions for events such as ABORT received while we are
		 * in this rnode queue.
		 */
		if (unlikely(req->wr_status != FW_SUCCESS)) {
			rn = req->rnode;
			/*
			 * FW says the remote device is lost, but the rnode
			 * doesn't reflect it yet.
			 */
			if (csio_scsi_itnexus_loss_error(req->wr_status) &&
						csio_is_rnode_ready(rn)) {
				csio_set_state(&req->sm,
						csio_scsis_shost_cmpl_await);
				list_add_tail(&req->sm.sm_list,
					      &rn->host_cmpl_q);
			}
		}

		break;

	case CSIO_SCSIE_ABORT:
		csio_scsi_abrt_cls(req, SCSI_ABORT);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_aborting);
		}
		break;

	case CSIO_SCSIE_CLOSE:
		csio_scsi_abrt_cls(req, SCSI_CLOSE);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_closing);
		}
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		CSIO_DEC_STATS(scm, n_tm_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);

		break;

	case CSIO_SCSIE_ABORT:
		csio_scsi_abrt_cls(req, SCSI_ABORT);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_aborting);
		}
		break;

	case CSIO_SCSIE_CLOSE:
		csio_scsi_abrt_cls(req, SCSI_CLOSE);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_closing);
		}
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_tm_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}
static void
csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		csio_dbg(hw,
			 "ioreq %p recvd cmpltd (wr_status:%d) "
			 "in aborting st\n", req, req->wr_status);
		/*
		 * Use -ECANCELED to explicitly tell the ABORTED event that
		 * the original I/O was returned to the driver by FW.
		 * We don't really care if the I/O was returned with success by
		 * FW (because the ABORT and completion of the I/O crossed each
		 * other), or any other return value. Once we are in aborting
		 * state, the success or failure of the I/O is unimportant to
		 * us.
		 */
		req->drv_status = -ECANCELED;
		break;

	case CSIO_SCSIE_ABORT:
		CSIO_INC_STATS(scm, n_abrt_dups);
		break;

	case CSIO_SCSIE_ABORTED:

		csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n",
			 req, req->wr_status, req->drv_status);
		/*
		 * Check if the original I/O WR completed before the Abort
		 * completion.
		 */
		if (req->drv_status != -ECANCELED) {
			csio_warn(hw,
				  "Abort completed before original I/O,"
				   " req:%p\n", req);
			CSIO_DB_ASSERT(0);
		}

		/*
		 * There are the following possible scenarios:
		 * 1. The abort completed successfully, FW returned FW_SUCCESS.
		 * 2. The completion of an I/O and the receipt of the
		 *    abort for that I/O by the FW crossed each other.
		 *    The FW returned FW_EINVAL. The original I/O would have
		 *    returned with FW_SUCCESS or any other SCSI error.
		 * 3. The FW couldn't send the abort out on the wire, as there
		 *    was an I-T nexus loss (link down, remote device logged
		 *    out etc.). FW sent back an appropriate I-T nexus loss
		 *    status for the abort.
		 * 4. FW sent an abort, but the abort timed out (the remote
		 *    device didn't respond). FW replied back with
		 *    FW_SCSI_ABORT_TIMEDOUT.
		 * 5. FW couldn't genuinely abort the request for some reason,
		 *    and sent us an error.
		 *
		 * The first 3 scenarios are treated as successful abort
		 * operations by the host, while the last 2 are failed attempts
		 * to abort. Manipulate the return value of the request
		 * appropriately, so that the host can convey these results
		 * back to the upper layer.
		 */
		if ((req->wr_status == FW_SUCCESS) ||
		    (req->wr_status == FW_EINVAL) ||
		    csio_scsi_itnexus_loss_error(req->wr_status))
			req->wr_status = FW_SCSI_ABORT_REQUESTED;

		CSIO_DEC_STATS(scm, n_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	case CSIO_SCSIE_CLOSE:
		/*
		 * We can receive this event from the module
		 * cleanup paths, if the FW forgot to reply to the ABORT WR
		 * and left this ioreq in this state. For now, just ignore
		 * the event. The CLOSE event is sent to this state, as
		 * the LINK may have already gone down.
		 */
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		csio_dbg(hw,
			 "ioreq %p recvd cmpltd (wr_status:%d) "
			 "in closing st\n", req, req->wr_status);
		/*
		 * Use -ECANCELED to explicitly tell the CLOSED event that
		 * the original I/O was returned to the driver by FW.
		 * We don't really care if the I/O was returned with success by
		 * FW (because the CLOSE and completion of the I/O crossed each
		 * other), or any other return value. Once we are in closing
		 * state, the success or failure of the I/O is unimportant to
		 * us.
		 */
		req->drv_status = -ECANCELED;
		break;

	case CSIO_SCSIE_CLOSED:
		/*
		 * Check if the original I/O WR completed before the Close
		 * completion.
		 */
		if (req->drv_status != -ECANCELED) {
			csio_fatal(hw,
				   "Close completed before original I/O,"
				   " req:%p\n", req);
			CSIO_DB_ASSERT(0);
		}

		/*
		 * Either the close succeeded, or we issued the close to FW
		 * at the same time FW completed it to us. Either way, the
		 * I/O is closed.
		 */
		CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
					(req->wr_status == FW_EINVAL));
		req->wr_status = FW_SCSI_CLOSE_REQUESTED;

		CSIO_DEC_STATS(scm, n_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	case CSIO_SCSIE_CLOSE:
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	switch (evt) {
	case CSIO_SCSIE_ABORT:
	case CSIO_SCSIE_CLOSE:
		/*
		 * Just succeed the abort request, and hope that
		 * the remote device unregister path will clean up
		 * this I/O to the upper layer within a sane
		 * amount of time.
		 */
		/*
		 * A close can come in during a LINK DOWN. The FW would have
		 * returned us the I/O back, but not the remote device lost
		 * FW event. In this interval, if the I/O times out at the
		 * upper layer, a close can come in. Take the same action as
		 * abort: return success, and hope that the remote device
		 * unregister path will clean up this I/O. If the FW still
		 * doesn't send the msg, the close times out, and the upper
		 * layer resorts to the next level of error recovery.
		 */
		req->drv_status = 0;
		break;
	case CSIO_SCSIE_DRVCLEANUP:
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;
	default:
		csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",
			 evt, req);
		CSIO_DB_ASSERT(0);
	}
}

/*
 * csio_scsi_cmpl_handler - WR completion handler for SCSI.
 * @hw: HW module.
 * @wr: The completed WR from the ingress queue.
 * @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @priv: Private object
 * @scsiwr: Pointer to SCSI WR.
 *
 * This is the WR completion handler called per completion from the
 * ISR. It is called with lock held. It walks past the RSS and CPL message
 * header where the actual WR is present.
 * It then gets the status, WR handle (ioreq pointer) and the len of
 * the WR, based on WR opcode. Only on a non-good status is the entire
 * WR copied into the WR cache (ioreq->fw_wr).
 * The ioreq corresponding to the WR is returned to the caller.
 * NOTE: The SCSI queue doesn't allocate a freelist today, hence
 * no freelist buffer is expected.
 */
struct csio_ioreq *
csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len,
		     struct csio_fl_dma_buf *flb, void *priv, uint8_t **scsiwr)
{
	struct csio_ioreq *ioreq = NULL;
	struct cpl_fw6_msg *cpl;
	uint8_t *tempwr;
	uint8_t status;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	/* skip RSS header */
	cpl = (struct cpl_fw6_msg *)((uintptr_t)wr + sizeof(__be64));

	if (unlikely(cpl->opcode != CPL_FW6_MSG)) {
		csio_warn(hw, "Error: Invalid CPL msg %x recvd on SCSI q\n",
			  cpl->opcode);
		CSIO_INC_STATS(scm, n_inval_cplop);
		return NULL;
	}

	tempwr = (uint8_t *)(cpl->data);
	status = csio_wr_status(tempwr);
	*scsiwr = tempwr;

	if (likely((*tempwr == FW_SCSI_READ_WR) ||
			(*tempwr == FW_SCSI_WRITE_WR) ||
			(*tempwr == FW_SCSI_CMD_WR))) {
		ioreq = (struct csio_ioreq *)((uintptr_t)
				 (((struct fw_scsi_read_wr *)tempwr)->cookie));
		CSIO_DB_ASSERT(virt_addr_valid(ioreq));

		ioreq->wr_status = status;

		return ioreq;
	}

	if (*tempwr == FW_SCSI_ABRT_CLS_WR) {
		ioreq = (struct csio_ioreq *)((uintptr_t)
			 (((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie));
		CSIO_DB_ASSERT(virt_addr_valid(ioreq));

		ioreq->wr_status = status;
		return ioreq;
	}

	csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr);
	CSIO_INC_STATS(scm, n_inval_scsiop);
	return NULL;
}

/*
 * csio_scsi_cleanup_io_q - Cleanup the given queue.
 * @scm: SCSI module.
 * @q: Queue to be cleaned up.
 *
 * Called with lock held. Has to exit with lock held.
 */
void
csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
{
	struct csio_hw *hw = scm->hw;
	struct csio_ioreq *ioreq;
	struct list_head *tmp, *next;
	struct scsi_cmnd *scmnd;

	/* Call back the completion routines of the active_q */
	list_for_each_safe(tmp, next, q) {
		ioreq = (struct csio_ioreq *)tmp;
		csio_scsi_drvcleanup(ioreq);
		list_del_init(&ioreq->sm.sm_list);
		scmnd = csio_scsi_cmnd(ioreq);
		spin_unlock_irq(&hw->lock);

		/*
		 * Upper layers may have cleared this command, hence this
		 * check to avoid accessing stale references.
		 */
		if (scmnd != NULL)
			ioreq->io_cbfn(hw, ioreq);

		spin_lock_irq(&scm->freelist_lock);
		csio_put_scsi_ioreq(scm, ioreq);
		spin_unlock_irq(&scm->freelist_lock);

		spin_lock_irq(&hw->lock);
	}
}

#define CSIO_SCSI_ABORT_Q_POLL_MS		2000

static void
csio_abrt_cls(struct csio_ioreq *ioreq, struct scsi_cmnd *scmnd)
{
	struct csio_lnode *ln = ioreq->lnode;
	struct csio_hw *hw = ln->hwp;
	int ready = 0;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	int rv;

	if (csio_scsi_cmnd(ioreq) != scmnd) {
		CSIO_INC_STATS(scsim, n_abrt_race_comp);
		return;
	}

	ready = csio_is_lnode_ready(ln);

	rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
	if (rv != 0) {
		if (ready)
			CSIO_INC_STATS(scsim, n_abrt_busy_error);
		else
			CSIO_INC_STATS(scsim, n_cls_busy_error);
	}
}

/*
 * csio_scsi_abort_io_q - Abort all I/Os on the given queue.
 * @scm: SCSI module.
 * @q: Queue to abort.
 * @tmo: Timeout in ms
 *
 * Attempt to abort all I/Os on the given queue, and wait for a max
 * of tmo milliseconds for them to complete. Returns success
 * if all I/Os are aborted. Else returns -ETIMEDOUT.
 * Should be entered with lock held. Exits with lock held.
 * NOTE:
 * The lock has to be held across the loop that aborts I/Os, since dropping
 * the lock in between can cause the list to be corrupted. As a result, the
 * caller of this function has to ensure that the number of I/Os to be
 * aborted is small enough to not cause lock-held-for-too-long issues.
 */
static int
csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
{
	struct csio_hw *hw = scm->hw;
	struct list_head *tmp, *next;
	int count = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS);
	struct scsi_cmnd *scmnd;

	if (list_empty(q))
		return 0;

	csio_dbg(hw, "Aborting SCSI I/Os\n");

	/* Now abort/close I/Os in the queue passed */
	list_for_each_safe(tmp, next, q) {
		scmnd = csio_scsi_cmnd((struct csio_ioreq *)tmp);
		csio_abrt_cls((struct csio_ioreq *)tmp, scmnd);
	}

	/* Wait till all active I/Os are completed/aborted/closed */
	while (!list_empty(q) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
		spin_lock_irq(&hw->lock);
	}

	/* all aborts completed */
	if (list_empty(q))
		return 0;

	return -ETIMEDOUT;
}

/*
 * csio_scsim_cleanup_io - Cleanup all I/Os in the SCSI module.
 * @scm: SCSI module.
 * @abort: abort required.
 * Called with lock held, should exit with lock held.
 * Can sleep when waiting for I/Os to complete.
 */
int
csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort)
{
	struct csio_hw *hw = scm->hw;
	int rv = 0;
	int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);

	/* No I/Os pending */
	if (list_empty(&scm->active_q))
		return 0;

	/* Wait until all active I/Os are completed */
	while (!list_empty(&scm->active_q) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
		spin_lock_irq(&hw->lock);
	}

	/* all I/Os completed */
	if (list_empty(&scm->active_q))
		return 0;

	/* Else abort */
	if (abort) {
		rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000);
		if (rv == 0)
			return rv;
		csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
	}

	csio_scsi_cleanup_io_q(scm, &scm->active_q);

	CSIO_DB_ASSERT(list_empty(&scm->active_q));

	return rv;
}

/*
 * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of the given lnode.
 * @scm: SCSI module.
 * @ln: lnode
 *
 * Called with lock held, should exit with lock held.
 * Can sleep (with dropped lock) when waiting for I/Os to complete.
 */
int
csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln)
{
	struct csio_hw *hw = scm->hw;
	struct csio_scsi_level_data sld;
	int rv;
	int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);

	csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln);

	sld.level = CSIO_LEV_LNODE;
	sld.lnode = ln;
	INIT_LIST_HEAD(&ln->cmpl_q);
	csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q);

	/* No I/Os pending on this lnode */
	if (list_empty(&ln->cmpl_q))
		return 0;

	/* Wait until all active I/Os on this lnode are completed */
	while (!list_empty(&ln->cmpl_q) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
		spin_lock_irq(&hw->lock);
	}

	/* all I/Os completed */
	if (list_empty(&ln->cmpl_q))
		return 0;

	csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln);

	/* I/Os are pending, abort them */
	rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000);
	if (rv != 0) {
		csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
		csio_scsi_cleanup_io_q(scm, &ln->cmpl_q);
	}

	CSIO_DB_ASSERT(list_empty(&ln->cmpl_q));

	return rv;
}

static ssize_t
csio_show_hw_state(struct device *dev,
		   struct device_attribute *attr, char *buf)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	if (csio_is_hw_ready(hw))
		return snprintf(buf, PAGE_SIZE, "ready\n");
	else
		return snprintf(buf, PAGE_SIZE, "not ready\n");
}

/* Device reset */
static ssize_t
csio_device_reset(struct device *dev,
		   struct device_attribute *attr, const char *buf, size_t count)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	if (*buf != '1')
		return -EINVAL;

	/* Delete NPIV lnodes */
	csio_lnodes_exit(hw, 1);

	/* Block upper IOs */
	csio_lnodes_block_request(hw);

	spin_lock_irq(&hw->lock);
	csio_hw_reset(hw);
	spin_unlock_irq(&hw->lock);

	/* Unblock upper IOs */
	csio_lnodes_unblock_request(hw);
	return count;
}

/* disable port */
static ssize_t
csio_disable_port(struct device *dev,
		   struct device_attribute *attr, const char *buf, size_t count)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	bool disable;

	if (*buf == '1' || *buf == '0')
		disable = (*buf == '1');
	else
		return -EINVAL;

	/* Block upper IOs */
	csio_lnodes_block_by_port(hw, ln->portid);

	spin_lock_irq(&hw->lock);
	csio_disable_lnodes(hw, ln->portid, disable);
	spin_unlock_irq(&hw->lock);

	/* Unblock upper IOs */
	csio_lnodes_unblock_by_port(hw, ln->portid);
	return count;
}

/* Show debug level */
static ssize_t
csio_show_dbg_level(struct device *dev,
		   struct device_attribute *attr, char *buf)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level);
}

/* Store debug level */
static ssize_t
csio_store_dbg_level(struct device *dev,
		   struct device_attribute *attr, const char *buf, size_t count)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	uint32_t dbg_level = 0;

	if (!isdigit(buf[0]))
		return -EINVAL;

	/* sscanf() returns the number of items matched; anything other
	 * than one match is a parse failure.
	 */
	if (sscanf(buf, "%i", &dbg_level) != 1)
		return -EINVAL;

	ln->params.log_level = dbg_level;
	hw->params.log_level = dbg_level;

	/* Report the whole buffer as consumed */
	return count;
}

static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL);
static DEVICE_ATTR(device_reset, S_IWUSR, NULL, csio_device_reset);
static DEVICE_ATTR(disable_port, S_IWUSR, NULL, csio_disable_port);
static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,
		  csio_store_dbg_level);

static struct attribute *csio_fcoe_lport_attrs[] = {
	&dev_attr_hw_state.attr,
	&dev_attr_device_reset.attr,
	&dev_attr_disable_port.attr,
	&dev_attr_dbg_level.attr,
	NULL,
};

ATTRIBUTE_GROUPS(csio_fcoe_lport);

static ssize_t
csio_show_num_reg_rnodes(struct device *dev,
		     struct device_attribute *attr, char *buf)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);
}

static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);

static struct attribute *csio_fcoe_vport_attrs[] = {
	&dev_attr_num_reg_rnodes.attr,
	&dev_attr_dbg_level.attr,
	NULL,
};

ATTRIBUTE_GROUPS(csio_fcoe_vport);

static inline uint32_t
csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)
{
	struct scsi_cmnd *scmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
	struct scatterlist *sg;
	uint32_t bytes_left;
	uint32_t bytes_copy;
	uint32_t buf_off = 0;
	uint32_t start_off = 0;
	uint32_t sg_off = 0;
	void *sg_addr;
	void *buf_addr;
	struct csio_dma_buf *dma_buf;

	bytes_left = scsi_bufflen(scmnd);
	sg = scsi_sglist(scmnd);
	dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list);

	/* Copy data from driver buffer to SGs of SCSI CMD */
	while (bytes_left > 0 && sg && dma_buf) {
		if (buf_off >= dma_buf->len) {
			buf_off = 0;
			dma_buf = (struct csio_dma_buf *)
					csio_list_next(dma_buf);
			continue;
		}

		if (start_off >= sg->length) {
			start_off -= sg->length;
			sg = sg_next(sg);
			continue;
		}

		buf_addr = dma_buf->vaddr + buf_off;
		sg_off = sg->offset + start_off;
		bytes_copy = min((dma_buf->len - buf_off),
				sg->length - start_off);
		bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)),
				 bytes_copy);
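		/*
		 * The clamp above keeps each memcpy() within the single
		 * page mapped by kmap_atomic() below.
		 */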
   1531
   1532		sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT));
   1533		if (!sg_addr) {
   1534			csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n",
   1535				sg, req);
   1536			break;
   1537		}
   1538
   1539		csio_dbg(hw, "copy_to_sgl:sg_addr %p sg_off %d buf %p len %d\n",
   1540				sg_addr, sg_off, buf_addr, bytes_copy);
   1541		memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy);
   1542		kunmap_atomic(sg_addr);
   1543
   1544		start_off +=  bytes_copy;
   1545		buf_off += bytes_copy;
   1546		bytes_left -= bytes_copy;
   1547	}
   1548
   1549	if (bytes_left > 0)
   1550		return DID_ERROR;
   1551	else
   1552		return DID_OK;
   1553}
   1554
   1555/*
   1556 * csio_scsi_err_handler - SCSI error handler.
   1557 * @hw: HW module.
   1558 * @req: IO request.
   1559 *
   1560 */
   1561static inline void
   1562csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
   1563{
   1564	struct scsi_cmnd *cmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
   1565	struct csio_scsim *scm = csio_hw_to_scsim(hw);
   1566	struct fcp_resp_with_ext *fcp_resp;
   1567	struct fcp_resp_rsp_info *rsp_info;
   1568	struct csio_dma_buf *dma_buf;
   1569	uint8_t flags, scsi_status = 0;
   1570	uint32_t host_status = DID_OK;
   1571	uint32_t rsp_len = 0, sns_len = 0;
   1572	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
   1573
   1574
   1575	switch (req->wr_status) {
   1576	case FW_HOSTERROR:
   1577		if (unlikely(!csio_is_hw_ready(hw)))
   1578			return;
   1579
   1580		host_status = DID_ERROR;
   1581		CSIO_INC_STATS(scm, n_hosterror);
   1582
   1583		break;
   1584	case FW_SCSI_RSP_ERR:
   1585		dma_buf = &req->dma_buf;
   1586		fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
   1587		rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
   1588		flags = fcp_resp->resp.fr_flags;
   1589		scsi_status = fcp_resp->resp.fr_status;
   1590
   1591		if (flags & FCP_RSP_LEN_VAL) {
   1592			rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len);
   1593			if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) ||
   1594				(rsp_info->rsp_code != FCP_TMF_CMPL)) {
   1595				host_status = DID_ERROR;
   1596				goto out;
   1597			}
   1598		}
   1599
   1600		if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) {
   1601			sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len);
   1602			if (sns_len > SCSI_SENSE_BUFFERSIZE)
   1603				sns_len = SCSI_SENSE_BUFFERSIZE;
   1604
   1605			memcpy(cmnd->sense_buffer,
   1606			       &rsp_info->_fr_resvd[0] + rsp_len, sns_len);
   1607			CSIO_INC_STATS(scm, n_autosense);
   1608		}
   1609
   1610		scsi_set_resid(cmnd, 0);
   1611
   1612		/* Under run */
   1613		if (flags & FCP_RESID_UNDER) {
   1614			scsi_set_resid(cmnd,
   1615				       be32_to_cpu(fcp_resp->ext.fr_resid));
   1616
   1617			if (!(flags & FCP_SNS_LEN_VAL) &&
   1618			    (scsi_status == SAM_STAT_GOOD) &&
   1619			    ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd))
   1620							< cmnd->underflow))
   1621				host_status = DID_ERROR;
   1622		} else if (flags & FCP_RESID_OVER)
   1623			host_status = DID_ERROR;
   1624
   1625		CSIO_INC_STATS(scm, n_rsperror);
   1626		break;
   1627
   1628	case FW_SCSI_OVER_FLOW_ERR:
   1629		csio_warn(hw,
    1630			  "Overflow error, cmnd:0x%x expected len:0x%x"
   1631			  " resid:0x%x\n", cmnd->cmnd[0],
   1632			  scsi_bufflen(cmnd), scsi_get_resid(cmnd));
   1633		host_status = DID_ERROR;
   1634		CSIO_INC_STATS(scm, n_ovflerror);
   1635		break;
   1636
   1637	case FW_SCSI_UNDER_FLOW_ERR:
   1638		csio_warn(hw,
    1639			  "Underflow error, cmnd:0x%x expected"
   1640			  " len:0x%x resid:0x%x lun:0x%llx ssn:0x%x\n",
   1641			  cmnd->cmnd[0], scsi_bufflen(cmnd),
   1642			  scsi_get_resid(cmnd), cmnd->device->lun,
   1643			  rn->flowid);
   1644		host_status = DID_ERROR;
   1645		CSIO_INC_STATS(scm, n_unflerror);
   1646		break;
   1647
   1648	case FW_SCSI_ABORT_REQUESTED:
   1649	case FW_SCSI_ABORTED:
   1650	case FW_SCSI_CLOSE_REQUESTED:
   1651		csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd,
   1652			     cmnd->cmnd[0],
   1653			    (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ?
   1654			    "closed" : "aborted");
   1655		/*
   1656		 * csio_eh_abort_handler checks this value to
   1657		 * succeed or fail the abort request.
   1658		 */
   1659		host_status = DID_REQUEUE;
   1660		if (req->wr_status == FW_SCSI_CLOSE_REQUESTED)
   1661			CSIO_INC_STATS(scm, n_closed);
   1662		else
   1663			CSIO_INC_STATS(scm, n_aborted);
   1664		break;
   1665
   1666	case FW_SCSI_ABORT_TIMEDOUT:
   1667		/* FW timed out the abort itself */
   1668		csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n",
   1669			 req, cmnd, req->wr_status);
   1670		host_status = DID_ERROR;
   1671		CSIO_INC_STATS(scm, n_abrt_timedout);
   1672		break;
   1673
   1674	case FW_RDEV_NOT_READY:
   1675		/*
    1676	 * In firmware, an RDEV can get into this state
    1677	 * temporarily, before moving into disappeared/lost
    1678	 * state. So the driver should complete the request as if
    1679	 * the device had disappeared.
   1680		 */
   1681		CSIO_INC_STATS(scm, n_rdev_nr_error);
   1682		host_status = DID_ERROR;
   1683		break;
   1684
   1685	case FW_ERR_RDEV_LOST:
   1686		CSIO_INC_STATS(scm, n_rdev_lost_error);
   1687		host_status = DID_ERROR;
   1688		break;
   1689
   1690	case FW_ERR_RDEV_LOGO:
   1691		CSIO_INC_STATS(scm, n_rdev_logo_error);
   1692		host_status = DID_ERROR;
   1693		break;
   1694
   1695	case FW_ERR_RDEV_IMPL_LOGO:
   1696		host_status = DID_ERROR;
   1697		break;
   1698
   1699	case FW_ERR_LINK_DOWN:
   1700		CSIO_INC_STATS(scm, n_link_down_error);
   1701		host_status = DID_ERROR;
   1702		break;
   1703
   1704	case FW_FCOE_NO_XCHG:
   1705		CSIO_INC_STATS(scm, n_no_xchg_error);
   1706		host_status = DID_ERROR;
   1707		break;
   1708
   1709	default:
   1710		csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n",
   1711			    req->wr_status, req, cmnd);
   1712		CSIO_DB_ASSERT(0);
   1713
   1714		CSIO_INC_STATS(scm, n_unknown_error);
   1715		host_status = DID_ERROR;
   1716		break;
   1717	}
   1718
   1719out:
   1720	if (req->nsge > 0) {
   1721		scsi_dma_unmap(cmnd);
   1722		if (req->dcopy && (host_status == DID_OK))
   1723			host_status = csio_scsi_copy_to_sgl(hw, req);
   1724	}
   1725
   1726	cmnd->result = (((host_status) << 16) | scsi_status);
   1727	scsi_done(cmnd);
   1728
   1729	/* Wake up waiting threads */
   1730	csio_scsi_cmnd(req) = NULL;
   1731	complete(&req->cmplobj);
   1732}
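
/*
 * Illustrative sketch (not part of this driver): cmnd->result packs the
 * host byte and the SCSI status byte as (host_status << 16) | scsi_status,
 * which is how the midlayer reads back what the error handler above
 * decided.
 */
static inline void example_decode_result(struct scsi_cmnd *cmnd)
{
	u32 host = host_byte(cmnd->result);	/* e.g. DID_OK, DID_REQUEUE */
	u32 status = cmnd->result & 0xff;	/* e.g. SAM_STAT_GOOD */

	pr_debug("host byte: 0x%x, status byte: 0x%x\n", host, status);
}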
   1733
   1734/*
   1735 * csio_scsi_cbfn - SCSI callback function.
   1736 * @hw: HW module.
   1737 * @req: IO request.
   1738 *
   1739 */
   1740static void
   1741csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
   1742{
   1743	struct scsi_cmnd *cmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
   1744	uint8_t scsi_status = SAM_STAT_GOOD;
   1745	uint32_t host_status = DID_OK;
   1746
   1747	if (likely(req->wr_status == FW_SUCCESS)) {
   1748		if (req->nsge > 0) {
   1749			scsi_dma_unmap(cmnd);
   1750			if (req->dcopy)
   1751				host_status = csio_scsi_copy_to_sgl(hw, req);
   1752		}
   1753
   1754		cmnd->result = (((host_status) << 16) | scsi_status);
   1755		scsi_done(cmnd);
   1756		csio_scsi_cmnd(req) = NULL;
   1757		CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success);
   1758	} else {
   1759		/* Error handling */
   1760		csio_scsi_err_handler(hw, req);
   1761	}
   1762}
   1763
   1764/**
   1765 * csio_queuecommand - Entry point to kickstart an I/O request.
   1766 * @host:	The scsi_host pointer.
   1767 * @cmnd:	The I/O request from ML.
   1768 *
   1769 * This routine does the following:
   1770 *	- Checks for HW and Rnode module readiness.
   1771 *	- Gets a free ioreq structure (which is already initialized
   1772 *	  to uninit during its allocation).
   1773 *	- Maps SG elements.
   1774 *	- Initializes ioreq members.
   1775 *	- Kicks off the SCSI state machine for this IO.
   1776 *	- Returns busy status on error.
   1777 */
   1778static int
   1779csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
   1780{
   1781	struct csio_lnode *ln = shost_priv(host);
   1782	struct csio_hw *hw = csio_lnode_to_hw(ln);
   1783	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
   1784	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
   1785	struct csio_ioreq *ioreq = NULL;
   1786	unsigned long flags;
   1787	int nsge = 0;
   1788	int rv = SCSI_MLQUEUE_HOST_BUSY, nr;
   1789	int retval;
   1790	struct csio_scsi_qset *sqset;
   1791	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
   1792
   1793	sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(scsi_cmd_to_rq(cmnd))];
   1794
   1795	nr = fc_remote_port_chkready(rport);
   1796	if (nr) {
   1797		cmnd->result = nr;
   1798		CSIO_INC_STATS(scsim, n_rn_nr_error);
   1799		goto err_done;
   1800	}
   1801
   1802	if (unlikely(!csio_is_hw_ready(hw))) {
   1803		cmnd->result = (DID_REQUEUE << 16);
   1804		CSIO_INC_STATS(scsim, n_hw_nr_error);
   1805		goto err_done;
   1806	}
   1807
   1808	/* Get req->nsge, if there are SG elements to be mapped  */
   1809	nsge = scsi_dma_map(cmnd);
   1810	if (unlikely(nsge < 0)) {
   1811		CSIO_INC_STATS(scsim, n_dmamap_error);
   1812		goto err;
   1813	}
   1814
   1815	/* Do we support so many mappings? */
   1816	if (unlikely(nsge > scsim->max_sge)) {
   1817		csio_warn(hw,
   1818			  "More SGEs than can be supported."
   1819			  " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge);
   1820		CSIO_INC_STATS(scsim, n_unsupp_sge_error);
   1821		goto err_dma_unmap;
   1822	}
   1823
   1824	/* Get a free ioreq structure - SM is already set to uninit */
   1825	ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
   1826	if (!ioreq) {
   1827		csio_err(hw, "Out of I/O request elements. Active #:%d\n",
   1828			 scsim->stats.n_active);
   1829		CSIO_INC_STATS(scsim, n_no_req_error);
   1830		goto err_dma_unmap;
   1831	}
   1832
   1833	ioreq->nsge		= nsge;
   1834	ioreq->lnode		= ln;
   1835	ioreq->rnode		= rn;
   1836	ioreq->iq_idx		= sqset->iq_idx;
   1837	ioreq->eq_idx		= sqset->eq_idx;
   1838	ioreq->wr_status	= 0;
   1839	ioreq->drv_status	= 0;
   1840	csio_scsi_cmnd(ioreq)	= (void *)cmnd;
   1841	ioreq->tmo		= 0;
   1842	ioreq->datadir		= cmnd->sc_data_direction;
   1843
   1844	if (cmnd->sc_data_direction == DMA_TO_DEVICE) {
   1845		CSIO_INC_STATS(ln, n_output_requests);
   1846		ln->stats.n_output_bytes += scsi_bufflen(cmnd);
   1847	} else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) {
   1848		CSIO_INC_STATS(ln, n_input_requests);
   1849		ln->stats.n_input_bytes += scsi_bufflen(cmnd);
   1850	} else
   1851		CSIO_INC_STATS(ln, n_control_requests);
   1852
   1853	/* Set cbfn */
   1854	ioreq->io_cbfn = csio_scsi_cbfn;
   1855
   1856	/* Needed during abort */
   1857	cmnd->host_scribble = (unsigned char *)ioreq;
   1858	csio_priv(cmnd)->fc_tm_flags = 0;
   1859
   1860	/* Kick off SCSI IO SM on the ioreq */
   1861	spin_lock_irqsave(&hw->lock, flags);
   1862	retval = csio_scsi_start_io(ioreq);
   1863	spin_unlock_irqrestore(&hw->lock, flags);
   1864
   1865	if (retval != 0) {
   1866		csio_err(hw, "ioreq: %p couldn't be started, status:%d\n",
   1867			 ioreq, retval);
   1868		CSIO_INC_STATS(scsim, n_busy_error);
   1869		goto err_put_req;
   1870	}
   1871
   1872	return 0;
   1873
   1874err_put_req:
   1875	csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
   1876err_dma_unmap:
   1877	if (nsge > 0)
   1878		scsi_dma_unmap(cmnd);
   1879err:
   1880	return rv;
   1881
   1882err_done:
   1883	scsi_done(cmnd);
   1884	return 0;
   1885}
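
/*
 * Illustrative sketch (not part of this driver): the ->queuecommand()
 * return convention followed above. SCSI_MLQUEUE_HOST_BUSY asks the
 * midlayer to replay the command later; a terminal error is instead
 * reported by setting cmnd->result, calling scsi_done() and returning 0.
 * The example_* predicates are hypothetical placeholders.
 */
static bool example_hw_busy(struct Scsi_Host *host);		/* hypothetical */
static bool example_device_gone(struct scsi_cmnd *cmnd);	/* hypothetical */
static int example_submit_io(struct scsi_cmnd *cmnd);		/* hypothetical */

static int example_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
{
	if (example_hw_busy(host))
		return SCSI_MLQUEUE_HOST_BUSY;	/* midlayer retries later */

	if (example_device_gone(cmnd)) {
		cmnd->result = DID_NO_CONNECT << 16;
		scsi_done(cmnd);		/* terminal completion */
		return 0;
	}

	return example_submit_io(cmnd);		/* 0 on successful submit */
}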
   1886
   1887static int
   1888csio_do_abrt_cls(struct csio_hw *hw, struct csio_ioreq *ioreq, bool abort)
   1889{
   1890	int rv;
   1891	int cpu = smp_processor_id();
   1892	struct csio_lnode *ln = ioreq->lnode;
   1893	struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu];
   1894
   1895	ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS;
   1896	/*
   1897	 * Use current processor queue for posting the abort/close, but retain
   1898	 * the ingress queue ID of the original I/O being aborted/closed - we
   1899	 * need the abort/close completion to be received on the same queue
   1900	 * as the original I/O.
   1901	 */
   1902	ioreq->eq_idx = sqset->eq_idx;
   1903
   1904	if (abort == SCSI_ABORT)
   1905		rv = csio_scsi_abort(ioreq);
   1906	else
   1907		rv = csio_scsi_close(ioreq);
   1908
   1909	return rv;
   1910}
   1911
   1912static int
   1913csio_eh_abort_handler(struct scsi_cmnd *cmnd)
   1914{
   1915	struct csio_ioreq *ioreq;
   1916	struct csio_lnode *ln = shost_priv(cmnd->device->host);
   1917	struct csio_hw *hw = csio_lnode_to_hw(ln);
   1918	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
   1919	int ready = 0, ret;
   1920	unsigned long tmo = 0;
   1921	int rv;
   1922	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
   1923
   1924	ret = fc_block_scsi_eh(cmnd);
   1925	if (ret)
   1926		return ret;
   1927
   1928	ioreq = (struct csio_ioreq *)cmnd->host_scribble;
   1929	if (!ioreq)
   1930		return SUCCESS;
   1931
   1932	if (!rn)
   1933		return FAILED;
   1934
   1935	csio_dbg(hw,
   1936		 "Request to abort ioreq:%p cmd:%p cdb:%08llx"
   1937		 " ssni:0x%x lun:%llu iq:0x%x\n",
   1938		ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid,
   1939		cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx));
   1940
   1941	if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) != cmnd) {
   1942		CSIO_INC_STATS(scsim, n_abrt_race_comp);
   1943		return SUCCESS;
   1944	}
   1945
   1946	ready = csio_is_lnode_ready(ln);
   1947	tmo = CSIO_SCSI_ABRT_TMO_MS;
   1948
   1949	reinit_completion(&ioreq->cmplobj);
   1950	spin_lock_irq(&hw->lock);
   1951	rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
   1952	spin_unlock_irq(&hw->lock);
   1953
   1954	if (rv != 0) {
   1955		if (rv == -EINVAL) {
   1956			/* Return success, if abort/close request issued on
   1957			 * already completed IO
   1958			 */
   1959			return SUCCESS;
   1960		}
   1961		if (ready)
   1962			CSIO_INC_STATS(scsim, n_abrt_busy_error);
   1963		else
   1964			CSIO_INC_STATS(scsim, n_cls_busy_error);
   1965
   1966		goto inval_scmnd;
   1967	}
   1968
   1969	wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));
   1970
    1971	/* FW didn't respond to the abort within our timeout */
   1972	if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
   1973
   1974		csio_err(hw, "Abort timed out -- req: %p\n", ioreq);
   1975		CSIO_INC_STATS(scsim, n_abrt_timedout);
   1976
   1977inval_scmnd:
   1978		if (ioreq->nsge > 0)
   1979			scsi_dma_unmap(cmnd);
   1980
   1981		spin_lock_irq(&hw->lock);
   1982		csio_scsi_cmnd(ioreq) = NULL;
   1983		spin_unlock_irq(&hw->lock);
   1984
   1985		cmnd->result = (DID_ERROR << 16);
   1986		scsi_done(cmnd);
   1987
   1988		return FAILED;
   1989	}
   1990
   1991	/* FW successfully aborted the request */
   1992	if (host_byte(cmnd->result) == DID_REQUEUE) {
   1993		csio_info(hw,
   1994			"Aborted SCSI command to (%d:%llu) tag %u\n",
   1995			cmnd->device->id, cmnd->device->lun,
   1996			scsi_cmd_to_rq(cmnd)->tag);
   1997		return SUCCESS;
   1998	} else {
   1999		csio_info(hw,
   2000			"Failed to abort SCSI command, (%d:%llu) tag %u\n",
   2001			cmnd->device->id, cmnd->device->lun,
   2002			scsi_cmd_to_rq(cmnd)->tag);
   2003		return FAILED;
   2004	}
   2005}
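
/*
 * Illustrative sketch (not part of this driver): the completion pattern
 * used by the abort handler above. wait_for_completion_timeout() returns
 * 0 on timeout and the remaining jiffies otherwise, which is why the
 * handler re-checks csio_scsi_cmnd(ioreq) to tell the two cases apart.
 */
static int example_wait_for_abort(struct completion *cmpl,
				  unsigned long tmo_ms)
{
	if (!wait_for_completion_timeout(cmpl, msecs_to_jiffies(tmo_ms)))
		return -ETIMEDOUT;	/* FW never answered the abort */

	return 0;			/* abort/close completion arrived */
}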
   2006
   2007/*
   2008 * csio_tm_cbfn - TM callback function.
   2009 * @hw: HW module.
   2010 * @req: IO request.
   2011 *
   2012 * Cache the result in 'cmnd', since ioreq will be freed soon
    2013 * after we return from here, and the waiting thread shouldn't trust
   2014 * the ioreq contents.
   2015 */
   2016static void
   2017csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
   2018{
   2019	struct scsi_cmnd *cmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
   2020	struct csio_dma_buf *dma_buf;
   2021	uint8_t flags = 0;
   2022	struct fcp_resp_with_ext *fcp_resp;
   2023	struct fcp_resp_rsp_info *rsp_info;
   2024
   2025	csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n",
   2026		      req, req->wr_status);
   2027
   2028	/* Cache FW return status */
   2029	csio_priv(cmnd)->wr_status = req->wr_status;
   2030
   2031	/* Special handling based on FCP response */
   2032
   2033	/*
    2034	 * FW returns this error when the flags field is set. FCP-4 says
    2035	 * FCP_RSP_LEN_VAL shall be set in flags for TM completions.
    2036	 * So if a target sets this bit, we expect the rsp_code to be
    2037	 * FCP_TMF_CMPL for a successful TM completion; any other
    2038	 * rsp_code means the TM operation failed. If a target ignores
    2039	 * setting flags altogether, we treat the TM operation as a
    2040	 * success, and FW returns FW_SUCCESS.
   2041	 */
   2042	if (req->wr_status == FW_SCSI_RSP_ERR) {
   2043		dma_buf = &req->dma_buf;
   2044		fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
   2045		rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
   2046
   2047		flags = fcp_resp->resp.fr_flags;
   2048
   2049		/* Modify return status if flags indicate success */
   2050		if (flags & FCP_RSP_LEN_VAL)
   2051			if (rsp_info->rsp_code == FCP_TMF_CMPL)
   2052				csio_priv(cmnd)->wr_status = FW_SUCCESS;
   2053
   2054		csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);
   2055	}
   2056
   2057	/* Wake up the TM handler thread */
   2058	csio_scsi_cmnd(req) = NULL;
   2059}
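
/*
 * Illustrative sketch (not part of this driver): the FCP_RSP layout the
 * callback above relies on: struct fcp_resp_with_ext is immediately
 * followed by struct fcp_resp_rsp_info in the response buffer (see
 * include/scsi/fc/fc_fcp.h).
 */
static bool example_tmf_completed_ok(const void *rsp_buf)
{
	const struct fcp_resp_with_ext *resp = rsp_buf;
	const struct fcp_resp_rsp_info *info =
			(const struct fcp_resp_rsp_info *)(resp + 1);

	return (resp->resp.fr_flags & FCP_RSP_LEN_VAL) &&
	       info->rsp_code == FCP_TMF_CMPL;
}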
   2060
   2061static int
   2062csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
   2063{
   2064	struct csio_lnode *ln = shost_priv(cmnd->device->host);
   2065	struct csio_hw *hw = csio_lnode_to_hw(ln);
   2066	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
   2067	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
   2068	struct csio_ioreq *ioreq = NULL;
   2069	struct csio_scsi_qset *sqset;
   2070	unsigned long flags;
   2071	int retval;
   2072	int count, ret;
   2073	LIST_HEAD(local_q);
   2074	struct csio_scsi_level_data sld;
   2075
   2076	if (!rn)
   2077		goto fail;
   2078
   2079	csio_dbg(hw, "Request to reset LUN:%llu (ssni:0x%x tgtid:%d)\n",
   2080		      cmnd->device->lun, rn->flowid, rn->scsi_id);
   2081
   2082	if (!csio_is_lnode_ready(ln)) {
   2083		csio_err(hw,
   2084			 "LUN reset cannot be issued on non-ready"
   2085			 " local node vnpi:0x%x (LUN:%llu)\n",
   2086			 ln->vnp_flowid, cmnd->device->lun);
   2087		goto fail;
   2088	}
   2089
   2090	/* Lnode is ready, now wait on rport node readiness */
   2091	ret = fc_block_scsi_eh(cmnd);
   2092	if (ret)
   2093		return ret;
   2094
   2095	/*
   2096	 * If we have blocked in the previous call, at this point, either the
   2097	 * remote node has come back online, or device loss timer has fired
   2098	 * and the remote node is destroyed. Allow the LUN reset only for
   2099	 * the former case, since LUN reset is a TMF I/O on the wire, and we
   2100	 * need a valid session to issue it.
   2101	 */
   2102	if (fc_remote_port_chkready(rn->rport)) {
   2103		csio_err(hw,
   2104			 "LUN reset cannot be issued on non-ready"
   2105			 " remote node ssni:0x%x (LUN:%llu)\n",
   2106			 rn->flowid, cmnd->device->lun);
   2107		goto fail;
   2108	}
   2109
   2110	/* Get a free ioreq structure - SM is already set to uninit */
   2111	ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
   2112
   2113	if (!ioreq) {
    2114		csio_err(hw, "Out of I/O request elements. Active #:%d\n",
   2115			 scsim->stats.n_active);
   2116		goto fail;
   2117	}
   2118
   2119	sqset			= &hw->sqset[ln->portid][smp_processor_id()];
   2120	ioreq->nsge		= 0;
   2121	ioreq->lnode		= ln;
   2122	ioreq->rnode		= rn;
   2123	ioreq->iq_idx		= sqset->iq_idx;
   2124	ioreq->eq_idx		= sqset->eq_idx;
   2125
   2126	csio_scsi_cmnd(ioreq)	= cmnd;
   2127	cmnd->host_scribble	= (unsigned char *)ioreq;
   2128	csio_priv(cmnd)->wr_status = 0;
   2129
   2130	csio_priv(cmnd)->fc_tm_flags = FCP_TMF_LUN_RESET;
   2131	ioreq->tmo		= CSIO_SCSI_LUNRST_TMO_MS / 1000;
   2132
   2133	/*
    2134	 * FW times the LUN reset for ioreq->tmo, so we have to wait a little
   2135	 * longer (10s for now) than that to allow FW to return the timed
   2136	 * out command.
   2137	 */
   2138	count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS);
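	/*
	 * Worked example (CSIO_SCSI_TM_POLL_MS assumed to be 2000 for
	 * illustration): with a 30s LUN reset timeout, count =
	 * DIV_ROUND_UP((30 + 10) * 1000, 2000) = 20 polls, i.e. up to
	 * 40s of waiting below.
	 */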
   2139
   2140	/* Set cbfn */
   2141	ioreq->io_cbfn = csio_tm_cbfn;
   2142
    2143	/* Save off the ioreq info for later use */
   2144	sld.level = CSIO_LEV_LUN;
   2145	sld.lnode = ioreq->lnode;
   2146	sld.rnode = ioreq->rnode;
   2147	sld.oslun = cmnd->device->lun;
   2148
   2149	spin_lock_irqsave(&hw->lock, flags);
   2150	/* Kick off TM SM on the ioreq */
   2151	retval = csio_scsi_start_tm(ioreq);
   2152	spin_unlock_irqrestore(&hw->lock, flags);
   2153
   2154	if (retval != 0) {
   2155		csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n",
   2156			    ioreq, retval);
   2157		goto fail_ret_ioreq;
   2158	}
   2159
   2160	csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n",
   2161		    count * (CSIO_SCSI_TM_POLL_MS / 1000));
   2162	/* Wait for completion */
   2163	while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd)
   2164								&& count--)
   2165		msleep(CSIO_SCSI_TM_POLL_MS);
   2166
    2167	/* LUN reset timed out */
   2168	if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
   2169		csio_err(hw, "LUN reset (%d:%llu) timed out\n",
   2170			 cmnd->device->id, cmnd->device->lun);
   2171
   2172		spin_lock_irq(&hw->lock);
   2173		csio_scsi_drvcleanup(ioreq);
   2174		list_del_init(&ioreq->sm.sm_list);
   2175		spin_unlock_irq(&hw->lock);
   2176
   2177		goto fail_ret_ioreq;
   2178	}
   2179
   2180	/* LUN reset returned, check cached status */
   2181	if (csio_priv(cmnd)->wr_status != FW_SUCCESS) {
   2182		csio_err(hw, "LUN reset failed (%d:%llu), status: %d\n",
   2183			 cmnd->device->id, cmnd->device->lun,
   2184			 csio_priv(cmnd)->wr_status);
   2185		goto fail;
   2186	}
   2187
    2188	/* LUN reset succeeded; start aborting affected I/Os */
    2189	/*
    2190	 * Since the host guarantees that there will not be any more
    2191	 * I/Os to that LUN until the LUN reset completes, we gather
    2192	 * the pending I/Os after the LUN reset.
   2193	 */
   2194	spin_lock_irq(&hw->lock);
   2195	csio_scsi_gather_active_ios(scsim, &sld, &local_q);
   2196
   2197	retval = csio_scsi_abort_io_q(scsim, &local_q, 30000);
   2198	spin_unlock_irq(&hw->lock);
   2199
   2200	/* Aborts may have timed out */
   2201	if (retval != 0) {
   2202		csio_err(hw,
   2203			 "Attempt to abort I/Os during LUN reset of %llu"
   2204			 " returned %d\n", cmnd->device->lun, retval);
   2205		/* Return I/Os back to active_q */
   2206		spin_lock_irq(&hw->lock);
   2207		list_splice_tail_init(&local_q, &scsim->active_q);
   2208		spin_unlock_irq(&hw->lock);
   2209		goto fail;
   2210	}
   2211
   2212	CSIO_INC_STATS(rn, n_lun_rst);
   2213
   2214	csio_info(hw, "LUN reset occurred (%d:%llu)\n",
   2215		  cmnd->device->id, cmnd->device->lun);
   2216
   2217	return SUCCESS;
   2218
   2219fail_ret_ioreq:
   2220	csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
   2221fail:
   2222	CSIO_INC_STATS(rn, n_lun_rst_fail);
   2223	return FAILED;
   2224}
   2225
   2226static int
   2227csio_slave_alloc(struct scsi_device *sdev)
   2228{
   2229	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
   2230
   2231	if (!rport || fc_remote_port_chkready(rport))
   2232		return -ENXIO;
   2233
   2234	sdev->hostdata = *((struct csio_lnode **)(rport->dd_data));
   2235
   2236	return 0;
   2237}
   2238
   2239static int
   2240csio_slave_configure(struct scsi_device *sdev)
   2241{
   2242	scsi_change_queue_depth(sdev, csio_lun_qdepth);
   2243	return 0;
   2244}
   2245
   2246static void
   2247csio_slave_destroy(struct scsi_device *sdev)
   2248{
   2249	sdev->hostdata = NULL;
   2250}
   2251
   2252static int
   2253csio_scan_finished(struct Scsi_Host *shost, unsigned long time)
   2254{
   2255	struct csio_lnode *ln = shost_priv(shost);
   2256	int rv = 1;
   2257
   2258	spin_lock_irq(shost->host_lock);
   2259	if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list))
   2260		goto out;
   2261
   2262	rv = csio_scan_done(ln, jiffies, time, csio_max_scan_tmo * HZ,
   2263			    csio_delta_scan_tmo * HZ);
   2264out:
   2265	spin_unlock_irq(shost->host_lock);
   2266
   2267	return rv;
   2268}
   2269
   2270struct scsi_host_template csio_fcoe_shost_template = {
   2271	.module			= THIS_MODULE,
   2272	.name			= CSIO_DRV_DESC,
   2273	.proc_name		= KBUILD_MODNAME,
   2274	.queuecommand		= csio_queuecommand,
   2275	.cmd_size		= sizeof(struct csio_cmd_priv),
   2276	.eh_timed_out		= fc_eh_timed_out,
   2277	.eh_abort_handler	= csio_eh_abort_handler,
   2278	.eh_device_reset_handler = csio_eh_lun_reset_handler,
   2279	.slave_alloc		= csio_slave_alloc,
   2280	.slave_configure	= csio_slave_configure,
   2281	.slave_destroy		= csio_slave_destroy,
   2282	.scan_finished		= csio_scan_finished,
   2283	.this_id		= -1,
   2284	.sg_tablesize		= CSIO_SCSI_MAX_SGE,
   2285	.cmd_per_lun		= CSIO_MAX_CMD_PER_LUN,
   2286	.shost_groups		= csio_fcoe_lport_groups,
   2287	.max_sectors		= CSIO_MAX_SECTOR_SIZE,
   2288};
   2289
   2290struct scsi_host_template csio_fcoe_shost_vport_template = {
   2291	.module			= THIS_MODULE,
   2292	.name			= CSIO_DRV_DESC,
   2293	.proc_name		= KBUILD_MODNAME,
   2294	.queuecommand		= csio_queuecommand,
   2295	.eh_timed_out		= fc_eh_timed_out,
   2296	.eh_abort_handler	= csio_eh_abort_handler,
   2297	.eh_device_reset_handler = csio_eh_lun_reset_handler,
   2298	.slave_alloc		= csio_slave_alloc,
   2299	.slave_configure	= csio_slave_configure,
   2300	.slave_destroy		= csio_slave_destroy,
   2301	.scan_finished		= csio_scan_finished,
   2302	.this_id		= -1,
   2303	.sg_tablesize		= CSIO_SCSI_MAX_SGE,
   2304	.cmd_per_lun		= CSIO_MAX_CMD_PER_LUN,
   2305	.shost_groups		= csio_fcoe_vport_groups,
   2306	.max_sectors		= CSIO_MAX_SECTOR_SIZE,
   2307};
   2308
   2309/*
   2310 * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs.
   2311 * @scm: SCSI Module
   2312 * @hw: HW device.
   2313 * @buf_size: buffer size
   2314 * @num_buf : Number of buffers.
   2315 *
    2316 * This routine allocates the DMA buffers required for SCSI data xfer,
    2317 * used when the SGL buffers of a SCSI read request posted by the SCSI
    2318 * midlayer are not virtually contiguous.
   2319 */
   2320static int
   2321csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
   2322			 int buf_size, int num_buf)
   2323{
   2324	int n = 0;
   2325	struct list_head *tmp;
   2326	struct csio_dma_buf *ddp_desc = NULL;
   2327	uint32_t unit_size = 0;
   2328
   2329	if (!num_buf)
   2330		return 0;
   2331
   2332	if (!buf_size)
   2333		return -EINVAL;
   2334
   2335	INIT_LIST_HEAD(&scm->ddp_freelist);
   2336
   2337	/* Align buf size to page size */
   2338	buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK;
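	/*
	 * Worked example: with PAGE_SIZE 4096, a 5000-byte request becomes
	 * (5000 + 4095) & ~4095 = 8192, i.e. it is rounded up to two pages.
	 */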
   2339	/* Initialize dma descriptors */
   2340	for (n = 0; n < num_buf; n++) {
   2341		/* Set unit size to request size */
   2342		unit_size = buf_size;
   2343		ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL);
   2344		if (!ddp_desc) {
   2345			csio_err(hw,
   2346				 "Failed to allocate ddp descriptors,"
   2347				 " Num allocated = %d.\n",
   2348				 scm->stats.n_free_ddp);
   2349			goto no_mem;
   2350		}
   2351
   2352		/* Allocate Dma buffers for DDP */
   2353		ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size,
   2354				&ddp_desc->paddr, GFP_KERNEL);
   2355		if (!ddp_desc->vaddr) {
   2356			csio_err(hw,
   2357				 "SCSI response DMA buffer (ddp) allocation"
   2358				 " failed!\n");
   2359			kfree(ddp_desc);
   2360			goto no_mem;
   2361		}
   2362
   2363		ddp_desc->len = unit_size;
   2364
    2365		/* Add it to the scsi ddp freelist */
   2366		list_add_tail(&ddp_desc->list, &scm->ddp_freelist);
   2367		CSIO_INC_STATS(scm, n_free_ddp);
   2368	}
   2369
   2370	return 0;
   2371no_mem:
   2372	/* release dma descs back to freelist and free dma memory */
   2373	list_for_each(tmp, &scm->ddp_freelist) {
   2374		ddp_desc = (struct csio_dma_buf *) tmp;
   2375		tmp = csio_list_prev(tmp);
   2376		dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
   2377				  ddp_desc->vaddr, ddp_desc->paddr);
   2378		list_del_init(&ddp_desc->list);
   2379		kfree(ddp_desc);
   2380	}
   2381	scm->stats.n_free_ddp = 0;
   2382
   2383	return -ENOMEM;
   2384}
   2385
   2386/*
   2387 * csio_scsi_free_ddp_bufs - free DDP buffers of unaligned SGLs.
   2388 * @scm: SCSI Module
   2389 * @hw: HW device.
   2390 *
   2391 * This routine frees ddp buffers.
   2392 */
   2393static void
   2394csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
   2395{
   2396	struct list_head *tmp;
   2397	struct csio_dma_buf *ddp_desc;
   2398
   2399	/* release dma descs back to freelist and free dma memory */
   2400	list_for_each(tmp, &scm->ddp_freelist) {
   2401		ddp_desc = (struct csio_dma_buf *) tmp;
   2402		tmp = csio_list_prev(tmp);
   2403		dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
   2404				  ddp_desc->vaddr, ddp_desc->paddr);
   2405		list_del_init(&ddp_desc->list);
   2406		kfree(ddp_desc);
   2407	}
   2408	scm->stats.n_free_ddp = 0;
   2409}
   2410
   2411/**
   2412 * csio_scsim_init - Initialize SCSI Module
   2413 * @scm:	SCSI Module
   2414 * @hw:		HW module
   2415 *
   2416 */
   2417int
   2418csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw)
   2419{
   2420	int i;
   2421	struct csio_ioreq *ioreq;
   2422	struct csio_dma_buf *dma_buf;
   2423
   2424	INIT_LIST_HEAD(&scm->active_q);
   2425	scm->hw = hw;
   2426
   2427	scm->proto_cmd_len = sizeof(struct fcp_cmnd);
   2428	scm->proto_rsp_len = CSIO_SCSI_RSP_LEN;
   2429	scm->max_sge = CSIO_SCSI_MAX_SGE;
   2430
   2431	spin_lock_init(&scm->freelist_lock);
   2432
   2433	/* Pre-allocate ioreqs and initialize them */
   2434	INIT_LIST_HEAD(&scm->ioreq_freelist);
   2435	for (i = 0; i < csio_scsi_ioreqs; i++) {
   2436
   2437		ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
   2438		if (!ioreq) {
   2439			csio_err(hw,
    2440				 "I/O request element allocation failed,"
   2441				 " Num allocated = %d.\n",
   2442				 scm->stats.n_free_ioreq);
   2443
   2444			goto free_ioreq;
   2445		}
   2446
   2447		/* Allocate Dma buffers for Response Payload */
   2448		dma_buf = &ioreq->dma_buf;
   2449		dma_buf->vaddr = dma_pool_alloc(hw->scsi_dma_pool, GFP_KERNEL,
   2450						&dma_buf->paddr);
   2451		if (!dma_buf->vaddr) {
   2452			csio_err(hw,
   2453				 "SCSI response DMA buffer allocation"
   2454				 " failed!\n");
   2455			kfree(ioreq);
   2456			goto free_ioreq;
   2457		}
   2458
   2459		dma_buf->len = scm->proto_rsp_len;
   2460
   2461		/* Set state to uninit */
   2462		csio_init_state(&ioreq->sm, csio_scsis_uninit);
   2463		INIT_LIST_HEAD(&ioreq->gen_list);
   2464		init_completion(&ioreq->cmplobj);
   2465
   2466		list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
   2467		CSIO_INC_STATS(scm, n_free_ioreq);
   2468	}
   2469
   2470	if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs))
   2471		goto free_ioreq;
   2472
   2473	return 0;
   2474
   2475free_ioreq:
   2476	/*
   2477	 * Free up existing allocations, since an error
   2478	 * from here means we are returning for good
   2479	 */
   2480	while (!list_empty(&scm->ioreq_freelist)) {
   2481		struct csio_sm *tmp;
   2482
   2483		tmp = list_first_entry(&scm->ioreq_freelist,
   2484				       struct csio_sm, sm_list);
   2485		list_del_init(&tmp->sm_list);
   2486		ioreq = (struct csio_ioreq *)tmp;
   2487
   2488		dma_buf = &ioreq->dma_buf;
   2489		dma_pool_free(hw->scsi_dma_pool, dma_buf->vaddr,
   2490			      dma_buf->paddr);
   2491
   2492		kfree(ioreq);
   2493	}
   2494
   2495	scm->stats.n_free_ioreq = 0;
   2496
   2497	return -ENOMEM;
   2498}
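
/*
 * Illustrative sketch (not part of this driver): the dma_pool lifecycle
 * assumed by csio_scsim_init()/csio_scsim_exit() above. hw->scsi_dma_pool
 * itself is created elsewhere in the driver; the generic pattern is:
 */
static void example_dma_pool_cycle(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t paddr;
	void *vaddr;

	/* name, device, block size, alignment, boundary (0 = none) */
	pool = dma_pool_create("example", dev, 256, 8, 0);
	if (!pool)
		return;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &paddr);
	if (vaddr)
		dma_pool_free(pool, vaddr, paddr);

	dma_pool_destroy(pool);
}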
   2499
   2500/**
    2501 * csio_scsim_exit - Uninitialize SCSI Module
   2502 * @scm: SCSI Module
   2503 *
   2504 */
   2505void
   2506csio_scsim_exit(struct csio_scsim *scm)
   2507{
   2508	struct csio_ioreq *ioreq;
   2509	struct csio_dma_buf *dma_buf;
   2510
   2511	while (!list_empty(&scm->ioreq_freelist)) {
   2512		struct csio_sm *tmp;
   2513
   2514		tmp = list_first_entry(&scm->ioreq_freelist,
   2515				       struct csio_sm, sm_list);
   2516		list_del_init(&tmp->sm_list);
   2517		ioreq = (struct csio_ioreq *)tmp;
   2518
   2519		dma_buf = &ioreq->dma_buf;
   2520		dma_pool_free(scm->hw->scsi_dma_pool, dma_buf->vaddr,
   2521			      dma_buf->paddr);
   2522
   2523		kfree(ioreq);
   2524	}
   2525
   2526	scm->stats.n_free_ioreq = 0;
   2527
   2528	csio_scsi_free_ddp_bufs(scm, scm->hw);
   2529}