cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qedf_io.c (73653B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 *  QLogic FCoE Offload Driver
      4 *  Copyright (c) 2016-2018 Cavium Inc.
      5 */
      6#include <linux/spinlock.h>
      7#include <linux/vmalloc.h>
      8#include "qedf.h"
      9#include <scsi/scsi_tcq.h>
     10
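        /* Arm the per-command timeout by queueing the io_req's delayed timeout work. */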
     11void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
     12	unsigned int timer_msec)
     13{
     14	queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
     15	    msecs_to_jiffies(timer_msec));
     16}
     17
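        /* Delayed-work handler run when a command's timeout fires (ABTS, ELS or sequence cleanup). */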
     18static void qedf_cmd_timeout(struct work_struct *work)
     19{
     20
     21	struct qedf_ioreq *io_req =
     22	    container_of(work, struct qedf_ioreq, timeout_work.work);
     23	struct qedf_ctx *qedf;
     24	struct qedf_rport *fcport;
     25
     26	fcport = io_req->fcport;
     27	if (io_req->fcport == NULL) {
     28		QEDF_INFO(NULL, QEDF_LOG_IO,  "fcport is NULL.\n");
     29		return;
     30	}
     31
     32	qedf = fcport->qedf;
     33
     34	switch (io_req->cmd_type) {
     35	case QEDF_ABTS:
     36		if (qedf == NULL) {
     37			QEDF_INFO(NULL, QEDF_LOG_IO,
     38				  "qedf is NULL for ABTS xid=0x%x.\n",
     39				  io_req->xid);
     40			return;
     41		}
     42
     43		QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
     44		    io_req->xid);
     45		/* Cleanup timed out ABTS */
     46		qedf_initiate_cleanup(io_req, true);
     47		complete(&io_req->abts_done);
     48
     49		/*
     50		 * Need to call kref_put for reference taken when initiate_abts
     51		 * was called since abts_compl won't be called now that we've
     52		 * cleaned up the task.
     53		 */
     54		kref_put(&io_req->refcount, qedf_release_cmd);
     55
     56		/* Clear in abort bit now that we're done with the command */
     57		clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
     58
     59		/*
     60		 * Now that the original I/O and the ABTS are complete see
     61		 * if we need to reconnect to the target.
     62		 */
     63		qedf_restart_rport(fcport);
     64		break;
     65	case QEDF_ELS:
     66		if (!qedf) {
     67			QEDF_INFO(NULL, QEDF_LOG_IO,
     68				  "qedf is NULL for ELS xid=0x%x.\n",
     69				  io_req->xid);
     70			return;
     71		}
     72		/* ELS request no longer outstanding since it timed out */
     73		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
     74
     75		kref_get(&io_req->refcount);
     76		/*
      77		 * Don't attempt to clean an ELS timeout as any subsequent
     78		 * ABTS or cleanup requests just hang.  For now just free
     79		 * the resources of the original I/O and the RRQ
     80		 */
     81		QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
     82			  io_req->xid);
     83		qedf_initiate_cleanup(io_req, true);
     84		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
     85		/* Call callback function to complete command */
     86		if (io_req->cb_func && io_req->cb_arg) {
     87			io_req->cb_func(io_req->cb_arg);
     88			io_req->cb_arg = NULL;
     89		}
     90		kref_put(&io_req->refcount, qedf_release_cmd);
     91		break;
     92	case QEDF_SEQ_CLEANUP:
     93		QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
     94		    "xid=0x%x.\n", io_req->xid);
     95		qedf_initiate_cleanup(io_req, true);
     96		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
     97		qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
     98		break;
     99	default:
    100		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
    101			  "Hit default case, xid=0x%x.\n", io_req->xid);
    102		break;
    103	}
    104}
    105
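        /* Tear down the command manager: free BD tables, the io_bdt pool and per-command resources. */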
    106void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
    107{
    108	struct io_bdt *bdt_info;
    109	struct qedf_ctx *qedf = cmgr->qedf;
    110	size_t bd_tbl_sz;
    111	u16 min_xid = 0;
    112	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
    113	int num_ios;
    114	int i;
    115	struct qedf_ioreq *io_req;
    116
    117	num_ios = max_xid - min_xid + 1;
    118
    119	/* Free fcoe_bdt_ctx structures */
    120	if (!cmgr->io_bdt_pool) {
    121		QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n");
    122		goto free_cmd_pool;
    123	}
    124
    125	bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
    126	for (i = 0; i < num_ios; i++) {
    127		bdt_info = cmgr->io_bdt_pool[i];
    128		if (bdt_info->bd_tbl) {
    129			dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
    130			    bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
    131			bdt_info->bd_tbl = NULL;
    132		}
    133	}
    134
    135	/* Destroy io_bdt pool */
    136	for (i = 0; i < num_ios; i++) {
    137		kfree(cmgr->io_bdt_pool[i]);
    138		cmgr->io_bdt_pool[i] = NULL;
    139	}
    140
    141	kfree(cmgr->io_bdt_pool);
    142	cmgr->io_bdt_pool = NULL;
    143
    144free_cmd_pool:
    145
    146	for (i = 0; i < num_ios; i++) {
    147		io_req = &cmgr->cmds[i];
    148		kfree(io_req->sgl_task_params);
    149		kfree(io_req->task_params);
    150		/* Make sure we free per command sense buffer */
    151		if (io_req->sense_buffer)
    152			dma_free_coherent(&qedf->pdev->dev,
    153			    QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
    154			    io_req->sense_buffer_dma);
    155		cancel_delayed_work_sync(&io_req->rrq_work);
    156	}
    157
    158	/* Free command manager itself */
    159	vfree(cmgr);
    160}
    161
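        /* Delayed-work handler that marks the command RRQ-active and sends the RRQ. */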
    162static void qedf_handle_rrq(struct work_struct *work)
    163{
    164	struct qedf_ioreq *io_req =
    165	    container_of(work, struct qedf_ioreq, rrq_work.work);
    166
    167	atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
    168	qedf_send_rrq(io_req);
    169
    170}
    171
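        /* Allocate the command manager and per-command resources (sense buffers, task params, BD tables). */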
    172struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
    173{
    174	struct qedf_cmd_mgr *cmgr;
    175	struct io_bdt *bdt_info;
    176	struct qedf_ioreq *io_req;
    177	u16 xid;
    178	int i;
    179	int num_ios;
    180	u16 min_xid = 0;
    181	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
    182
    183	/* Make sure num_queues is already set before calling this function */
    184	if (!qedf->num_queues) {
    185		QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
    186		return NULL;
    187	}
    188
    189	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
    190		QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
    191			   "max_xid 0x%x.\n", min_xid, max_xid);
    192		return NULL;
    193	}
    194
    195	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
    196		   "0x%x.\n", min_xid, max_xid);
    197
    198	num_ios = max_xid - min_xid + 1;
    199
    200	cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
    201	if (!cmgr) {
    202		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
    203		return NULL;
    204	}
    205
    206	cmgr->qedf = qedf;
    207	spin_lock_init(&cmgr->lock);
    208
    209	/*
    210	 * Initialize I/O request fields.
    211	 */
    212	xid = 0;
    213
    214	for (i = 0; i < num_ios; i++) {
    215		io_req = &cmgr->cmds[i];
    216		INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);
    217
    218		io_req->xid = xid++;
    219
    220		INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);
    221
    222		/* Allocate DMA memory to hold sense buffer */
    223		io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
    224		    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
    225		    GFP_KERNEL);
    226		if (!io_req->sense_buffer) {
    227			QEDF_ERR(&qedf->dbg_ctx,
    228				 "Failed to alloc sense buffer.\n");
    229			goto mem_err;
    230		}
    231
     232		/* Allocate task parameters to pass to f/w init functions */
    233		io_req->task_params = kzalloc(sizeof(*io_req->task_params),
    234					      GFP_KERNEL);
    235		if (!io_req->task_params) {
    236			QEDF_ERR(&(qedf->dbg_ctx),
    237				 "Failed to allocate task_params for xid=0x%x\n",
    238				 i);
    239			goto mem_err;
    240		}
    241
    242		/*
    243		 * Allocate scatter/gather list info to pass to f/w init
    244		 * functions.
    245		 */
    246		io_req->sgl_task_params = kzalloc(
    247		    sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
    248		if (!io_req->sgl_task_params) {
    249			QEDF_ERR(&(qedf->dbg_ctx),
    250				 "Failed to allocate sgl_task_params for xid=0x%x\n",
    251				 i);
    252			goto mem_err;
    253		}
    254	}
    255
    256	/* Allocate pool of io_bdts - one for each qedf_ioreq */
    257	cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
    258	    GFP_KERNEL);
    259
    260	if (!cmgr->io_bdt_pool) {
    261		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
    262		goto mem_err;
    263	}
    264
    265	for (i = 0; i < num_ios; i++) {
    266		cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
    267		    GFP_KERNEL);
    268		if (!cmgr->io_bdt_pool[i]) {
    269			QEDF_WARN(&(qedf->dbg_ctx),
    270				  "Failed to alloc io_bdt_pool[%d].\n", i);
    271			goto mem_err;
    272		}
    273	}
    274
    275	for (i = 0; i < num_ios; i++) {
    276		bdt_info = cmgr->io_bdt_pool[i];
    277		bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
    278		    QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
    279		    &bdt_info->bd_tbl_dma, GFP_KERNEL);
    280		if (!bdt_info->bd_tbl) {
    281			QEDF_WARN(&(qedf->dbg_ctx),
    282				  "Failed to alloc bdt_tbl[%d].\n", i);
    283			goto mem_err;
    284		}
    285	}
    286	atomic_set(&cmgr->free_list_cnt, num_ios);
    287	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
    288	    "cmgr->free_list_cnt=%d.\n",
    289	    atomic_read(&cmgr->free_list_cnt));
    290
    291	return cmgr;
    292
    293mem_err:
    294	qedf_cmd_mgr_free(cmgr);
    295	return NULL;
    296}
    297
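        /* Allocate a free qedf_ioreq for this fcport, enforcing per-connection and global task limits. */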
    298struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
    299{
    300	struct qedf_ctx *qedf = fcport->qedf;
    301	struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
    302	struct qedf_ioreq *io_req = NULL;
    303	struct io_bdt *bd_tbl;
    304	u16 xid;
    305	uint32_t free_sqes;
    306	int i;
    307	unsigned long flags;
    308
    309	free_sqes = atomic_read(&fcport->free_sqes);
    310
    311	if (!free_sqes) {
    312		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
    313		    "Returning NULL, free_sqes=%d.\n ",
    314		    free_sqes);
    315		goto out_failed;
    316	}
    317
    318	/* Limit the number of outstanding R/W tasks */
    319	if ((atomic_read(&fcport->num_active_ios) >=
    320	    NUM_RW_TASKS_PER_CONNECTION)) {
    321		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
    322		    "Returning NULL, num_active_ios=%d.\n",
    323		    atomic_read(&fcport->num_active_ios));
    324		goto out_failed;
    325	}
    326
     327	/* Limit global TIDs for certain tasks */
    328	if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
    329		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
    330		    "Returning NULL, free_list_cnt=%d.\n",
    331		    atomic_read(&cmd_mgr->free_list_cnt));
    332		goto out_failed;
    333	}
    334
    335	spin_lock_irqsave(&cmd_mgr->lock, flags);
    336	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
    337		io_req = &cmd_mgr->cmds[cmd_mgr->idx];
    338		cmd_mgr->idx++;
    339		if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
    340			cmd_mgr->idx = 0;
    341
    342		/* Check to make sure command was previously freed */
    343		if (!io_req->alloc)
    344			break;
    345	}
    346
    347	if (i == FCOE_PARAMS_NUM_TASKS) {
    348		spin_unlock_irqrestore(&cmd_mgr->lock, flags);
    349		goto out_failed;
    350	}
    351
    352	if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
    353		QEDF_ERR(&qedf->dbg_ctx,
    354			 "io_req found to be dirty ox_id = 0x%x.\n",
    355			 io_req->xid);
    356
    357	/* Clear any flags now that we've reallocated the xid */
    358	io_req->flags = 0;
    359	io_req->alloc = 1;
    360	spin_unlock_irqrestore(&cmd_mgr->lock, flags);
    361
    362	atomic_inc(&fcport->num_active_ios);
    363	atomic_dec(&fcport->free_sqes);
    364	xid = io_req->xid;
    365	atomic_dec(&cmd_mgr->free_list_cnt);
    366
    367	io_req->cmd_mgr = cmd_mgr;
    368	io_req->fcport = fcport;
    369
    370	/* Clear any stale sc_cmd back pointer */
    371	io_req->sc_cmd = NULL;
    372	io_req->lun = -1;
    373
    374	/* Hold the io_req against deletion */
    375	kref_init(&io_req->refcount);	/* ID: 001 */
    376	atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);
    377
    378	/* Bind io_bdt for this io_req */
    379	/* Have a static link between io_req and io_bdt_pool */
    380	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
    381	if (bd_tbl == NULL) {
    382		QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
    383		kref_put(&io_req->refcount, qedf_release_cmd);
    384		goto out_failed;
    385	}
    386	bd_tbl->io_req = io_req;
    387	io_req->cmd_type = cmd_type;
    388	io_req->tm_flags = 0;
    389
    390	/* Reset sequence offset data */
    391	io_req->rx_buf_off = 0;
    392	io_req->tx_buf_off = 0;
     393	io_req->rx_id = 0xffff; /* No RX_ID */
    394
    395	return io_req;
    396
    397out_failed:
    398	/* Record failure for stats and return NULL to caller */
    399	qedf->alloc_failures++;
    400	return NULL;
    401}
    402
    403static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
    404{
    405	struct qedf_mp_req *mp_req = &(io_req->mp_req);
    406	struct qedf_ctx *qedf = io_req->fcport->qedf;
    407	uint64_t sz = sizeof(struct scsi_sge);
    408
     409	/* Free middle path request/response buffers and BD tables */
    410	if (mp_req->mp_req_bd) {
    411		dma_free_coherent(&qedf->pdev->dev, sz,
    412		    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
    413		mp_req->mp_req_bd = NULL;
    414	}
    415	if (mp_req->mp_resp_bd) {
    416		dma_free_coherent(&qedf->pdev->dev, sz,
    417		    mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
    418		mp_req->mp_resp_bd = NULL;
    419	}
    420	if (mp_req->req_buf) {
    421		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
    422		    mp_req->req_buf, mp_req->req_buf_dma);
    423		mp_req->req_buf = NULL;
    424	}
    425	if (mp_req->resp_buf) {
    426		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
    427		    mp_req->resp_buf, mp_req->resp_buf_dma);
    428		mp_req->resp_buf = NULL;
    429	}
    430}
    431
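        /* kref release callback: free middle-path resources if needed and return the io_req to the free pool. */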
    432void qedf_release_cmd(struct kref *ref)
    433{
    434	struct qedf_ioreq *io_req =
    435	    container_of(ref, struct qedf_ioreq, refcount);
    436	struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
    437	struct qedf_rport *fcport = io_req->fcport;
    438	unsigned long flags;
    439
    440	if (io_req->cmd_type == QEDF_SCSI_CMD) {
    441		QEDF_WARN(&fcport->qedf->dbg_ctx,
     442			  "Cmd release called without scsi_done having been called, io_req %p xid=0x%x.\n",
    443			  io_req, io_req->xid);
    444		WARN_ON(io_req->sc_cmd);
    445	}
    446
    447	if (io_req->cmd_type == QEDF_ELS ||
    448	    io_req->cmd_type == QEDF_TASK_MGMT_CMD)
    449		qedf_free_mp_resc(io_req);
    450
    451	atomic_inc(&cmd_mgr->free_list_cnt);
    452	atomic_dec(&fcport->num_active_ios);
    453	atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
    454	if (atomic_read(&fcport->num_active_ios) < 0) {
    455		QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
    456		WARN_ON(1);
    457	}
    458
    459	/* Increment task retry identifier now that the request is released */
    460	io_req->task_retry_identifier++;
    461	io_req->fcport = NULL;
    462
    463	clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
    464	io_req->cpu = 0;
    465	spin_lock_irqsave(&cmd_mgr->lock, flags);
    466	io_req->fcport = NULL;
    467	io_req->alloc = 0;
    468	spin_unlock_irqrestore(&cmd_mgr->lock, flags);
    469}
    470
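        /* DMA-map the SCSI scatter/gather list and fill the BD table; returns the BD count. */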
    471static int qedf_map_sg(struct qedf_ioreq *io_req)
    472{
    473	struct scsi_cmnd *sc = io_req->sc_cmd;
    474	struct Scsi_Host *host = sc->device->host;
    475	struct fc_lport *lport = shost_priv(host);
    476	struct qedf_ctx *qedf = lport_priv(lport);
    477	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
    478	struct scatterlist *sg;
    479	int byte_count = 0;
    480	int sg_count = 0;
    481	int bd_count = 0;
    482	u32 sg_len;
    483	u64 addr;
    484	int i = 0;
    485
    486	sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
    487	    scsi_sg_count(sc), sc->sc_data_direction);
    488	sg = scsi_sglist(sc);
    489
    490	io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;
    491
    492	if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
    493		io_req->sge_type = QEDF_IOREQ_FAST_SGE;
    494
    495	scsi_for_each_sg(sc, sg, sg_count, i) {
    496		sg_len = (u32)sg_dma_len(sg);
    497		addr = (u64)sg_dma_address(sg);
    498
    499		/*
    500		 * Intermediate s/g element so check if start address
    501		 * is page aligned.  Only required for writes and only if the
    502		 * number of scatter/gather elements is 8 or more.
    503		 */
    504		if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
    505		    (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
    506			io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
    507
    508		bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
    509		bd[bd_count].sge_addr.hi  = cpu_to_le32(U64_HI(addr));
    510		bd[bd_count].sge_len = cpu_to_le32(sg_len);
    511
    512		bd_count++;
    513		byte_count += sg_len;
    514	}
    515
     516	/* If neither FAST nor SLOW was set, default to FAST */
    517	if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
    518		io_req->sge_type = QEDF_IOREQ_FAST_SGE;
    519
    520	if (byte_count != scsi_bufflen(sc))
    521		QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
    522			  "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
    523			   scsi_bufflen(sc), io_req->xid);
    524
    525	return bd_count;
    526}
    527
    528static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
    529{
    530	struct scsi_cmnd *sc = io_req->sc_cmd;
    531	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
    532	int bd_count;
    533
    534	if (scsi_sg_count(sc)) {
    535		bd_count = qedf_map_sg(io_req);
    536		if (bd_count == 0)
    537			return -ENOMEM;
    538	} else {
    539		bd_count = 0;
    540		bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
    541		bd[0].sge_len = 0;
    542	}
    543	io_req->bd_tbl->bd_valid = bd_count;
    544
    545	return 0;
    546}
    547
    548static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
    549				  struct fcp_cmnd *fcp_cmnd)
    550{
    551	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
    552
    553	/* fcp_cmnd is 32 bytes */
    554	memset(fcp_cmnd, 0, FCP_CMND_LEN);
    555
    556	/* 8 bytes: SCSI LUN info */
    557	int_to_scsilun(sc_cmd->device->lun,
    558			(struct scsi_lun *)&fcp_cmnd->fc_lun);
    559
    560	/* 4 bytes: flag info */
    561	fcp_cmnd->fc_pri_ta = 0;
    562	fcp_cmnd->fc_tm_flags = io_req->tm_flags;
    563	fcp_cmnd->fc_flags = io_req->io_req_flags;
    564	fcp_cmnd->fc_cmdref = 0;
    565
    566	/* Populate data direction */
    567	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
    568		fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
    569	} else {
    570		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
    571			fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
    572		else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
    573			fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
    574	}
    575
    576	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
    577
    578	/* 16 bytes: CDB information */
    579	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
    580		memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
    581
    582	/* 4 bytes: FCP data length */
    583	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
    584}
    585
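        /* Initialize the firmware task context, SGL parameters and FCP_CMND for a read/write command. */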
    586static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
    587	struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
    588	struct fcoe_wqe *sqe)
    589{
    590	enum fcoe_task_type task_type;
    591	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
    592	struct io_bdt *bd_tbl = io_req->bd_tbl;
    593	u8 fcp_cmnd[32];
    594	u32 tmp_fcp_cmnd[8];
    595	int bd_count = 0;
    596	struct qedf_ctx *qedf = fcport->qedf;
    597	uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
    598	struct regpair sense_data_buffer_phys_addr;
    599	u32 tx_io_size = 0;
    600	u32 rx_io_size = 0;
    601	int i, cnt;
    602
    603	/* Note init_initiator_rw_fcoe_task memsets the task context */
    604	io_req->task = task_ctx;
    605	memset(task_ctx, 0, sizeof(struct fcoe_task_context));
    606	memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
    607	memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
    608
     609	/* Set task type based on the DMA direction of the command */
    610	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
    611		task_type = FCOE_TASK_TYPE_READ_INITIATOR;
    612	} else {
    613		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
    614			task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
    615			tx_io_size = io_req->data_xfer_len;
    616		} else {
    617			task_type = FCOE_TASK_TYPE_READ_INITIATOR;
    618			rx_io_size = io_req->data_xfer_len;
    619		}
    620	}
    621
    622	/* Setup the fields for fcoe_task_params */
    623	io_req->task_params->context = task_ctx;
    624	io_req->task_params->sqe = sqe;
    625	io_req->task_params->task_type = task_type;
    626	io_req->task_params->tx_io_size = tx_io_size;
    627	io_req->task_params->rx_io_size = rx_io_size;
    628	io_req->task_params->conn_cid = fcport->fw_cid;
    629	io_req->task_params->itid = io_req->xid;
    630	io_req->task_params->cq_rss_number = cq_idx;
    631	io_req->task_params->is_tape_device = fcport->dev_type;
    632
    633	/* Fill in information for scatter/gather list */
    634	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
    635		bd_count = bd_tbl->bd_valid;
    636		io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
    637		io_req->sgl_task_params->sgl_phys_addr.lo =
    638			U64_LO(bd_tbl->bd_tbl_dma);
    639		io_req->sgl_task_params->sgl_phys_addr.hi =
    640			U64_HI(bd_tbl->bd_tbl_dma);
    641		io_req->sgl_task_params->num_sges = bd_count;
    642		io_req->sgl_task_params->total_buffer_size =
    643		    scsi_bufflen(io_req->sc_cmd);
    644		if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
    645			io_req->sgl_task_params->small_mid_sge = 1;
    646		else
    647			io_req->sgl_task_params->small_mid_sge = 0;
    648	}
    649
    650	/* Fill in physical address of sense buffer */
    651	sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
    652	sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
    653
    654	/* fill FCP_CMND IU */
    655	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
    656
    657	/* Swap fcp_cmnd since FC is big endian */
    658	cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
    659	for (i = 0; i < cnt; i++) {
    660		tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
    661	}
    662	memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
    663
    664	init_initiator_rw_fcoe_task(io_req->task_params,
    665				    io_req->sgl_task_params,
    666				    sense_data_buffer_phys_addr,
    667				    io_req->task_retry_identifier, fcp_cmnd);
    668
    669	/* Increment SGL type counters */
    670	if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
    671		qedf->slow_sge_ios++;
    672	else
    673		qedf->fast_sge_ios++;
    674}
    675
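        /* Initialize the firmware task context for a middle-path request (ELS or task management). */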
    676void qedf_init_mp_task(struct qedf_ioreq *io_req,
    677	struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
    678{
    679	struct qedf_mp_req *mp_req = &(io_req->mp_req);
    680	struct qedf_rport *fcport = io_req->fcport;
    681	struct qedf_ctx *qedf = io_req->fcport->qedf;
    682	struct fc_frame_header *fc_hdr;
    683	struct fcoe_tx_mid_path_params task_fc_hdr;
    684	struct scsi_sgl_task_params tx_sgl_task_params;
    685	struct scsi_sgl_task_params rx_sgl_task_params;
    686
    687	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
    688		  "Initializing MP task for cmd_type=%d\n",
    689		  io_req->cmd_type);
    690
    691	qedf->control_requests++;
    692
    693	memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
    694	memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
    695	memset(task_ctx, 0, sizeof(struct fcoe_task_context));
    696	memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
    697
    698	/* Setup the task from io_req for easy reference */
    699	io_req->task = task_ctx;
    700
    701	/* Setup the fields for fcoe_task_params */
    702	io_req->task_params->context = task_ctx;
    703	io_req->task_params->sqe = sqe;
    704	io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
    705	io_req->task_params->tx_io_size = io_req->data_xfer_len;
    706	/* rx_io_size tells the f/w how large a response buffer we have */
    707	io_req->task_params->rx_io_size = PAGE_SIZE;
    708	io_req->task_params->conn_cid = fcport->fw_cid;
    709	io_req->task_params->itid = io_req->xid;
    710	/* Return middle path commands on CQ 0 */
    711	io_req->task_params->cq_rss_number = 0;
    712	io_req->task_params->is_tape_device = fcport->dev_type;
    713
    714	fc_hdr = &(mp_req->req_fc_hdr);
    715	/* Set OX_ID and RX_ID based on driver task id */
    716	fc_hdr->fh_ox_id = io_req->xid;
    717	fc_hdr->fh_rx_id = htons(0xffff);
    718
    719	/* Set up FC header information */
    720	task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
    721	task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
    722	task_fc_hdr.type = fc_hdr->fh_type;
    723	task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
    724	task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
    725	task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
    726	task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
    727
    728	/* Set up s/g list parameters for request buffer */
    729	tx_sgl_task_params.sgl = mp_req->mp_req_bd;
    730	tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
    731	tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
    732	tx_sgl_task_params.num_sges = 1;
    733	/* Set PAGE_SIZE for now since sg element is that size ??? */
    734	tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
    735	tx_sgl_task_params.small_mid_sge = 0;
    736
     737	/* Set up s/g list parameters for response buffer */
    738	rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
    739	rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
    740	rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
    741	rx_sgl_task_params.num_sges = 1;
    742	/* Set PAGE_SIZE for now since sg element is that size ??? */
    743	rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
    744	rx_sgl_task_params.small_mid_sge = 0;
    745
    746
    747	/*
     748	 * The last argument is 0 because the previous code did not indicate
     749	 * that the FC header information was wanted.
    750	 */
    751	init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
    752						     &task_fc_hdr,
    753						     &tx_sgl_task_params,
    754						     &rx_sgl_task_params, 0);
    755}
    756
    757/* Presumed that fcport->rport_lock is held */
    758u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
    759{
    760	uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
    761	u16 rval;
    762
    763	rval = fcport->sq_prod_idx;
    764
    765	/* Adjust ring index */
    766	fcport->sq_prod_idx++;
    767	fcport->fw_sq_prod_idx++;
    768	if (fcport->sq_prod_idx == total_sqe)
    769		fcport->sq_prod_idx = 0;
    770
    771	return rval;
    772}
    773
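        /* Notify the firmware of new SQEs by writing the producer index to the doorbell register. */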
    774void qedf_ring_doorbell(struct qedf_rport *fcport)
    775{
    776	struct fcoe_db_data dbell = { 0 };
    777
    778	dbell.agg_flags = 0;
    779
    780	dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
    781	dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
    782	dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
    783	    FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
    784
    785	dbell.sq_prod = fcport->fw_sq_prod_idx;
    786	/* wmb makes sure that the BDs data is updated before updating the
    787	 * producer, otherwise FW may read old data from the BDs.
    788	 */
    789	wmb();
    790	barrier();
    791	writel(*(u32 *)&dbell, fcport->p_doorbell);
    792	/*
    793	 * Fence required to flush the write combined buffer, since another
    794	 * CPU may write to the same doorbell address and data may be lost
    795	 * due to relaxed order nature of write combined bar.
    796	 */
    797	wmb();
    798}
    799
    800static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
    801			  int8_t direction)
    802{
    803	struct qedf_ctx *qedf = fcport->qedf;
    804	struct qedf_io_log *io_log;
    805	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
    806	unsigned long flags;
    807
    808	spin_lock_irqsave(&qedf->io_trace_lock, flags);
    809
    810	io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
    811	io_log->direction = direction;
    812	io_log->task_id = io_req->xid;
    813	io_log->port_id = fcport->rdata->ids.port_id;
    814	io_log->lun = sc_cmd->device->lun;
    815	io_log->op = sc_cmd->cmnd[0];
    816	io_log->lba[0] = sc_cmd->cmnd[2];
    817	io_log->lba[1] = sc_cmd->cmnd[3];
    818	io_log->lba[2] = sc_cmd->cmnd[4];
    819	io_log->lba[3] = sc_cmd->cmnd[5];
    820	io_log->bufflen = scsi_bufflen(sc_cmd);
    821	io_log->sg_count = scsi_sg_count(sc_cmd);
    822	io_log->result = sc_cmd->result;
    823	io_log->jiffies = jiffies;
    824	io_log->refcount = kref_read(&io_req->refcount);
    825
    826	if (direction == QEDF_IO_TRACE_REQ) {
     827		/* For requests we only care about the submission CPU */
    828		io_log->req_cpu = io_req->cpu;
    829		io_log->int_cpu = 0;
    830		io_log->rsp_cpu = 0;
    831	} else if (direction == QEDF_IO_TRACE_RSP) {
    832		io_log->req_cpu = io_req->cpu;
    833		io_log->int_cpu = io_req->int_cpu;
    834		io_log->rsp_cpu = smp_processor_id();
    835	}
    836
    837	io_log->sge_type = io_req->sge_type;
    838
    839	qedf->io_trace_idx++;
    840	if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
    841		qedf->io_trace_idx = 0;
    842
    843	spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
    844}
    845
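        /* Build the BD list and task context for a SCSI command and submit it to the firmware. */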
    846int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
    847{
    848	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
    849	struct Scsi_Host *host = sc_cmd->device->host;
    850	struct fc_lport *lport = shost_priv(host);
    851	struct qedf_ctx *qedf = lport_priv(lport);
    852	struct fcoe_task_context *task_ctx;
    853	u16 xid;
    854	struct fcoe_wqe *sqe;
    855	u16 sqe_idx;
    856
     857	/* Initialize the rest of the io_req fields */
    858	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
    859	qedf_priv(sc_cmd)->io_req = io_req;
    860	io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
    861
    862	/* Record which cpu this request is associated with */
    863	io_req->cpu = smp_processor_id();
    864
    865	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
    866		io_req->io_req_flags = QEDF_READ;
    867		qedf->input_requests++;
    868	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
    869		io_req->io_req_flags = QEDF_WRITE;
    870		qedf->output_requests++;
    871	} else {
    872		io_req->io_req_flags = 0;
    873		qedf->control_requests++;
    874	}
    875
    876	xid = io_req->xid;
    877
    878	/* Build buffer descriptor list for firmware from sg list */
    879	if (qedf_build_bd_list_from_sg(io_req)) {
    880		QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
     881		/* Releasing the cmd frees io_req; clear the assigned sc_cmd first */
    882		io_req->sc_cmd = NULL;
    883		kref_put(&io_req->refcount, qedf_release_cmd);
    884		return -EAGAIN;
    885	}
    886
    887	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
    888	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
    889		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
     890		/* Releasing the cmd frees io_req; clear the assigned sc_cmd first */
    891		io_req->sc_cmd = NULL;
    892		kref_put(&io_req->refcount, qedf_release_cmd);
    893		return -EINVAL;
    894	}
    895
     896	/* Record LUN number for later use if we need it */
    897	io_req->lun = (int)sc_cmd->device->lun;
    898
    899	/* Obtain free SQE */
    900	sqe_idx = qedf_get_sqe_idx(fcport);
    901	sqe = &fcport->sq[sqe_idx];
    902	memset(sqe, 0, sizeof(struct fcoe_wqe));
    903
    904	/* Get the task context */
    905	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
    906	if (!task_ctx) {
    907		QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
    908			   xid);
     909		/* Releasing the cmd frees io_req; clear the assigned sc_cmd first */
    910		io_req->sc_cmd = NULL;
    911		kref_put(&io_req->refcount, qedf_release_cmd);
    912		return -EINVAL;
    913	}
    914
    915	qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
    916
    917	/* Ring doorbell */
    918	qedf_ring_doorbell(fcport);
    919
     920	/* Mark that the command is now with the firmware */
    921	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
    922
    923	if (qedf_io_tracing && io_req->sc_cmd)
    924		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
    925
    926	return false;
    927}
    928
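        /* SCSI midlayer queuecommand entry point: validate state, allocate an io_req and post it. */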
    929int
    930qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
    931{
    932	struct fc_lport *lport = shost_priv(host);
    933	struct qedf_ctx *qedf = lport_priv(lport);
    934	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
    935	struct fc_rport_libfc_priv *rp = rport->dd_data;
    936	struct qedf_rport *fcport;
    937	struct qedf_ioreq *io_req;
    938	int rc = 0;
    939	int rval;
    940	unsigned long flags = 0;
    941	int num_sgs = 0;
    942
    943	num_sgs = scsi_sg_count(sc_cmd);
    944	if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) {
    945		QEDF_ERR(&qedf->dbg_ctx,
     946			 "Number of SG elements %d exceeds the hardware limitation of %d.\n",
    947			 num_sgs, QEDF_MAX_BDS_PER_CMD);
    948		sc_cmd->result = DID_ERROR;
    949		scsi_done(sc_cmd);
    950		return 0;
    951	}
    952
    953	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
    954	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
    955		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
    956			  "Returning DNC as unloading or stop io, flags 0x%lx.\n",
    957			  qedf->flags);
    958		sc_cmd->result = DID_NO_CONNECT << 16;
    959		scsi_done(sc_cmd);
    960		return 0;
    961	}
    962
    963	if (!qedf->pdev->msix_enabled) {
    964		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
    965		    "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
    966		    sc_cmd);
    967		sc_cmd->result = DID_NO_CONNECT << 16;
    968		scsi_done(sc_cmd);
    969		return 0;
    970	}
    971
    972	rval = fc_remote_port_chkready(rport);
    973	if (rval) {
    974		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
    975			  "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n",
    976			  rval, rport->port_id);
    977		sc_cmd->result = rval;
    978		scsi_done(sc_cmd);
    979		return 0;
    980	}
    981
    982	/* Retry command if we are doing a qed drain operation */
    983	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
    984		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n");
    985		rc = SCSI_MLQUEUE_HOST_BUSY;
    986		goto exit_qcmd;
    987	}
    988
    989	if (lport->state != LPORT_ST_READY ||
    990	    atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
    991		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n");
    992		rc = SCSI_MLQUEUE_HOST_BUSY;
    993		goto exit_qcmd;
    994	}
    995
     996	/* rport and fcport are allocated together, so fcport should be non-NULL */
    997	fcport = (struct qedf_rport *)&rp[1];
    998
    999	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
   1000	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
   1001		/*
   1002		 * Session is not offloaded yet. Let SCSI-ml retry
   1003		 * the command.
   1004		 */
   1005		rc = SCSI_MLQUEUE_TARGET_BUSY;
   1006		goto exit_qcmd;
   1007	}
   1008
   1009	atomic_inc(&fcport->ios_to_queue);
   1010
   1011	if (fcport->retry_delay_timestamp) {
   1012		/* Take fcport->rport_lock for resetting the delay_timestamp */
   1013		spin_lock_irqsave(&fcport->rport_lock, flags);
   1014		if (time_after(jiffies, fcport->retry_delay_timestamp)) {
   1015			fcport->retry_delay_timestamp = 0;
   1016		} else {
   1017			spin_unlock_irqrestore(&fcport->rport_lock, flags);
   1018			/* If retry_delay timer is active, flow off the ML */
   1019			rc = SCSI_MLQUEUE_TARGET_BUSY;
   1020			atomic_dec(&fcport->ios_to_queue);
   1021			goto exit_qcmd;
   1022		}
   1023		spin_unlock_irqrestore(&fcport->rport_lock, flags);
   1024	}
   1025
   1026	io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
   1027	if (!io_req) {
   1028		rc = SCSI_MLQUEUE_HOST_BUSY;
   1029		atomic_dec(&fcport->ios_to_queue);
   1030		goto exit_qcmd;
   1031	}
   1032
   1033	io_req->sc_cmd = sc_cmd;
   1034
   1035	/* Take fcport->rport_lock for posting to fcport send queue */
   1036	spin_lock_irqsave(&fcport->rport_lock, flags);
   1037	if (qedf_post_io_req(fcport, io_req)) {
   1038		QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
   1039		/* Return SQE to pool */
   1040		atomic_inc(&fcport->free_sqes);
   1041		rc = SCSI_MLQUEUE_HOST_BUSY;
   1042	}
   1043	spin_unlock_irqrestore(&fcport->rport_lock, flags);
   1044	atomic_dec(&fcport->ios_to_queue);
   1045
   1046exit_qcmd:
   1047	return rc;
   1048}
   1049
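        /* Extract status, residual, response code and sense data from the FCP_RSP in the CQE. */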
   1050static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
   1051				 struct fcoe_cqe_rsp_info *fcp_rsp)
   1052{
   1053	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
   1054	struct qedf_ctx *qedf = io_req->fcport->qedf;
   1055	u8 rsp_flags = fcp_rsp->rsp_flags.flags;
   1056	int fcp_sns_len = 0;
   1057	int fcp_rsp_len = 0;
   1058	uint8_t *rsp_info, *sense_data;
   1059
   1060	io_req->fcp_status = FC_GOOD;
   1061	io_req->fcp_resid = 0;
   1062	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
   1063	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
   1064		io_req->fcp_resid = fcp_rsp->fcp_resid;
   1065
   1066	io_req->scsi_comp_flags = rsp_flags;
   1067	io_req->cdb_status = fcp_rsp->scsi_status_code;
   1068
   1069	if (rsp_flags &
   1070	    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
   1071		fcp_rsp_len = fcp_rsp->fcp_rsp_len;
   1072
   1073	if (rsp_flags &
   1074	    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
   1075		fcp_sns_len = fcp_rsp->fcp_sns_len;
   1076
   1077	io_req->fcp_rsp_len = fcp_rsp_len;
   1078	io_req->fcp_sns_len = fcp_sns_len;
   1079	rsp_info = sense_data = io_req->sense_buffer;
   1080
   1081	/* fetch fcp_rsp_code */
   1082	if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
   1083		/* Only for task management function */
   1084		io_req->fcp_rsp_code = rsp_info[3];
   1085		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
   1086		    "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
   1087		/* Adjust sense-data location. */
   1088		sense_data += fcp_rsp_len;
   1089	}
   1090
   1091	if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
   1092		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
   1093		    "Truncating sense buffer\n");
   1094		fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
   1095	}
   1096
   1097	/* The sense buffer can be NULL for TMF commands */
   1098	if (sc_cmd->sense_buffer) {
   1099		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
   1100		if (fcp_sns_len)
   1101			memcpy(sc_cmd->sense_buffer, sense_data,
   1102			    fcp_sns_len);
   1103	}
   1104}
   1105
   1106static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
   1107{
   1108	struct scsi_cmnd *sc = io_req->sc_cmd;
   1109
   1110	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
   1111		dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
   1112		    scsi_sg_count(sc), sc->sc_data_direction);
   1113		io_req->bd_tbl->bd_valid = 0;
   1114	}
   1115}
   1116
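        /* CQE completion handler for SCSI commands: translate the FCP response and complete sc_cmd. */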
   1117void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
   1118	struct qedf_ioreq *io_req)
   1119{
   1120	struct scsi_cmnd *sc_cmd;
   1121	struct fcoe_cqe_rsp_info *fcp_rsp;
   1122	struct qedf_rport *fcport;
   1123	int refcount;
   1124	u16 scope, qualifier = 0;
   1125	u8 fw_residual_flag = 0;
   1126	unsigned long flags = 0;
   1127	u16 chk_scope = 0;
   1128
   1129	if (!io_req)
   1130		return;
   1131	if (!cqe)
   1132		return;
   1133
   1134	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
   1135	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
   1136	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
   1137		QEDF_ERR(&qedf->dbg_ctx,
   1138			 "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
   1139			 io_req->xid);
   1140		return;
   1141	}
   1142
   1143	sc_cmd = io_req->sc_cmd;
   1144	fcp_rsp = &cqe->cqe_info.rsp_info;
   1145
   1146	if (!sc_cmd) {
   1147		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
   1148		return;
   1149	}
   1150
   1151	if (!qedf_priv(sc_cmd)->io_req) {
   1152		QEDF_WARN(&(qedf->dbg_ctx),
   1153			  "io_req is NULL, returned in another context.\n");
   1154		return;
   1155	}
   1156
   1157	if (!sc_cmd->device) {
   1158		QEDF_ERR(&qedf->dbg_ctx,
   1159			 "Device for sc_cmd %p is NULL.\n", sc_cmd);
   1160		return;
   1161	}
   1162
   1163	if (!scsi_cmd_to_rq(sc_cmd)->q) {
   1164		QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
   1165		   "is not valid, sc_cmd=%p.\n", sc_cmd);
   1166		return;
   1167	}
   1168
   1169	fcport = io_req->fcport;
   1170
   1171	/*
   1172	 * When flush is active, let the cmds be completed from the cleanup
   1173	 * context
   1174	 */
   1175	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
   1176	    (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
   1177	     sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
   1178		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1179			  "Dropping good completion xid=0x%x as fcport is flushing",
   1180			  io_req->xid);
   1181		return;
   1182	}
   1183
   1184	qedf_parse_fcp_rsp(io_req, fcp_rsp);
   1185
   1186	qedf_unmap_sg_list(qedf, io_req);
   1187
   1188	/* Check for FCP transport error */
   1189	if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
   1190		QEDF_ERR(&(qedf->dbg_ctx),
   1191		    "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
   1192		    "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
   1193		    io_req->fcp_rsp_code);
   1194		sc_cmd->result = DID_BUS_BUSY << 16;
   1195		goto out;
   1196	}
   1197
   1198	fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
   1199	    FCOE_CQE_RSP_INFO_FW_UNDERRUN);
   1200	if (fw_residual_flag) {
   1201		QEDF_ERR(&qedf->dbg_ctx,
   1202			 "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n",
   1203			 io_req->xid, fcp_rsp->rsp_flags.flags,
   1204			 io_req->fcp_resid,
   1205			 cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2],
   1206			 sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]);
   1207
   1208		if (io_req->cdb_status == 0)
   1209			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
   1210		else
   1211			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
   1212
   1213		/*
    1214		 * Set resid to the whole buffer length so we won't try to reuse
    1215		 * any previously read data.
   1216		 */
   1217		scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
   1218		goto out;
   1219	}
   1220
   1221	switch (io_req->fcp_status) {
   1222	case FC_GOOD:
   1223		if (io_req->cdb_status == 0) {
   1224			/* Good I/O completion */
   1225			sc_cmd->result = DID_OK << 16;
   1226		} else {
   1227			refcount = kref_read(&io_req->refcount);
   1228			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
   1229			    "%d:0:%d:%lld xid=0x%0x op=0x%02x "
   1230			    "lba=%02x%02x%02x%02x cdb_status=%d "
   1231			    "fcp_resid=0x%x refcount=%d.\n",
   1232			    qedf->lport->host->host_no, sc_cmd->device->id,
   1233			    sc_cmd->device->lun, io_req->xid,
   1234			    sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
   1235			    sc_cmd->cmnd[4], sc_cmd->cmnd[5],
   1236			    io_req->cdb_status, io_req->fcp_resid,
   1237			    refcount);
   1238			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
   1239
   1240			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
   1241			    io_req->cdb_status == SAM_STAT_BUSY) {
   1242				/*
   1243				 * Check whether we need to set retry_delay at
   1244				 * all based on retry_delay module parameter
   1245				 * and the status qualifier.
   1246				 */
   1247
   1248				/* Upper 2 bits */
   1249				scope = fcp_rsp->retry_delay_timer & 0xC000;
   1250				/* Lower 14 bits */
   1251				qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
   1252
   1253				if (qedf_retry_delay)
   1254					chk_scope = 1;
   1255				/* Record stats */
   1256				if (io_req->cdb_status ==
   1257				    SAM_STAT_TASK_SET_FULL)
   1258					qedf->task_set_fulls++;
   1259				else
   1260					qedf->busy++;
   1261			}
   1262		}
   1263		if (io_req->fcp_resid)
   1264			scsi_set_resid(sc_cmd, io_req->fcp_resid);
   1265
   1266		if (chk_scope == 1) {
   1267			if ((scope == 1 || scope == 2) &&
   1268			    (qualifier > 0 && qualifier <= 0x3FEF)) {
   1269				/* Check we don't go over the max */
   1270				if (qualifier > QEDF_RETRY_DELAY_MAX) {
   1271					qualifier = QEDF_RETRY_DELAY_MAX;
   1272					QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1273						  "qualifier = %d\n",
   1274						  (fcp_rsp->retry_delay_timer &
   1275						  0x3FFF));
   1276				}
   1277				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1278					  "Scope = %d and qualifier = %d",
   1279					  scope, qualifier);
   1280				/*  Take fcport->rport_lock to
   1281				 *  update the retry_delay_timestamp
   1282				 */
   1283				spin_lock_irqsave(&fcport->rport_lock, flags);
   1284				fcport->retry_delay_timestamp =
   1285					jiffies + (qualifier * HZ / 10);
   1286				spin_unlock_irqrestore(&fcport->rport_lock,
   1287						       flags);
   1288
   1289			} else {
   1290				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1291					  "combination of scope = %d and qualifier = %d is not handled in qedf.\n",
   1292					  scope, qualifier);
   1293			}
   1294		}
   1295		break;
   1296	default:
   1297		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
   1298			   io_req->fcp_status);
   1299		break;
   1300	}
   1301
   1302out:
   1303	if (qedf_io_tracing)
   1304		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
   1305
   1306	/*
   1307	 * We wait till the end of the function to clear the
   1308	 * outstanding bit in case we need to send an abort
   1309	 */
   1310	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
   1311
   1312	io_req->sc_cmd = NULL;
   1313	qedf_priv(sc_cmd)->io_req =  NULL;
   1314	scsi_done(sc_cmd);
   1315	kref_put(&io_req->refcount, qedf_release_cmd);
   1316}
   1317
   1318/* Return a SCSI command in some other context besides a normal completion */
   1319void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
   1320	int result)
   1321{
   1322	struct scsi_cmnd *sc_cmd;
   1323	int refcount;
   1324
   1325	if (!io_req) {
   1326		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
   1327		return;
   1328	}
   1329
   1330	if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
   1331		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1332			  "io_req:%p scsi_done handling already done\n",
   1333			  io_req);
   1334		return;
   1335	}
   1336
   1337	/*
   1338	 * We will be done with this command after this call so clear the
   1339	 * outstanding bit.
   1340	 */
   1341	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
   1342
   1343	sc_cmd = io_req->sc_cmd;
   1344
   1345	if (!sc_cmd) {
   1346		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
   1347		return;
   1348	}
   1349
   1350	if (!virt_addr_valid(sc_cmd)) {
   1351		QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
   1352		goto bad_scsi_ptr;
   1353	}
   1354
   1355	if (!qedf_priv(sc_cmd)->io_req) {
   1356		QEDF_WARN(&(qedf->dbg_ctx),
   1357			  "io_req is NULL, returned in another context.\n");
   1358		return;
   1359	}
   1360
   1361	if (!sc_cmd->device) {
   1362		QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
   1363			 sc_cmd);
   1364		goto bad_scsi_ptr;
   1365	}
   1366
   1367	if (!virt_addr_valid(sc_cmd->device)) {
   1368		QEDF_ERR(&qedf->dbg_ctx,
   1369			 "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
   1370		goto bad_scsi_ptr;
   1371	}
   1372
   1373	if (!sc_cmd->sense_buffer) {
   1374		QEDF_ERR(&qedf->dbg_ctx,
   1375			 "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
   1376			 sc_cmd);
   1377		goto bad_scsi_ptr;
   1378	}
   1379
   1380	if (!virt_addr_valid(sc_cmd->sense_buffer)) {
   1381		QEDF_ERR(&qedf->dbg_ctx,
   1382			 "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
   1383			 sc_cmd);
   1384		goto bad_scsi_ptr;
   1385	}
   1386
   1387	qedf_unmap_sg_list(qedf, io_req);
   1388
   1389	sc_cmd->result = result << 16;
   1390	refcount = kref_read(&io_req->refcount);
   1391	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
   1392	    "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
   1393	    "allowed=%d retries=%d refcount=%d.\n",
   1394	    qedf->lport->host->host_no, sc_cmd->device->id,
   1395	    sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
   1396	    sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
   1397	    sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
   1398	    refcount);
   1399
   1400	/*
    1401	 * Set resid to the whole buffer length so we won't try to reuse any
   1402	 * previously read data
   1403	 */
   1404	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
   1405
   1406	if (qedf_io_tracing)
   1407		qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
   1408
   1409	io_req->sc_cmd = NULL;
   1410	qedf_priv(sc_cmd)->io_req = NULL;
   1411	scsi_done(sc_cmd);
   1412	kref_put(&io_req->refcount, qedf_release_cmd);
   1413	return;
   1414
   1415bad_scsi_ptr:
   1416	/*
   1417	 * Clear the io_req->sc_cmd backpointer so we don't try to process
   1418	 * this again
   1419	 */
   1420	io_req->sc_cmd = NULL;
   1421	kref_put(&io_req->refcount, qedf_release_cmd);  /* ID: 001 */
   1422}
   1423
   1424/*
   1425 * Handle warning type CQE completions. This is mainly used for REC timer
   1426 * popping.
   1427 */
   1428void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
   1429	struct qedf_ioreq *io_req)
   1430{
   1431	int rval, i;
   1432	struct qedf_rport *fcport = io_req->fcport;
   1433	u64 err_warn_bit_map;
   1434	u8 err_warn = 0xff;
   1435
   1436	if (!cqe) {
   1437		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1438			  "cqe is NULL for io_req %p xid=0x%x\n",
   1439			  io_req, io_req->xid);
   1440		return;
   1441	}
   1442
   1443	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
   1444		  "xid=0x%x\n", io_req->xid);
   1445	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
   1446		  "err_warn_bitmap=%08x:%08x\n",
   1447		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
   1448		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
   1449	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
   1450		  "rx_buff_off=%08x, rx_id=%04x\n",
   1451		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
   1452		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
   1453		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));
   1454
    1455	/* Normalize the error bitmap value to just an unsigned int */
   1456	err_warn_bit_map = (u64)
   1457	    ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
   1458	    (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
   1459	for (i = 0; i < 64; i++) {
   1460		if (err_warn_bit_map & (u64)((u64)1 << i)) {
   1461			err_warn = i;
   1462			break;
   1463		}
   1464	}
   1465
   1466	/* Check if REC TOV expired if this is a tape device */
   1467	if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
   1468		if (err_warn ==
   1469		    FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
   1470			QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
   1471			if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
   1472				io_req->rx_buf_off =
   1473				    cqe->cqe_info.err_info.rx_buf_off;
   1474				io_req->tx_buf_off =
   1475				    cqe->cqe_info.err_info.tx_buf_off;
   1476				io_req->rx_id = cqe->cqe_info.err_info.rx_id;
   1477				rval = qedf_send_rec(io_req);
   1478				/*
   1479				 * We only want to abort the io_req if we
   1480				 * can't queue the REC command as we want to
   1481				 * keep the exchange open for recovery.
   1482				 */
   1483				if (rval)
   1484					goto send_abort;
   1485			}
   1486			return;
   1487		}
   1488	}
   1489
   1490send_abort:
   1491	init_completion(&io_req->abts_done);
   1492	rval = qedf_initiate_abts(io_req, true);
   1493	if (rval)
   1494		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
   1495}
   1496
   1497/* Cleanup a command when we receive an error detection completion */
   1498void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
   1499	struct qedf_ioreq *io_req)
   1500{
   1501	int rval;
   1502
   1503	if (io_req == NULL) {
   1504		QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
   1505		return;
   1506	}
   1507
   1508	if (io_req->fcport == NULL) {
   1509		QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
   1510		return;
   1511	}
   1512
   1513	if (!cqe) {
   1514		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1515			"cqe is NULL for io_req %p\n", io_req);
   1516		return;
   1517	}
   1518
   1519	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
   1520		  "xid=0x%x\n", io_req->xid);
   1521	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
   1522		  "err_warn_bitmap=%08x:%08x\n",
   1523		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
   1524		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
   1525	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
   1526		  "rx_buff_off=%08x, rx_id=%04x\n",
   1527		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
   1528		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
   1529		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));
   1530
   1531	/* When flush is active, let the cmds be flushed out from the cleanup context */
   1532	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) ||
   1533		(test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) &&
   1534		 io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) {
   1535		QEDF_ERR(&qedf->dbg_ctx,
   1536			"Dropping EQE for xid=0x%x as fcport is flushing",
   1537			io_req->xid);
   1538		return;
   1539	}
   1540
   1541	if (qedf->stop_io_on_error) {
   1542		qedf_stop_all_io(qedf);
   1543		return;
   1544	}
   1545
   1546	init_completion(&io_req->abts_done);
   1547	rval = qedf_initiate_abts(io_req, true);
   1548	if (rval)
   1549		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
   1550}
   1551
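        /* Complete an outstanding ELS request during a flush, distinct from a timeout. */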
   1552static void qedf_flush_els_req(struct qedf_ctx *qedf,
   1553	struct qedf_ioreq *els_req)
   1554{
   1555	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
   1556	    "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
   1557	    kref_read(&els_req->refcount));
   1558
   1559	/*
   1560	 * Need to distinguish this from a timeout when calling the
   1561	 * els_req->cb_func.
   1562	 */
   1563	els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
   1564
   1565	clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
   1566
   1567	/* Cancel the timer */
   1568	cancel_delayed_work_sync(&els_req->timeout_work);
   1569
   1570	/* Call callback function to complete command */
   1571	if (els_req->cb_func && els_req->cb_arg) {
   1572		els_req->cb_func(els_req->cb_arg);
   1573		els_req->cb_arg = NULL;
   1574	}
   1575
   1576	/* Release kref for original initiate_els */
   1577	kref_put(&els_req->refcount, qedf_release_cmd);
   1578}
   1579
    1580/* A value of -1 for lun is a wildcard that means flush all
   1581 * active SCSI I/Os for the target.
   1582 */
   1583void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
   1584{
   1585	struct qedf_ioreq *io_req;
   1586	struct qedf_ctx *qedf;
   1587	struct qedf_cmd_mgr *cmd_mgr;
   1588	int i, rc;
   1589	unsigned long flags;
   1590	int flush_cnt = 0;
   1591	int wait_cnt = 100;
   1592	int refcount = 0;
   1593
   1594	if (!fcport) {
   1595		QEDF_ERR(NULL, "fcport is NULL\n");
   1596		return;
   1597	}
   1598
   1599	/* Check that fcport is still offloaded */
   1600	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
   1601		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
   1602		return;
   1603	}
   1604
   1605	qedf = fcport->qedf;
   1606
   1607	if (!qedf) {
   1608		QEDF_ERR(NULL, "qedf is NULL.\n");
   1609		return;
   1610	}
   1611
   1612	/* Only wait for all commands to be queued in the Upload context */
   1613	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
   1614	    (lun == -1)) {
   1615		while (atomic_read(&fcport->ios_to_queue)) {
   1616			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1617				  "Waiting for %d I/Os to be queued\n",
   1618				  atomic_read(&fcport->ios_to_queue));
   1619			if (wait_cnt == 0) {
   1620				QEDF_ERR(NULL,
    1621					 "%d I/O requests could not be queued\n",
   1622					 atomic_read(&fcport->ios_to_queue));
   1623			}
   1624			msleep(20);
   1625			wait_cnt--;
   1626		}
   1627	}
   1628
   1629	cmd_mgr = qedf->cmd_mgr;
   1630
   1631	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1632		  "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
   1633		  atomic_read(&fcport->num_active_ios), fcport,
   1634		  fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
   1635	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");
   1636
   1637	mutex_lock(&qedf->flush_mutex);
   1638	if (lun == -1) {
   1639		set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
   1640	} else {
   1641		set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
   1642		fcport->lun_reset_lun = lun;
   1643	}
   1644
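        	/*
        	 * Walk the entire command pool and flush any allocated command that
        	 * belongs to this fcport (and, for a LUN reset, to the requested
        	 * LUN).  The numeric "ID:" tags mark which kref_get a given
        	 * kref_put balances.
        	 */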
   1645	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
   1646		io_req = &cmd_mgr->cmds[i];
   1647
   1648		if (!io_req)
   1649			continue;
   1650		if (!io_req->fcport)
   1651			continue;
   1652
   1653		spin_lock_irqsave(&cmd_mgr->lock, flags);
   1654
   1655		if (io_req->alloc) {
   1656			if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
   1657				if (io_req->cmd_type == QEDF_SCSI_CMD)
   1658					QEDF_ERR(&qedf->dbg_ctx,
   1659						 "Allocated but not queued, xid=0x%x\n",
   1660						 io_req->xid);
   1661			}
   1662			spin_unlock_irqrestore(&cmd_mgr->lock, flags);
   1663		} else {
   1664			spin_unlock_irqrestore(&cmd_mgr->lock, flags);
   1665			continue;
   1666		}
   1667
   1668		if (io_req->fcport != fcport)
   1669			continue;
   1670
   1671		/* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
   1672		 * but RRQ is still pending.
   1673		 * Workaround: Within qedf_send_rrq, we check if the fcport is
   1674		 * NULL, and we drop the ref on the io_req to clean it up.
   1675		 */
   1676		if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
   1677			refcount = kref_read(&io_req->refcount);
   1678			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1679				  "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
   1680				  io_req->xid, io_req->cmd_type, refcount);
    1681			/* If RRQ work has been queued, try to cancel it and
   1682			 * free the io_req
   1683			 */
   1684			if (atomic_read(&io_req->state) ==
   1685			    QEDFC_CMD_ST_RRQ_WAIT) {
   1686				if (cancel_delayed_work_sync
   1687				    (&io_req->rrq_work)) {
   1688					QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1689						  "Putting reference for pending RRQ work xid=0x%x.\n",
   1690						  io_req->xid);
   1691					/* ID: 003 */
   1692					kref_put(&io_req->refcount,
   1693						 qedf_release_cmd);
   1694				}
   1695			}
   1696			continue;
   1697		}
   1698
   1699		/* Only consider flushing ELS during target reset */
   1700		if (io_req->cmd_type == QEDF_ELS &&
   1701		    lun == -1) {
   1702			rc = kref_get_unless_zero(&io_req->refcount);
   1703			if (!rc) {
   1704				QEDF_ERR(&(qedf->dbg_ctx),
   1705				    "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
   1706				    io_req, io_req->xid);
   1707				continue;
   1708			}
   1709			qedf_initiate_cleanup(io_req, false);
   1710			flush_cnt++;
   1711			qedf_flush_els_req(qedf, io_req);
   1712
   1713			/*
   1714			 * Release the kref and go back to the top of the
   1715			 * loop.
   1716			 */
   1717			goto free_cmd;
   1718		}
   1719
   1720		if (io_req->cmd_type == QEDF_ABTS) {
   1721			/* ID: 004 */
   1722			rc = kref_get_unless_zero(&io_req->refcount);
   1723			if (!rc) {
   1724				QEDF_ERR(&(qedf->dbg_ctx),
   1725				    "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
   1726				    io_req, io_req->xid);
   1727				continue;
   1728			}
   1729			if (lun != -1 && io_req->lun != lun)
   1730				goto free_cmd;
   1731
   1732			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1733			    "Flushing abort xid=0x%x.\n", io_req->xid);
   1734
   1735			if (cancel_delayed_work_sync(&io_req->rrq_work)) {
   1736				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1737					  "Putting ref for cancelled RRQ work xid=0x%x.\n",
   1738					  io_req->xid);
   1739				kref_put(&io_req->refcount, qedf_release_cmd);
   1740			}
   1741
   1742			if (cancel_delayed_work_sync(&io_req->timeout_work)) {
   1743				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1744					  "Putting ref for cancelled tmo work xid=0x%x.\n",
   1745					  io_req->xid);
   1746				qedf_initiate_cleanup(io_req, true);
   1747				/* Notify eh_abort handler that ABTS is
   1748				 * complete
   1749				 */
   1750				complete(&io_req->abts_done);
   1751				clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
   1752				/* ID: 002 */
   1753				kref_put(&io_req->refcount, qedf_release_cmd);
   1754			}
   1755			flush_cnt++;
   1756			goto free_cmd;
   1757		}
   1758
   1759		if (!io_req->sc_cmd)
   1760			continue;
   1761		if (!io_req->sc_cmd->device) {
   1762			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1763				  "Device backpointer NULL for sc_cmd=%p.\n",
   1764				  io_req->sc_cmd);
   1765			/* Put reference for non-existent scsi_cmnd */
   1766			io_req->sc_cmd = NULL;
   1767			qedf_initiate_cleanup(io_req, false);
   1768			kref_put(&io_req->refcount, qedf_release_cmd);
   1769			continue;
   1770		}
   1771		if (lun > -1) {
   1772			if (io_req->lun != lun)
   1773				continue;
   1774		}
   1775
   1776		/*
   1777		 * Use kref_get_unless_zero in the unlikely case the command
   1778		 * we're about to flush was completed in the normal SCSI path
   1779		 */
   1780		rc = kref_get_unless_zero(&io_req->refcount);
   1781		if (!rc) {
   1782			QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
   1783			    "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
   1784			continue;
   1785		}
   1786
   1787		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
   1788		    "Cleanup xid=0x%x.\n", io_req->xid);
   1789		flush_cnt++;
   1790
   1791		/* Cleanup task and return I/O mid-layer */
   1792		qedf_initiate_cleanup(io_req, true);
   1793
   1794free_cmd:
   1795		kref_put(&io_req->refcount, qedf_release_cmd);	/* ID: 004 */
   1796	}
   1797
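        	/*
        	 * When the connection is being uploaded, give the flushed commands
        	 * up to 30 seconds (60 * 500ms) to complete; anything still
        	 * outstanding after that is dumped and triggers a WARN_ON.
        	 */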
   1798	wait_cnt = 60;
   1799	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1800		  "Flushed 0x%x I/Os, active=0x%x.\n",
   1801		  flush_cnt, atomic_read(&fcport->num_active_ios));
   1802	/* Only wait for all commands to complete in the Upload context */
   1803	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
   1804	    (lun == -1)) {
   1805		while (atomic_read(&fcport->num_active_ios)) {
   1806			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1807				  "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
   1808				  flush_cnt,
   1809				  atomic_read(&fcport->num_active_ios),
   1810				  wait_cnt);
   1811			if (wait_cnt == 0) {
   1812				QEDF_ERR(&qedf->dbg_ctx,
   1813					 "Flushed %d I/Os, active=%d.\n",
   1814					 flush_cnt,
   1815					 atomic_read(&fcport->num_active_ios));
   1816				for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
   1817					io_req = &cmd_mgr->cmds[i];
   1818					if (io_req->fcport &&
   1819					    io_req->fcport == fcport) {
   1820						refcount =
   1821						kref_read(&io_req->refcount);
   1822						set_bit(QEDF_CMD_DIRTY,
   1823							&io_req->flags);
   1824						QEDF_ERR(&qedf->dbg_ctx,
   1825							 "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
   1826							 io_req, io_req->xid,
   1827							 io_req->flags,
   1828							 io_req->sc_cmd,
   1829							 refcount,
   1830							 io_req->cmd_type);
   1831					}
   1832				}
   1833				WARN_ON(1);
   1834				break;
   1835			}
   1836			msleep(500);
   1837			wait_cnt--;
   1838		}
   1839	}
   1840
   1841	clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
   1842	clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
   1843	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
   1844	mutex_unlock(&qedf->flush_mutex);
   1845}
   1846
   1847/*
    1848 * Initiate an ABTS middle path command. Note that we don't have to initialize
   1849 * the task context for an ABTS task.
   1850 */
   1851int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
   1852{
   1853	struct fc_lport *lport;
   1854	struct qedf_rport *fcport = io_req->fcport;
   1855	struct fc_rport_priv *rdata;
   1856	struct qedf_ctx *qedf;
   1857	u16 xid;
   1858	int rc = 0;
   1859	unsigned long flags;
   1860	struct fcoe_wqe *sqe;
   1861	u16 sqe_idx;
   1862	int refcount = 0;
   1863
   1864	/* Sanity check qedf_rport before dereferencing any pointers */
   1865	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
   1866		QEDF_ERR(NULL, "tgt not offloaded\n");
   1867		rc = 1;
   1868		goto out;
   1869	}
   1870
   1871	qedf = fcport->qedf;
   1872	rdata = fcport->rdata;
   1873
   1874	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
   1875		QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
   1876		rc = 1;
   1877		goto out;
   1878	}
   1879
   1880	lport = qedf->lport;
   1881
   1882	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
   1883		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
   1884		rc = 1;
   1885		goto drop_rdata_kref;
   1886	}
   1887
   1888	if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
   1889		QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
   1890		rc = 1;
   1891		goto drop_rdata_kref;
   1892	}
   1893
   1894	/* Ensure room on SQ */
   1895	if (!atomic_read(&fcport->free_sqes)) {
   1896		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
   1897		rc = 1;
   1898		goto drop_rdata_kref;
   1899	}
   1900
   1901	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
   1902		QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
   1903		rc = 1;
   1904		goto drop_rdata_kref;
   1905	}
   1906
   1907	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
   1908	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
   1909	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
   1910		QEDF_ERR(&qedf->dbg_ctx,
   1911			 "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
   1912			 io_req->xid, io_req->sc_cmd);
   1913		rc = 1;
   1914		goto drop_rdata_kref;
   1915	}
   1916
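        	/*
        	 * Take a reference for the ABTS itself; it is dropped when the ABTS
        	 * completes or when its timeout handler cleans it up.
        	 */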
   1917	kref_get(&io_req->refcount);
   1918
   1919	xid = io_req->xid;
   1920	qedf->control_requests++;
   1921	qedf->packet_aborts++;
   1922
   1923	/* Set the command type to abort */
   1924	io_req->cmd_type = QEDF_ABTS;
   1925	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
   1926
   1927	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
   1928	refcount = kref_read(&io_req->refcount);
   1929	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
   1930		  "ABTS io_req xid = 0x%x refcount=%d\n",
   1931		  xid, refcount);
   1932
   1933	qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
   1934
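        	/*
        	 * Post the ABTS WQE on the connection's send queue under rport_lock
        	 * and ring the doorbell so the firmware picks it up.
        	 */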
   1935	spin_lock_irqsave(&fcport->rport_lock, flags);
   1936
   1937	sqe_idx = qedf_get_sqe_idx(fcport);
   1938	sqe = &fcport->sq[sqe_idx];
   1939	memset(sqe, 0, sizeof(struct fcoe_wqe));
   1940	io_req->task_params->sqe = sqe;
   1941
   1942	init_initiator_abort_fcoe_task(io_req->task_params);
   1943	qedf_ring_doorbell(fcport);
   1944
   1945	spin_unlock_irqrestore(&fcport->rport_lock, flags);
   1946
   1947drop_rdata_kref:
   1948	kref_put(&rdata->kref, fc_rport_destroy);
   1949out:
   1950	return rc;
   1951}
   1952
   1953void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
   1954	struct qedf_ioreq *io_req)
   1955{
   1956	uint32_t r_ctl;
   1957	int rc;
   1958	struct qedf_rport *fcport = io_req->fcport;
   1959
   1960	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
   1961		   "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
   1962
   1963	r_ctl = cqe->cqe_info.abts_info.r_ctl;
   1964
   1965	/* This was added at a point when we were scheduling abts_compl &
   1966	 * cleanup_compl on different CPUs and there was a possibility of
   1967	 * the io_req to be freed from the other context before we got here.
    1968	 * the io_req being freed from the other context before we got here.
   1969	if (!fcport) {
   1970		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1971			  "Dropping ABTS completion xid=0x%x as fcport is NULL",
   1972			  io_req->xid);
   1973		return;
   1974	}
   1975
   1976	/*
   1977	 * When flush is active, let the cmds be completed from the cleanup
   1978	 * context
   1979	 */
   1980	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
   1981	    test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
   1982		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   1983			  "Dropping ABTS completion xid=0x%x as fcport is flushing",
   1984			  io_req->xid);
   1985		return;
   1986	}
   1987
   1988	if (!cancel_delayed_work(&io_req->timeout_work)) {
   1989		QEDF_ERR(&qedf->dbg_ctx,
   1990			 "Wasn't able to cancel abts timeout work.\n");
   1991	}
   1992
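        	/*
        	 * BA_ACC means the target accepted the abort, so an RRQ is scheduled
        	 * after R_A_TOV before the exchange is released; BA_RJT and unknown
        	 * responses fall through to the completion handling below.
        	 */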
   1993	switch (r_ctl) {
   1994	case FC_RCTL_BA_ACC:
   1995		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
    1996		    "ABTS response - ACC; send RRQ after R_A_TOV\n");
   1997		io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
   1998		rc = kref_get_unless_zero(&io_req->refcount);	/* ID: 003 */
   1999		if (!rc) {
   2000			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
   2001				  "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
   2002				  io_req->xid);
   2003			return;
   2004		}
   2005		/*
    2006		 * Don't release this cmd yet. It will be released
    2007		 * after we get the RRQ response.
   2008		 */
   2009		queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
   2010		    msecs_to_jiffies(qedf->lport->r_a_tov));
   2011		atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
   2012		break;
   2013	/* For error cases let the cleanup return the command */
   2014	case FC_RCTL_BA_RJT:
   2015		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
   2016		   "ABTS response - RJT\n");
   2017		io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
   2018		break;
   2019	default:
   2020		QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
   2021		break;
   2022	}
   2023
   2024	clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
   2025
   2026	if (io_req->sc_cmd) {
   2027		if (!io_req->return_scsi_cmd_on_abts)
   2028			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
    2029				  "Not calling scsi_done for xid=0x%x.\n",
   2030				  io_req->xid);
   2031		if (io_req->return_scsi_cmd_on_abts)
   2032			qedf_scsi_done(qedf, io_req, DID_ERROR);
   2033	}
   2034
   2035	/* Notify eh_abort handler that ABTS is complete */
   2036	complete(&io_req->abts_done);
   2037
   2038	kref_put(&io_req->refcount, qedf_release_cmd);
   2039}
   2040
   2041int qedf_init_mp_req(struct qedf_ioreq *io_req)
   2042{
   2043	struct qedf_mp_req *mp_req;
   2044	struct scsi_sge *mp_req_bd;
   2045	struct scsi_sge *mp_resp_bd;
   2046	struct qedf_ctx *qedf = io_req->fcport->qedf;
   2047	dma_addr_t addr;
   2048	uint64_t sz;
   2049
   2050	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
   2051
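        	/*
        	 * Middle path (MP) requests carry ELS or task management payloads.
        	 * Allocate DMA-coherent request/response buffers and a single BD for
        	 * each.
        	 */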
   2052	mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
   2053	memset(mp_req, 0, sizeof(struct qedf_mp_req));
   2054
   2055	if (io_req->cmd_type != QEDF_ELS) {
   2056		mp_req->req_len = sizeof(struct fcp_cmnd);
   2057		io_req->data_xfer_len = mp_req->req_len;
   2058	} else
   2059		mp_req->req_len = io_req->data_xfer_len;
   2060
   2061	mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
   2062	    &mp_req->req_buf_dma, GFP_KERNEL);
   2063	if (!mp_req->req_buf) {
   2064		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
   2065		qedf_free_mp_resc(io_req);
   2066		return -ENOMEM;
   2067	}
   2068
   2069	mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
   2070	    QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
   2071	if (!mp_req->resp_buf) {
   2072		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
   2073			  "buffer\n");
   2074		qedf_free_mp_resc(io_req);
   2075		return -ENOMEM;
   2076	}
   2077
   2078	/* Allocate and map mp_req_bd and mp_resp_bd */
   2079	sz = sizeof(struct scsi_sge);
   2080	mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
   2081	    &mp_req->mp_req_bd_dma, GFP_KERNEL);
   2082	if (!mp_req->mp_req_bd) {
   2083		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
   2084		qedf_free_mp_resc(io_req);
   2085		return -ENOMEM;
   2086	}
   2087
   2088	mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
   2089	    &mp_req->mp_resp_bd_dma, GFP_KERNEL);
   2090	if (!mp_req->mp_resp_bd) {
   2091		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
   2092		qedf_free_mp_resc(io_req);
   2093		return -ENOMEM;
   2094	}
   2095
   2096	/* Fill bd table */
   2097	addr = mp_req->req_buf_dma;
   2098	mp_req_bd = mp_req->mp_req_bd;
   2099	mp_req_bd->sge_addr.lo = U64_LO(addr);
   2100	mp_req_bd->sge_addr.hi = U64_HI(addr);
   2101	mp_req_bd->sge_len = QEDF_PAGE_SIZE;
   2102
   2103	/*
   2104	 * MP buffer is either a task mgmt command or an ELS.
   2105	 * So the assumption is that it consumes a single bd
   2106	 * entry in the bd table
   2107	 */
   2108	mp_resp_bd = mp_req->mp_resp_bd;
   2109	addr = mp_req->resp_buf_dma;
   2110	mp_resp_bd->sge_addr.lo = U64_LO(addr);
   2111	mp_resp_bd->sge_addr.hi = U64_HI(addr);
   2112	mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
   2113
   2114	return 0;
   2115}
   2116
   2117/*
   2118 * Last ditch effort to clear the port if it's stuck. Used only after a
   2119 * cleanup task times out.
   2120 */
   2121static void qedf_drain_request(struct qedf_ctx *qedf)
   2122{
   2123	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
   2124		QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
   2125		return;
   2126	}
   2127
   2128	/* Set bit to return all queuecommand requests as busy */
   2129	set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
   2130
   2131	/* Call qed drain request for function. Should be synchronous */
   2132	qed_ops->common->drain(qedf->cdev);
   2133
   2134	/* Settle time for CQEs to be returned */
   2135	msleep(100);
   2136
   2137	/* Unplug and continue */
   2138	clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
   2139}
   2140
   2141/*
    2142 * Returns SUCCESS if the cleanup task does not time out, otherwise returns
    2143 * FAILED.
   2144 */
   2145int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
   2146	bool return_scsi_cmd_on_abts)
   2147{
   2148	struct qedf_rport *fcport;
   2149	struct qedf_ctx *qedf;
   2150	int tmo = 0;
   2151	int rc = SUCCESS;
   2152	unsigned long flags;
   2153	struct fcoe_wqe *sqe;
   2154	u16 sqe_idx;
   2155	int refcount = 0;
   2156
   2157	fcport = io_req->fcport;
   2158	if (!fcport) {
   2159		QEDF_ERR(NULL, "fcport is NULL.\n");
   2160		return SUCCESS;
   2161	}
   2162
   2163	/* Sanity check qedf_rport before dereferencing any pointers */
   2164	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
   2165		QEDF_ERR(NULL, "tgt not offloaded\n");
   2166		return SUCCESS;
   2167	}
   2168
   2169	qedf = fcport->qedf;
   2170	if (!qedf) {
   2171		QEDF_ERR(NULL, "qedf is NULL.\n");
   2172		return SUCCESS;
   2173	}
   2174
   2175	if (io_req->cmd_type == QEDF_ELS) {
   2176		goto process_els;
   2177	}
   2178
   2179	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
   2180	    test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
   2181		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
   2182			  "cleanup processing or already completed.\n",
   2183			  io_req->xid);
   2184		return SUCCESS;
   2185	}
   2186	set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
   2187
   2188process_els:
   2189	/* Ensure room on SQ */
   2190	if (!atomic_read(&fcport->free_sqes)) {
   2191		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
   2192		/* Need to make sure we clear the flag since it was set */
   2193		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
   2194		return FAILED;
   2195	}
   2196
   2197	if (io_req->cmd_type == QEDF_CLEANUP) {
   2198		QEDF_ERR(&qedf->dbg_ctx,
   2199			 "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
   2200			 io_req->xid, io_req->cmd_type);
   2201		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
   2202		return SUCCESS;
   2203	}
   2204
   2205	refcount = kref_read(&io_req->refcount);
   2206
   2207	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
   2208		  "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n",
   2209		  io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
   2210		  refcount, fcport, fcport->rdata->ids.port_id);
   2211
   2212	/* Cleanup cmds re-use the same TID as the original I/O */
   2213	io_req->cmd_type = QEDF_CLEANUP;
   2214	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
   2215
   2216	init_completion(&io_req->cleanup_done);
   2217
   2218	spin_lock_irqsave(&fcport->rport_lock, flags);
   2219
   2220	sqe_idx = qedf_get_sqe_idx(fcport);
   2221	sqe = &fcport->sq[sqe_idx];
   2222	memset(sqe, 0, sizeof(struct fcoe_wqe));
   2223	io_req->task_params->sqe = sqe;
   2224
   2225	init_initiator_cleanup_fcoe_task(io_req->task_params);
   2226	qedf_ring_doorbell(fcport);
   2227
   2228	spin_unlock_irqrestore(&fcport->rport_lock, flags);
   2229
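        	/*
        	 * cleanup_done is completed by qedf_process_cleanup_compl(); if the
        	 * firmware never responds, fall back to an MCP drain below.
        	 */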
   2230	tmo = wait_for_completion_timeout(&io_req->cleanup_done,
   2231					  QEDF_CLEANUP_TIMEOUT * HZ);
   2232
   2233	if (!tmo) {
   2234		rc = FAILED;
   2235		/* Timeout case */
   2236		QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
   2237			  "xid=%x.\n", io_req->xid);
   2238		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
   2239		/* Issue a drain request if cleanup task times out */
   2240		QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
   2241		qedf_drain_request(qedf);
   2242	}
   2243
    2244	/* If it is a TASK MGMT command, handle it here; the reference will be
    2245	 * decreased in qedf_execute_tmf
   2246	 */
    2247	if (io_req->tm_flags == FCP_TMF_LUN_RESET ||
   2248	    io_req->tm_flags == FCP_TMF_TGT_RESET) {
   2249		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
   2250		io_req->sc_cmd = NULL;
   2251		kref_put(&io_req->refcount, qedf_release_cmd);
   2252		complete(&io_req->tm_done);
   2253	}
   2254
   2255	if (io_req->sc_cmd) {
   2256		if (!io_req->return_scsi_cmd_on_abts)
   2257			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
    2258				  "Not calling scsi_done for xid=0x%x.\n",
   2259				  io_req->xid);
   2260		if (io_req->return_scsi_cmd_on_abts)
   2261			qedf_scsi_done(qedf, io_req, DID_ERROR);
   2262	}
   2263
   2264	if (rc == SUCCESS)
   2265		io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
   2266	else
   2267		io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
   2268
   2269	return rc;
   2270}
   2271
   2272void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
   2273	struct qedf_ioreq *io_req)
   2274{
   2275	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
   2276		   io_req->xid);
   2277
   2278	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
   2279
   2280	/* Complete so we can finish cleaning up the I/O */
   2281	complete(&io_req->cleanup_done);
   2282}
   2283
   2284static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
   2285	uint8_t tm_flags)
   2286{
   2287	struct qedf_ioreq *io_req;
   2288	struct fcoe_task_context *task;
   2289	struct qedf_ctx *qedf = fcport->qedf;
   2290	struct fc_lport *lport = qedf->lport;
   2291	int rc = 0;
   2292	uint16_t xid;
   2293	int tmo = 0;
   2294	int lun = 0;
   2295	unsigned long flags;
   2296	struct fcoe_wqe *sqe;
   2297	u16 sqe_idx;
   2298
   2299	if (!sc_cmd) {
   2300		QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n");
   2301		return FAILED;
   2302	}
   2303
   2304	lun = (int)sc_cmd->device->lun;
   2305	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
   2306		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
   2307		rc = FAILED;
   2308		goto no_flush;
   2309	}
   2310
   2311	io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
   2312	if (!io_req) {
   2313		QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
   2314		rc = -EAGAIN;
   2315		goto no_flush;
   2316	}
   2317
   2318	if (tm_flags == FCP_TMF_LUN_RESET)
   2319		qedf->lun_resets++;
   2320	else if (tm_flags == FCP_TMF_TGT_RESET)
   2321		qedf->target_resets++;
   2322
   2323	/* Initialize rest of io_req fields */
   2324	io_req->sc_cmd = sc_cmd;
   2325	io_req->fcport = fcport;
   2326	io_req->cmd_type = QEDF_TASK_MGMT_CMD;
   2327
   2328	/* Record which cpu this request is associated with */
   2329	io_req->cpu = smp_processor_id();
   2330
   2331	/* Set TM flags */
   2332	io_req->io_req_flags = QEDF_READ;
   2333	io_req->data_xfer_len = 0;
   2334	io_req->tm_flags = tm_flags;
   2335
    2336	/* Don't return the SCSI command from the abort/cleanup path for this TMF */
   2337	io_req->return_scsi_cmd_on_abts = false;
   2338
   2339	/* Obtain exchange id */
   2340	xid = io_req->xid;
   2341
   2342	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
   2343		   "0x%x\n", xid);
   2344
   2345	/* Initialize task context for this IO request */
   2346	task = qedf_get_task_mem(&qedf->tasks, xid);
   2347
   2348	init_completion(&io_req->tm_done);
   2349
   2350	spin_lock_irqsave(&fcport->rport_lock, flags);
   2351
   2352	sqe_idx = qedf_get_sqe_idx(fcport);
   2353	sqe = &fcport->sq[sqe_idx];
   2354	memset(sqe, 0, sizeof(struct fcoe_wqe));
   2355
   2356	qedf_init_task(fcport, lport, io_req, task, sqe);
   2357	qedf_ring_doorbell(fcport);
   2358
   2359	spin_unlock_irqrestore(&fcport->rport_lock, flags);
   2360
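        	/*
        	 * Mark the TMF outstanding and wait on tm_done, which is normally
        	 * completed by qedf_process_tmf_compl() once the FCP_RSP arrives.
        	 */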
   2361	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
   2362	tmo = wait_for_completion_timeout(&io_req->tm_done,
   2363	    QEDF_TM_TIMEOUT * HZ);
   2364
   2365	if (!tmo) {
   2366		rc = FAILED;
   2367		QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
   2368		/* Clear outstanding bit since command timed out */
   2369		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
   2370		io_req->sc_cmd = NULL;
   2371	} else {
   2372		/* Check TMF response code */
   2373		if (io_req->fcp_rsp_code == 0)
   2374			rc = SUCCESS;
   2375		else
   2376			rc = FAILED;
   2377	}
   2378	/*
   2379	 * Double check that fcport has not gone into an uploading state before
   2380	 * executing the command flush for the LUN/target.
   2381	 */
   2382	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
   2383		QEDF_ERR(&qedf->dbg_ctx,
   2384			 "fcport is uploading, not executing flush.\n");
   2385		goto no_flush;
   2386	}
   2387	/* We do not need this io_req any more */
   2388	kref_put(&io_req->refcount, qedf_release_cmd);
   2389
   2390
   2391	if (tm_flags == FCP_TMF_LUN_RESET)
   2392		qedf_flush_active_ios(fcport, lun);
   2393	else
   2394		qedf_flush_active_ios(fcport, -1);
   2395
   2396no_flush:
   2397	if (rc != SUCCESS) {
   2398		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
   2399		rc = FAILED;
   2400	} else {
   2401		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
   2402		rc = SUCCESS;
   2403	}
   2404	return rc;
   2405}
   2406
   2407int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
   2408{
   2409	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
   2410	struct fc_rport_libfc_priv *rp = rport->dd_data;
   2411	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
   2412	struct qedf_ctx *qedf;
   2413	struct fc_lport *lport = shost_priv(sc_cmd->device->host);
   2414	int rc = SUCCESS;
   2415	int rval;
   2416	struct qedf_ioreq *io_req = NULL;
   2417	int ref_cnt = 0;
   2418	struct fc_rport_priv *rdata = fcport->rdata;
   2419
   2420	QEDF_ERR(NULL,
   2421		 "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
   2422		 tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff,
   2423		 rport->scsi_target_id, (int)sc_cmd->device->lun);
   2424
   2425	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
   2426		QEDF_ERR(NULL, "stale rport\n");
   2427		return FAILED;
   2428	}
   2429
   2430	QEDF_ERR(NULL, "portid=%06x tm_flags =%s\n", rdata->ids.port_id,
   2431		 (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
   2432		 "LUN RESET");
   2433
   2434	if (qedf_priv(sc_cmd)->io_req) {
   2435		io_req = qedf_priv(sc_cmd)->io_req;
   2436		ref_cnt = kref_read(&io_req->refcount);
   2437		QEDF_ERR(NULL,
   2438			 "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
   2439			 io_req, io_req->xid, ref_cnt);
   2440	}
   2441
   2442	rval = fc_remote_port_chkready(rport);
   2443	if (rval) {
   2444		QEDF_ERR(NULL, "device_reset rport not ready\n");
   2445		rc = FAILED;
   2446		goto tmf_err;
   2447	}
   2448
   2449	rc = fc_block_scsi_eh(sc_cmd);
   2450	if (rc)
   2451		goto tmf_err;
   2452
   2453	if (!fcport) {
   2454		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
   2455		rc = FAILED;
   2456		goto tmf_err;
   2457	}
   2458
   2459	qedf = fcport->qedf;
   2460
   2461	if (!qedf) {
   2462		QEDF_ERR(NULL, "qedf is NULL.\n");
   2463		rc = FAILED;
   2464		goto tmf_err;
   2465	}
   2466
   2467	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
   2468		QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n");
   2469		rc = SUCCESS;
   2470		goto tmf_err;
   2471	}
   2472
   2473	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
   2474	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
   2475		rc = SUCCESS;
   2476		goto tmf_err;
   2477	}
   2478
   2479	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
   2480		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
   2481		rc = FAILED;
   2482		goto tmf_err;
   2483	}
   2484
   2485	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
   2486		if (!fcport->rdata)
   2487			QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
   2488				 fcport);
   2489		else
   2490			QEDF_ERR(&qedf->dbg_ctx,
   2491				 "fcport %p port_id=%06x is uploading.\n",
   2492				 fcport, fcport->rdata->ids.port_id);
   2493		rc = FAILED;
   2494		goto tmf_err;
   2495	}
   2496
   2497	rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
   2498
   2499tmf_err:
   2500	kref_put(&rdata->kref, fc_rport_destroy);
   2501	return rc;
   2502}
   2503
   2504void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
   2505	struct qedf_ioreq *io_req)
   2506{
   2507	struct fcoe_cqe_rsp_info *fcp_rsp;
   2508
   2509	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
   2510
   2511	fcp_rsp = &cqe->cqe_info.rsp_info;
   2512	qedf_parse_fcp_rsp(io_req, fcp_rsp);
   2513
   2514	io_req->sc_cmd = NULL;
   2515	complete(&io_req->tm_done);
   2516}
   2517
   2518void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
   2519	struct fcoe_cqe *cqe)
   2520{
   2521	unsigned long flags;
   2522	uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
   2523	u32 payload_len, crc;
   2524	struct fc_frame_header *fh;
   2525	struct fc_frame *fp;
   2526	struct qedf_io_work *io_work;
   2527	u32 bdq_idx;
   2528	void *bdq_addr;
   2529	struct scsi_bd *p_bd_info;
   2530
   2531	p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
   2532	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
   2533		  "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
   2534		  le32_to_cpu(p_bd_info->address.hi),
   2535		  le32_to_cpu(p_bd_info->address.lo),
   2536		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
   2537		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
   2538		  qedf->bdq_prod_idx, pktlen);
   2539
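        	/*
        	 * The opaque cookie in the BD carries the BDQ buffer index; validate
        	 * it before touching the receive buffer.
        	 */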
   2540	bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
   2541	if (bdq_idx >= QEDF_BDQ_SIZE) {
   2542		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
   2543		    bdq_idx);
   2544		goto increment_prod;
   2545	}
   2546
   2547	bdq_addr = qedf->bdq[bdq_idx].buf_addr;
   2548	if (!bdq_addr) {
   2549		QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
   2550		    "unsolicited packet.\n");
   2551		goto increment_prod;
   2552	}
   2553
   2554	if (qedf_dump_frames) {
   2555		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
   2556		    "BDQ frame is at addr=%p.\n", bdq_addr);
   2557		print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
   2558		    (void *)bdq_addr, pktlen, false);
   2559	}
   2560
   2561	/* Allocate frame */
   2562	payload_len = pktlen - sizeof(struct fc_frame_header);
   2563	fp = fc_frame_alloc(qedf->lport, payload_len);
   2564	if (!fp) {
   2565		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
   2566		goto increment_prod;
   2567	}
   2568
   2569	/* Copy data from BDQ buffer into fc_frame struct */
   2570	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
   2571	memcpy(fh, (void *)bdq_addr, pktlen);
   2572
   2573	QEDF_WARN(&qedf->dbg_ctx,
    2574		  "Processing Unsolicited frame, src=%06x dest=%06x r_ctl=0x%x type=0x%x cmd=%02x\n",
   2575		  ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
   2576		  fh->fh_type, fc_frame_payload_op(fp));
   2577
   2578	/* Initialize the frame so libfc sees it as a valid frame */
   2579	crc = fcoe_fc_crc(fp);
   2580	fc_frame_init(fp);
   2581	fr_dev(fp) = qedf->lport;
   2582	fr_sof(fp) = FC_SOF_I3;
   2583	fr_eof(fp) = FC_EOF_T;
   2584	fr_crc(fp) = cpu_to_le32(~crc);
   2585
   2586	/*
   2587	 * We need to return the frame back up to libfc in a non-atomic
   2588	 * context
   2589	 */
   2590	io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
   2591	if (!io_work) {
   2592		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
   2593			   "work for I/O completion.\n");
   2594		fc_frame_free(fp);
   2595		goto increment_prod;
   2596	}
   2597	memset(io_work, 0, sizeof(struct qedf_io_work));
   2598
   2599	INIT_WORK(&io_work->work, qedf_fp_io_handler);
   2600
   2601	/* Copy contents of CQE for deferred processing */
   2602	memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
   2603
   2604	io_work->qedf = qedf;
   2605	io_work->fp = fp;
   2606
   2607	queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
   2608increment_prod:
   2609	spin_lock_irqsave(&qedf->hba_lock, flags);
   2610
   2611	/* Increment producer to let f/w know we've handled the frame */
   2612	qedf->bdq_prod_idx++;
   2613
   2614	/* Producer index wraps at uint16_t boundary */
   2615	if (qedf->bdq_prod_idx == 0xffff)
   2616		qedf->bdq_prod_idx = 0;
   2617
   2618	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
   2619	readw(qedf->bdq_primary_prod);
   2620	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
   2621	readw(qedf->bdq_secondary_prod);
   2622
   2623	spin_unlock_irqrestore(&qedf->hba_lock, flags);
   2624}