cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

lpfc_nvmet.c (112037B)


      1/*******************************************************************
      2 * This file is part of the Emulex Linux Device Driver for         *
      3 * Fibre Channel Host Bus Adapters.                                *
      4 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
      5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
      6 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
      7 * EMULEX and SLI are trademarks of Emulex.                        *
      8 * www.broadcom.com                                                *
      9 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
     10 *                                                                 *
     11 * This program is free software; you can redistribute it and/or   *
     12 * modify it under the terms of version 2 of the GNU General       *
     13 * Public License as published by the Free Software Foundation.    *
     14 * This program is distributed in the hope that it will be useful. *
     15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
     16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
     17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
     18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
     19 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
     20 * more details, a copy of which can be found in the file COPYING  *
     21 * included with this package.                                     *
     22 ********************************************************************/
     23#include <linux/pci.h>
     24#include <linux/slab.h>
     25#include <linux/interrupt.h>
     26#include <linux/delay.h>
     27#include <asm/unaligned.h>
     28#include <linux/crc-t10dif.h>
     29#include <net/checksum.h>
     30
     31#include <scsi/scsi.h>
     32#include <scsi/scsi_device.h>
     33#include <scsi/scsi_eh.h>
     34#include <scsi/scsi_host.h>
     35#include <scsi/scsi_tcq.h>
     36#include <scsi/scsi_transport_fc.h>
     37#include <scsi/fc/fc_fs.h>
     38
     39#include "lpfc_version.h"
     40#include "lpfc_hw4.h"
     41#include "lpfc_hw.h"
     42#include "lpfc_sli.h"
     43#include "lpfc_sli4.h"
     44#include "lpfc_nl.h"
     45#include "lpfc_disc.h"
     46#include "lpfc.h"
     47#include "lpfc_scsi.h"
     48#include "lpfc_nvme.h"
     49#include "lpfc_logmsg.h"
     50#include "lpfc_crtn.h"
     51#include "lpfc_vport.h"
     52#include "lpfc_debugfs.h"
     53
     54static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
     55						 struct lpfc_async_xchg_ctx *,
     56						 dma_addr_t rspbuf,
     57						 uint16_t rspsize);
     58static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
     59						  struct lpfc_async_xchg_ctx *);
     60static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
     61					  struct lpfc_async_xchg_ctx *,
     62					  uint32_t, uint16_t);
     63static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
     64					    struct lpfc_async_xchg_ctx *,
     65					    uint32_t, uint16_t);
     66static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
     67				    struct lpfc_async_xchg_ctx *);
     68static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
     69
     70static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
     71
     72static union lpfc_wqe128 lpfc_tsend_cmd_template;
     73static union lpfc_wqe128 lpfc_treceive_cmd_template;
     74static union lpfc_wqe128 lpfc_trsp_cmd_template;
     75
     76/* Setup WQE templates for NVME IOs */
     77void
     78lpfc_nvmet_cmd_template(void)
     79{
     80	union lpfc_wqe128 *wqe;
     81
     82	/* TSEND template */
     83	wqe = &lpfc_tsend_cmd_template;
     84	memset(wqe, 0, sizeof(union lpfc_wqe128));
     85
     86	/* Word 0, 1, 2 - BDE is variable */
     87
     88	/* Word 3 - payload_offset_len is zero */
     89
     90	/* Word 4 - relative_offset is variable */
     91
     92	/* Word 5 - is zero */
     93
     94	/* Word 6 - ctxt_tag, xri_tag is variable */
     95
     96	/* Word 7 - wqe_ar is variable */
     97	bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
     98	bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
     99	bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
    100	bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
    101	bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
    102
    103	/* Word 8 - abort_tag is variable */
    104
    105	/* Word 9  - reqtag, rcvoxid is variable */
    106
    107	/* Word 10 - wqes, xc is variable */
    108	bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG);
    109	bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
    110	bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
    111	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
    112	bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
    113	bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);
    114
    115	/* Word 11 - sup, irsp, irsplen is variable */
    116	bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
    117	bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
    118	bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
    119	bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
    120	bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
    121	bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
    122
    123	/* Word 12 - fcp_data_len is variable */
    124
    125	/* Word 13, 14, 15 - PBDE is zero */
    126
    127	/* TRECEIVE template */
    128	wqe = &lpfc_treceive_cmd_template;
    129	memset(wqe, 0, sizeof(union lpfc_wqe128));
    130
    131	/* Word 0, 1, 2 - BDE is variable */
    132
    133	/* Word 3 */
    134	wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
    135
    136	/* Word 4 - relative_offset is variable */
    137
    138	/* Word 5 - is zero */
    139
    140	/* Word 6 - ctxt_tag, xri_tag is variable */
    141
    142	/* Word 7 */
    143	bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
    144	bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
    145	bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
    146	bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
    147	bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
    148
    149	/* Word 8 - abort_tag is variable */
    150
    151	/* Word 9  - reqtag, rcvoxid is variable */
    152
    153	/* Word 10 - xc is variable */
    154	bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
    155	bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
    156	bf_set(wqe_xchg, &wqe->fcp_treceive.wqe_com, LPFC_NVME_XCHG);
    157	bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
    158	bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
    159	bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);
    160
    161	/* Word 11 - pbde is variable */
    162	bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
    163	bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
    164	bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
    165	bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
    166	bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
    167	bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
    168
    169	/* Word 12 - fcp_data_len is variable */
    170
    171	/* Word 13, 14, 15 - PBDE is variable */
    172
    173	/* TRSP template */
    174	wqe = &lpfc_trsp_cmd_template;
    175	memset(wqe, 0, sizeof(union lpfc_wqe128));
    176
    177	/* Word 0, 1, 2 - BDE is variable */
    178
    179	/* Word 3 - response_len is variable */
    180
    181	/* Word 4, 5 - is zero */
    182
    183	/* Word 6 - ctxt_tag, xri_tag is variable */
    184
    185	/* Word 7 */
    186	bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
    187	bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
    188	bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
    189	bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
    190	bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */
    191
    192	/* Word 8 - abort_tag is variable */
    193
    194	/* Word 9  - reqtag is variable */
    195
    196	/* Word 10 wqes, xc is variable */
    197	bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
    198	bf_set(wqe_xchg, &wqe->fcp_trsp.wqe_com, LPFC_NVME_XCHG);
    199	bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
    200	bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
    201	bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
    202	bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);
    203
    204	/* Word 11 irsp, irsplen is variable */
    205	bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
    206	bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
    207	bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
    208	bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
    209	bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
    210	bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
    211
    212	/* Word 12, 13, 14, 15 - is zero */
    213}
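/*
 * Editorial note: the three templates above pre-fill the WQE words that do
 * not change per IO (command opcode, class, CT field, CQ id, lenloc, and so
 * on).  The per-IO prep routines declared at the top of this file (for
 * example lpfc_nvmet_prep_fcp_wqe()) can then start from a copy of the
 * matching template and fill in only the fields marked "variable" in the
 * comments: BDE, XRI/context tags, relative offset, request tag and data
 * length.
 */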
    214
    215#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
    216static struct lpfc_async_xchg_ctx *
    217lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
    218{
    219	struct lpfc_async_xchg_ctx *ctxp;
    220	unsigned long iflag;
    221	bool found = false;
    222
    223	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
    224	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
    225		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
    226			continue;
    227
    228		found = true;
    229		break;
    230	}
    231	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
    232	if (found)
    233		return ctxp;
    234
    235	return NULL;
    236}
    237
    238static struct lpfc_async_xchg_ctx *
    239lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
    240{
    241	struct lpfc_async_xchg_ctx *ctxp;
    242	unsigned long iflag;
    243	bool found = false;
    244
    245	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
    246	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
    247		if (ctxp->oxid != oxid || ctxp->sid != sid)
    248			continue;
    249
    250		found = true;
    251		break;
    252	}
    253	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
    254	if (found)
    255		return ctxp;
    256
    257	return NULL;
    258}
    259#endif
    260
    261static void
    262lpfc_nvmet_defer_release(struct lpfc_hba *phba,
    263			struct lpfc_async_xchg_ctx *ctxp)
    264{
    265	lockdep_assert_held(&ctxp->ctxlock);
    266
    267	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
    268			"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
    269			ctxp->oxid, ctxp->flag);
    270
    271	if (ctxp->flag & LPFC_NVME_CTX_RLS)
    272		return;
    273
    274	ctxp->flag |= LPFC_NVME_CTX_RLS;
    275	spin_lock(&phba->sli4_hba.t_active_list_lock);
    276	list_del(&ctxp->list);
    277	spin_unlock(&phba->sli4_hba.t_active_list_lock);
    278	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
    279	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
    280	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
    281}
    282
    283/**
    284 * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the
    285 *         transmission of an NVME LS response.
    286 * @phba: Pointer to HBA context object.
    287 * @cmdwqe: Pointer to driver command WQE object.
    288 * @rspwqe: Pointer to driver response WQE object.
    289 *
    290 * The function is called from SLI ring event handler with no
    291 * lock held. The function frees memory resources used for the command
    292 * used to send the NVME LS RSP.
    293 **/
    294void
    295__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
    296			   struct lpfc_iocbq *rspwqe)
    297{
    298	struct lpfc_async_xchg_ctx *axchg = cmdwqe->context_un.axchg;
    299	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
    300	struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
    301	uint32_t status, result;
    302
    303	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
    304	result = wcqe->parameter;
    305
    306	if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
    307		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
    308				"6410 NVMEx LS cmpl state mismatch IO x%x: "
    309				"%d %d\n",
    310				axchg->oxid, axchg->state, axchg->entry_cnt);
    311	}
    312
    313	lpfc_nvmeio_data(phba, "NVMEx LS  CMPL: xri x%x stat x%x result x%x\n",
    314			 axchg->oxid, status, result);
    315
    316	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
    317			"6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
    318			status, result, axchg->oxid);
    319
    320	lpfc_nlp_put(cmdwqe->ndlp);
    321	cmdwqe->context_un.axchg = NULL;
    322	cmdwqe->bpl_dmabuf = NULL;
    323	lpfc_sli_release_iocbq(phba, cmdwqe);
    324	ls_rsp->done(ls_rsp);
    325	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
    326			"6200 NVMEx LS rsp cmpl done status %d oxid x%x\n",
    327			status, axchg->oxid);
    328	kfree(axchg);
    329}
    330
    331/**
    332 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
    333 * @phba: Pointer to HBA context object.
    334 * @cmdwqe: Pointer to driver command WQE object.
    335 * @rspwqe: Pointer to driver response WQE object.
    336 *
    337 * The function is called from SLI ring event handler with no
    338 * lock held. This function is the completion handler for NVME LS commands.
    339 * The function updates any states and statistics, then calls the
    340 * generic completion handler to free resources.
    341 **/
    342static void
    343lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
    344			  struct lpfc_iocbq *rspwqe)
    345{
    346	struct lpfc_nvmet_tgtport *tgtp;
    347	uint32_t status, result;
    348	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
    349
    350	if (!phba->targetport)
    351		goto finish;
    352
    353	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
    354	result = wcqe->parameter;
    355
    356	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
    357	if (tgtp) {
    358		if (status) {
    359			atomic_inc(&tgtp->xmt_ls_rsp_error);
    360			if (result == IOERR_ABORT_REQUESTED)
    361				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
    362			if (bf_get(lpfc_wcqe_c_xb, wcqe))
    363				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
    364		} else {
    365			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
    366		}
    367	}
    368
    369finish:
    370	__lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, rspwqe);
    371}
    372
    373/**
    374 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
    375 * @phba: HBA buffer is associated with
    376 * @ctx_buf: ctx buffer context
    377 *
    378 * Description: Frees the given DMA buffer by reposting it to its
    379 * associated RQ so it can be reused.
    380 *
    381 * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
    382 *
    383 * Returns: None
    384 **/
    385void
    386lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
    387{
    388#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
    389	struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
    390	struct lpfc_nvmet_tgtport *tgtp;
    391	struct fc_frame_header *fc_hdr;
    392	struct rqb_dmabuf *nvmebuf;
    393	struct lpfc_nvmet_ctx_info *infop;
    394	uint32_t size, oxid, sid;
    395	int cpu;
    396	unsigned long iflag;
    397
    398	if (ctxp->state == LPFC_NVME_STE_FREE) {
    399		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
    400				"6411 NVMET free, already free IO x%x: %d %d\n",
    401				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
    402	}
    403
    404	if (ctxp->rqb_buffer) {
    405		spin_lock_irqsave(&ctxp->ctxlock, iflag);
    406		nvmebuf = ctxp->rqb_buffer;
    407		/* check if freed in another path whilst acquiring lock */
    408		if (nvmebuf) {
    409			ctxp->rqb_buffer = NULL;
    410			if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
    411				ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
    412				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
    413				nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
    414								    nvmebuf);
    415			} else {
    416				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
    417				/* repost */
    418				lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
    419			}
    420		} else {
    421			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
    422		}
    423	}
    424	ctxp->state = LPFC_NVME_STE_FREE;
    425
    426	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
    427	if (phba->sli4_hba.nvmet_io_wait_cnt) {
    428		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
    429				 nvmebuf, struct rqb_dmabuf,
    430				 hbuf.list);
    431		phba->sli4_hba.nvmet_io_wait_cnt--;
    432		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
    433				       iflag);
    434
    435		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
    436		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
    437		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
    438		size = nvmebuf->bytes_recv;
    439		sid = sli4_sid_from_fc_hdr(fc_hdr);
    440
    441		ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
    442		ctxp->wqeq = NULL;
    443		ctxp->offset = 0;
    444		ctxp->phba = phba;
    445		ctxp->size = size;
    446		ctxp->oxid = oxid;
    447		ctxp->sid = sid;
    448		ctxp->state = LPFC_NVME_STE_RCV;
    449		ctxp->entry_cnt = 1;
    450		ctxp->flag = 0;
    451		ctxp->ctxbuf = ctx_buf;
    452		ctxp->rqb_buffer = (void *)nvmebuf;
    453		spin_lock_init(&ctxp->ctxlock);
    454
    455#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
    456		/* NOTE: isr time stamp is stale when context is re-assigned */
    457		if (ctxp->ts_isr_cmd) {
    458			ctxp->ts_cmd_nvme = 0;
    459			ctxp->ts_nvme_data = 0;
    460			ctxp->ts_data_wqput = 0;
    461			ctxp->ts_isr_data = 0;
    462			ctxp->ts_data_nvme = 0;
    463			ctxp->ts_nvme_status = 0;
    464			ctxp->ts_status_wqput = 0;
    465			ctxp->ts_isr_status = 0;
    466			ctxp->ts_status_nvme = 0;
    467		}
    468#endif
    469		atomic_inc(&tgtp->rcv_fcp_cmd_in);
    470
    471		/* Indicate that a replacement buffer has been posted */
    472		spin_lock_irqsave(&ctxp->ctxlock, iflag);
    473		ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
    474		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
    475
    476		if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
    477			atomic_inc(&tgtp->rcv_fcp_cmd_drop);
    478			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
    479					"6181 Unable to queue deferred work "
    480					"for oxid x%x. "
    481					"FCP Drop IO [x%x x%x x%x]\n",
    482					ctxp->oxid,
    483					atomic_read(&tgtp->rcv_fcp_cmd_in),
    484					atomic_read(&tgtp->rcv_fcp_cmd_out),
    485					atomic_read(&tgtp->xmt_fcp_release));
    486
    487			spin_lock_irqsave(&ctxp->ctxlock, iflag);
    488			lpfc_nvmet_defer_release(phba, ctxp);
    489			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
    490			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
    491		}
    492		return;
    493	}
    494	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
    495
    496	/*
    497	 * Use the CPU context list, from the MRQ the IO was received on
    498	 * (ctxp->idx), to save context structure.
    499	 */
    500	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
    501	list_del_init(&ctxp->list);
    502	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
    503	cpu = raw_smp_processor_id();
    504	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
    505	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
    506	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
    507	infop->nvmet_ctx_list_cnt++;
    508	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
    509#endif
    510}
    511
    512#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
    513static void
    514lpfc_nvmet_ktime(struct lpfc_hba *phba,
    515		 struct lpfc_async_xchg_ctx *ctxp)
    516{
    517	uint64_t seg1, seg2, seg3, seg4, seg5;
    518	uint64_t seg6, seg7, seg8, seg9, seg10;
    519	uint64_t segsum;
    520
    521	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
    522	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
    523	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
    524	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
    525	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
    526		return;
    527
    528	if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
    529		return;
    530	if (ctxp->ts_isr_cmd  > ctxp->ts_cmd_nvme)
    531		return;
    532	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
    533		return;
    534	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
    535		return;
    536	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
    537		return;
    538	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
    539		return;
    540	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
    541		return;
    542	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
    543		return;
    544	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
    545		return;
    546	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
    547		return;
    548	/*
    549	 * Segment 1 - Time from FCP command received by MSI-X ISR
    550	 * to FCP command is passed to NVME Layer.
    551	 * Segment 2 - Time from FCP command payload handed
    552	 * off to NVME Layer to Driver receives a Command op
    553	 * from NVME Layer.
    554	 * Segment 3 - Time from Driver receives a Command op
    555	 * from NVME Layer to Command is put on WQ.
    556	 * Segment 4 - Time from Driver WQ put is done
    557	 * to MSI-X ISR for Command cmpl.
    558	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
    559	 * Command cmpl is passed to NVME Layer.
    560	 * Segment 6 - Time from Command cmpl is passed to NVME
    561	 * Layer to Driver receives a RSP op from NVME Layer.
    562	 * Segment 7 - Time from Driver receives a RSP op from
    563	 * NVME Layer to WQ put is done on TRSP FCP Status.
    564	 * Segment 8 - Time from Driver WQ put is done on TRSP
    565	 * FCP Status to MSI-X ISR for TRSP cmpl.
    566	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
    567	 * TRSP cmpl is passed to NVME Layer.
    568	 * Segment 10 - Time from FCP command received by
    569	 * MSI-X ISR to command is completed on wire.
    570	 * (Segments 1 thru 8) for READDATA / WRITEDATA
    571	 * (Segments 1 thru 4) for READDATA_RSP
    572	 */
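	/*
	 * Worked example (hypothetical timestamps, in ns): with
	 * ts_isr_cmd = 1000, ts_cmd_nvme = 1400, ts_nvme_data = 2000 and
	 * ts_data_wqput = 2300, the code below yields
	 * seg1 = 1400 - 1000 = 400 (segsum = 400),
	 * seg2 = (2000 - 1000) - segsum = 600 (segsum = 1000),
	 * seg3 = (2300 - 1000) - segsum = 300 (segsum = 1300).
	 * Each segment thus reduces to the delta between two consecutive
	 * timestamps, while the running segsum checks let the function bail
	 * out rather than record a bogus (negative) segment.
	 */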
    573	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
    574	segsum = seg1;
    575
    576	seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
    577	if (segsum > seg2)
    578		return;
    579	seg2 -= segsum;
    580	segsum += seg2;
    581
    582	seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
    583	if (segsum > seg3)
    584		return;
    585	seg3 -= segsum;
    586	segsum += seg3;
    587
    588	seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
    589	if (segsum > seg4)
    590		return;
    591	seg4 -= segsum;
    592	segsum += seg4;
    593
    594	seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
    595	if (segsum > seg5)
    596		return;
    597	seg5 -= segsum;
    598	segsum += seg5;
    599
    600
    601	/* For auto rsp commands seg6 thru seg9 will be 0 */
    602	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
    603		seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
    604		if (segsum > seg6)
    605			return;
    606		seg6 -= segsum;
    607		segsum += seg6;
    608
    609		seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
    610		if (segsum > seg7)
    611			return;
    612		seg7 -= segsum;
    613		segsum += seg7;
    614
    615		seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
    616		if (segsum > seg8)
    617			return;
    618		seg8 -= segsum;
    619		segsum += seg8;
    620
    621		seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
    622		if (segsum > seg9)
    623			return;
    624		seg9 -= segsum;
    625		segsum += seg9;
    626
    627		if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
    628			return;
    629		seg10 = (ctxp->ts_isr_status -
    630			ctxp->ts_isr_cmd);
    631	} else {
    632		if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
    633			return;
    634		seg6 =  0;
    635		seg7 =  0;
    636		seg8 =  0;
    637		seg9 =  0;
    638		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
    639	}
    640
    641	phba->ktime_seg1_total += seg1;
    642	if (seg1 < phba->ktime_seg1_min)
    643		phba->ktime_seg1_min = seg1;
    644	else if (seg1 > phba->ktime_seg1_max)
    645		phba->ktime_seg1_max = seg1;
    646
    647	phba->ktime_seg2_total += seg2;
    648	if (seg2 < phba->ktime_seg2_min)
    649		phba->ktime_seg2_min = seg2;
    650	else if (seg2 > phba->ktime_seg2_max)
    651		phba->ktime_seg2_max = seg2;
    652
    653	phba->ktime_seg3_total += seg3;
    654	if (seg3 < phba->ktime_seg3_min)
    655		phba->ktime_seg3_min = seg3;
    656	else if (seg3 > phba->ktime_seg3_max)
    657		phba->ktime_seg3_max = seg3;
    658
    659	phba->ktime_seg4_total += seg4;
    660	if (seg4 < phba->ktime_seg4_min)
    661		phba->ktime_seg4_min = seg4;
    662	else if (seg4 > phba->ktime_seg4_max)
    663		phba->ktime_seg4_max = seg4;
    664
    665	phba->ktime_seg5_total += seg5;
    666	if (seg5 < phba->ktime_seg5_min)
    667		phba->ktime_seg5_min = seg5;
    668	else if (seg5 > phba->ktime_seg5_max)
    669		phba->ktime_seg5_max = seg5;
    670
    671	phba->ktime_data_samples++;
    672	if (!seg6)
    673		goto out;
    674
    675	phba->ktime_seg6_total += seg6;
    676	if (seg6 < phba->ktime_seg6_min)
    677		phba->ktime_seg6_min = seg6;
    678	else if (seg6 > phba->ktime_seg6_max)
    679		phba->ktime_seg6_max = seg6;
    680
    681	phba->ktime_seg7_total += seg7;
    682	if (seg7 < phba->ktime_seg7_min)
    683		phba->ktime_seg7_min = seg7;
    684	else if (seg7 > phba->ktime_seg7_max)
    685		phba->ktime_seg7_max = seg7;
    686
    687	phba->ktime_seg8_total += seg8;
    688	if (seg8 < phba->ktime_seg8_min)
    689		phba->ktime_seg8_min = seg8;
    690	else if (seg8 > phba->ktime_seg8_max)
    691		phba->ktime_seg8_max = seg8;
    692
    693	phba->ktime_seg9_total += seg9;
    694	if (seg9 < phba->ktime_seg9_min)
    695		phba->ktime_seg9_min = seg9;
    696	else if (seg9 > phba->ktime_seg9_max)
    697		phba->ktime_seg9_max = seg9;
    698out:
    699	phba->ktime_seg10_total += seg10;
    700	if (seg10 < phba->ktime_seg10_min)
    701		phba->ktime_seg10_min = seg10;
    702	else if (seg10 > phba->ktime_seg10_max)
    703		phba->ktime_seg10_max = seg10;
    704	phba->ktime_status_samples++;
    705}
    706#endif
    707
    708/**
    709 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
    710 * @phba: Pointer to HBA context object.
    711 * @cmdwqe: Pointer to driver command WQE object.
    712 * @rspwqe: Pointer to driver response WQE object.
    713 *
    714 * The function is called from SLI ring event handler with no
    715 * lock held. This function is the completion handler for NVME FCP commands.
    716 * The function frees memory resources used for the NVME commands.
    717 **/
    718static void
    719lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
    720			  struct lpfc_iocbq *rspwqe)
    721{
    722	struct lpfc_nvmet_tgtport *tgtp;
    723	struct nvmefc_tgt_fcp_req *rsp;
    724	struct lpfc_async_xchg_ctx *ctxp;
    725	uint32_t status, result, op, start_clean, logerr;
    726	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
    727#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
    728	int id;
    729#endif
    730
    731	ctxp = cmdwqe->context_un.axchg;
    732	ctxp->flag &= ~LPFC_NVME_IO_INP;
    733
    734	rsp = &ctxp->hdlrctx.fcp_req;
    735	op = rsp->op;
    736
    737	status = bf_get(lpfc_wcqe_c_status, wcqe);
    738	result = wcqe->parameter;
    739
    740	if (phba->targetport)
    741		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
    742	else
    743		tgtp = NULL;
    744
    745	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
    746			 ctxp->oxid, op, status);
    747
    748	if (status) {
    749		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
    750		rsp->transferred_length = 0;
    751		if (tgtp) {
    752			atomic_inc(&tgtp->xmt_fcp_rsp_error);
    753			if (result == IOERR_ABORT_REQUESTED)
    754				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
    755		}
    756
    757		logerr = LOG_NVME_IOERR;
    758
    759		/* pick up SLI4 exchange busy condition */
    760		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
    761			ctxp->flag |= LPFC_NVME_XBUSY;
    762			logerr |= LOG_NVME_ABTS;
    763			if (tgtp)
    764				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
    765
    766		} else {
    767			ctxp->flag &= ~LPFC_NVME_XBUSY;
    768		}
    769
    770		lpfc_printf_log(phba, KERN_INFO, logerr,
    771				"6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
    772				"XBUSY:x%x\n",
    773				ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
    774				status, result, ctxp->flag);
    775
    776	} else {
    777		rsp->fcp_error = NVME_SC_SUCCESS;
    778		if (op == NVMET_FCOP_RSP)
    779			rsp->transferred_length = rsp->rsplen;
    780		else
    781			rsp->transferred_length = rsp->transfer_length;
    782		if (tgtp)
    783			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
    784	}
    785
    786	if ((op == NVMET_FCOP_READDATA_RSP) ||
    787	    (op == NVMET_FCOP_RSP)) {
    788		/* Sanity check */
    789		ctxp->state = LPFC_NVME_STE_DONE;
    790		ctxp->entry_cnt++;
    791
    792#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
    793		if (ctxp->ts_cmd_nvme) {
    794			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
    795				ctxp->ts_isr_data =
    796					cmdwqe->isr_timestamp;
    797				ctxp->ts_data_nvme =
    798					ktime_get_ns();
    799				ctxp->ts_nvme_status =
    800					ctxp->ts_data_nvme;
    801				ctxp->ts_status_wqput =
    802					ctxp->ts_data_nvme;
    803				ctxp->ts_isr_status =
    804					ctxp->ts_data_nvme;
    805				ctxp->ts_status_nvme =
    806					ctxp->ts_data_nvme;
    807			} else {
    808				ctxp->ts_isr_status =
    809					cmdwqe->isr_timestamp;
    810				ctxp->ts_status_nvme =
    811					ktime_get_ns();
    812			}
    813		}
    814#endif
    815		rsp->done(rsp);
    816#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
    817		if (ctxp->ts_cmd_nvme)
    818			lpfc_nvmet_ktime(phba, ctxp);
    819#endif
    820		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
    821	} else {
    822		ctxp->entry_cnt++;
    823		start_clean = offsetof(struct lpfc_iocbq, cmd_flag);
    824		memset(((char *)cmdwqe) + start_clean, 0,
    825		       (sizeof(struct lpfc_iocbq) - start_clean));
    826#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
    827		if (ctxp->ts_cmd_nvme) {
    828			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
    829			ctxp->ts_data_nvme = ktime_get_ns();
    830		}
    831#endif
    832		rsp->done(rsp);
    833	}
    834#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
    835	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
    836		id = raw_smp_processor_id();
    837		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
    838		if (ctxp->cpu != id)
    839			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
    840					"6704 CPU Check cmdcmpl: "
    841					"cpu %d expect %d\n",
    842					id, ctxp->cpu);
    843	}
    844#endif
    845}
    846
    847/**
    848 * __lpfc_nvme_xmt_ls_rsp - Generic service routine to transmit
    849 *         an NVME LS rsp for a prior NVME LS request that was received.
    850 * @axchg: pointer to exchange context for the NVME LS request the response
    851 *         is for.
    852 * @ls_rsp: pointer to the transport LS RSP that is to be sent
    853 * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done
    854 *
    855 * This routine is used to format and send a WQE to transmit a NVME LS
    856 * Response.  The response is for a prior NVME LS request that was
    857 * received and posted to the transport.
    858 *
    859 * Returns:
    860 *  0 : if the response was successfully transmitted
    861 *  non-zero : if the response failed to transmit, of the form -Exxx.
    862 **/
    863int
    864__lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
    865			struct nvmefc_ls_rsp *ls_rsp,
    866			void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
    867				struct lpfc_iocbq *cmdwqe,
    868				struct lpfc_iocbq *rspwqe))
    869{
    870	struct lpfc_hba *phba = axchg->phba;
    871	struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
    872	struct lpfc_iocbq *nvmewqeq;
    873	struct lpfc_dmabuf dmabuf;
    874	struct ulp_bde64 bpl;
    875	int rc;
    876
    877	if (phba->pport->load_flag & FC_UNLOADING)
    878		return -ENODEV;
    879
    880	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
    881			"6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);
    882
    883	if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
    884		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
    885				"6412 NVMEx LS rsp state mismatch "
    886				"oxid x%x: %d %d\n",
    887				axchg->oxid, axchg->state, axchg->entry_cnt);
    888		return -EALREADY;
    889	}
    890	axchg->state = LPFC_NVME_STE_LS_RSP;
    891	axchg->entry_cnt++;
    892
    893	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
    894					 ls_rsp->rsplen);
    895	if (nvmewqeq == NULL) {
    896		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
    897				"6150 NVMEx LS Drop Rsp x%x: Prep\n",
    898				axchg->oxid);
    899		rc = -ENOMEM;
    900		goto out_free_buf;
    901	}
    902
    903	/* Save numBdes for bpl2sgl */
    904	nvmewqeq->num_bdes = 1;
    905	nvmewqeq->hba_wqidx = 0;
    906	nvmewqeq->bpl_dmabuf = &dmabuf;
    907	dmabuf.virt = &bpl;
    908	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
    909	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
    910	bpl.tus.f.bdeSize = ls_rsp->rsplen;
    911	bpl.tus.f.bdeFlags = 0;
    912	bpl.tus.w = le32_to_cpu(bpl.tus.w);
    913	/*
    914	 * Note: although we're using stack space for the dmabuf, the
    915	 * call to lpfc_sli4_issue_wqe is synchronous, so it will not
    916	 * be referenced after it returns back to this routine.
    917	 */
    918
    919	nvmewqeq->cmd_cmpl = xmt_ls_rsp_cmp;
    920	nvmewqeq->context_un.axchg = axchg;
    921
    922	lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
    923			 axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);
    924
    925	rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);
    926
    927	/* clear to be sure there's no reference */
    928	nvmewqeq->bpl_dmabuf = NULL;
    929
    930	if (rc == WQE_SUCCESS) {
    931		/*
    932		 * Okay to repost buffer here, but wait till cmpl
    933		 * before freeing ctxp and iocbq.
    934		 */
    935		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
    936		return 0;
    937	}
    938
    939	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
    940			"6151 NVMEx LS RSP x%x: failed to transmit %d\n",
    941			axchg->oxid, rc);
    942
    943	rc = -ENXIO;
    944
    945	lpfc_nlp_put(nvmewqeq->ndlp);
    946
    947out_free_buf:
    948	/* Give back resources */
    949	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
    950
    951	/*
    952	 * As transport doesn't track completions of responses, if the rsp
    953	 * fails to send, the transport will effectively ignore the rsp
    954	 * and consider the LS done. However, the driver has an active
    955	 * exchange open for the LS - so be sure to abort the exchange
    956	 * if the response isn't sent.
    957	 */
    958	lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
    959	return rc;
    960}
    961
    962/**
    963 * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response
    964 * @tgtport: pointer to target port that NVME LS is to be transmit from.
    965 * @ls_rsp: pointer to the transport LS RSP that is to be sent
    966 *
    967 * Driver registers this routine to transmit responses for received NVME
    968 * LS requests.
    969 *
    970 * This routine is used to format and send a WQE to transmit a NVME LS
    971 * Response. The ls_rsp is used to reverse-map the LS to the original
    972 * NVME LS request sequence, which provides addressing information for
    973 * the remote port the LS is to be sent to, as well as the exchange id
    974 * that the LS is bound to.
    975 *
    976 * Returns:
    977 *  0 : if the response was successfully transmitted
    978 *  non-zero : if the response failed to transmit, of the form -Exxx.
    979 **/
    980static int
    981lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
    982		      struct nvmefc_ls_rsp *ls_rsp)
    983{
    984	struct lpfc_async_xchg_ctx *axchg =
    985		container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
    986	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
    987	int rc;
    988
    989	if (axchg->phba->pport->load_flag & FC_UNLOADING)
    990		return -ENODEV;
    991
    992	rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);
    993
    994	if (rc) {
    995		atomic_inc(&nvmep->xmt_ls_drop);
    996		/*
    997		 * unless the failure is due to having already sent
    998		 * the response, an abort will be generated for the
    999		 * exchange if the rsp can't be sent.
   1000		 */
   1001		if (rc != -EALREADY)
   1002			atomic_inc(&nvmep->xmt_ls_abort);
   1003		return rc;
   1004	}
   1005
   1006	atomic_inc(&nvmep->xmt_ls_rsp);
   1007	return 0;
   1008}
   1009
   1010static int
   1011lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
   1012		      struct nvmefc_tgt_fcp_req *rsp)
   1013{
   1014	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
   1015	struct lpfc_async_xchg_ctx *ctxp =
   1016		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
   1017	struct lpfc_hba *phba = ctxp->phba;
   1018	struct lpfc_queue *wq;
   1019	struct lpfc_iocbq *nvmewqeq;
   1020	struct lpfc_sli_ring *pring;
   1021	unsigned long iflags;
   1022	int rc;
   1023#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
   1024	int id;
   1025#endif
   1026
   1027	if (phba->pport->load_flag & FC_UNLOADING) {
   1028		rc = -ENODEV;
   1029		goto aerr;
   1030	}
   1031
   1032#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
   1033	if (ctxp->ts_cmd_nvme) {
   1034		if (rsp->op == NVMET_FCOP_RSP)
   1035			ctxp->ts_nvme_status = ktime_get_ns();
   1036		else
   1037			ctxp->ts_nvme_data = ktime_get_ns();
   1038	}
   1039
   1040	/* Setup the hdw queue if not already set */
   1041	if (!ctxp->hdwq)
   1042		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
   1043
   1044	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
   1045		id = raw_smp_processor_id();
   1046		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
   1047		if (rsp->hwqid != id)
   1048			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
   1049					"6705 CPU Check OP: "
   1050					"cpu %d expect %d\n",
   1051					id, rsp->hwqid);
   1052		ctxp->cpu = id; /* Setup cpu for cmpl check */
   1053	}
   1054#endif
   1055
   1056	/* Sanity check */
   1057	if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
   1058	    (ctxp->state == LPFC_NVME_STE_ABORT)) {
   1059		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
   1060		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   1061				"6102 IO oxid x%x aborted\n",
   1062				ctxp->oxid);
   1063		rc = -ENXIO;
   1064		goto aerr;
   1065	}
   1066
   1067	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
   1068	if (nvmewqeq == NULL) {
   1069		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
   1070		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   1071				"6152 FCP Drop IO x%x: Prep\n",
   1072				ctxp->oxid);
   1073		rc = -ENXIO;
   1074		goto aerr;
   1075	}
   1076
   1077	nvmewqeq->cmd_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
   1078	nvmewqeq->context_un.axchg = ctxp;
   1079	nvmewqeq->cmd_flag |=  LPFC_IO_NVMET;
   1080	ctxp->wqeq->hba_wqidx = rsp->hwqid;
   1081
   1082	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
   1083			 ctxp->oxid, rsp->op, rsp->rsplen);
   1084
   1085	ctxp->flag |= LPFC_NVME_IO_INP;
   1086	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
   1087	if (rc == WQE_SUCCESS) {
   1088#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
   1089		if (!ctxp->ts_cmd_nvme)
   1090			return 0;
   1091		if (rsp->op == NVMET_FCOP_RSP)
   1092			ctxp->ts_status_wqput = ktime_get_ns();
   1093		else
   1094			ctxp->ts_data_wqput = ktime_get_ns();
   1095#endif
   1096		return 0;
   1097	}
   1098
   1099	if (rc == -EBUSY) {
   1100		/*
   1101		 * WQ was full, so queue nvmewqeq to be sent after
   1102		 * WQE release CQE
   1103		 */
   1104		ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
   1105		wq = ctxp->hdwq->io_wq;
   1106		pring = wq->pring;
   1107		spin_lock_irqsave(&pring->ring_lock, iflags);
   1108		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
   1109		wq->q_flag |= HBA_NVMET_WQFULL;
   1110		spin_unlock_irqrestore(&pring->ring_lock, iflags);
   1111		atomic_inc(&lpfc_nvmep->defer_wqfull);
   1112		return 0;
   1113	}
   1114
   1115	/* Give back resources */
   1116	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
   1117	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   1118			"6153 FCP Drop IO x%x: Issue: %d\n",
   1119			ctxp->oxid, rc);
   1120
   1121	ctxp->wqeq->hba_wqidx = 0;
   1122	nvmewqeq->context_un.axchg = NULL;
   1123	nvmewqeq->bpl_dmabuf = NULL;
   1124	rc = -EBUSY;
   1125aerr:
   1126	return rc;
   1127}
   1128
   1129static void
   1130lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
   1131{
   1132	struct lpfc_nvmet_tgtport *tport = targetport->private;
   1133
   1134	/* release any threads waiting for the unreg to complete */
   1135	if (tport->phba->targetport)
   1136		complete(tport->tport_unreg_cmp);
   1137}
   1138
   1139static void
   1140lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
   1141			 struct nvmefc_tgt_fcp_req *req)
   1142{
   1143	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
   1144	struct lpfc_async_xchg_ctx *ctxp =
   1145		container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
   1146	struct lpfc_hba *phba = ctxp->phba;
   1147	struct lpfc_queue *wq;
   1148	unsigned long flags;
   1149
   1150	if (phba->pport->load_flag & FC_UNLOADING)
   1151		return;
   1152
   1153	if (!ctxp->hdwq)
   1154		ctxp->hdwq = &phba->sli4_hba.hdwq[0];
   1155
   1156	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
   1157			"6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
   1158			ctxp->oxid, ctxp->flag, ctxp->state);
   1159
   1160	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
   1161			 ctxp->oxid, ctxp->flag, ctxp->state);
   1162
   1163	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
   1164
   1165	spin_lock_irqsave(&ctxp->ctxlock, flags);
   1166
   1167	/* Since iaab/iaar are NOT set, we need to check
   1168	 * if the firmware is in the process of aborting the IO
   1169	 */
   1170	if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
   1171		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
   1172		return;
   1173	}
   1174	ctxp->flag |= LPFC_NVME_ABORT_OP;
   1175
   1176	if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
   1177		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
   1178		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
   1179						 ctxp->oxid);
   1180		wq = ctxp->hdwq->io_wq;
   1181		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
   1182		return;
   1183	}
   1184	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
   1185
   1186	/* A state of LPFC_NVME_STE_RCV means we have just received
   1187	 * the NVME command and have not started processing it
   1188	 * (by issuing any IO WQEs on this exchange yet).
   1189	 */
   1190	if (ctxp->state == LPFC_NVME_STE_RCV)
   1191		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
   1192						 ctxp->oxid);
   1193	else
   1194		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
   1195					       ctxp->oxid);
   1196}
   1197
   1198static void
   1199lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
   1200			   struct nvmefc_tgt_fcp_req *rsp)
   1201{
   1202	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
   1203	struct lpfc_async_xchg_ctx *ctxp =
   1204		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
   1205	struct lpfc_hba *phba = ctxp->phba;
   1206	unsigned long flags;
   1207	bool aborting = false;
   1208
   1209	spin_lock_irqsave(&ctxp->ctxlock, flags);
   1210	if (ctxp->flag & LPFC_NVME_XBUSY)
   1211		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
   1212				"6027 NVMET release with XBUSY flag x%x"
   1213				" oxid x%x\n",
   1214				ctxp->flag, ctxp->oxid);
   1215	else if (ctxp->state != LPFC_NVME_STE_DONE &&
   1216		 ctxp->state != LPFC_NVME_STE_ABORT)
   1217		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   1218				"6413 NVMET release bad state %d %d oxid x%x\n",
   1219				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
   1220
   1221	if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
   1222	    (ctxp->flag & LPFC_NVME_XBUSY)) {
   1223		aborting = true;
   1224		/* let the abort path do the real release */
   1225		lpfc_nvmet_defer_release(phba, ctxp);
   1226	}
   1227	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
   1228
   1229	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
   1230			 ctxp->state, aborting);
   1231
   1232	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
   1233	ctxp->flag &= ~LPFC_NVME_TNOTIFY;
   1234
   1235	if (aborting)
   1236		return;
   1237
   1238	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
   1239}
   1240
   1241static void
   1242lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
   1243		     struct nvmefc_tgt_fcp_req *rsp)
   1244{
   1245	struct lpfc_nvmet_tgtport *tgtp;
   1246	struct lpfc_async_xchg_ctx *ctxp =
   1247		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
   1248	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
   1249	struct lpfc_hba *phba = ctxp->phba;
   1250	unsigned long iflag;
   1251
   1252
   1253	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
   1254			 ctxp->oxid, ctxp->size, raw_smp_processor_id());
   1255
   1256	if (!nvmebuf) {
   1257		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
   1258				"6425 Defer rcv: no buffer oxid x%x: "
   1259				"flg %x ste %x\n",
   1260				ctxp->oxid, ctxp->flag, ctxp->state);
   1261		return;
   1262	}
   1263
   1264	tgtp = phba->targetport->private;
   1265	if (tgtp)
   1266		atomic_inc(&tgtp->rcv_fcp_cmd_defer);
   1267
   1268	/* Free the nvmebuf since a new buffer already replaced it */
   1269	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
   1270	spin_lock_irqsave(&ctxp->ctxlock, iflag);
   1271	ctxp->rqb_buffer = NULL;
   1272	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
   1273}
   1274
   1275/**
   1276 * lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request
   1277 * @phba: Pointer to HBA context object
   1278 * @cmdwqe: Pointer to driver command WQE object.
   1279 * @rspwqe: Pointer to driver response WQE object.
   1280 *
   1281 * This function is the completion handler for NVME LS requests.
   1282 * The function updates any states and statistics, then calls the
   1283 * generic completion handler to finish completion of the request.
   1284 **/
   1285static void
   1286lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
   1287		      struct lpfc_iocbq *rspwqe)
   1288{
   1289	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
   1290	__lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
   1291}
   1292
   1293/**
   1294 * lpfc_nvmet_ls_req - Issue a Link Service request
   1295 * @targetport: pointer to target instance registered with nvmet transport.
   1296 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
   1297 *               Driver sets this value to the ndlp pointer.
   1298 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
   1299 *
   1300 * Driver registers this routine to handle any link service request
   1301 * from the nvmet_fc transport to a remote nvme-aware port.
   1302 *
   1303 * Return value :
   1304 *   0 - Success
   1305 *   non-zero: various error codes, in form of -Exxx
   1306 **/
   1307static int
   1308lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
   1309		  void *hosthandle,
   1310		  struct nvmefc_ls_req *pnvme_lsreq)
   1311{
   1312	struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
   1313	struct lpfc_hba *phba;
   1314	struct lpfc_nodelist *ndlp;
   1315	int ret;
   1316	u32 hstate;
   1317
   1318	if (!lpfc_nvmet)
   1319		return -EINVAL;
   1320
   1321	phba = lpfc_nvmet->phba;
   1322	if (phba->pport->load_flag & FC_UNLOADING)
   1323		return -EINVAL;
   1324
   1325	hstate = atomic_read(&lpfc_nvmet->state);
   1326	if (hstate == LPFC_NVMET_INV_HOST_ACTIVE)
   1327		return -EACCES;
   1328
   1329	ndlp = (struct lpfc_nodelist *)hosthandle;
   1330
   1331	ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq,
   1332				 lpfc_nvmet_ls_req_cmp);
   1333
   1334	return ret;
   1335}
   1336
   1337/**
   1338 * lpfc_nvmet_ls_abort - Abort a prior NVME LS request
   1339 * @targetport: Transport targetport, that LS was issued from.
   1340 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
   1341 *               Driver sets this value to the ndlp pointer.
   1342 * @pnvme_lsreq: the transport nvme_ls_req structure for LS to be aborted
   1343 *
   1344 * Driver registers this routine to abort an NVME LS request that is
   1345 * in progress (from the transport's perspective).
   1346 **/
   1347static void
   1348lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
   1349		    void *hosthandle,
   1350		    struct nvmefc_ls_req *pnvme_lsreq)
   1351{
   1352	struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
   1353	struct lpfc_hba *phba;
   1354	struct lpfc_nodelist *ndlp;
   1355	int ret;
   1356
   1357	phba = lpfc_nvmet->phba;
   1358	if (phba->pport->load_flag & FC_UNLOADING)
   1359		return;
   1360
   1361	ndlp = (struct lpfc_nodelist *)hosthandle;
   1362
   1363	ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq);
   1364	if (!ret)
   1365		atomic_inc(&lpfc_nvmet->xmt_ls_abort);
   1366}
   1367
   1368static void
   1369lpfc_nvmet_host_release(void *hosthandle)
   1370{
   1371	struct lpfc_nodelist *ndlp = hosthandle;
   1372	struct lpfc_hba *phba = ndlp->phba;
   1373	struct lpfc_nvmet_tgtport *tgtp;
   1374
   1375	if (!phba->targetport || !phba->targetport->private)
   1376		return;
   1377
   1378	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
   1379			"6202 NVMET XPT releasing hosthandle x%px "
   1380			"DID x%x xflags x%x refcnt %d\n",
   1381			hosthandle, ndlp->nlp_DID, ndlp->fc4_xpt_flags,
   1382			kref_read(&ndlp->kref));
   1383	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
   1384	spin_lock_irq(&ndlp->lock);
   1385	ndlp->fc4_xpt_flags &= ~NLP_XPT_HAS_HH;
   1386	spin_unlock_irq(&ndlp->lock);
   1387	lpfc_nlp_put(ndlp);
   1388	atomic_set(&tgtp->state, 0);
   1389}
   1390
   1391static void
   1392lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
   1393{
   1394	struct lpfc_nvmet_tgtport *tgtp;
   1395	struct lpfc_hba *phba;
   1396	uint32_t rc;
   1397
   1398	tgtp = tgtport->private;
   1399	phba = tgtp->phba;
   1400
   1401	rc = lpfc_issue_els_rscn(phba->pport, 0);
   1402	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   1403			"6420 NVMET subsystem change: Notification %s\n",
   1404			(rc) ? "Failed" : "Sent");
   1405}
   1406
   1407static struct nvmet_fc_target_template lpfc_tgttemplate = {
   1408	.targetport_delete = lpfc_nvmet_targetport_delete,
   1409	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
   1410	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
   1411	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
   1412	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
   1413	.defer_rcv	= lpfc_nvmet_defer_rcv,
   1414	.discovery_event = lpfc_nvmet_discovery_event,
   1415	.ls_req         = lpfc_nvmet_ls_req,
   1416	.ls_abort       = lpfc_nvmet_ls_abort,
   1417	.host_release   = lpfc_nvmet_host_release,
   1418
   1419	.max_hw_queues  = 1,
   1420	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
   1421	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
   1422	.dma_boundary = 0xFFFFFFFF,
   1423
   1424	/* optional features */
   1425	.target_features = 0,
   1426	/* sizes of additional private data for data structures */
   1427	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
   1428	.lsrqst_priv_sz = 0,
   1429};
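/*
 * Editorial note: several of the sizing fields above (e.g. max_hw_queues and
 * max_sgl_segments) are only defaults; the driver is expected to adjust them
 * from the HBA configuration before registering the target port via
 * nvmet_fc_register_targetport().
 */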
   1430
   1431static void
   1432__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
   1433		struct lpfc_nvmet_ctx_info *infop)
   1434{
   1435	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
   1436	unsigned long flags;
   1437
   1438	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
   1439	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
   1440				&infop->nvmet_ctx_list, list) {
   1441		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
   1442		list_del_init(&ctx_buf->list);
   1443		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
   1444
   1445		spin_lock(&phba->hbalock);
   1446		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
   1447		spin_unlock(&phba->hbalock);
   1448
   1449		ctx_buf->sglq->state = SGL_FREED;
   1450		ctx_buf->sglq->ndlp = NULL;
   1451
   1452		spin_lock(&phba->sli4_hba.sgl_list_lock);
   1453		list_add_tail(&ctx_buf->sglq->list,
   1454				&phba->sli4_hba.lpfc_nvmet_sgl_list);
   1455		spin_unlock(&phba->sli4_hba.sgl_list_lock);
   1456
   1457		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
   1458		kfree(ctx_buf->context);
   1459	}
   1460	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
   1461}
   1462
   1463static void
   1464lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
   1465{
   1466	struct lpfc_nvmet_ctx_info *infop;
   1467	int i, j;
   1468
   1469	/* The first context list, MRQ 0 CPU 0 */
   1470	infop = phba->sli4_hba.nvmet_ctx_info;
   1471	if (!infop)
   1472		return;
   1473
   1474	/* Cycle through the entire CPU context list for every MRQ */
   1475	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
   1476		for_each_present_cpu(j) {
   1477			infop = lpfc_get_ctx_list(phba, j, i);
   1478			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
   1479		}
   1480	}
   1481	kfree(phba->sli4_hba.nvmet_ctx_info);
   1482	phba->sli4_hba.nvmet_ctx_info = NULL;
   1483}
   1484
   1485static int
   1486lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
   1487{
   1488	struct lpfc_nvmet_ctxbuf *ctx_buf;
   1489	struct lpfc_iocbq *nvmewqe;
   1490	union lpfc_wqe128 *wqe;
   1491	struct lpfc_nvmet_ctx_info *last_infop;
   1492	struct lpfc_nvmet_ctx_info *infop;
   1493	int i, j, idx, cpu;
   1494
   1495	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
   1496			"6403 Allocate NVMET resources for %d XRIs\n",
   1497			phba->sli4_hba.nvmet_xri_cnt);
   1498
   1499	phba->sli4_hba.nvmet_ctx_info = kcalloc(
   1500		phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
   1501		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
   1502	if (!phba->sli4_hba.nvmet_ctx_info) {
   1503		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   1504				"6419 Failed allocate memory for "
   1505				"nvmet context lists\n");
   1506		return -ENOMEM;
   1507	}
   1508
   1509	/*
   1510	 * Assuming X CPUs in the system, and Y MRQs, allocate some
   1511	 * lpfc_nvmet_ctx_info structures as follows:
   1512	 *
   1513	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
   1514	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
   1515	 * ...
   1516	 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
   1517	 *
   1518	 * Each line represents a MRQ "silo" containing an entry for
   1519	 * every CPU.
   1520	 *
   1521	 * MRQ X is initially assumed to be associated with CPU X, thus
   1522	 * contexts are initially distributed across all MRQs using
   1523	 * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
   1524	 * freed, they are freed to the MRQ silo based on the CPU number
   1525	 * of the IO completion. Thus a context that was allocated for MRQ A
   1526	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
   1527	 */
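	/*
	 * In other words, nvmet_ctx_info is a flat array of
	 * (num_possible_cpu * cfg_nvmet_mrq) entries (allocated above), and
	 * lpfc_get_ctx_list(phba, cpu, mrq) resolves the per-cpu, per-MRQ
	 * slot within it; see the lpfc_get_ctx_list() macro for the exact
	 * indexing.
	 */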
   1528	for_each_possible_cpu(i) {
   1529		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
   1530			infop = lpfc_get_ctx_list(phba, i, j);
   1531			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
   1532			spin_lock_init(&infop->nvmet_ctx_list_lock);
   1533			infop->nvmet_ctx_list_cnt = 0;
   1534		}
   1535	}
   1536
   1537	/*
   1538	 * Setup the next CPU context info ptr for each MRQ.
   1539	 * MRQ 0 will cycle thru CPUs 0 - X separately from
   1540	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
   1541	 */
   1542	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
   1543		last_infop = lpfc_get_ctx_list(phba,
   1544					       cpumask_first(cpu_present_mask),
   1545					       j);
   1546		for (i = phba->sli4_hba.num_possible_cpu - 1;  i >= 0; i--) {
   1547			infop = lpfc_get_ctx_list(phba, i, j);
   1548			infop->nvmet_ctx_next_cpu = last_infop;
   1549			last_infop = infop;
   1550		}
   1551	}
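        	/*
        	 * Illustrative note, not driver code: after the loop above, each
        	 * MRQ's per-CPU entries form a forward ring through
        	 * nvmet_ctx_next_cpu. Assuming CPU 0 is the first present CPU
        	 * and 4 possible CPUs, the ring for every MRQ is
        	 *
        	 *     cpu0 -> cpu1 -> cpu2 -> cpu3 -> cpu0 -> ...
        	 *
        	 * lpfc_nvmet_replenish_context() walks this ring whenever the
        	 * local silo runs empty.
        	 */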
   1552
   1553	/* For all nvmet xris, allocate resources needed to process a
   1554	 * received command on a per xri basis.
   1555	 */
   1556	idx = 0;
   1557	cpu = cpumask_first(cpu_present_mask);
   1558	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
   1559		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
   1560		if (!ctx_buf) {
   1561			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   1562					"6404 Ran out of memory for NVMET\n");
   1563			return -ENOMEM;
   1564		}
   1565
   1566		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
   1567					   GFP_KERNEL);
   1568		if (!ctx_buf->context) {
   1569			kfree(ctx_buf);
   1570			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   1571					"6405 Ran out of NVMET "
   1572					"context memory\n");
   1573			return -ENOMEM;
   1574		}
   1575		ctx_buf->context->ctxbuf = ctx_buf;
   1576		ctx_buf->context->state = LPFC_NVME_STE_FREE;
   1577
   1578		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
   1579		if (!ctx_buf->iocbq) {
   1580			kfree(ctx_buf->context);
   1581			kfree(ctx_buf);
   1582			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   1583					"6406 Ran out of NVMET iocb/WQEs\n");
   1584			return -ENOMEM;
   1585		}
   1586		ctx_buf->iocbq->cmd_flag = LPFC_IO_NVMET;
   1587		nvmewqe = ctx_buf->iocbq;
   1588		wqe = &nvmewqe->wqe;
   1589
   1590		/* Initialize WQE */
   1591		memset(wqe, 0, sizeof(union lpfc_wqe));
   1592
   1593		ctx_buf->iocbq->cmd_dmabuf = NULL;
   1594		spin_lock(&phba->sli4_hba.sgl_list_lock);
   1595		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
   1596		spin_unlock(&phba->sli4_hba.sgl_list_lock);
   1597		if (!ctx_buf->sglq) {
   1598			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
   1599			kfree(ctx_buf->context);
   1600			kfree(ctx_buf);
   1601			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   1602					"6407 Ran out of NVMET XRIs\n");
   1603			return -ENOMEM;
   1604		}
   1605		INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
   1606
   1607		/*
   1608		 * Add ctx to MRQidx context list. Our initial assumption
   1609		 * is MRQidx will be associated with CPUidx. This association
   1610		 * can change on the fly.
   1611		 */
   1612		infop = lpfc_get_ctx_list(phba, cpu, idx);
   1613		spin_lock(&infop->nvmet_ctx_list_lock);
   1614		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
   1615		infop->nvmet_ctx_list_cnt++;
   1616		spin_unlock(&infop->nvmet_ctx_list_lock);
   1617
   1618		/* Spread ctx structures evenly across all MRQs */
   1619		idx++;
   1620		if (idx >= phba->cfg_nvmet_mrq) {
   1621			idx = 0;
   1622			cpu = cpumask_first(cpu_present_mask);
   1623			continue;
   1624		}
   1625		cpu = cpumask_next(cpu, cpu_present_mask);
   1626		if (cpu == nr_cpu_ids)
   1627			cpu = cpumask_first(cpu_present_mask);
   1628
   1629	}
   1630
   1631	for_each_present_cpu(i) {
   1632		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
   1633			infop = lpfc_get_ctx_list(phba, i, j);
   1634			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
   1635					"6408 TOTAL NVMET ctx for CPU %d "
   1636					"MRQ %d: cnt %d nextcpu x%px\n",
   1637					i, j, infop->nvmet_ctx_list_cnt,
   1638					infop->nvmet_ctx_next_cpu);
   1639		}
   1640	}
   1641	return 0;
   1642}
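        /*
         * Illustrative note, not driver code: a worked example of the initial
         * context spread done by lpfc_nvmet_setup_io_context() above. Assuming
         * 4 present CPUs and cfg_nvmet_mrq = 2, the round-robin loop places
         *
         *     xri 0 -> cpu0/mrq0, xri 1 -> cpu1/mrq1, xri 2 -> cpu0/mrq0, ...
         *
         * so every context initially sits on a cpuN/mrqN silo, matching the
         * layout comment above; subsequent frees re-home a context to the silo
         * of the CPU that completed the IO.
         */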
   1643
   1644int
   1645lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
   1646{
   1647	struct lpfc_vport  *vport = phba->pport;
   1648	struct lpfc_nvmet_tgtport *tgtp;
   1649	struct nvmet_fc_port_info pinfo;
   1650	int error;
   1651
   1652	if (phba->targetport)
   1653		return 0;
   1654
   1655	error = lpfc_nvmet_setup_io_context(phba);
   1656	if (error)
   1657		return error;
   1658
   1659	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
   1660	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
   1661	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
   1662	pinfo.port_id = vport->fc_myDID;
   1663
   1664	/* We need to tell the transport layer + 1 because it takes page
   1665	 * alignment into account. When space for the SGL is allocated we
   1666	 * allocate + 3, one for cmd, one for rsp and one for this alignment
   1667	 */
   1668	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
   1669	lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
   1670	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
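        	/*
        	 * Illustrative note, not driver code: a worked example of the
        	 * accounting described in the comment above. Assuming
        	 * cfg_nvme_seg_cnt = 64, the transport is advertised
        	 * max_sgl_segments = 65, while the driver-side SGL allocation
        	 * (done elsewhere) reserves 64 + 3 SGEs: one for the cmd IU,
        	 * one for the rsp IU and one for the alignment entry.
        	 */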
   1671
   1672#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
   1673	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
   1674					     &phba->pcidev->dev,
   1675					     &phba->targetport);
   1676#else
   1677	error = -ENOENT;
   1678#endif
   1679	if (error) {
   1680		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   1681				"6025 Cannot register NVME targetport x%x: "
   1682				"portnm %llx nodenm %llx segs %d qs %d\n",
   1683				error,
   1684				pinfo.port_name, pinfo.node_name,
   1685				lpfc_tgttemplate.max_sgl_segments,
   1686				lpfc_tgttemplate.max_hw_queues);
   1687		phba->targetport = NULL;
   1688		phba->nvmet_support = 0;
   1689
   1690		lpfc_nvmet_cleanup_io_context(phba);
   1691
   1692	} else {
   1693		tgtp = (struct lpfc_nvmet_tgtport *)
   1694			phba->targetport->private;
   1695		tgtp->phba = phba;
   1696
   1697		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
   1698				"6026 Registered NVME "
   1699				"targetport: x%px, private x%px "
   1700				"portnm %llx nodenm %llx segs %d qs %d\n",
   1701				phba->targetport, tgtp,
   1702				pinfo.port_name, pinfo.node_name,
   1703				lpfc_tgttemplate.max_sgl_segments,
   1704				lpfc_tgttemplate.max_hw_queues);
   1705
   1706		atomic_set(&tgtp->rcv_ls_req_in, 0);
   1707		atomic_set(&tgtp->rcv_ls_req_out, 0);
   1708		atomic_set(&tgtp->rcv_ls_req_drop, 0);
   1709		atomic_set(&tgtp->xmt_ls_abort, 0);
   1710		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
   1711		atomic_set(&tgtp->xmt_ls_rsp, 0);
   1712		atomic_set(&tgtp->xmt_ls_drop, 0);
   1713		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
   1714		atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
   1715		atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
   1716		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
   1717		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
   1718		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
   1719		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
   1720		atomic_set(&tgtp->xmt_fcp_drop, 0);
   1721		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
   1722		atomic_set(&tgtp->xmt_fcp_read, 0);
   1723		atomic_set(&tgtp->xmt_fcp_write, 0);
   1724		atomic_set(&tgtp->xmt_fcp_rsp, 0);
   1725		atomic_set(&tgtp->xmt_fcp_release, 0);
   1726		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
   1727		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
   1728		atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
   1729		atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
   1730		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
   1731		atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
   1732		atomic_set(&tgtp->xmt_fcp_abort, 0);
   1733		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
   1734		atomic_set(&tgtp->xmt_abort_unsol, 0);
   1735		atomic_set(&tgtp->xmt_abort_sol, 0);
   1736		atomic_set(&tgtp->xmt_abort_rsp, 0);
   1737		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
   1738		atomic_set(&tgtp->defer_ctx, 0);
   1739		atomic_set(&tgtp->defer_fod, 0);
   1740		atomic_set(&tgtp->defer_wqfull, 0);
   1741	}
   1742	return error;
   1743}
   1744
   1745int
   1746lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
   1747{
   1748	struct lpfc_vport  *vport = phba->pport;
   1749
   1750	if (!phba->targetport)
   1751		return 0;
   1752
   1753	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
   1754			 "6007 Update NVMET port x%px did x%x\n",
   1755			 phba->targetport, vport->fc_myDID);
   1756
   1757	phba->targetport->port_id = vport->fc_myDID;
   1758	return 0;
   1759}
   1760
   1761/**
   1762 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
   1763 * @phba: pointer to lpfc hba data structure.
   1764 * @axri: pointer to the nvmet xri abort wcqe structure.
   1765 *
   1766 * This routine is invoked by the worker thread to process a SLI4 fast-path
   1767 * NVMET aborted xri.
   1768 **/
   1769void
   1770lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
   1771			    struct sli4_wcqe_xri_aborted *axri)
   1772{
   1773#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
   1774	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
   1775	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
   1776	struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
   1777	struct lpfc_nvmet_tgtport *tgtp;
   1778	struct nvmefc_tgt_fcp_req *req = NULL;
   1779	struct lpfc_nodelist *ndlp;
   1780	unsigned long iflag = 0;
   1781	int rrq_empty = 0;
   1782	bool released = false;
   1783
   1784	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
   1785			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
   1786
   1787	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
   1788		return;
   1789
   1790	if (phba->targetport) {
   1791		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
   1792		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
   1793	}
   1794
   1795	spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
   1796	list_for_each_entry_safe(ctxp, next_ctxp,
   1797				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
   1798				 list) {
   1799		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
   1800			continue;
   1801
   1802		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
   1803				       iflag);
   1804
   1805		spin_lock_irqsave(&ctxp->ctxlock, iflag);
   1806		/* Check if we already received a free context call
   1807		 * and we have completed processing an abort situation.
   1808		 */
   1809		if (ctxp->flag & LPFC_NVME_CTX_RLS &&
   1810		    !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
   1811			spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
   1812			list_del_init(&ctxp->list);
   1813			spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
   1814			released = true;
   1815		}
   1816		ctxp->flag &= ~LPFC_NVME_XBUSY;
   1817		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
   1818
   1819		rrq_empty = list_empty(&phba->active_rrq_list);
   1820		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
   1821		if (ndlp &&
   1822		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
   1823		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
   1824			lpfc_set_rrq_active(phba, ndlp,
   1825				ctxp->ctxbuf->sglq->sli4_lxritag,
   1826				rxid, 1);
   1827			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
   1828		}
   1829
   1830		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
   1831				"6318 XB aborted oxid x%x flg x%x (%x)\n",
   1832				ctxp->oxid, ctxp->flag, released);
   1833		if (released)
   1834			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
   1835
   1836		if (rrq_empty)
   1837			lpfc_worker_wake_up(phba);
   1838		return;
   1839	}
   1840	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
   1841	ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
   1842	if (ctxp) {
   1843		/*
   1844		 *  Abort already done by FW, so BA_ACC sent.
   1845		 *  However, the transport may be unaware.
   1846		 */
   1847		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
   1848				"6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
   1849				"flag x%x oxid x%x rxid x%x\n",
   1850				xri, ctxp->state, ctxp->flag, ctxp->oxid,
   1851				rxid);
   1852
   1853		spin_lock_irqsave(&ctxp->ctxlock, iflag);
   1854		ctxp->flag |= LPFC_NVME_ABTS_RCV;
   1855		ctxp->state = LPFC_NVME_STE_ABORT;
   1856		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
   1857
   1858		lpfc_nvmeio_data(phba,
   1859				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
   1860				 xri, raw_smp_processor_id(), 0);
   1861
   1862		req = &ctxp->hdlrctx.fcp_req;
   1863		if (req)
   1864			nvmet_fc_rcv_fcp_abort(phba->targetport, req);
   1865	}
   1866#endif
   1867}
   1868
   1869int
   1870lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
   1871			   struct fc_frame_header *fc_hdr)
   1872{
   1873#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
   1874	struct lpfc_hba *phba = vport->phba;
   1875	struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
   1876	struct nvmefc_tgt_fcp_req *rsp;
   1877	uint32_t sid;
   1878	uint16_t oxid, xri;
   1879	unsigned long iflag = 0;
   1880
   1881	sid = sli4_sid_from_fc_hdr(fc_hdr);
   1882	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
   1883
   1884	spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
   1885	list_for_each_entry_safe(ctxp, next_ctxp,
   1886				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
   1887				 list) {
   1888		if (ctxp->oxid != oxid || ctxp->sid != sid)
   1889			continue;
   1890
   1891		xri = ctxp->ctxbuf->sglq->sli4_xritag;
   1892
   1893		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
   1894				       iflag);
   1895		spin_lock_irqsave(&ctxp->ctxlock, iflag);
   1896		ctxp->flag |= LPFC_NVME_ABTS_RCV;
   1897		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
   1898
   1899		lpfc_nvmeio_data(phba,
   1900			"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
   1901			xri, raw_smp_processor_id(), 0);
   1902
   1903		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
   1904				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
   1905
   1906		rsp = &ctxp->hdlrctx.fcp_req;
   1907		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
   1908
   1909		/* Respond with BA_ACC accordingly */
   1910		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
   1911		return 0;
   1912	}
   1913	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
   1914	/* check the wait list */
   1915	if (phba->sli4_hba.nvmet_io_wait_cnt) {
   1916		struct rqb_dmabuf *nvmebuf;
   1917		struct fc_frame_header *fc_hdr_tmp;
   1918		u32 sid_tmp;
   1919		u16 oxid_tmp;
   1920		bool found = false;
   1921
   1922		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
   1923
   1924		/* match by oxid and s_id */
   1925		list_for_each_entry(nvmebuf,
   1926				    &phba->sli4_hba.lpfc_nvmet_io_wait_list,
   1927				    hbuf.list) {
   1928			fc_hdr_tmp = (struct fc_frame_header *)
   1929					(nvmebuf->hbuf.virt);
   1930			oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
   1931			sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
   1932			if (oxid_tmp != oxid || sid_tmp != sid)
   1933				continue;
   1934
   1935			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
   1936					"6321 NVMET Rcv ABTS oxid x%x from x%x "
   1937					"is waiting for a ctxp\n",
   1938					oxid, sid);
   1939
   1940			list_del_init(&nvmebuf->hbuf.list);
   1941			phba->sli4_hba.nvmet_io_wait_cnt--;
   1942			found = true;
   1943			break;
   1944		}
   1945		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
   1946				       iflag);
   1947
   1948		/* free buffer since already posted a new DMA buffer to RQ */
   1949		if (found) {
   1950			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
   1951			/* Respond with BA_ACC accordingly */
   1952			lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
   1953			return 0;
   1954		}
   1955	}
   1956
   1957	/* check active list */
   1958	ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
   1959	if (ctxp) {
   1960		xri = ctxp->ctxbuf->sglq->sli4_xritag;
   1961
   1962		spin_lock_irqsave(&ctxp->ctxlock, iflag);
   1963		ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
   1964		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
   1965
   1966		lpfc_nvmeio_data(phba,
   1967				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
   1968				 xri, raw_smp_processor_id(), 0);
   1969
   1970		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
   1971				"6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
   1972				"flag x%x state x%x\n",
   1973				ctxp->oxid, xri, ctxp->flag, ctxp->state);
   1974
   1975		if (ctxp->flag & LPFC_NVME_TNOTIFY) {
   1976			/* Notify the transport */
   1977			nvmet_fc_rcv_fcp_abort(phba->targetport,
   1978					       &ctxp->hdlrctx.fcp_req);
   1979		} else {
   1980			cancel_work_sync(&ctxp->ctxbuf->defer_work);
   1981			spin_lock_irqsave(&ctxp->ctxlock, iflag);
   1982			lpfc_nvmet_defer_release(phba, ctxp);
   1983			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
   1984		}
   1985		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
   1986					       ctxp->oxid);
   1987
   1988		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
   1989		return 0;
   1990	}
   1991
   1992	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
   1993			 oxid, raw_smp_processor_id(), 1);
   1994
   1995	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
   1996			"6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
   1997
   1998	/* Respond with BA_RJT accordingly */
   1999	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
   2000#endif
   2001	return 0;
   2002}
   2003
   2004static void
   2005lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
   2006			struct lpfc_async_xchg_ctx *ctxp)
   2007{
   2008	struct lpfc_sli_ring *pring;
   2009	struct lpfc_iocbq *nvmewqeq;
   2010	struct lpfc_iocbq *next_nvmewqeq;
   2011	unsigned long iflags;
   2012	struct lpfc_wcqe_complete wcqe;
   2013	struct lpfc_wcqe_complete *wcqep;
   2014
   2015	pring = wq->pring;
   2016	wcqep = &wcqe;
   2017
   2018	/* Fake an ABORT error code back to cmpl routine */
   2019	memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
   2020	bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
   2021	wcqep->parameter = IOERR_ABORT_REQUESTED;
   2022
   2023	spin_lock_irqsave(&pring->ring_lock, iflags);
   2024	list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
   2025				 &wq->wqfull_list, list) {
   2026		if (ctxp) {
   2027			/* Checking for a specific IO to flush */
   2028			if (nvmewqeq->context_un.axchg == ctxp) {
   2029				list_del(&nvmewqeq->list);
   2030				spin_unlock_irqrestore(&pring->ring_lock,
   2031						       iflags);
   2032				memcpy(&nvmewqeq->wcqe_cmpl, wcqep,
   2033				       sizeof(*wcqep));
   2034				lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
   2035							  nvmewqeq);
   2036				return;
   2037			}
   2038			continue;
   2039		} else {
   2040			/* Flush all IOs */
   2041			list_del(&nvmewqeq->list);
   2042			spin_unlock_irqrestore(&pring->ring_lock, iflags);
   2043			memcpy(&nvmewqeq->wcqe_cmpl, wcqep, sizeof(*wcqep));
   2044			lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, nvmewqeq);
   2045			spin_lock_irqsave(&pring->ring_lock, iflags);
   2046		}
   2047	}
   2048	if (!ctxp)
   2049		wq->q_flag &= ~HBA_NVMET_WQFULL;
   2050	spin_unlock_irqrestore(&pring->ring_lock, iflags);
   2051}
   2052
   2053void
   2054lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
   2055			  struct lpfc_queue *wq)
   2056{
   2057#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
   2058	struct lpfc_sli_ring *pring;
   2059	struct lpfc_iocbq *nvmewqeq;
   2060	struct lpfc_async_xchg_ctx *ctxp;
   2061	unsigned long iflags;
   2062	int rc;
   2063
   2064	/*
   2065	 * Some WQE slots are available, so try to re-issue anything
   2066	 * on the WQ wqfull_list.
   2067	 */
   2068	pring = wq->pring;
   2069	spin_lock_irqsave(&pring->ring_lock, iflags);
   2070	while (!list_empty(&wq->wqfull_list)) {
   2071		list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
   2072				 list);
   2073		spin_unlock_irqrestore(&pring->ring_lock, iflags);
   2074		ctxp = nvmewqeq->context_un.axchg;
   2075		rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
   2076		spin_lock_irqsave(&pring->ring_lock, iflags);
   2077		if (rc == -EBUSY) {
   2078			/* WQ was full again, so put it back on the list */
   2079			list_add(&nvmewqeq->list, &wq->wqfull_list);
   2080			spin_unlock_irqrestore(&pring->ring_lock, iflags);
   2081			return;
   2082		}
   2083		if (rc == WQE_SUCCESS) {
   2084#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
   2085			if (ctxp->ts_cmd_nvme) {
   2086				if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP)
   2087					ctxp->ts_status_wqput = ktime_get_ns();
   2088				else
   2089					ctxp->ts_data_wqput = ktime_get_ns();
   2090			}
   2091#endif
   2092		} else {
   2093			WARN_ON(rc);
   2094		}
   2095	}
   2096	wq->q_flag &= ~HBA_NVMET_WQFULL;
   2097	spin_unlock_irqrestore(&pring->ring_lock, iflags);
   2098
   2099#endif
   2100}
   2101
   2102void
   2103lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
   2104{
   2105#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
   2106	struct lpfc_nvmet_tgtport *tgtp;
   2107	struct lpfc_queue *wq;
   2108	uint32_t qidx;
   2109	DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
   2110
   2111	if (phba->nvmet_support == 0)
   2112		return;
   2113	if (phba->targetport) {
   2114		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
   2115		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
   2116			wq = phba->sli4_hba.hdwq[qidx].io_wq;
   2117			lpfc_nvmet_wqfull_flush(phba, wq, NULL);
   2118		}
   2119		tgtp->tport_unreg_cmp = &tport_unreg_cmp;
   2120		nvmet_fc_unregister_targetport(phba->targetport);
   2121		if (!wait_for_completion_timeout(&tport_unreg_cmp,
   2122					msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
   2123			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2124					"6179 Unreg targetport x%px timeout "
   2125					"reached.\n", phba->targetport);
   2126		lpfc_nvmet_cleanup_io_context(phba);
   2127	}
   2128	phba->targetport = NULL;
   2129#endif
   2130}
   2131
   2132/**
   2133 * lpfc_nvmet_handle_lsreq - Process an NVME LS request
   2134 * @phba: pointer to lpfc hba data structure.
   2135 * @axchg: pointer to exchange context for the NVME LS request
   2136 *
    2137 * This routine is used for processing an asynchronously received NVME LS
   2138 * request. Any remaining validation is done and the LS is then forwarded
   2139 * to the nvmet-fc transport via nvmet_fc_rcv_ls_req().
   2140 *
   2141 * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing)
   2142 * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done.
   2143 * lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
   2144 *
   2145 * Returns 0 if LS was handled and delivered to the transport
   2146 * Returns 1 if LS failed to be handled and should be dropped
   2147 */
   2148int
   2149lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
   2150			struct lpfc_async_xchg_ctx *axchg)
   2151{
   2152#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
   2153	struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private;
   2154	uint32_t *payload = axchg->payload;
   2155	int rc;
   2156
   2157	atomic_inc(&tgtp->rcv_ls_req_in);
   2158
   2159	/*
   2160	 * Driver passes the ndlp as the hosthandle argument allowing
    2161	 * the transport to generate LS requests for any associations
   2162	 * that are created.
   2163	 */
   2164	rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp,
   2165				 axchg->payload, axchg->size);
   2166
   2167	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
   2168			"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
   2169			"%08x %08x %08x\n", axchg->size, rc,
   2170			*payload, *(payload+1), *(payload+2),
   2171			*(payload+3), *(payload+4), *(payload+5));
   2172
   2173	if (!rc) {
   2174		atomic_inc(&tgtp->rcv_ls_req_out);
   2175		return 0;
   2176	}
   2177
   2178	atomic_inc(&tgtp->rcv_ls_req_drop);
   2179#endif
   2180	return 1;
   2181}
   2182
   2183static void
   2184lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
   2185{
   2186#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
   2187	struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
   2188	struct lpfc_hba *phba = ctxp->phba;
   2189	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
   2190	struct lpfc_nvmet_tgtport *tgtp;
   2191	uint32_t *payload, qno;
   2192	uint32_t rc;
   2193	unsigned long iflags;
   2194
   2195	if (!nvmebuf) {
   2196		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2197			"6159 process_rcv_fcp_req, nvmebuf is NULL, "
   2198			"oxid: x%x flg: x%x state: x%x\n",
   2199			ctxp->oxid, ctxp->flag, ctxp->state);
   2200		spin_lock_irqsave(&ctxp->ctxlock, iflags);
   2201		lpfc_nvmet_defer_release(phba, ctxp);
   2202		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
   2203		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
   2204						 ctxp->oxid);
   2205		return;
   2206	}
   2207
   2208	if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
   2209		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2210				"6324 IO oxid x%x aborted\n",
   2211				ctxp->oxid);
   2212		return;
   2213	}
   2214
   2215	payload = (uint32_t *)(nvmebuf->dbuf.virt);
   2216	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
   2217	ctxp->flag |= LPFC_NVME_TNOTIFY;
   2218#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
   2219	if (ctxp->ts_isr_cmd)
   2220		ctxp->ts_cmd_nvme = ktime_get_ns();
   2221#endif
   2222	/*
   2223	 * The calling sequence should be:
    2224	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
    2225	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
    2226	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
    2227	 * from the NVME command / FC header has been saved.
   2228	 * A buffer has already been reposted for this IO, so just free
   2229	 * the nvmebuf.
   2230	 */
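        	/*
        	 * Illustrative note, not driver code: a sketch of how the
        	 * nvmet_fc_rcv_fcp_req() return value is handled below.
        	 *
        	 *     0           command accepted; normally clear rqb_buffer
        	 *                 and repost the receive buffer
        	 *     -EOVERFLOW  transport is temporarily out of job contexts;
        	 *                 keep the receive buffer until the .defer_rcv
        	 *                 callback and post a replacement RQ buffer
        	 *     other       drop the IO: defer the context release and
        	 *                 issue an unsolicited abort for the exchange
        	 */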
   2231	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
   2232				  payload, ctxp->size);
   2233	/* Process FCP command */
   2234	if (rc == 0) {
   2235		atomic_inc(&tgtp->rcv_fcp_cmd_out);
   2236		spin_lock_irqsave(&ctxp->ctxlock, iflags);
   2237		if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) ||
   2238		    (nvmebuf != ctxp->rqb_buffer)) {
   2239			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
   2240			return;
   2241		}
   2242		ctxp->rqb_buffer = NULL;
   2243		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
   2244		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
   2245		return;
   2246	}
   2247
   2248	/* Processing of FCP command is deferred */
   2249	if (rc == -EOVERFLOW) {
   2250		lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
   2251				 "from %06x\n",
   2252				 ctxp->oxid, ctxp->size, ctxp->sid);
   2253		atomic_inc(&tgtp->rcv_fcp_cmd_out);
   2254		atomic_inc(&tgtp->defer_fod);
   2255		spin_lock_irqsave(&ctxp->ctxlock, iflags);
   2256		if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
   2257			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
   2258			return;
   2259		}
   2260		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
   2261		/*
   2262		 * Post a replacement DMA buffer to RQ and defer
   2263		 * freeing rcv buffer till .defer_rcv callback
   2264		 */
   2265		qno = nvmebuf->idx;
   2266		lpfc_post_rq_buffer(
   2267			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
   2268			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
   2269		return;
   2270	}
   2271	ctxp->flag &= ~LPFC_NVME_TNOTIFY;
   2272	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
   2273	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2274			"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
   2275			ctxp->oxid, rc,
   2276			atomic_read(&tgtp->rcv_fcp_cmd_in),
   2277			atomic_read(&tgtp->rcv_fcp_cmd_out),
   2278			atomic_read(&tgtp->xmt_fcp_release));
   2279	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
   2280			 ctxp->oxid, ctxp->size, ctxp->sid);
   2281	spin_lock_irqsave(&ctxp->ctxlock, iflags);
   2282	lpfc_nvmet_defer_release(phba, ctxp);
   2283	spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
   2284	lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
   2285#endif
   2286}
   2287
   2288static void
   2289lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
   2290{
   2291#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
   2292	struct lpfc_nvmet_ctxbuf *ctx_buf =
   2293		container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
   2294
   2295	lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
   2296#endif
   2297}
   2298
   2299static struct lpfc_nvmet_ctxbuf *
   2300lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
   2301			     struct lpfc_nvmet_ctx_info *current_infop)
   2302{
   2303#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
   2304	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
   2305	struct lpfc_nvmet_ctx_info *get_infop;
   2306	int i;
   2307
   2308	/*
    2309	 * The current_infop for the MRQ on which an NVME command IU was
    2310	 * received is empty. Our goal is to replenish this MRQ's context
    2311	 * list from another CPU's list.
    2312	 *
    2313	 * First we need to pick a context list to start looking on.
    2314	 * nvmet_ctx_start_cpu had available contexts the last time we
    2315	 * needed to replenish this CPU, while nvmet_ctx_next_cpu is
    2316	 * simply the next sequential CPU for this MRQ.
   2317	 */
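        	/*
        	 * Illustrative note, not driver code: assuming 4 CPUs and the
        	 * ring built in lpfc_nvmet_setup_io_context(), a replenish for
        	 * cpu2/mrq1 that last succeeded from cpu0 starts at cpu0/mrq1,
        	 * skips its own (empty) cpu2/mrq1 entry if reached, splices the
        	 * first non-empty list it finds into cpu2/mrq1, remembers that
        	 * CPU in nvmet_ctx_start_cpu, and returns one ctx_buf from the
        	 * newly spliced list.
        	 */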
   2318	if (current_infop->nvmet_ctx_start_cpu)
   2319		get_infop = current_infop->nvmet_ctx_start_cpu;
   2320	else
   2321		get_infop = current_infop->nvmet_ctx_next_cpu;
   2322
   2323	for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
   2324		if (get_infop == current_infop) {
   2325			get_infop = get_infop->nvmet_ctx_next_cpu;
   2326			continue;
   2327		}
   2328		spin_lock(&get_infop->nvmet_ctx_list_lock);
   2329
   2330		/* Just take the entire context list, if there are any */
   2331		if (get_infop->nvmet_ctx_list_cnt) {
   2332			list_splice_init(&get_infop->nvmet_ctx_list,
   2333				    &current_infop->nvmet_ctx_list);
   2334			current_infop->nvmet_ctx_list_cnt =
   2335				get_infop->nvmet_ctx_list_cnt - 1;
   2336			get_infop->nvmet_ctx_list_cnt = 0;
   2337			spin_unlock(&get_infop->nvmet_ctx_list_lock);
   2338
   2339			current_infop->nvmet_ctx_start_cpu = get_infop;
   2340			list_remove_head(&current_infop->nvmet_ctx_list,
   2341					 ctx_buf, struct lpfc_nvmet_ctxbuf,
   2342					 list);
   2343			return ctx_buf;
   2344		}
   2345
   2346		/* Otherwise, move on to the next CPU for this MRQ */
   2347		spin_unlock(&get_infop->nvmet_ctx_list_lock);
   2348		get_infop = get_infop->nvmet_ctx_next_cpu;
   2349	}
   2350
   2351#endif
   2352	/* Nothing found, all contexts for the MRQ are in-flight */
   2353	return NULL;
   2354}
   2355
   2356/**
   2357 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
   2358 * @phba: pointer to lpfc hba data structure.
   2359 * @idx: relative index of MRQ vector
   2360 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
   2361 * @isr_timestamp: in jiffies.
   2362 * @cqflag: cq processing information regarding workload.
   2363 *
    2364 * This routine is used for processing an unsolicited FCP command
    2365 * received on an NVMET MRQ. It pulls an exchange context from the
    2366 * per-CPU context list for this MRQ (replenishing from another CPU,
    2367 * or queueing the buffer on the IO wait list if none is available),
    2368 * initializes the context from the FC header, and hands the command
    2369 * to the nvmet transport, directly or via deferred work.
   2370 **/
   2371static void
   2372lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
   2373			    uint32_t idx,
   2374			    struct rqb_dmabuf *nvmebuf,
   2375			    uint64_t isr_timestamp,
   2376			    uint8_t cqflag)
   2377{
   2378	struct lpfc_async_xchg_ctx *ctxp;
   2379	struct lpfc_nvmet_tgtport *tgtp;
   2380	struct fc_frame_header *fc_hdr;
   2381	struct lpfc_nvmet_ctxbuf *ctx_buf;
   2382	struct lpfc_nvmet_ctx_info *current_infop;
   2383	uint32_t size, oxid, sid, qno;
   2384	unsigned long iflag;
   2385	int current_cpu;
   2386
   2387	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
   2388		return;
   2389
   2390	ctx_buf = NULL;
   2391	if (!nvmebuf || !phba->targetport) {
   2392		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2393				"6157 NVMET FCP Drop IO\n");
   2394		if (nvmebuf)
   2395			lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
   2396		return;
   2397	}
   2398
   2399	/*
   2400	 * Get a pointer to the context list for this MRQ based on
   2401	 * the CPU this MRQ IRQ is associated with. If the CPU association
   2402	 * changes from our initial assumption, the context list could
   2403	 * be empty, thus it would need to be replenished with the
   2404	 * context list from another CPU for this MRQ.
   2405	 */
   2406	current_cpu = raw_smp_processor_id();
   2407	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
   2408	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
   2409	if (current_infop->nvmet_ctx_list_cnt) {
   2410		list_remove_head(&current_infop->nvmet_ctx_list,
   2411				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
   2412		current_infop->nvmet_ctx_list_cnt--;
   2413	} else {
   2414		ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
   2415	}
   2416	spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
   2417
   2418	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
   2419	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
   2420	size = nvmebuf->bytes_recv;
   2421
   2422#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
   2423	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
   2424		this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
   2425		if (idx != current_cpu)
   2426			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
   2427					"6703 CPU Check rcv: "
   2428					"cpu %d expect %d\n",
   2429					current_cpu, idx);
   2430	}
   2431#endif
   2432
   2433	lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
   2434			 oxid, size, raw_smp_processor_id());
   2435
   2436	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
   2437
   2438	if (!ctx_buf) {
   2439		/* Queue this NVME IO to process later */
   2440		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
   2441		list_add_tail(&nvmebuf->hbuf.list,
   2442			      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
   2443		phba->sli4_hba.nvmet_io_wait_cnt++;
   2444		phba->sli4_hba.nvmet_io_wait_total++;
   2445		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
   2446				       iflag);
   2447
   2448		/* Post a brand new DMA buffer to RQ */
   2449		qno = nvmebuf->idx;
   2450		lpfc_post_rq_buffer(
   2451			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
   2452			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
   2453
   2454		atomic_inc(&tgtp->defer_ctx);
   2455		return;
   2456	}
   2457
   2458	sid = sli4_sid_from_fc_hdr(fc_hdr);
   2459
   2460	ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
   2461	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
   2462	list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
   2463	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
   2464	if (ctxp->state != LPFC_NVME_STE_FREE) {
   2465		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2466				"6414 NVMET Context corrupt %d %d oxid x%x\n",
   2467				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
   2468	}
   2469	ctxp->wqeq = NULL;
   2470	ctxp->offset = 0;
   2471	ctxp->phba = phba;
   2472	ctxp->size = size;
   2473	ctxp->oxid = oxid;
   2474	ctxp->sid = sid;
   2475	ctxp->idx = idx;
   2476	ctxp->state = LPFC_NVME_STE_RCV;
   2477	ctxp->entry_cnt = 1;
   2478	ctxp->flag = 0;
   2479	ctxp->ctxbuf = ctx_buf;
   2480	ctxp->rqb_buffer = (void *)nvmebuf;
   2481	ctxp->hdwq = NULL;
   2482	spin_lock_init(&ctxp->ctxlock);
   2483
   2484#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
   2485	if (isr_timestamp)
   2486		ctxp->ts_isr_cmd = isr_timestamp;
   2487	ctxp->ts_cmd_nvme = 0;
   2488	ctxp->ts_nvme_data = 0;
   2489	ctxp->ts_data_wqput = 0;
   2490	ctxp->ts_isr_data = 0;
   2491	ctxp->ts_data_nvme = 0;
   2492	ctxp->ts_nvme_status = 0;
   2493	ctxp->ts_status_wqput = 0;
   2494	ctxp->ts_isr_status = 0;
   2495	ctxp->ts_status_nvme = 0;
   2496#endif
   2497
   2498	atomic_inc(&tgtp->rcv_fcp_cmd_in);
   2499	/* check for cq processing load */
   2500	if (!cqflag) {
   2501		lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
   2502		return;
   2503	}
   2504
   2505	if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
   2506		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
   2507		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2508				"6325 Unable to queue work for oxid x%x. "
   2509				"FCP Drop IO [x%x x%x x%x]\n",
   2510				ctxp->oxid,
   2511				atomic_read(&tgtp->rcv_fcp_cmd_in),
   2512				atomic_read(&tgtp->rcv_fcp_cmd_out),
   2513				atomic_read(&tgtp->xmt_fcp_release));
   2514
   2515		spin_lock_irqsave(&ctxp->ctxlock, iflag);
   2516		lpfc_nvmet_defer_release(phba, ctxp);
   2517		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
   2518		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
   2519	}
   2520}
   2521
   2522/**
   2523 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
   2524 * @phba: pointer to lpfc hba data structure.
   2525 * @idx: relative index of MRQ vector
   2526 * @nvmebuf: pointer to received nvme data structure.
   2527 * @isr_timestamp: in jiffies.
   2528 * @cqflag: cq processing information regarding workload.
   2529 *
   2530 * This routine is used to process an unsolicited event received from a SLI
   2531 * (Service Level Interface) ring. The actual processing of the data buffer
   2532 * associated with the unsolicited event is done by invoking the routine
    2533 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
   2534 * SLI RQ on which the unsolicited event was received.
   2535 **/
   2536void
   2537lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
   2538			   uint32_t idx,
   2539			   struct rqb_dmabuf *nvmebuf,
   2540			   uint64_t isr_timestamp,
   2541			   uint8_t cqflag)
   2542{
   2543	if (!nvmebuf) {
   2544		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2545				"3167 NVMET FCP Drop IO\n");
   2546		return;
   2547	}
   2548	if (phba->nvmet_support == 0) {
   2549		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
   2550		return;
   2551	}
   2552	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
   2553}
   2554
   2555/**
   2556 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
    2557 * @phba: pointer to lpfc hba data structure.
   2558 * @ctxp: Context info for NVME LS Request
   2559 * @rspbuf: DMA buffer of NVME command.
   2560 * @rspsize: size of the NVME command.
   2561 *
    2562 * This routine allocates a lpfc WQE data structure (iocbq) from the
    2563 * driver's free list and prepares an XMIT_SEQUENCE64 WQE that is used
    2564 * to transmit an NVME LS response for the exchange described by @ctxp.
    2565 * It fills in the Buffer Descriptor Entry (BDE) for the response
    2566 * payload at @rspbuf of length @rspsize and sets up the common WQE
    2567 * words (context tag, XRI, request tag, received OXID, class and
    2568 * command type) from the exchange context and from the node that
    2569 * originated the request. The node (ndlp) for the exchange's S_ID
    2570 * must exist and be in the MAPPED or UNMAPPED state; otherwise no
    2571 * WQE is prepared. On success the reference count on the ndlp is
    2572 * incremented by 1 and the reference is stored in the WQE data
    2573 * structure so that the command's completion handler can access
    2574 * the node later.
   2575 *
   2576 * Return code
   2577 *   Pointer to the newly allocated/prepared nvme wqe data structure
   2578 *   NULL - when nvme wqe data structure allocation/preparation failed
   2579 **/
   2580static struct lpfc_iocbq *
   2581lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
   2582		       struct lpfc_async_xchg_ctx *ctxp,
   2583		       dma_addr_t rspbuf, uint16_t rspsize)
   2584{
   2585	struct lpfc_nodelist *ndlp;
   2586	struct lpfc_iocbq *nvmewqe;
   2587	union lpfc_wqe128 *wqe;
   2588
   2589	if (!lpfc_is_link_up(phba)) {
   2590		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2591				"6104 NVMET prep LS wqe: link err: "
   2592				"NPORT x%x oxid:x%x ste %d\n",
   2593				ctxp->sid, ctxp->oxid, ctxp->state);
   2594		return NULL;
   2595	}
   2596
   2597	/* Allocate buffer for  command wqe */
   2598	nvmewqe = lpfc_sli_get_iocbq(phba);
   2599	if (nvmewqe == NULL) {
   2600		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2601				"6105 NVMET prep LS wqe: No WQE: "
   2602				"NPORT x%x oxid x%x ste %d\n",
   2603				ctxp->sid, ctxp->oxid, ctxp->state);
   2604		return NULL;
   2605	}
   2606
   2607	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
   2608	if (!ndlp ||
   2609	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
   2610	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
   2611		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2612				"6106 NVMET prep LS wqe: No ndlp: "
   2613				"NPORT x%x oxid x%x ste %d\n",
   2614				ctxp->sid, ctxp->oxid, ctxp->state);
   2615		goto nvme_wqe_free_wqeq_exit;
   2616	}
   2617	ctxp->wqeq = nvmewqe;
   2618
   2619	/* prevent preparing wqe with NULL ndlp reference */
   2620	nvmewqe->ndlp = lpfc_nlp_get(ndlp);
   2621	if (!nvmewqe->ndlp)
   2622		goto nvme_wqe_free_wqeq_exit;
   2623	nvmewqe->context_un.axchg = ctxp;
   2624
   2625	wqe = &nvmewqe->wqe;
   2626	memset(wqe, 0, sizeof(union lpfc_wqe));
   2627
   2628	/* Words 0 - 2 */
   2629	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
   2630	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
   2631	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
   2632	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
   2633
   2634	/* Word 3 */
   2635
   2636	/* Word 4 */
   2637
   2638	/* Word 5 */
   2639	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
   2640	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
   2641	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
   2642	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
   2643	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
   2644
   2645	/* Word 6 */
   2646	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
   2647	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
   2648	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
   2649
   2650	/* Word 7 */
   2651	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
   2652	       CMD_XMIT_SEQUENCE64_WQE);
   2653	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
   2654	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
   2655	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
   2656
   2657	/* Word 8 */
   2658	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
   2659
   2660	/* Word 9 */
   2661	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
   2662	/* Needs to be set by caller */
   2663	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
   2664
   2665	/* Word 10 */
   2666	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
   2667	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
   2668	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
   2669	       LPFC_WQE_LENLOC_WORD12);
   2670	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
   2671
   2672	/* Word 11 */
   2673	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
   2674	       LPFC_WQE_CQ_ID_DEFAULT);
   2675	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
   2676	       OTHER_COMMAND);
   2677
   2678	/* Word 12 */
   2679	wqe->xmit_sequence.xmit_len = rspsize;
   2680
   2681	nvmewqe->retry = 1;
   2682	nvmewqe->vport = phba->pport;
   2683	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
   2684	nvmewqe->cmd_flag |= LPFC_IO_NVME_LS;
   2685
   2686	/* Xmit NVMET response to remote NPORT <did> */
   2687	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
   2688			"6039 Xmit NVMET LS response to remote "
   2689			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
   2690			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
   2691			rspsize);
   2692	return nvmewqe;
   2693
   2694nvme_wqe_free_wqeq_exit:
   2695	nvmewqe->context_un.axchg = NULL;
   2696	nvmewqe->ndlp = NULL;
   2697	nvmewqe->bpl_dmabuf = NULL;
   2698	lpfc_sli_release_iocbq(phba, nvmewqe);
   2699	return NULL;
   2700}
   2701
   2702
   2703static struct lpfc_iocbq *
   2704lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
   2705			struct lpfc_async_xchg_ctx *ctxp)
   2706{
   2707	struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req;
   2708	struct lpfc_nvmet_tgtport *tgtp;
   2709	struct sli4_sge *sgl;
   2710	struct lpfc_nodelist *ndlp;
   2711	struct lpfc_iocbq *nvmewqe;
   2712	struct scatterlist *sgel;
   2713	union lpfc_wqe128 *wqe;
   2714	struct ulp_bde64 *bde;
   2715	dma_addr_t physaddr;
   2716	int i, cnt, nsegs;
   2717	bool use_pbde = false;
   2718	int xc = 1;
   2719
   2720	if (!lpfc_is_link_up(phba)) {
   2721		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2722				"6107 NVMET prep FCP wqe: link err:"
   2723				"NPORT x%x oxid x%x ste %d\n",
   2724				ctxp->sid, ctxp->oxid, ctxp->state);
   2725		return NULL;
   2726	}
   2727
   2728	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
   2729	if (!ndlp ||
   2730	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
   2731	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
   2732		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2733				"6108 NVMET prep FCP wqe: no ndlp: "
   2734				"NPORT x%x oxid x%x ste %d\n",
   2735				ctxp->sid, ctxp->oxid, ctxp->state);
   2736		return NULL;
   2737	}
   2738
   2739	if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
   2740		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2741				"6109 NVMET prep FCP wqe: seg cnt err: "
   2742				"NPORT x%x oxid x%x ste %d cnt %d\n",
   2743				ctxp->sid, ctxp->oxid, ctxp->state,
   2744				phba->cfg_nvme_seg_cnt);
   2745		return NULL;
   2746	}
   2747	nsegs = rsp->sg_cnt;
   2748
   2749	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
   2750	nvmewqe = ctxp->wqeq;
   2751	if (nvmewqe == NULL) {
   2752		/* Allocate buffer for  command wqe */
   2753		nvmewqe = ctxp->ctxbuf->iocbq;
   2754		if (nvmewqe == NULL) {
   2755			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2756					"6110 NVMET prep FCP wqe: No "
   2757					"WQE: NPORT x%x oxid x%x ste %d\n",
   2758					ctxp->sid, ctxp->oxid, ctxp->state);
   2759			return NULL;
   2760		}
   2761		ctxp->wqeq = nvmewqe;
   2762		xc = 0; /* create new XRI */
   2763		nvmewqe->sli4_lxritag = NO_XRI;
   2764		nvmewqe->sli4_xritag = NO_XRI;
   2765	}
   2766
   2767	/* Sanity check */
   2768	if (((ctxp->state == LPFC_NVME_STE_RCV) &&
   2769	    (ctxp->entry_cnt == 1)) ||
   2770	    (ctxp->state == LPFC_NVME_STE_DATA)) {
   2771		wqe = &nvmewqe->wqe;
   2772	} else {
   2773		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2774				"6111 Wrong state NVMET FCP: %d  cnt %d\n",
   2775				ctxp->state, ctxp->entry_cnt);
   2776		return NULL;
   2777	}
   2778
   2779	sgl  = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
   2780	switch (rsp->op) {
   2781	case NVMET_FCOP_READDATA:
   2782	case NVMET_FCOP_READDATA_RSP:
   2783		/* From the tsend template, initialize words 7 - 11 */
   2784		memcpy(&wqe->words[7],
   2785		       &lpfc_tsend_cmd_template.words[7],
   2786		       sizeof(uint32_t) * 5);
   2787
   2788		/* Words 0 - 2 : The first sg segment */
   2789		sgel = &rsp->sg[0];
   2790		physaddr = sg_dma_address(sgel);
   2791		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
   2792		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
   2793		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
   2794		wqe->fcp_tsend.bde.addrHigh =
   2795			cpu_to_le32(putPaddrHigh(physaddr));
   2796
   2797		/* Word 3 */
   2798		wqe->fcp_tsend.payload_offset_len = 0;
   2799
   2800		/* Word 4 */
   2801		wqe->fcp_tsend.relative_offset = ctxp->offset;
   2802
   2803		/* Word 5 */
   2804		wqe->fcp_tsend.reserved = 0;
   2805
   2806		/* Word 6 */
   2807		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
   2808		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
   2809		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
   2810		       nvmewqe->sli4_xritag);
   2811
   2812		/* Word 7 - set ar later */
   2813
   2814		/* Word 8 */
   2815		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
   2816
   2817		/* Word 9 */
   2818		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
   2819		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
   2820
   2821		/* Word 10 - set wqes later, in template xc=1 */
   2822		if (!xc)
   2823			bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
   2824
   2825		/* Word 12 */
   2826		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
   2827
   2828		/* Setup 2 SKIP SGEs */
   2829		sgl->addr_hi = 0;
   2830		sgl->addr_lo = 0;
   2831		sgl->word2 = 0;
   2832		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
   2833		sgl->word2 = cpu_to_le32(sgl->word2);
   2834		sgl->sge_len = 0;
   2835		sgl++;
   2836		sgl->addr_hi = 0;
   2837		sgl->addr_lo = 0;
   2838		sgl->word2 = 0;
   2839		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
   2840		sgl->word2 = cpu_to_le32(sgl->word2);
   2841		sgl->sge_len = 0;
   2842		sgl++;
   2843		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
   2844			atomic_inc(&tgtp->xmt_fcp_read_rsp);
   2845
   2846			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
   2847
   2848			if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
   2849				if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
   2850					bf_set(wqe_sup,
   2851					       &wqe->fcp_tsend.wqe_com, 1);
   2852			} else {
   2853				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
   2854				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
   2855				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
   2856				       ((rsp->rsplen >> 2) - 1));
   2857				memcpy(&wqe->words[16], rsp->rspaddr,
   2858				       rsp->rsplen);
   2859			}
   2860		} else {
   2861			atomic_inc(&tgtp->xmt_fcp_read);
   2862
   2863			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
   2864			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
   2865		}
   2866		break;
   2867
   2868	case NVMET_FCOP_WRITEDATA:
   2869		/* From the treceive template, initialize words 3 - 11 */
   2870		memcpy(&wqe->words[3],
   2871		       &lpfc_treceive_cmd_template.words[3],
   2872		       sizeof(uint32_t) * 9);
   2873
   2874		/* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
   2875		wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
   2876		wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
   2877		wqe->fcp_treceive.bde.addrLow = 0;
   2878		wqe->fcp_treceive.bde.addrHigh = 0;
   2879
   2880		/* Word 4 */
   2881		wqe->fcp_treceive.relative_offset = ctxp->offset;
   2882
   2883		/* Word 6 */
   2884		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
   2885		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
   2886		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
   2887		       nvmewqe->sli4_xritag);
   2888
   2889		/* Word 7 */
   2890
   2891		/* Word 8 */
   2892		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
   2893
   2894		/* Word 9 */
   2895		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
   2896		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
   2897
   2898		/* Word 10 - in template xc=1 */
   2899		if (!xc)
   2900			bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
   2901
   2902		/* Word 11 - check for pbde */
   2903		if (nsegs == 1 && phba->cfg_enable_pbde) {
   2904			use_pbde = true;
   2905			/* Word 11 - PBDE bit already preset by template */
   2906		} else {
   2907			/* Overwrite default template setting */
   2908			bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
   2909		}
   2910
   2911		/* Word 12 */
   2912		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
   2913
   2914		/* Setup 2 SKIP SGEs */
   2915		sgl->addr_hi = 0;
   2916		sgl->addr_lo = 0;
   2917		sgl->word2 = 0;
   2918		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
   2919		sgl->word2 = cpu_to_le32(sgl->word2);
   2920		sgl->sge_len = 0;
   2921		sgl++;
   2922		sgl->addr_hi = 0;
   2923		sgl->addr_lo = 0;
   2924		sgl->word2 = 0;
   2925		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
   2926		sgl->word2 = cpu_to_le32(sgl->word2);
   2927		sgl->sge_len = 0;
   2928		sgl++;
   2929		atomic_inc(&tgtp->xmt_fcp_write);
   2930		break;
   2931
   2932	case NVMET_FCOP_RSP:
   2933		/* From the treceive template, initialize words 4 - 11 */
   2934		memcpy(&wqe->words[4],
   2935		       &lpfc_trsp_cmd_template.words[4],
   2936		       sizeof(uint32_t) * 8);
   2937
   2938		/* Words 0 - 2 */
   2939		physaddr = rsp->rspdma;
   2940		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
   2941		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
   2942		wqe->fcp_trsp.bde.addrLow =
   2943			cpu_to_le32(putPaddrLow(physaddr));
   2944		wqe->fcp_trsp.bde.addrHigh =
   2945			cpu_to_le32(putPaddrHigh(physaddr));
   2946
   2947		/* Word 3 */
   2948		wqe->fcp_trsp.response_len = rsp->rsplen;
   2949
   2950		/* Word 6 */
   2951		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
   2952		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
   2953		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
   2954		       nvmewqe->sli4_xritag);
   2955
   2956		/* Word 7 */
   2957
   2958		/* Word 8 */
   2959		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
   2960
   2961		/* Word 9 */
   2962		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
   2963		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
   2964
   2965		/* Word 10 */
   2966		if (xc)
   2967			bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
   2968
   2969		/* Word 11 */
   2970		/* In template wqes=0 irsp=0 irsplen=0 - good response */
   2971		if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
   2972			/* Bad response - embed it */
   2973			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
   2974			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
   2975			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
   2976			       ((rsp->rsplen >> 2) - 1));
   2977			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
   2978		}
   2979
   2980		/* Word 12 */
   2981		wqe->fcp_trsp.rsvd_12_15[0] = 0;
   2982
   2983		/* Use rspbuf, NOT sg list */
   2984		nsegs = 0;
   2985		sgl->word2 = 0;
   2986		atomic_inc(&tgtp->xmt_fcp_rsp);
   2987		break;
   2988
   2989	default:
   2990		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
   2991				"6064 Unknown Rsp Op %d\n",
   2992				rsp->op);
   2993		return NULL;
   2994	}
   2995
   2996	nvmewqe->retry = 1;
   2997	nvmewqe->vport = phba->pport;
   2998	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
   2999	nvmewqe->ndlp = ndlp;
   3000
   3001	for_each_sg(rsp->sg, sgel, nsegs, i) {
   3002		physaddr = sg_dma_address(sgel);
   3003		cnt = sg_dma_len(sgel);
   3004		sgl->addr_hi = putPaddrHigh(physaddr);
   3005		sgl->addr_lo = putPaddrLow(physaddr);
   3006		sgl->word2 = 0;
   3007		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
   3008		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
   3009		if ((i+1) == rsp->sg_cnt)
   3010			bf_set(lpfc_sli4_sge_last, sgl, 1);
   3011		sgl->word2 = cpu_to_le32(sgl->word2);
   3012		sgl->sge_len = cpu_to_le32(cnt);
   3013		sgl++;
   3014		ctxp->offset += cnt;
   3015	}
   3016
   3017	bde = (struct ulp_bde64 *)&wqe->words[13];
   3018	if (use_pbde) {
   3019		/* decrement sgl ptr backwards once to first data sge */
   3020		sgl--;
   3021
   3022		/* Words 13-15 (PBDE) */
   3023		bde->addrLow = sgl->addr_lo;
   3024		bde->addrHigh = sgl->addr_hi;
   3025		bde->tus.f.bdeSize = le32_to_cpu(sgl->sge_len);
   3026		bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
   3027		bde->tus.w = cpu_to_le32(bde->tus.w);
   3028	} else {
   3029		memset(bde, 0, sizeof(struct ulp_bde64));
   3030	}
   3031	ctxp->state = LPFC_NVME_STE_DATA;
   3032	ctxp->entry_cnt++;
   3033	return nvmewqe;
   3034}
   3035
   3036/**
   3037 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
   3038 * @phba: Pointer to HBA context object.
   3039 * @cmdwqe: Pointer to driver command WQE object.
   3040 * @rspwqe: Pointer to driver response WQE object.
   3041 *
   3042 * The function is called from SLI ring event handler with no
   3043 * lock held. This function is the completion handler for NVME ABTS for FCP cmds
   3044 * The function frees memory resources used for the NVME commands.
   3045 **/
   3046static void
   3047lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
   3048			     struct lpfc_iocbq *rspwqe)
   3049{
   3050	struct lpfc_async_xchg_ctx *ctxp;
   3051	struct lpfc_nvmet_tgtport *tgtp;
   3052	uint32_t result;
   3053	unsigned long flags;
   3054	bool released = false;
   3055	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
   3056
   3057	ctxp = cmdwqe->context_un.axchg;
   3058	result = wcqe->parameter;
   3059
   3060	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
   3061	if (ctxp->flag & LPFC_NVME_ABORT_OP)
   3062		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
   3063
   3064	spin_lock_irqsave(&ctxp->ctxlock, flags);
   3065	ctxp->state = LPFC_NVME_STE_DONE;
   3066
   3067	/* Check if we already received a free context call
   3068	 * and we have completed processing an abort situation.
   3069	 */
   3070	if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
   3071	    !(ctxp->flag & LPFC_NVME_XBUSY)) {
   3072		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
   3073		list_del_init(&ctxp->list);
   3074		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
   3075		released = true;
   3076	}
   3077	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
   3078	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
   3079	atomic_inc(&tgtp->xmt_abort_rsp);
   3080
   3081	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
   3082			"6165 ABORT cmpl: oxid x%x flg x%x (%d) "
   3083			"WCQE: %08x %08x %08x %08x\n",
   3084			ctxp->oxid, ctxp->flag, released,
   3085			wcqe->word0, wcqe->total_data_placed,
   3086			result, wcqe->word3);
   3087
   3088	cmdwqe->rsp_dmabuf = NULL;
   3089	cmdwqe->bpl_dmabuf = NULL;
   3090	/*
    3091	 * If the transport has already released the ctx, we can reuse it now.
    3092	 * Otherwise it will be recycled by the transport release call.
   3093	 */
   3094	if (released)
   3095		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
   3096
   3097	/* This is the iocbq for the abort, not the command */
   3098	lpfc_sli_release_iocbq(phba, cmdwqe);
   3099
   3100	/* Since iaab/iaar are NOT set, there is no work left.
   3101	 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
   3102	 * should have been called already.
   3103	 */
   3104}
   3105
   3106/**
   3107 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
   3108 * @phba: Pointer to HBA context object.
   3109 * @cmdwqe: Pointer to driver command WQE object.
   3110 * @rspwqe: Pointer to driver response WQE object.
   3111 *
   3112 * The function is called from SLI ring event handler with no
    3113 * lock held. This function is the completion handler for NVME ABTS for FCP
    3114 * cmds. The function frees memory resources used for the NVME commands.
   3115 **/
   3116static void
   3117lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
   3118			       struct lpfc_iocbq *rspwqe)
   3119{
   3120	struct lpfc_async_xchg_ctx *ctxp;
   3121	struct lpfc_nvmet_tgtport *tgtp;
   3122	unsigned long flags;
   3123	uint32_t result;
   3124	bool released = false;
   3125	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
   3126
   3127	ctxp = cmdwqe->context_un.axchg;
   3128	result = wcqe->parameter;
   3129
   3130	if (!ctxp) {
    3131		/* if context is clear, the related I/O already completed */
   3132		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
   3133				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
   3134				wcqe->word0, wcqe->total_data_placed,
   3135				result, wcqe->word3);
   3136		return;
   3137	}
   3138
   3139	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
   3140	spin_lock_irqsave(&ctxp->ctxlock, flags);
   3141	if (ctxp->flag & LPFC_NVME_ABORT_OP)
   3142		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
   3143
   3144	/* Sanity check */
   3145	if (ctxp->state != LPFC_NVME_STE_ABORT) {
   3146		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3147				"6112 ABTS Wrong state:%d oxid x%x\n",
   3148				ctxp->state, ctxp->oxid);
   3149	}
   3150
   3151	/* Check if we already received a free context call
   3152	 * and we have completed processing an abort situation.
   3153	 */
   3154	ctxp->state = LPFC_NVME_STE_DONE;
   3155	if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
   3156	    !(ctxp->flag & LPFC_NVME_XBUSY)) {
   3157		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
   3158		list_del_init(&ctxp->list);
   3159		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
   3160		released = true;
   3161	}
   3162	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
   3163	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
   3164	atomic_inc(&tgtp->xmt_abort_rsp);
   3165
   3166	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
   3167			"6316 ABTS cmpl oxid x%x flg x%x (%x) "
   3168			"WCQE: %08x %08x %08x %08x\n",
   3169			ctxp->oxid, ctxp->flag, released,
   3170			wcqe->word0, wcqe->total_data_placed,
   3171			result, wcqe->word3);
   3172
   3173	cmdwqe->rsp_dmabuf = NULL;
   3174	cmdwqe->bpl_dmabuf = NULL;
   3175	/*
    3176	 * If the transport has already released the ctx, we can reuse it now.
    3177	 * Otherwise it will be recycled by the transport release call.
   3178	 */
   3179	if (released)
   3180		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
   3181
   3182	/* Since iaab/iaar are NOT set, there is no work left.
   3183	 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
   3184	 * should have been called already.
   3185	 */
   3186}
   3187
   3188/**
   3189 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
   3190 * @phba: Pointer to HBA context object.
   3191 * @cmdwqe: Pointer to driver command WQE object.
   3192 * @rspwqe: Pointer to driver response WQE object.
   3193 *
   3194 * The function is called from SLI ring event handler with no
    3195 * lock held. This function is the completion handler for NVME ABTS for LS
    3196 * cmds. The function frees memory resources used for the NVME commands.
   3197 **/
   3198static void
   3199lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
   3200			    struct lpfc_iocbq *rspwqe)
   3201{
   3202	struct lpfc_async_xchg_ctx *ctxp;
   3203	struct lpfc_nvmet_tgtport *tgtp;
   3204	uint32_t result;
   3205	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
   3206
   3207	ctxp = cmdwqe->context_un.axchg;
   3208	result = wcqe->parameter;
   3209
   3210	if (phba->nvmet_support) {
   3211		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
   3212		atomic_inc(&tgtp->xmt_ls_abort_cmpl);
   3213	}
   3214
   3215	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
   3216			"6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
   3217			ctxp, wcqe->word0, wcqe->total_data_placed,
   3218			result, wcqe->word3);
   3219
   3220	if (!ctxp) {
   3221		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3222				"6415 NVMET LS Abort No ctx: WCQE: "
   3223				 "%08x %08x %08x %08x\n",
   3224				wcqe->word0, wcqe->total_data_placed,
   3225				result, wcqe->word3);
   3226
   3227		lpfc_sli_release_iocbq(phba, cmdwqe);
   3228		return;
   3229	}
   3230
   3231	if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
   3232		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3233				"6416 NVMET LS abort cmpl state mismatch: "
   3234				"oxid x%x: %d %d\n",
   3235				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
   3236	}
   3237
   3238	cmdwqe->rsp_dmabuf = NULL;
   3239	cmdwqe->bpl_dmabuf = NULL;
   3240	lpfc_sli_release_iocbq(phba, cmdwqe);
   3241	kfree(ctxp);
   3242}
   3243
   3244static int
   3245lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
   3246			     struct lpfc_async_xchg_ctx *ctxp,
   3247			     uint32_t sid, uint16_t xri)
   3248{
   3249	struct lpfc_nvmet_tgtport *tgtp = NULL;
   3250	struct lpfc_iocbq *abts_wqeq;
   3251	union lpfc_wqe128 *wqe_abts;
   3252	struct lpfc_nodelist *ndlp;
   3253
   3254	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
   3255			"6067 ABTS: sid %x xri x%x/x%x\n",
   3256			sid, xri, ctxp->wqeq->sli4_xritag);
   3257
   3258	if (phba->nvmet_support && phba->targetport)
   3259		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
   3260
   3261	ndlp = lpfc_findnode_did(phba->pport, sid);
   3262	if (!ndlp ||
   3263	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
   3264	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
   3265		if (tgtp)
   3266			atomic_inc(&tgtp->xmt_abort_rsp_error);
   3267		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3268				"6134 Drop ABTS - wrong NDLP state x%x.\n",
   3269				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
   3270
   3271		/* No failure to an ABTS request. */
   3272		return 0;
   3273	}
   3274
   3275	abts_wqeq = ctxp->wqeq;
   3276	wqe_abts = &abts_wqeq->wqe;
   3277
   3278	/*
   3279	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
   3280	 * that were initialized in lpfc_sli4_nvmet_alloc.
   3281	 */
   3282	memset(wqe_abts, 0, sizeof(union lpfc_wqe));
   3283
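        	/* The unsolicited abort is built as an XMIT_SEQUENCE64 WQE that
        	 * carries a BLS ABTS frame (R_CTL = BA_ABTS, TYPE = BLS) for the
        	 * given oxid, rather than as an ABORT_XRI WQE.
        	 */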
   3284	/* Word 5 */
   3285	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
   3286	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
   3287	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
   3288	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
   3289	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
   3290
   3291	/* Word 6 */
   3292	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
   3293	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
   3294	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
   3295	       abts_wqeq->sli4_xritag);
   3296
   3297	/* Word 7 */
   3298	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
   3299	       CMD_XMIT_SEQUENCE64_WQE);
   3300	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
   3301	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
   3302	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
   3303
   3304	/* Word 8 */
   3305	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
   3306
   3307	/* Word 9 */
   3308	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
   3309	/* Needs to be set by caller */
   3310	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
   3311
   3312	/* Word 10 */
   3313	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
   3314	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
   3315	       LPFC_WQE_LENLOC_WORD12);
   3316	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
   3317	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
   3318
   3319	/* Word 11 */
   3320	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
   3321	       LPFC_WQE_CQ_ID_DEFAULT);
   3322	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
   3323	       OTHER_COMMAND);
   3324
   3325	abts_wqeq->vport = phba->pport;
   3326	abts_wqeq->ndlp = ndlp;
   3327	abts_wqeq->context_un.axchg = ctxp;
   3328	abts_wqeq->bpl_dmabuf = NULL;
   3329	abts_wqeq->num_bdes = 0;
    3330	/* hba_wqidx should already be set up from the command we are aborting */
   3331	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
   3332	abts_wqeq->iocb.ulpLe = 1;
   3333
   3334	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
   3335			"6069 Issue ABTS to xri x%x reqtag x%x\n",
   3336			xri, abts_wqeq->iotag);
   3337	return 1;
   3338}
   3339
   3340/**
   3341 * lpfc_nvmet_prep_abort_wqe - set up 'abort' work queue entry.
   3342 * @pwqeq: Pointer to command iocb.
    3343 * @xritag: Tag that uniquely identifies the local exchange resource.
   3344 * @opt: Option bits -
   3345 *		bit 0 = inhibit sending abts on the link
   3346 *
   3347 * This function is called with hbalock held.
   3348 **/
   3349static void
   3350lpfc_nvmet_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
   3351{
   3352	union lpfc_wqe128 *wqe = &pwqeq->wqe;
   3353
   3354	/* WQEs are reused.  Clear stale data and set key fields to
   3355	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
   3356	 */
   3357	memset(wqe, 0, sizeof(*wqe));
   3358
   3359	if (opt & INHIBIT_ABORT)
   3360		bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
   3361	/* Abort specified xri tag, with the mask deliberately zeroed */
   3362	bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
   3363
   3364	bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
   3365
   3366	/* Abort the I/O associated with this outstanding exchange ID. */
   3367	wqe->abort_cmd.wqe_com.abort_tag = xritag;
   3368
   3369	/* iotag for the wqe completion. */
   3370	bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
   3371
   3372	bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
   3373	bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
   3374
   3375	bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
   3376	bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
   3377	bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
   3378}
   3379
   3380static int
   3381lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
   3382			       struct lpfc_async_xchg_ctx *ctxp,
   3383			       uint32_t sid, uint16_t xri)
   3384{
   3385	struct lpfc_nvmet_tgtport *tgtp;
   3386	struct lpfc_iocbq *abts_wqeq;
   3387	struct lpfc_nodelist *ndlp;
   3388	unsigned long flags;
   3389	u8 opt;
   3390	int rc;
   3391
   3392	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
   3393	if (!ctxp->wqeq) {
   3394		ctxp->wqeq = ctxp->ctxbuf->iocbq;
   3395		ctxp->wqeq->hba_wqidx = 0;
   3396	}
   3397
   3398	ndlp = lpfc_findnode_did(phba->pport, sid);
   3399	if (!ndlp ||
   3400	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
   3401	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
   3402		atomic_inc(&tgtp->xmt_abort_rsp_error);
   3403		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3404				"6160 Drop ABORT - wrong NDLP state x%x.\n",
   3405				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
   3406
   3407		/* No failure to an ABTS request. */
   3408		spin_lock_irqsave(&ctxp->ctxlock, flags);
   3409		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
   3410		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
   3411		return 0;
   3412	}
   3413
   3414	/* Issue ABTS for this WQE based on iotag */
   3415	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
   3416	spin_lock_irqsave(&ctxp->ctxlock, flags);
   3417	if (!ctxp->abort_wqeq) {
   3418		atomic_inc(&tgtp->xmt_abort_rsp_error);
   3419		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3420				"6161 ABORT failed: No wqeqs: "
   3421				"xri: x%x\n", ctxp->oxid);
   3422		/* No failure to an ABTS request. */
   3423		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
   3424		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
   3425		return 0;
   3426	}
   3427	abts_wqeq = ctxp->abort_wqeq;
   3428	ctxp->state = LPFC_NVME_STE_ABORT;
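        	/* If we already received an ABTS from the initiator
        	 * (LPFC_NVME_ABTS_RCV), inhibit sending our own ABTS on the link
        	 * and just abort the exchange locally.
        	 */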
   3429	opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
   3430	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
   3431
   3432	/* Announce entry to new IO submit field. */
   3433	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
   3434			"6162 ABORT Request to rport DID x%06x "
   3435			"for xri x%x x%x\n",
   3436			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
   3437
   3438	/* If the hba is getting reset, this flag is set.  It is
   3439	 * cleared when the reset is complete and rings reestablished.
   3440	 */
   3441	spin_lock_irqsave(&phba->hbalock, flags);
   3442	/* driver queued commands are in process of being flushed */
   3443	if (phba->hba_flag & HBA_IOQ_FLUSH) {
   3444		spin_unlock_irqrestore(&phba->hbalock, flags);
   3445		atomic_inc(&tgtp->xmt_abort_rsp_error);
   3446		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3447				"6163 Driver in reset cleanup - flushing "
   3448				"NVME Req now. hba_flag x%x oxid x%x\n",
   3449				phba->hba_flag, ctxp->oxid);
   3450		lpfc_sli_release_iocbq(phba, abts_wqeq);
   3451		spin_lock_irqsave(&ctxp->ctxlock, flags);
   3452		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
   3453		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
   3454		return 0;
   3455	}
   3456
   3457	/* Outstanding abort is in progress */
   3458	if (abts_wqeq->cmd_flag & LPFC_DRIVER_ABORTED) {
   3459		spin_unlock_irqrestore(&phba->hbalock, flags);
   3460		atomic_inc(&tgtp->xmt_abort_rsp_error);
   3461		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3462				"6164 Outstanding NVME I/O Abort Request "
   3463				"still pending on oxid x%x\n",
   3464				ctxp->oxid);
   3465		lpfc_sli_release_iocbq(phba, abts_wqeq);
   3466		spin_lock_irqsave(&ctxp->ctxlock, flags);
   3467		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
   3468		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
   3469		return 0;
   3470	}
   3471
   3472	/* Ready - mark outstanding as aborted by driver. */
   3473	abts_wqeq->cmd_flag |= LPFC_DRIVER_ABORTED;
   3474
   3475	lpfc_nvmet_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
   3476
   3477	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
   3478	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
   3479	abts_wqeq->cmd_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
   3480	abts_wqeq->cmd_flag |= LPFC_IO_NVME;
   3481	abts_wqeq->context_un.axchg = ctxp;
   3482	abts_wqeq->vport = phba->pport;
   3483	if (!ctxp->hdwq)
   3484		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
   3485
   3486	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
   3487	spin_unlock_irqrestore(&phba->hbalock, flags);
   3488	if (rc == WQE_SUCCESS) {
   3489		atomic_inc(&tgtp->xmt_abort_sol);
   3490		return 0;
   3491	}
   3492
   3493	atomic_inc(&tgtp->xmt_abort_rsp_error);
   3494	spin_lock_irqsave(&ctxp->ctxlock, flags);
   3495	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
   3496	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
   3497	lpfc_sli_release_iocbq(phba, abts_wqeq);
   3498	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3499			"6166 Failed ABORT issue_wqe with status x%x "
   3500			"for oxid x%x.\n",
   3501			rc, ctxp->oxid);
   3502	return 1;
   3503}
   3504
   3505static int
   3506lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
   3507				 struct lpfc_async_xchg_ctx *ctxp,
   3508				 uint32_t sid, uint16_t xri)
   3509{
   3510	struct lpfc_nvmet_tgtport *tgtp;
   3511	struct lpfc_iocbq *abts_wqeq;
   3512	unsigned long flags;
   3513	bool released = false;
   3514	int rc;
   3515
   3516	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
   3517	if (!ctxp->wqeq) {
   3518		ctxp->wqeq = ctxp->ctxbuf->iocbq;
   3519		ctxp->wqeq->hba_wqidx = 0;
   3520	}
   3521
   3522	if (ctxp->state == LPFC_NVME_STE_FREE) {
   3523		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3524				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
   3525				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
   3526		rc = WQE_BUSY;
   3527		goto aerr;
   3528	}
   3529	ctxp->state = LPFC_NVME_STE_ABORT;
   3530	ctxp->entry_cnt++;
   3531	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
   3532	if (rc == 0)
   3533		goto aerr;
   3534
   3535	spin_lock_irqsave(&phba->hbalock, flags);
   3536	abts_wqeq = ctxp->wqeq;
   3537	abts_wqeq->cmd_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
   3538	abts_wqeq->cmd_flag |= LPFC_IO_NVMET;
   3539	if (!ctxp->hdwq)
   3540		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
   3541
   3542	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
   3543	spin_unlock_irqrestore(&phba->hbalock, flags);
   3544	if (rc == WQE_SUCCESS) {
   3545		return 0;
   3546	}
   3547
   3548aerr:
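        	/* Failure path: no abort completion will run, so if the transport
        	 * has already requested a context release (LPFC_NVME_CTX_RLS),
        	 * pull the context off the aborted-buffer list and repost it below.
        	 */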
   3549	spin_lock_irqsave(&ctxp->ctxlock, flags);
   3550	if (ctxp->flag & LPFC_NVME_CTX_RLS) {
   3551		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
   3552		list_del_init(&ctxp->list);
   3553		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
   3554		released = true;
   3555	}
   3556	ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
   3557	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
   3558
   3559	atomic_inc(&tgtp->xmt_abort_rsp_error);
   3560	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3561			"6135 Failed to Issue ABTS for oxid x%x. Status x%x "
   3562			"(%x)\n",
   3563			ctxp->oxid, rc, released);
   3564	if (released)
   3565		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
   3566	return 1;
   3567}
   3568
   3569/**
   3570 * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received
   3571 *        via async frame receive where the frame is not handled.
   3572 * @phba: pointer to adapter structure
    3573 * @ctxp: pointer to the asynchronously received sequence
   3574 * @sid: address of the remote port to send the ABTS to
    3575 * @xri: oxid value for the ABTS (the other side's exchange id).
   3576 **/
   3577int
   3578lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
   3579				struct lpfc_async_xchg_ctx *ctxp,
   3580				uint32_t sid, uint16_t xri)
   3581{
   3582	struct lpfc_nvmet_tgtport *tgtp = NULL;
   3583	struct lpfc_iocbq *abts_wqeq;
   3584	unsigned long flags;
   3585	int rc;
   3586
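        	/* An LS abort is only expected after LS_RCV (first entry) or
        	 * LS_RSP (second entry); any other state is logged but the abort
        	 * is still attempted.
        	 */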
   3587	if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
   3588	    (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
   3589		ctxp->state = LPFC_NVME_STE_LS_ABORT;
   3590		ctxp->entry_cnt++;
   3591	} else {
   3592		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3593				"6418 NVMET LS abort state mismatch "
   3594				"IO x%x: %d %d\n",
   3595				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
   3596		ctxp->state = LPFC_NVME_STE_LS_ABORT;
   3597	}
   3598
   3599	if (phba->nvmet_support && phba->targetport)
   3600		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
   3601
   3602	if (!ctxp->wqeq) {
   3603		/* Issue ABTS for this WQE based on iotag */
   3604		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
   3605		if (!ctxp->wqeq) {
   3606			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3607					"6068 Abort failed: No wqeqs: "
   3608					"xri: x%x\n", xri);
   3609			/* No failure to an ABTS request. */
   3610			kfree(ctxp);
   3611			return 0;
   3612		}
   3613	}
   3614	abts_wqeq = ctxp->wqeq;
   3615
   3616	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
   3617		rc = WQE_BUSY;
   3618		goto out;
   3619	}
   3620
   3621	spin_lock_irqsave(&phba->hbalock, flags);
   3622	abts_wqeq->cmd_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
   3623	abts_wqeq->cmd_flag |=  LPFC_IO_NVME_LS;
   3624	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
   3625	spin_unlock_irqrestore(&phba->hbalock, flags);
   3626	if (rc == WQE_SUCCESS) {
   3627		if (tgtp)
   3628			atomic_inc(&tgtp->xmt_abort_unsol);
   3629		return 0;
   3630	}
   3631out:
   3632	if (tgtp)
   3633		atomic_inc(&tgtp->xmt_abort_rsp_error);
   3634	abts_wqeq->rsp_dmabuf = NULL;
   3635	abts_wqeq->bpl_dmabuf = NULL;
   3636	lpfc_sli_release_iocbq(phba, abts_wqeq);
   3637	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3638			"6056 Failed to Issue ABTS. Status x%x\n", rc);
   3639	return 1;
   3640}
   3641
   3642/**
   3643 * lpfc_nvmet_invalidate_host
   3644 *
   3645 * @phba: pointer to the driver instance bound to an adapter port.
   3646 * @ndlp: pointer to an lpfc_nodelist type
   3647 *
   3648 * This routine upcalls the nvmet transport to invalidate an NVME
   3649 * host to which this target instance had active connections.
   3650 */
   3651void
   3652lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
   3653{
   3654	u32 ndlp_has_hh;
   3655	struct lpfc_nvmet_tgtport *tgtp;
   3656
   3657	lpfc_printf_log(phba, KERN_INFO,
   3658			LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
   3659			"6203 Invalidating hosthandle x%px\n",
   3660			ndlp);
   3661
   3662	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
   3663	atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);
   3664
   3665	spin_lock_irq(&ndlp->lock);
   3666	ndlp_has_hh = ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH;
   3667	spin_unlock_irq(&ndlp->lock);
   3668
   3669	/* Do not invalidate any nodes that do not have a hosthandle.
    3670	 * The host_release callback will cause a node reference
   3671	 * count imbalance and a crash.
   3672	 */
   3673	if (!ndlp_has_hh) {
   3674		lpfc_printf_log(phba, KERN_INFO,
   3675				LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
   3676				"6204 Skip invalidate on node x%px DID x%x\n",
   3677				ndlp, ndlp->nlp_DID);
   3678		return;
   3679	}
   3680
   3681#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
    3682	/* Need to get the nvmet_fc_target_port pointer here. */
   3683	nvmet_fc_invalidate_host(phba->targetport, ndlp);
   3684#endif
   3685}