cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

lpfc_sli.c (688902B)


      1/*******************************************************************
      2 * This file is part of the Emulex Linux Device Driver for         *
      3 * Fibre Channel Host Bus Adapters.                                *
      4 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
      5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
      6 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
      7 * EMULEX and SLI are trademarks of Emulex.                        *
      8 * www.broadcom.com                                                *
      9 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
     10 *                                                                 *
     11 * This program is free software; you can redistribute it and/or   *
     12 * modify it under the terms of version 2 of the GNU General       *
     13 * Public License as published by the Free Software Foundation.    *
     14 * This program is distributed in the hope that it will be useful. *
     15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
     16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
     17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
     18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
     19 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
     20 * more details, a copy of which can be found in the file COPYING  *
     21 * included with this package.                                     *
     22 *******************************************************************/
     23
     24#include <linux/blkdev.h>
     25#include <linux/pci.h>
     26#include <linux/interrupt.h>
     27#include <linux/delay.h>
     28#include <linux/slab.h>
     29#include <linux/lockdep.h>
     30
     31#include <scsi/scsi.h>
     32#include <scsi/scsi_cmnd.h>
     33#include <scsi/scsi_device.h>
     34#include <scsi/scsi_host.h>
     35#include <scsi/scsi_transport_fc.h>
     36#include <scsi/fc/fc_fs.h>
     37#include <linux/aer.h>
     38#include <linux/crash_dump.h>
     39#ifdef CONFIG_X86
     40#include <asm/set_memory.h>
     41#endif
     42
     43#include "lpfc_hw4.h"
     44#include "lpfc_hw.h"
     45#include "lpfc_sli.h"
     46#include "lpfc_sli4.h"
     47#include "lpfc_nl.h"
     48#include "lpfc_disc.h"
     49#include "lpfc.h"
     50#include "lpfc_scsi.h"
     51#include "lpfc_nvme.h"
     52#include "lpfc_crtn.h"
     53#include "lpfc_logmsg.h"
     54#include "lpfc_compat.h"
     55#include "lpfc_debugfs.h"
     56#include "lpfc_vport.h"
     57#include "lpfc_version.h"
     58
     59/* There are only four IOCB completion types. */
     60typedef enum _lpfc_iocb_type {
     61	LPFC_UNKNOWN_IOCB,
     62	LPFC_UNSOL_IOCB,
     63	LPFC_SOL_IOCB,
     64	LPFC_ABORT_IOCB
     65} lpfc_iocb_type;
     66
     67
     68/* Provide function prototypes local to this module. */
     69static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
     70				  uint32_t);
     71static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
     72			      uint8_t *, uint32_t *);
     73static struct lpfc_iocbq *
     74lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
     75				  struct lpfc_iocbq *rspiocbq);
     76static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
     77				      struct hbq_dmabuf *);
     78static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
     79					  struct hbq_dmabuf *dmabuf);
     80static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
     81				   struct lpfc_queue *cq, struct lpfc_cqe *cqe);
     82static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
     83				       int);
     84static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
     85				     struct lpfc_queue *eq,
     86				     struct lpfc_eqe *eqe);
     87static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
     88static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
     89static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
     90static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
     91				    struct lpfc_queue *cq,
     92				    struct lpfc_cqe *cqe);
     93static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
     94				 struct lpfc_iocbq *pwqeq,
     95				 struct lpfc_sglq *sglq);
     96
     97union lpfc_wqe128 lpfc_iread_cmd_template;
     98union lpfc_wqe128 lpfc_iwrite_cmd_template;
     99union lpfc_wqe128 lpfc_icmnd_cmd_template;
    100
    101/* Setup WQE templates for IOs */
    102void lpfc_wqe_cmd_template(void)
    103{
    104	union lpfc_wqe128 *wqe;
    105
    106	/* IREAD template */
    107	wqe = &lpfc_iread_cmd_template;
    108	memset(wqe, 0, sizeof(union lpfc_wqe128));
    109
    110	/* Word 0, 1, 2 - BDE is variable */
    111
    112	/* Word 3 - cmd_buff_len, payload_offset_len is zero */
    113
    114	/* Word 4 - total_xfer_len is variable */
    115
    116	/* Word 5 - is zero */
    117
    118	/* Word 6 - ctxt_tag, xri_tag is variable */
    119
    120	/* Word 7 */
    121	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
    122	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
    123	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
    124	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);
    125
    126	/* Word 8 - abort_tag is variable */
    127
    128	/* Word 9  - reqtag is variable */
    129
    130	/* Word 10 - dbde, wqes is variable */
    131	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
    132	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
    133	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
    134	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
    135	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
    136
    137	/* Word 11 - pbde is variable */
    138	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
    139	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
    140	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
    141
    142	/* Word 12 - is zero */
    143
    144	/* Word 13, 14, 15 - PBDE is variable */
    145
    146	/* IWRITE template */
    147	wqe = &lpfc_iwrite_cmd_template;
    148	memset(wqe, 0, sizeof(union lpfc_wqe128));
    149
    150	/* Word 0, 1, 2 - BDE is variable */
    151
    152	/* Word 3 - cmd_buff_len, payload_offset_len is zero */
    153
    154	/* Word 4 - total_xfer_len is variable */
    155
    156	/* Word 5 - initial_xfer_len is variable */
    157
    158	/* Word 6 - ctxt_tag, xri_tag is variable */
    159
    160	/* Word 7 */
    161	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
    162	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
    163	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
    164	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);
    165
    166	/* Word 8 - abort_tag is variable */
    167
    168	/* Word 9  - reqtag is variable */
    169
    170	/* Word 10 - dbde, wqes is variable */
    171	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
    172	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
    173	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
    174	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
    175	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
    176
    177	/* Word 11 - pbde is variable */
    178	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
    179	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
    180	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
    181
    182	/* Word 12 - is zero */
    183
    184	/* Word 13, 14, 15 - PBDE is variable */
    185
    186	/* ICMND template */
    187	wqe = &lpfc_icmnd_cmd_template;
    188	memset(wqe, 0, sizeof(union lpfc_wqe128));
    189
    190	/* Word 0, 1, 2 - BDE is variable */
    191
    192	/* Word 3 - payload_offset_len is variable */
    193
    194	/* Word 4, 5 - is zero */
    195
    196	/* Word 6 - ctxt_tag, xri_tag is variable */
    197
    198	/* Word 7 */
    199	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
    200	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
    201	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
    202	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);
    203
    204	/* Word 8 - abort_tag is variable */
    205
    206	/* Word 9  - reqtag is variable */
    207
    208	/* Word 10 - dbde, wqes is variable */
    209	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
    210	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
    211	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
    212	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
    213	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
    214
    215	/* Word 11 */
    216	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
    217	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
    218	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);
    219
    220	/* Word 12, 13, 14, 15 - is zero */
    221}
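
The templates above only pre-set the fields that are constant for every FCP IREAD, IWRITE and ICMND WQE; the variable words (BDE, transfer length, tags) are filled in per IO. As a rough, hypothetical sketch of how an issue path might consume the IREAD template (the helper name and parameters are invented; the bf_set field names are assumed from lpfc_hw4.h):

	/* Sketch only: build one IREAD WQE starting from the shared template. */
	static void prep_iread_wqe(union lpfc_wqe128 *wqe, u16 xri, u16 iotag,
				   u32 xfer_len)
	{
		/* copy the constant part set up by lpfc_wqe_cmd_template() */
		memcpy(wqe, &lpfc_iread_cmd_template, sizeof(*wqe));

		/* then fill the per-IO variable words noted in the comments above */
		wqe->fcp_iread.total_xfer_len = xfer_len;		/* Word 4 */
		bf_set(wqe_xri_tag, &wqe->fcp_iread.wqe_com, xri);	/* Word 6 */
		bf_set(wqe_reqtag, &wqe->fcp_iread.wqe_com, iotag);	/* Word 9 */
	}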
    222
    223#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
    224/**
    225 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
    226 * @srcp: Source memory pointer.
    227 * @destp: Destination memory pointer.
     228 * @cnt: Number of bytes required to be copied.
    229 *       Must be a multiple of sizeof(uint64_t)
    230 *
    231 * This function is used for copying data between driver memory
    232 * and the SLI WQ. This function also changes the endianness
    233 * of each word if native endianness is different from SLI
    234 * endianness. This function can be called with or without
    235 * lock.
    236 **/
    237static void
    238lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
    239{
    240	uint64_t *src = srcp;
    241	uint64_t *dest = destp;
    242	int i;
    243
    244	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
    245		*dest++ = *src++;
    246}
    247#else
    248#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
    249#endif
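
On 64-bit little-endian hosts the helper above degenerates to straight 64-bit loads and stores; on other configurations it falls back to lpfc_sli_pcimem_bcopy(), which also fixes up word endianness as the comment describes. @cnt is a byte count, so, as a hedged example, copying a full 128-byte WQE image takes sixteen iterations of the loop:

	union lpfc_wqe128 src, dst;	/* hypothetical 128-byte WQE images */

	/* 128 bytes / sizeof(uint64_t) = 16 copies on the fast path */
	lpfc_sli4_pcimem_bcopy(&src, &dst, sizeof(src));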
    250
    251/**
     252 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
    253 * @q: The Work Queue to operate on.
    254 * @wqe: The work Queue Entry to put on the Work queue.
    255 *
    256 * This routine will copy the contents of @wqe to the next available entry on
    257 * the @q. This function will then ring the Work Queue Doorbell to signal the
    258 * HBA to start processing the Work Queue Entry. This function returns 0 if
     259 * successful. If no entries are available on @q then this function will return
     260 * -EBUSY; a return of -ENOMEM indicates invalid queue memory.
    261 * The caller is expected to hold the hbalock when calling this routine.
    262 **/
    263static int
    264lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
    265{
    266	union lpfc_wqe *temp_wqe;
    267	struct lpfc_register doorbell;
    268	uint32_t host_index;
    269	uint32_t idx;
    270	uint32_t i = 0;
    271	uint8_t *tmp;
    272	u32 if_type;
    273
    274	/* sanity check on queue memory */
    275	if (unlikely(!q))
    276		return -ENOMEM;
    277
    278	temp_wqe = lpfc_sli4_qe(q, q->host_index);
    279
    280	/* If the host has not yet processed the next entry then we are done */
    281	idx = ((q->host_index + 1) % q->entry_count);
    282	if (idx == q->hba_index) {
    283		q->WQ_overflow++;
    284		return -EBUSY;
    285	}
    286	q->WQ_posted++;
    287	/* set consumption flag every once in a while */
    288	if (!((q->host_index + 1) % q->notify_interval))
    289		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
    290	else
    291		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
    292	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
    293		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
    294	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
    295	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
     296		/* write to DPP aperture taking advantage of Combined Writes */
    297		tmp = (uint8_t *)temp_wqe;
    298#ifdef __raw_writeq
    299		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
    300			__raw_writeq(*((uint64_t *)(tmp + i)),
    301					q->dpp_regaddr + i);
    302#else
    303		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
    304			__raw_writel(*((uint32_t *)(tmp + i)),
    305					q->dpp_regaddr + i);
    306#endif
    307	}
    308	/* ensure WQE bcopy and DPP flushed before doorbell write */
    309	wmb();
    310
    311	/* Update the host index before invoking device */
    312	host_index = q->host_index;
    313
    314	q->host_index = idx;
    315
    316	/* Ring Doorbell */
    317	doorbell.word0 = 0;
    318	if (q->db_format == LPFC_DB_LIST_FORMAT) {
    319		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
    320			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
    321			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
    322			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
    323			    q->dpp_id);
    324			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
    325			    q->queue_id);
    326		} else {
    327			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
    328			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
    329
    330			/* Leave bits <23:16> clear for if_type 6 dpp */
    331			if_type = bf_get(lpfc_sli_intf_if_type,
    332					 &q->phba->sli4_hba.sli_intf);
    333			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
    334				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
    335				       host_index);
    336		}
    337	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
    338		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
    339		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
    340	} else {
    341		return -EINVAL;
    342	}
    343	writel(doorbell.word0, q->db_regaddr);
    344
    345	return 0;
    346}
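
Callers are expected to hold the hbalock and to treat -EBUSY as a transient, full-queue condition rather than an error. A minimal caller sketch (the phba, wq and wqe128 locals are assumed to come from the surrounding submit path):

	/* Sketch only: post one WQE while holding the hbalock. */
	int rc;

	lockdep_assert_held(&phba->hbalock);
	rc = lpfc_sli4_wq_put(wq, &wqe128);
	if (rc == -EBUSY) {
		/* no free entries: keep the request queued and retry later */
	} else if (rc) {
		/* -ENOMEM / -EINVAL: queue memory or format is not usable */
	}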
    347
    348/**
    349 * lpfc_sli4_wq_release - Updates internal hba index for WQ
    350 * @q: The Work Queue to operate on.
    351 * @index: The index to advance the hba index to.
    352 *
    353 * This routine will update the HBA index of a queue to reflect consumption of
    354 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
    355 * an entry the host calls this function to update the queue's internal
    356 * pointers.
    357 **/
    358static void
    359lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
    360{
    361	/* sanity check on queue memory */
    362	if (unlikely(!q))
    363		return;
    364
    365	q->hba_index = index;
    366}
    367
    368/**
     369 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
     370 * @q: The Mailbox Queue to operate on.
     371 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
     372 *
     373 * This routine will copy the contents of @mqe to the next available entry on
     374 * the @q. This function will then ring the Mailbox Queue Doorbell to signal the
     375 * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
    376 * successful. If no entries are available on @q then this function will return
    377 * -ENOMEM.
    378 * The caller is expected to hold the hbalock when calling this routine.
    379 **/
    380static uint32_t
    381lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
    382{
    383	struct lpfc_mqe *temp_mqe;
    384	struct lpfc_register doorbell;
    385
    386	/* sanity check on queue memory */
    387	if (unlikely(!q))
    388		return -ENOMEM;
    389	temp_mqe = lpfc_sli4_qe(q, q->host_index);
    390
    391	/* If the host has not yet processed the next entry then we are done */
    392	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
    393		return -ENOMEM;
    394	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
    395	/* Save off the mailbox pointer for completion */
    396	q->phba->mbox = (MAILBOX_t *)temp_mqe;
    397
    398	/* Update the host index before invoking device */
    399	q->host_index = ((q->host_index + 1) % q->entry_count);
    400
    401	/* Ring Doorbell */
    402	doorbell.word0 = 0;
    403	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
    404	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
    405	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
    406	return 0;
    407}
    408
    409/**
    410 * lpfc_sli4_mq_release - Updates internal hba index for MQ
    411 * @q: The Mailbox Queue to operate on.
    412 *
    413 * This routine will update the HBA index of a queue to reflect consumption of
    414 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
    415 * an entry the host calls this function to update the queue's internal
    416 * pointers. This routine returns the number of entries that were consumed by
    417 * the HBA.
    418 **/
    419static uint32_t
    420lpfc_sli4_mq_release(struct lpfc_queue *q)
    421{
    422	/* sanity check on queue memory */
    423	if (unlikely(!q))
    424		return 0;
    425
    426	/* Clear the mailbox pointer for completion */
    427	q->phba->mbox = NULL;
    428	q->hba_index = ((q->hba_index + 1) % q->entry_count);
    429	return 1;
    430}
    431
    432/**
     433 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
    434 * @q: The Event Queue to get the first valid EQE from
    435 *
    436 * This routine will get the first valid Event Queue Entry from @q, update
    437 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
    438 * the Queue (no more work to do), or the Queue is full of EQEs that have been
    439 * processed, but not popped back to the HBA then this routine will return NULL.
    440 **/
    441static struct lpfc_eqe *
    442lpfc_sli4_eq_get(struct lpfc_queue *q)
    443{
    444	struct lpfc_eqe *eqe;
    445
    446	/* sanity check on queue memory */
    447	if (unlikely(!q))
    448		return NULL;
    449	eqe = lpfc_sli4_qe(q, q->host_index);
    450
    451	/* If the next EQE is not valid then we are done */
    452	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
    453		return NULL;
    454
    455	/*
    456	 * insert barrier for instruction interlock : data from the hardware
    457	 * must have the valid bit checked before it can be copied and acted
    458	 * upon. Speculative instructions were allowing a bcopy at the start
    459	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
    460	 * after our return, to copy data before the valid bit check above
    461	 * was done. As such, some of the copied data was stale. The barrier
    462	 * ensures the check is before any data is copied.
    463	 */
    464	mb();
    465	return eqe;
    466}
    467
    468/**
    469 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
     470 * @q: The Event Queue to disable interrupts on
    471 *
    472 **/
    473void
    474lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
    475{
    476	struct lpfc_register doorbell;
    477
    478	doorbell.word0 = 0;
    479	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
    480	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
    481	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
    482		(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
    483	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
    484	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
    485}
    486
    487/**
    488 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
     489 * @q: The Event Queue to disable interrupts on
    490 *
    491 **/
    492void
    493lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
    494{
    495	struct lpfc_register doorbell;
    496
    497	doorbell.word0 = 0;
    498	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
    499	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
    500}
    501
    502/**
    503 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
    504 * @phba: adapter with EQ
    505 * @q: The Event Queue that the host has completed processing for.
    506 * @count: Number of elements that have been consumed
     507 * @arm: Indicates whether the host wants to arm this EQ.
    508 *
    509 * This routine will notify the HBA, by ringing the doorbell, that count
    510 * number of EQEs have been processed. The @arm parameter indicates whether
    511 * the queue should be rearmed when ringing the doorbell.
    512 **/
    513void
    514lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
    515		     uint32_t count, bool arm)
    516{
    517	struct lpfc_register doorbell;
    518
    519	/* sanity check on queue memory */
    520	if (unlikely(!q || (count == 0 && !arm)))
    521		return;
    522
    523	/* ring doorbell for number popped */
    524	doorbell.word0 = 0;
    525	if (arm) {
    526		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
    527		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
    528	}
    529	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
    530	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
    531	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
    532			(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
    533	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
    534	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
    535	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
    536	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
    537		readl(q->phba->sli4_hba.EQDBregaddr);
    538}
    539
    540/**
    541 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
    542 * @phba: adapter with EQ
    543 * @q: The Event Queue that the host has completed processing for.
    544 * @count: Number of elements that have been consumed
     545 * @arm: Indicates whether the host wants to arm this EQ.
    546 *
    547 * This routine will notify the HBA, by ringing the doorbell, that count
    548 * number of EQEs have been processed. The @arm parameter indicates whether
    549 * the queue should be rearmed when ringing the doorbell.
    550 **/
    551void
    552lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
    553			  uint32_t count, bool arm)
    554{
    555	struct lpfc_register doorbell;
    556
    557	/* sanity check on queue memory */
    558	if (unlikely(!q || (count == 0 && !arm)))
    559		return;
    560
    561	/* ring doorbell for number popped */
    562	doorbell.word0 = 0;
    563	if (arm)
    564		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
    565	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
    566	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
    567	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
    568	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
    569	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
    570		readl(q->phba->sli4_hba.EQDBregaddr);
    571}
    572
    573static void
    574__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
    575			struct lpfc_eqe *eqe)
    576{
    577	if (!phba->sli4_hba.pc_sli4_params.eqav)
    578		bf_set_le32(lpfc_eqe_valid, eqe, 0);
    579
    580	eq->host_index = ((eq->host_index + 1) % eq->entry_count);
    581
    582	/* if the index wrapped around, toggle the valid bit */
    583	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
    584		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
    585}
    586
    587static void
    588lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
    589{
    590	struct lpfc_eqe *eqe = NULL;
    591	u32 eq_count = 0, cq_count = 0;
    592	struct lpfc_cqe *cqe = NULL;
    593	struct lpfc_queue *cq = NULL, *childq = NULL;
    594	int cqid = 0;
    595
    596	/* walk all the EQ entries and drop on the floor */
    597	eqe = lpfc_sli4_eq_get(eq);
    598	while (eqe) {
    599		/* Get the reference to the corresponding CQ */
    600		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
    601		cq = NULL;
    602
    603		list_for_each_entry(childq, &eq->child_list, list) {
    604			if (childq->queue_id == cqid) {
    605				cq = childq;
    606				break;
    607			}
    608		}
    609		/* If CQ is valid, iterate through it and drop all the CQEs */
    610		if (cq) {
    611			cqe = lpfc_sli4_cq_get(cq);
    612			while (cqe) {
    613				__lpfc_sli4_consume_cqe(phba, cq, cqe);
    614				cq_count++;
    615				cqe = lpfc_sli4_cq_get(cq);
    616			}
    617			/* Clear and re-arm the CQ */
    618			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
    619			    LPFC_QUEUE_REARM);
    620			cq_count = 0;
    621		}
    622		__lpfc_sli4_consume_eqe(phba, eq, eqe);
    623		eq_count++;
    624		eqe = lpfc_sli4_eq_get(eq);
    625	}
    626
    627	/* Clear and re-arm the EQ */
    628	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
    629}
    630
    631static int
    632lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
    633		     uint8_t rearm)
    634{
    635	struct lpfc_eqe *eqe;
    636	int count = 0, consumed = 0;
    637
    638	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
    639		goto rearm_and_exit;
    640
    641	eqe = lpfc_sli4_eq_get(eq);
    642	while (eqe) {
    643		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
    644		__lpfc_sli4_consume_eqe(phba, eq, eqe);
    645
    646		consumed++;
    647		if (!(++count % eq->max_proc_limit))
    648			break;
    649
    650		if (!(count % eq->notify_interval)) {
    651			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
    652							LPFC_QUEUE_NOARM);
    653			consumed = 0;
    654		}
    655
    656		eqe = lpfc_sli4_eq_get(eq);
    657	}
    658	eq->EQ_processed += count;
    659
    660	/* Track the max number of EQEs processed in 1 intr */
    661	if (count > eq->EQ_max_eqe)
    662		eq->EQ_max_eqe = count;
    663
    664	xchg(&eq->queue_claimed, 0);
    665
    666rearm_and_exit:
    667	/* Always clear the EQ. */
    668	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
    669
    670	return count;
    671}
    672
    673/**
    674 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
    675 * @q: The Completion Queue to get the first valid CQE from
    676 *
    677 * This routine will get the first valid Completion Queue Entry from @q, update
    678 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
    679 * the Queue (no more work to do), or the Queue is full of CQEs that have been
    680 * processed, but not popped back to the HBA then this routine will return NULL.
    681 **/
    682static struct lpfc_cqe *
    683lpfc_sli4_cq_get(struct lpfc_queue *q)
    684{
    685	struct lpfc_cqe *cqe;
    686
    687	/* sanity check on queue memory */
    688	if (unlikely(!q))
    689		return NULL;
    690	cqe = lpfc_sli4_qe(q, q->host_index);
    691
    692	/* If the next CQE is not valid then we are done */
    693	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
    694		return NULL;
    695
    696	/*
    697	 * insert barrier for instruction interlock : data from the hardware
    698	 * must have the valid bit checked before it can be copied and acted
    699	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
    700	 * instructions allowing action on content before valid bit checked,
    701	 * add barrier here as well. May not be needed as "content" is a
    702	 * single 32-bit entity here (vs multi word structure for cq's).
    703	 */
    704	mb();
    705	return cqe;
    706}
    707
    708static void
    709__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
    710			struct lpfc_cqe *cqe)
    711{
    712	if (!phba->sli4_hba.pc_sli4_params.cqav)
    713		bf_set_le32(lpfc_cqe_valid, cqe, 0);
    714
    715	cq->host_index = ((cq->host_index + 1) % cq->entry_count);
    716
    717	/* if the index wrapped around, toggle the valid bit */
    718	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
    719		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
    720}
    721
    722/**
    723 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
    724 * @phba: the adapter with the CQ
    725 * @q: The Completion Queue that the host has completed processing for.
    726 * @count: the number of elements that were consumed
     727 * @arm: Indicates whether the host wants to arm this CQ.
    728 *
    729 * This routine will notify the HBA, by ringing the doorbell, that the
    730 * CQEs have been processed. The @arm parameter specifies whether the
    731 * queue should be rearmed when ringing the doorbell.
    732 **/
    733void
    734lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
    735		     uint32_t count, bool arm)
    736{
    737	struct lpfc_register doorbell;
    738
    739	/* sanity check on queue memory */
    740	if (unlikely(!q || (count == 0 && !arm)))
    741		return;
    742
    743	/* ring doorbell for number popped */
    744	doorbell.word0 = 0;
    745	if (arm)
    746		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
    747	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
    748	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
    749	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
    750			(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
    751	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
    752	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
    753}
    754
    755/**
    756 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
    757 * @phba: the adapter with the CQ
    758 * @q: The Completion Queue that the host has completed processing for.
    759 * @count: the number of elements that were consumed
     760 * @arm: Indicates whether the host wants to arm this CQ.
    761 *
    762 * This routine will notify the HBA, by ringing the doorbell, that the
    763 * CQEs have been processed. The @arm parameter specifies whether the
    764 * queue should be rearmed when ringing the doorbell.
    765 **/
    766void
    767lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
    768			 uint32_t count, bool arm)
    769{
    770	struct lpfc_register doorbell;
    771
    772	/* sanity check on queue memory */
    773	if (unlikely(!q || (count == 0 && !arm)))
    774		return;
    775
    776	/* ring doorbell for number popped */
    777	doorbell.word0 = 0;
    778	if (arm)
    779		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
    780	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
    781	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
    782	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
    783}
    784
    785/*
    786 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
    787 *
     788 * This routine will copy the contents of @hrqe to the next available entry on
     789 * the header queue @hq, and @drqe to the data queue @dq. It will then ring the
     790 * Receive Queue Doorbell to signal the HBA to start processing the Receive
     791 * Queue Entries. This function returns the index that the rqe was copied to
     792 * if successful. If no entries are available it will return -EBUSY.
    793 * The caller is expected to hold the hbalock when calling this routine.
    794 **/
    795int
    796lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
    797		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
    798{
    799	struct lpfc_rqe *temp_hrqe;
    800	struct lpfc_rqe *temp_drqe;
    801	struct lpfc_register doorbell;
    802	int hq_put_index;
    803	int dq_put_index;
    804
    805	/* sanity check on queue memory */
    806	if (unlikely(!hq) || unlikely(!dq))
    807		return -ENOMEM;
    808	hq_put_index = hq->host_index;
    809	dq_put_index = dq->host_index;
    810	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
    811	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);
    812
    813	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
    814		return -EINVAL;
    815	if (hq_put_index != dq_put_index)
    816		return -EINVAL;
    817	/* If the host has not yet processed the next entry then we are done */
    818	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
    819		return -EBUSY;
    820	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
    821	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
    822
    823	/* Update the host index to point to the next slot */
    824	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
    825	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
    826	hq->RQ_buf_posted++;
    827
    828	/* Ring The Header Receive Queue Doorbell */
    829	if (!(hq->host_index % hq->notify_interval)) {
    830		doorbell.word0 = 0;
    831		if (hq->db_format == LPFC_DB_RING_FORMAT) {
    832			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
    833			       hq->notify_interval);
    834			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
    835		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
    836			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
    837			       hq->notify_interval);
    838			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
    839			       hq->host_index);
    840			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
    841		} else {
    842			return -EINVAL;
    843		}
    844		writel(doorbell.word0, hq->db_regaddr);
    845	}
    846	return hq_put_index;
    847}
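
Receive buffers are posted as a header/data pair, which is why the routine insists that both queues have matching types and put indices before copying anything. A hedged sketch of posting one pair (the buffer physical addresses and queue pointers are placeholders; the lpfc_rqe layout and putPaddr macros are assumed from the driver headers):

	struct lpfc_rqe hrqe, drqe;
	int idx;

	/* Sketch only: header RQE points at the header buffer, data RQE at the payload. */
	hrqe.address_hi = putPaddrHigh(hbuf_phys);
	hrqe.address_lo = putPaddrLow(hbuf_phys);
	drqe.address_hi = putPaddrHigh(dbuf_phys);
	drqe.address_lo = putPaddrLow(dbuf_phys);

	idx = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
	if (idx < 0) {
		/* -EBUSY: RQ full; -EINVAL: mismatched queues; -ENOMEM: bad queue memory */
	}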
    848
    849/*
    850 * lpfc_sli4_rq_release - Updates internal hba index for RQ
    851 *
    852 * This routine will update the HBA index of a queue to reflect consumption of
    853 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
    854 * consumed an entry the host calls this function to update the queue's
    855 * internal pointers. This routine returns the number of entries that were
    856 * consumed by the HBA.
    857 **/
    858static uint32_t
    859lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
    860{
    861	/* sanity check on queue memory */
    862	if (unlikely(!hq) || unlikely(!dq))
    863		return 0;
    864
    865	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
    866		return 0;
    867	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
    868	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
    869	return 1;
    870}
    871
    872/**
    873 * lpfc_cmd_iocb - Get next command iocb entry in the ring
    874 * @phba: Pointer to HBA context object.
    875 * @pring: Pointer to driver SLI ring object.
    876 *
    877 * This function returns pointer to next command iocb entry
    878 * in the command ring. The caller must hold hbalock to prevent
     879 * other threads from consuming the next command iocb.
    880 * SLI-2/SLI-3 provide different sized iocbs.
    881 **/
    882static inline IOCB_t *
    883lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
    884{
    885	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
    886			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
    887}
    888
    889/**
    890 * lpfc_resp_iocb - Get next response iocb entry in the ring
    891 * @phba: Pointer to HBA context object.
    892 * @pring: Pointer to driver SLI ring object.
    893 *
    894 * This function returns pointer to next response iocb entry
    895 * in the response ring. The caller must hold hbalock to make sure
     896 * that no other thread consumes the next response iocb.
    897 * SLI-2/SLI-3 provide different sized iocbs.
    898 **/
    899static inline IOCB_t *
    900lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
    901{
    902	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
    903			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
    904}
    905
    906/**
    907 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
    908 * @phba: Pointer to HBA context object.
    909 *
    910 * This function is called with hbalock held. This function
    911 * allocates a new driver iocb object from the iocb pool. If the
     912 * allocation is successful, it returns a pointer to the newly
     913 * allocated iocb object; otherwise it returns NULL.
    914 **/
    915struct lpfc_iocbq *
    916__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
    917{
    918	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
    919	struct lpfc_iocbq * iocbq = NULL;
    920
    921	lockdep_assert_held(&phba->hbalock);
    922
    923	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
    924	if (iocbq)
    925		phba->iocb_cnt++;
    926	if (phba->iocb_cnt > phba->iocb_max)
    927		phba->iocb_max = phba->iocb_cnt;
    928	return iocbq;
    929}
    930
    931/**
    932 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
    933 * @phba: Pointer to HBA context object.
    934 * @xritag: XRI value.
    935 *
    936 * This function clears the sglq pointer from the array of active
    937 * sglq's. The xritag that is passed in is used to index into the
    938 * array. Before the xritag can be used it needs to be adjusted
    939 * by subtracting the xribase.
    940 *
     941 * Returns sglq pointer = success, NULL = Failure.
    942 **/
    943struct lpfc_sglq *
    944__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
    945{
    946	struct lpfc_sglq *sglq;
    947
    948	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
    949	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
    950	return sglq;
    951}
    952
    953/**
    954 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
    955 * @phba: Pointer to HBA context object.
    956 * @xritag: XRI value.
    957 *
    958 * This function returns the sglq pointer from the array of active
    959 * sglq's. The xritag that is passed in is used to index into the
    960 * array. Before the xritag can be used it needs to be adjusted
    961 * by subtracting the xribase.
    962 *
     963 * Returns sglq pointer = success, NULL = Failure.
    964 **/
    965struct lpfc_sglq *
    966__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
    967{
    968	struct lpfc_sglq *sglq;
    969
    970	sglq =  phba->sli4_hba.lpfc_sglq_active_list[xritag];
    971	return sglq;
    972}
    973
    974/**
    975 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
    976 * @phba: Pointer to HBA context object.
    977 * @xritag: xri used in this exchange.
    978 * @rrq: The RRQ to be cleared.
    979 *
    980 **/
    981void
    982lpfc_clr_rrq_active(struct lpfc_hba *phba,
    983		    uint16_t xritag,
    984		    struct lpfc_node_rrq *rrq)
    985{
    986	struct lpfc_nodelist *ndlp = NULL;
    987
    988	/* Lookup did to verify if did is still active on this vport */
    989	if (rrq->vport)
    990		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
    991
    992	if (!ndlp)
    993		goto out;
    994
    995	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
    996		rrq->send_rrq = 0;
    997		rrq->xritag = 0;
    998		rrq->rrq_stop_time = 0;
    999	}
   1000out:
   1001	mempool_free(rrq, phba->rrq_pool);
   1002}
   1003
   1004/**
    1005 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
   1006 * @phba: Pointer to HBA context object.
   1007 *
    1008 * This function is called with hbalock held. This function
    1009 * checks if stop_time (ratov from setting rrq active) has
    1010 * been reached; if it has and the send_rrq flag is set, then
   1011 * it will call lpfc_send_rrq. If the send_rrq flag is not set
   1012 * then it will just call the routine to clear the rrq and
   1013 * free the rrq resource.
   1014 * The timer is set to the next rrq that is going to expire before
   1015 * leaving the routine.
   1016 *
   1017 **/
   1018void
   1019lpfc_handle_rrq_active(struct lpfc_hba *phba)
   1020{
   1021	struct lpfc_node_rrq *rrq;
   1022	struct lpfc_node_rrq *nextrrq;
   1023	unsigned long next_time;
   1024	unsigned long iflags;
   1025	LIST_HEAD(send_rrq);
   1026
   1027	spin_lock_irqsave(&phba->hbalock, iflags);
   1028	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
   1029	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
   1030	list_for_each_entry_safe(rrq, nextrrq,
   1031				 &phba->active_rrq_list, list) {
   1032		if (time_after(jiffies, rrq->rrq_stop_time))
   1033			list_move(&rrq->list, &send_rrq);
   1034		else if (time_before(rrq->rrq_stop_time, next_time))
   1035			next_time = rrq->rrq_stop_time;
   1036	}
   1037	spin_unlock_irqrestore(&phba->hbalock, iflags);
   1038	if ((!list_empty(&phba->active_rrq_list)) &&
   1039	    (!(phba->pport->load_flag & FC_UNLOADING)))
   1040		mod_timer(&phba->rrq_tmr, next_time);
   1041	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
   1042		list_del(&rrq->list);
   1043		if (!rrq->send_rrq) {
   1044			/* this call will free the rrq */
   1045			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
   1046		} else if (lpfc_send_rrq(phba, rrq)) {
   1047			/* if we send the rrq then the completion handler
   1048			*  will clear the bit in the xribitmap.
   1049			*/
   1050			lpfc_clr_rrq_active(phba, rrq->xritag,
   1051					    rrq);
   1052		}
   1053	}
   1054}
   1055
   1056/**
   1057 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
   1058 * @vport: Pointer to vport context object.
   1059 * @xri: The xri used in the exchange.
    1060 * @did: The target's DID for this exchange.
   1061 *
   1062 * returns NULL = rrq not found in the phba->active_rrq_list.
   1063 *         rrq = rrq for this xri and target.
   1064 **/
   1065struct lpfc_node_rrq *
   1066lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
   1067{
   1068	struct lpfc_hba *phba = vport->phba;
   1069	struct lpfc_node_rrq *rrq;
   1070	struct lpfc_node_rrq *nextrrq;
   1071	unsigned long iflags;
   1072
   1073	if (phba->sli_rev != LPFC_SLI_REV4)
   1074		return NULL;
   1075	spin_lock_irqsave(&phba->hbalock, iflags);
   1076	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
   1077		if (rrq->vport == vport && rrq->xritag == xri &&
   1078				rrq->nlp_DID == did){
   1079			list_del(&rrq->list);
   1080			spin_unlock_irqrestore(&phba->hbalock, iflags);
   1081			return rrq;
   1082		}
   1083	}
   1084	spin_unlock_irqrestore(&phba->hbalock, iflags);
   1085	return NULL;
   1086}
   1087
   1088/**
   1089 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
   1090 * @vport: Pointer to vport context object.
   1091 * @ndlp: Pointer to the lpfc_node_list structure.
    1092 * If ndlp is NULL, remove all active RRQs for this vport from the
    1093 * phba->active_rrq_list and clear the rrq.
    1094 * If ndlp is not NULL, then only remove rrqs for this vport and this ndlp.
   1095 **/
   1096void
   1097lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
   1098
   1099{
   1100	struct lpfc_hba *phba = vport->phba;
   1101	struct lpfc_node_rrq *rrq;
   1102	struct lpfc_node_rrq *nextrrq;
   1103	unsigned long iflags;
   1104	LIST_HEAD(rrq_list);
   1105
   1106	if (phba->sli_rev != LPFC_SLI_REV4)
   1107		return;
   1108	if (!ndlp) {
   1109		lpfc_sli4_vport_delete_els_xri_aborted(vport);
   1110		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
   1111	}
   1112	spin_lock_irqsave(&phba->hbalock, iflags);
   1113	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
   1114		if (rrq->vport != vport)
   1115			continue;
   1116
   1117		if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
   1118			list_move(&rrq->list, &rrq_list);
   1119
   1120	}
   1121	spin_unlock_irqrestore(&phba->hbalock, iflags);
   1122
   1123	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
   1124		list_del(&rrq->list);
   1125		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
   1126	}
   1127}
   1128
   1129/**
   1130 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
   1131 * @phba: Pointer to HBA context object.
    1132 * @ndlp: Target's nodelist pointer for this exchange.
   1133 * @xritag: the xri in the bitmap to test.
   1134 *
   1135 * This function returns:
   1136 * 0 = rrq not active for this xri
   1137 * 1 = rrq is valid for this xri.
   1138 **/
   1139int
   1140lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
   1141			uint16_t  xritag)
   1142{
   1143	if (!ndlp)
   1144		return 0;
   1145	if (!ndlp->active_rrqs_xri_bitmap)
   1146		return 0;
   1147	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
   1148		return 1;
   1149	else
   1150		return 0;
   1151}
   1152
   1153/**
   1154 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
   1155 * @phba: Pointer to HBA context object.
   1156 * @ndlp: nodelist pointer for this target.
   1157 * @xritag: xri used in this exchange.
   1158 * @rxid: Remote Exchange ID.
   1159 * @send_rrq: Flag used to determine if we should send rrq els cmd.
   1160 *
   1161 * This function takes the hbalock.
   1162 * The active bit is always set in the active rrq xri_bitmap even
    1163 * if there is no slot available for the other rrq information.
   1164 *
    1165 * returns 0 if the rrq was activated for this xri
   1166 *         < 0 No memory or invalid ndlp.
   1167 **/
   1168int
   1169lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
   1170		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
   1171{
   1172	unsigned long iflags;
   1173	struct lpfc_node_rrq *rrq;
   1174	int empty;
   1175
   1176	if (!ndlp)
   1177		return -EINVAL;
   1178
   1179	if (!phba->cfg_enable_rrq)
   1180		return -EINVAL;
   1181
   1182	spin_lock_irqsave(&phba->hbalock, iflags);
   1183	if (phba->pport->load_flag & FC_UNLOADING) {
   1184		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
   1185		goto out;
   1186	}
   1187
   1188	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
   1189		goto out;
   1190
   1191	if (!ndlp->active_rrqs_xri_bitmap)
   1192		goto out;
   1193
   1194	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
   1195		goto out;
   1196
   1197	spin_unlock_irqrestore(&phba->hbalock, iflags);
   1198	rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
   1199	if (!rrq) {
   1200		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
   1201				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
   1202				" DID:0x%x Send:%d\n",
   1203				xritag, rxid, ndlp->nlp_DID, send_rrq);
   1204		return -EINVAL;
   1205	}
   1206	if (phba->cfg_enable_rrq == 1)
   1207		rrq->send_rrq = send_rrq;
   1208	else
   1209		rrq->send_rrq = 0;
   1210	rrq->xritag = xritag;
   1211	rrq->rrq_stop_time = jiffies +
   1212				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
   1213	rrq->nlp_DID = ndlp->nlp_DID;
   1214	rrq->vport = ndlp->vport;
   1215	rrq->rxid = rxid;
   1216	spin_lock_irqsave(&phba->hbalock, iflags);
   1217	empty = list_empty(&phba->active_rrq_list);
   1218	list_add_tail(&rrq->list, &phba->active_rrq_list);
   1219	phba->hba_flag |= HBA_RRQ_ACTIVE;
   1220	if (empty)
   1221		lpfc_worker_wake_up(phba);
   1222	spin_unlock_irqrestore(&phba->hbalock, iflags);
   1223	return 0;
   1224out:
   1225	spin_unlock_irqrestore(&phba->hbalock, iflags);
   1226	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
   1227			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
   1228			" DID:0x%x Send:%d\n",
   1229			xritag, rxid, ndlp->nlp_DID, send_rrq);
   1230	return -EINVAL;
   1231}
   1232
   1233/**
   1234 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
   1235 * @phba: Pointer to HBA context object.
   1236 * @piocbq: Pointer to the iocbq.
   1237 *
   1238 * The driver calls this function with either the nvme ls ring lock
   1239 * or the fc els ring lock held depending on the iocb usage.  This function
   1240 * gets a new driver sglq object from the sglq list. If the list is not empty
   1241 * then it is successful, it returns pointer to the newly allocated sglq
   1242 * object else it returns NULL.
   1243 **/
   1244static struct lpfc_sglq *
   1245__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
   1246{
   1247	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
   1248	struct lpfc_sglq *sglq = NULL;
   1249	struct lpfc_sglq *start_sglq = NULL;
   1250	struct lpfc_io_buf *lpfc_cmd;
   1251	struct lpfc_nodelist *ndlp;
   1252	int found = 0;
   1253	u8 cmnd;
   1254
   1255	cmnd = get_job_cmnd(phba, piocbq);
   1256
   1257	if (piocbq->cmd_flag & LPFC_IO_FCP) {
   1258		lpfc_cmd = piocbq->io_buf;
   1259		ndlp = lpfc_cmd->rdata->pnode;
   1260	} else  if ((cmnd == CMD_GEN_REQUEST64_CR) &&
   1261			!(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
   1262		ndlp = piocbq->ndlp;
   1263	} else  if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
   1264		if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
   1265			ndlp = NULL;
   1266		else
   1267			ndlp = piocbq->ndlp;
   1268	} else {
   1269		ndlp = piocbq->ndlp;
   1270	}
   1271
   1272	spin_lock(&phba->sli4_hba.sgl_list_lock);
   1273	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
   1274	start_sglq = sglq;
   1275	while (!found) {
   1276		if (!sglq)
   1277			break;
   1278		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
   1279		    test_bit(sglq->sli4_lxritag,
   1280		    ndlp->active_rrqs_xri_bitmap)) {
   1281			/* This xri has an rrq outstanding for this DID.
   1282			 * put it back in the list and get another xri.
   1283			 */
   1284			list_add_tail(&sglq->list, lpfc_els_sgl_list);
   1285			sglq = NULL;
   1286			list_remove_head(lpfc_els_sgl_list, sglq,
   1287						struct lpfc_sglq, list);
   1288			if (sglq == start_sglq) {
   1289				list_add_tail(&sglq->list, lpfc_els_sgl_list);
   1290				sglq = NULL;
   1291				break;
   1292			} else
   1293				continue;
   1294		}
   1295		sglq->ndlp = ndlp;
   1296		found = 1;
   1297		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
   1298		sglq->state = SGL_ALLOCATED;
   1299	}
   1300	spin_unlock(&phba->sli4_hba.sgl_list_lock);
   1301	return sglq;
   1302}
   1303
   1304/**
   1305 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
   1306 * @phba: Pointer to HBA context object.
   1307 * @piocbq: Pointer to the iocbq.
   1308 *
   1309 * This function is called with the sgl_list lock held. This function
   1310 * gets a new driver sglq object from the sglq list. If the
    1311 * list is not empty, it returns a pointer to the newly
    1312 * allocated sglq object; otherwise it returns NULL.
   1313 **/
   1314struct lpfc_sglq *
   1315__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
   1316{
   1317	struct list_head *lpfc_nvmet_sgl_list;
   1318	struct lpfc_sglq *sglq = NULL;
   1319
   1320	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
   1321
   1322	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
   1323
   1324	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
   1325	if (!sglq)
   1326		return NULL;
   1327	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
   1328	sglq->state = SGL_ALLOCATED;
   1329	return sglq;
   1330}
   1331
   1332/**
   1333 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
   1334 * @phba: Pointer to HBA context object.
   1335 *
   1336 * This function is called with no lock held. This function
   1337 * allocates a new driver iocb object from the iocb pool. If the
    1338 * allocation is successful, it returns a pointer to the newly
    1339 * allocated iocb object; otherwise it returns NULL.
   1340 **/
   1341struct lpfc_iocbq *
   1342lpfc_sli_get_iocbq(struct lpfc_hba *phba)
   1343{
   1344	struct lpfc_iocbq * iocbq = NULL;
   1345	unsigned long iflags;
   1346
   1347	spin_lock_irqsave(&phba->hbalock, iflags);
   1348	iocbq = __lpfc_sli_get_iocbq(phba);
   1349	spin_unlock_irqrestore(&phba->hbalock, iflags);
   1350	return iocbq;
   1351}
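
Allocation and release are symmetric: the wrappers take the hbalock themselves, while the __ variants expect the caller to already hold it. A minimal, hedged usage sketch (the error handling is illustrative):

	struct lpfc_iocbq *piocbq;

	piocbq = lpfc_sli_get_iocbq(phba);	/* takes and drops the hbalock */
	if (!piocbq)
		return -ENOMEM;			/* iocb pool exhausted */

	/* ... fill in the request and issue it ... */

	lpfc_sli_release_iocbq(phba, piocbq);	/* back onto phba->lpfc_iocb_list */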
   1352
   1353/**
   1354 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
   1355 * @phba: Pointer to HBA context object.
   1356 * @iocbq: Pointer to driver iocb object.
   1357 *
   1358 * This function is called to release the driver iocb object
   1359 * to the iocb pool. The iotag in the iocb object
   1360 * does not change for each use of the iocb object. This function
   1361 * clears all other fields of the iocb object when it is freed.
    1362 * The sglq structure that holds the xritag and phys and virtual
   1363 * mappings for the scatter gather list is retrieved from the
   1364 * active array of sglq. The get of the sglq pointer also clears
    1365 * the entry in the array. If the status of the IO indicates that
    1366 * this IO was aborted then the sglq entry is put on the
   1367 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
   1368 * IO has good status or fails for any other reason then the sglq
   1369 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
   1370 *  asserted held in the code path calling this routine.
   1371 **/
   1372static void
   1373__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
   1374{
   1375	struct lpfc_sglq *sglq;
   1376	size_t start_clean = offsetof(struct lpfc_iocbq, wqe);
   1377	unsigned long iflag = 0;
   1378	struct lpfc_sli_ring *pring;
   1379
   1380	if (iocbq->sli4_xritag == NO_XRI)
   1381		sglq = NULL;
   1382	else
   1383		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
   1384
   1385
   1386	if (sglq)  {
   1387		if (iocbq->cmd_flag & LPFC_IO_NVMET) {
   1388			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
   1389					  iflag);
   1390			sglq->state = SGL_FREED;
   1391			sglq->ndlp = NULL;
   1392			list_add_tail(&sglq->list,
   1393				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
   1394			spin_unlock_irqrestore(
   1395				&phba->sli4_hba.sgl_list_lock, iflag);
   1396			goto out;
   1397		}
   1398
   1399		if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
   1400		    (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
   1401		    sglq->state != SGL_XRI_ABORTED) {
   1402			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
   1403					  iflag);
   1404
   1405			/* Check if we can get a reference on ndlp */
   1406			if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
   1407				sglq->ndlp = NULL;
   1408
   1409			list_add(&sglq->list,
   1410				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
   1411			spin_unlock_irqrestore(
   1412				&phba->sli4_hba.sgl_list_lock, iflag);
   1413		} else {
   1414			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
   1415					  iflag);
   1416			sglq->state = SGL_FREED;
   1417			sglq->ndlp = NULL;
   1418			list_add_tail(&sglq->list,
   1419				      &phba->sli4_hba.lpfc_els_sgl_list);
   1420			spin_unlock_irqrestore(
   1421				&phba->sli4_hba.sgl_list_lock, iflag);
   1422			pring = lpfc_phba_elsring(phba);
   1423			/* Check if TXQ queue needs to be serviced */
   1424			if (pring && (!list_empty(&pring->txq)))
   1425				lpfc_worker_wake_up(phba);
   1426		}
   1427	}
   1428
   1429out:
   1430	/*
   1431	 * Clean all volatile data fields, preserve iotag and node struct.
   1432	 */
   1433	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
   1434	iocbq->sli4_lxritag = NO_XRI;
   1435	iocbq->sli4_xritag = NO_XRI;
   1436	iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
   1437			      LPFC_IO_NVME_LS);
   1438	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
   1439}
   1440
   1441
   1442/**
   1443 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
   1444 * @phba: Pointer to HBA context object.
   1445 * @iocbq: Pointer to driver iocb object.
   1446 *
   1447 * This function is called to release the driver iocb object to the
   1448 * iocb pool. The iotag in the iocb object does not change for each
   1449 * use of the iocb object. This function clears all other fields of
   1450 * the iocb object when it is freed. The hbalock is asserted held in
   1451 * the code path calling this routine.
   1452 **/
   1453static void
   1454__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
   1455{
   1456	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
   1457
   1458	/*
   1459	 * Clean all volatile data fields, preserve iotag and node struct.
   1460	 */
   1461	memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
   1462	iocbq->sli4_xritag = NO_XRI;
   1463	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
   1464}
   1465
   1466/**
   1467 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
   1468 * @phba: Pointer to HBA context object.
   1469 * @iocbq: Pointer to driver iocb object.
   1470 *
   1471 * This function is called with hbalock held to release driver
   1472 * iocb object to the iocb pool. The iotag in the iocb object
   1473 * does not change for each use of the iocb object. This function
   1474 * clears all other fields of the iocb object when it is freed.
   1475 **/
   1476static void
   1477__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
   1478{
   1479	lockdep_assert_held(&phba->hbalock);
   1480
   1481	phba->__lpfc_sli_release_iocbq(phba, iocbq);
   1482	phba->iocb_cnt--;
   1483}
   1484
   1485/**
   1486 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
   1487 * @phba: Pointer to HBA context object.
   1488 * @iocbq: Pointer to driver iocb object.
   1489 *
   1490 * This function is called with no lock held to release the iocb to
   1491 * iocb pool.
   1492 **/
   1493void
   1494lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
   1495{
   1496	unsigned long iflags;
   1497
   1498	/*
   1499	 * Clean all volatile data fields, preserve iotag and node struct.
   1500	 */
   1501	spin_lock_irqsave(&phba->hbalock, iflags);
   1502	__lpfc_sli_release_iocbq(phba, iocbq);
   1503	spin_unlock_irqrestore(&phba->hbalock, iflags);
   1504}
   1505
   1506/**
   1507 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
   1508 * @phba: Pointer to HBA context object.
   1509 * @iocblist: List of IOCBs.
   1510 * @ulpstatus: ULP status in IOCB command field.
   1511 * @ulpWord4: ULP word-4 in IOCB command field.
   1512 *
   1513 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
   1514 * on the list by invoking the complete callback function associated with the
   1515 * IOCB with the provided @ulpstatus and @ulpWord4 set in the IOCB command
   1516 * fields.
   1517 **/
   1518void
   1519lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
   1520		      uint32_t ulpstatus, uint32_t ulpWord4)
   1521{
   1522	struct lpfc_iocbq *piocb;
   1523
   1524	while (!list_empty(iocblist)) {
   1525		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
   1526		if (piocb->cmd_cmpl) {
   1527			if (piocb->cmd_flag & LPFC_IO_NVME) {
   1528				lpfc_nvme_cancel_iocb(phba, piocb,
   1529						      ulpstatus, ulpWord4);
   1530			} else {
   1531				if (phba->sli_rev == LPFC_SLI_REV4) {
   1532					bf_set(lpfc_wcqe_c_status,
   1533					       &piocb->wcqe_cmpl, ulpstatus);
   1534					piocb->wcqe_cmpl.parameter = ulpWord4;
   1535				} else {
   1536					piocb->iocb.ulpStatus = ulpstatus;
   1537					piocb->iocb.un.ulpWord[4] = ulpWord4;
   1538				}
   1539				(piocb->cmd_cmpl) (phba, piocb, piocb);
   1540			}
   1541		} else {
   1542			lpfc_sli_release_iocbq(phba, piocb);
   1543		}
   1544	}
   1545	return;
   1546}
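
       /*
        * For illustration only: a minimal caller sketch, not part of the
        * driver.  A typical flush path splices the pending iocbs onto a
        * local list under the hbalock and then cancels them with a
        * local-reject status; the status/error values below are assumptions
        * chosen for the example.
        *
        *	LIST_HEAD(completions);
        *
        *	spin_lock_irq(&phba->hbalock);
        *	list_splice_init(&pring->txq, &completions);
        *	spin_unlock_irq(&phba->hbalock);
        *
        *	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
        *			      IOERR_SLI_ABORTED);
        */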
   1547
   1548/**
   1549 * lpfc_sli_iocb_cmd_type - Get the iocb type
   1550 * @iocb_cmnd: iocb command code.
   1551 *
   1552 * This function is called by the ring event handler to get the iocb type.
   1553 * This function translates the iocb command to an iocb command type used to
   1554 * decide the final disposition of each completed IOCB.
   1555 * The function returns
   1556 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
   1557 * LPFC_SOL_IOCB     if it is a solicited iocb completion
   1558 * LPFC_ABORT_IOCB   if it is an abort iocb
   1559 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
   1560 *
   1561 * The caller is not required to hold any lock.
   1562 **/
   1563static lpfc_iocb_type
   1564lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
   1565{
   1566	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
   1567
   1568	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
   1569		return LPFC_UNKNOWN_IOCB;
   1570
   1571	switch (iocb_cmnd) {
   1572	case CMD_XMIT_SEQUENCE_CR:
   1573	case CMD_XMIT_SEQUENCE_CX:
   1574	case CMD_XMIT_BCAST_CN:
   1575	case CMD_XMIT_BCAST_CX:
   1576	case CMD_ELS_REQUEST_CR:
   1577	case CMD_ELS_REQUEST_CX:
   1578	case CMD_CREATE_XRI_CR:
   1579	case CMD_CREATE_XRI_CX:
   1580	case CMD_GET_RPI_CN:
   1581	case CMD_XMIT_ELS_RSP_CX:
   1582	case CMD_GET_RPI_CR:
   1583	case CMD_FCP_IWRITE_CR:
   1584	case CMD_FCP_IWRITE_CX:
   1585	case CMD_FCP_IREAD_CR:
   1586	case CMD_FCP_IREAD_CX:
   1587	case CMD_FCP_ICMND_CR:
   1588	case CMD_FCP_ICMND_CX:
   1589	case CMD_FCP_TSEND_CX:
   1590	case CMD_FCP_TRSP_CX:
   1591	case CMD_FCP_TRECEIVE_CX:
   1592	case CMD_FCP_AUTO_TRSP_CX:
   1593	case CMD_ADAPTER_MSG:
   1594	case CMD_ADAPTER_DUMP:
   1595	case CMD_XMIT_SEQUENCE64_CR:
   1596	case CMD_XMIT_SEQUENCE64_CX:
   1597	case CMD_XMIT_BCAST64_CN:
   1598	case CMD_XMIT_BCAST64_CX:
   1599	case CMD_ELS_REQUEST64_CR:
   1600	case CMD_ELS_REQUEST64_CX:
   1601	case CMD_FCP_IWRITE64_CR:
   1602	case CMD_FCP_IWRITE64_CX:
   1603	case CMD_FCP_IREAD64_CR:
   1604	case CMD_FCP_IREAD64_CX:
   1605	case CMD_FCP_ICMND64_CR:
   1606	case CMD_FCP_ICMND64_CX:
   1607	case CMD_FCP_TSEND64_CX:
   1608	case CMD_FCP_TRSP64_CX:
   1609	case CMD_FCP_TRECEIVE64_CX:
   1610	case CMD_GEN_REQUEST64_CR:
   1611	case CMD_GEN_REQUEST64_CX:
   1612	case CMD_XMIT_ELS_RSP64_CX:
   1613	case DSSCMD_IWRITE64_CR:
   1614	case DSSCMD_IWRITE64_CX:
   1615	case DSSCMD_IREAD64_CR:
   1616	case DSSCMD_IREAD64_CX:
   1617	case CMD_SEND_FRAME:
   1618		type = LPFC_SOL_IOCB;
   1619		break;
   1620	case CMD_ABORT_XRI_CN:
   1621	case CMD_ABORT_XRI_CX:
   1622	case CMD_CLOSE_XRI_CN:
   1623	case CMD_CLOSE_XRI_CX:
   1624	case CMD_XRI_ABORTED_CX:
   1625	case CMD_ABORT_MXRI64_CN:
   1626	case CMD_XMIT_BLS_RSP64_CX:
   1627		type = LPFC_ABORT_IOCB;
   1628		break;
   1629	case CMD_RCV_SEQUENCE_CX:
   1630	case CMD_RCV_ELS_REQ_CX:
   1631	case CMD_RCV_SEQUENCE64_CX:
   1632	case CMD_RCV_ELS_REQ64_CX:
   1633	case CMD_ASYNC_STATUS:
   1634	case CMD_IOCB_RCV_SEQ64_CX:
   1635	case CMD_IOCB_RCV_ELS64_CX:
   1636	case CMD_IOCB_RCV_CONT64_CX:
   1637	case CMD_IOCB_RET_XRI64_CX:
   1638		type = LPFC_UNSOL_IOCB;
   1639		break;
   1640	case CMD_IOCB_XMIT_MSEQ64_CR:
   1641	case CMD_IOCB_XMIT_MSEQ64_CX:
   1642	case CMD_IOCB_RCV_SEQ_LIST64_CX:
   1643	case CMD_IOCB_RCV_ELS_LIST64_CX:
   1644	case CMD_IOCB_CLOSE_EXTENDED_CN:
   1645	case CMD_IOCB_ABORT_EXTENDED_CN:
   1646	case CMD_IOCB_RET_HBQE64_CN:
   1647	case CMD_IOCB_FCP_IBIDIR64_CR:
   1648	case CMD_IOCB_FCP_IBIDIR64_CX:
   1649	case CMD_IOCB_FCP_ITASKMGT64_CX:
   1650	case CMD_IOCB_LOGENTRY_CN:
   1651	case CMD_IOCB_LOGENTRY_ASYNC_CN:
   1652		printk("%s - Unhandled SLI-3 Command x%x\n",
   1653				__func__, iocb_cmnd);
   1654		type = LPFC_UNKNOWN_IOCB;
   1655		break;
   1656	default:
   1657		type = LPFC_UNKNOWN_IOCB;
   1658		break;
   1659	}
   1660
   1661	return type;
   1662}
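
       /*
        * For illustration: a few concrete values of the mapping implemented
        * above (taken directly from the switch cases).
        *
        *	lpfc_sli_iocb_cmd_type(CMD_ELS_REQUEST64_CR) == LPFC_SOL_IOCB
        *	lpfc_sli_iocb_cmd_type(CMD_ABORT_XRI_CN)     == LPFC_ABORT_IOCB
        *	lpfc_sli_iocb_cmd_type(CMD_RCV_ELS_REQ64_CX) == LPFC_UNSOL_IOCB
        *	lpfc_sli_iocb_cmd_type(CMD_IOCB_LOGENTRY_CN) == LPFC_UNKNOWN_IOCB
        */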
   1663
   1664/**
   1665 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
   1666 * @phba: Pointer to HBA context object.
   1667 *
   1668 * This function is called from SLI initialization code
   1669 * to configure every ring of the HBA's SLI interface. The
   1670 * caller is not required to hold any lock. This function issues
   1671 * a config_ring mailbox command for each ring.
   1672 * This function returns zero if successful else returns a negative
   1673 * error code.
   1674 **/
   1675static int
   1676lpfc_sli_ring_map(struct lpfc_hba *phba)
   1677{
   1678	struct lpfc_sli *psli = &phba->sli;
   1679	LPFC_MBOXQ_t *pmb;
   1680	MAILBOX_t *pmbox;
   1681	int i, rc, ret = 0;
   1682
   1683	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   1684	if (!pmb)
   1685		return -ENOMEM;
   1686	pmbox = &pmb->u.mb;
   1687	phba->link_state = LPFC_INIT_MBX_CMDS;
   1688	for (i = 0; i < psli->num_rings; i++) {
   1689		lpfc_config_ring(phba, i, pmb);
   1690		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
   1691		if (rc != MBX_SUCCESS) {
   1692			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   1693					"0446 Adapter failed to init (%d), "
   1694					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
   1695					"ring %d\n",
   1696					rc, pmbox->mbxCommand,
   1697					pmbox->mbxStatus, i);
   1698			phba->link_state = LPFC_HBA_ERROR;
   1699			ret = -ENXIO;
   1700			break;
   1701		}
   1702	}
   1703	mempool_free(pmb, phba->mbox_mem_pool);
   1704	return ret;
   1705}
   1706
   1707/**
   1708 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
   1709 * @phba: Pointer to HBA context object.
   1710 * @pring: Pointer to driver SLI ring object.
   1711 * @piocb: Pointer to the driver iocb object.
   1712 *
   1713 * The driver calls this function with the hbalock held for SLI3 ports or
   1714 * the ring lock held for SLI4 ports. The function adds the
   1715 * new iocb to txcmplq of the given ring. This function always returns
   1716 * 0. If this function is called for the ELS ring, this function checks if
   1717 * there is a vport associated with the ELS command. This function also
   1718 * starts the els_tmofunc timer if this is an ELS command.
   1719 **/
   1720static int
   1721lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
   1722			struct lpfc_iocbq *piocb)
   1723{
   1724	u32 ulp_command = 0;
   1725
   1726	BUG_ON(!piocb);
   1727	ulp_command = get_job_cmnd(phba, piocb);
   1728
   1729	list_add_tail(&piocb->list, &pring->txcmplq);
   1730	piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
   1731	pring->txcmplq_cnt++;
   1732	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
   1733	   (ulp_command != CMD_ABORT_XRI_WQE) &&
   1734	   (ulp_command != CMD_ABORT_XRI_CN) &&
   1735	   (ulp_command != CMD_CLOSE_XRI_CN)) {
   1736		BUG_ON(!piocb->vport);
   1737		if (!(piocb->vport->load_flag & FC_UNLOADING))
   1738			mod_timer(&piocb->vport->els_tmofunc,
   1739				  jiffies +
   1740				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
   1741	}
   1742
   1743	return 0;
   1744}
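
       /*
        * For illustration: the ELS timeout arithmetic above, assuming a
        * hypothetical R_A_TOV of 10 seconds (phba->fc_ratov == 10):
        *
        *	1000 * (phba->fc_ratov << 1) == 1000 * 20 == 20000 ms
        *
        * i.e. els_tmofunc is re-armed to expire two R_A_TOV intervals from
        * now.
        */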
   1745
   1746/**
   1747 * lpfc_sli_ringtx_get - Get first element of the txq
   1748 * @phba: Pointer to HBA context object.
   1749 * @pring: Pointer to driver SLI ring object.
   1750 *
   1751 * This function is called with hbalock held to get the next
   1752 * iocb in the txq of the given ring. If there is any iocb in
   1753 * the txq, the function returns the first iocb in the list after
   1754 * removing it from the list, else it returns NULL.
   1755 **/
   1756struct lpfc_iocbq *
   1757lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
   1758{
   1759	struct lpfc_iocbq *cmd_iocb;
   1760
   1761	lockdep_assert_held(&phba->hbalock);
   1762
   1763	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
   1764	return cmd_iocb;
   1765}
   1766
   1767/**
   1768 * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
   1769 * @phba: Pointer to HBA context object.
   1770 * @cmdiocb: Pointer to driver command iocb object.
   1771 * @rspiocb: Pointer to driver response iocb object.
   1772 *
   1773 * This routine will inform the driver of any BW adjustments we need
   1774 * to make. These changes will be picked up during the next CMF
   1775 * timer interrupt. In addition, any BW changes will be logged
   1776 * with LOG_CGN_MGMT.
   1777 **/
   1778static void
   1779lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
   1780		   struct lpfc_iocbq *rspiocb)
   1781{
   1782	union lpfc_wqe128 *wqe;
   1783	uint32_t status, info;
   1784	struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
   1785	uint64_t bw, bwdif, slop;
   1786	uint64_t pcent, bwpcent;
   1787	int asig, afpin, sigcnt, fpincnt;
   1788	int wsigmax, wfpinmax, cg, tdp;
   1789	char *s;
   1790
   1791	/* First check for error */
   1792	status = bf_get(lpfc_wcqe_c_status, wcqe);
   1793	if (status) {
   1794		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
   1795				"6211 CMF_SYNC_WQE Error "
   1796				"req_tag x%x status x%x hwstatus x%x "
   1797				"tdatap x%x parm x%x\n",
   1798				bf_get(lpfc_wcqe_c_request_tag, wcqe),
   1799				bf_get(lpfc_wcqe_c_status, wcqe),
   1800				bf_get(lpfc_wcqe_c_hw_status, wcqe),
   1801				wcqe->total_data_placed,
   1802				wcqe->parameter);
   1803		goto out;
   1804	}
   1805
   1806	/* Gather congestion information on a successful cmpl */
   1807	info = wcqe->parameter;
   1808	phba->cmf_active_info = info;
   1809
   1810	/* See if firmware info count is valid or has changed */
   1811	if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
   1812		info = 0;
   1813	else
   1814		phba->cmf_info_per_interval = info;
   1815
   1816	tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
   1817	cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);
   1818
   1819	/* Get BW requirement from firmware */
   1820	bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
   1821	if (!bw) {
   1822		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
   1823				"6212 CMF_SYNC_WQE x%x: NULL bw\n",
   1824				bf_get(lpfc_wcqe_c_request_tag, wcqe));
   1825		goto out;
   1826	}
   1827
   1828	/* Gather information needed for logging if a BW change is required */
   1829	wqe = &cmdiocb->wqe;
   1830	asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
   1831	afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
   1832	fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
   1833	sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
   1834	if (phba->cmf_max_bytes_per_interval != bw ||
   1835	    (asig || afpin || sigcnt || fpincnt)) {
   1836		/* Are we increasing or decreasing BW */
   1837		if (phba->cmf_max_bytes_per_interval <  bw) {
   1838			bwdif = bw - phba->cmf_max_bytes_per_interval;
   1839			s = "Increase";
   1840		} else {
   1841			bwdif = phba->cmf_max_bytes_per_interval - bw;
   1842			s = "Decrease";
   1843		}
   1844
   1845		/* What is the change percentage */
   1846		slop = div_u64(phba->cmf_link_byte_count, 200); /*For rounding*/
   1847		pcent = div64_u64(bwdif * 100 + slop,
   1848				  phba->cmf_link_byte_count);
   1849		bwpcent = div64_u64(bw * 100 + slop,
   1850				    phba->cmf_link_byte_count);
   1851		if (asig) {
   1852			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
   1853					"6237 BW Threshold %lld%% (%lld): "
   1854					"%lld%% %s: Signal Alarm: cg:%d "
   1855					"Info:%u\n",
   1856					bwpcent, bw, pcent, s, cg,
   1857					phba->cmf_active_info);
   1858		} else if (afpin) {
   1859			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
   1860					"6238 BW Threshold %lld%% (%lld): "
   1861					"%lld%% %s: FPIN Alarm: cg:%d "
   1862					"Info:%u\n",
   1863					bwpcent, bw, pcent, s, cg,
   1864					phba->cmf_active_info);
   1865		} else if (sigcnt) {
   1866			wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
   1867			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
   1868					"6239 BW Threshold %lld%% (%lld): "
   1869					"%lld%% %s: Signal Warning: "
   1870					"Cnt %d Max %d: cg:%d Info:%u\n",
   1871					bwpcent, bw, pcent, s, sigcnt,
   1872					wsigmax, cg, phba->cmf_active_info);
   1873		} else if (fpincnt) {
   1874			wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
   1875			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
   1876					"6240 BW Threshold %lld%% (%lld): "
   1877					"%lld%% %s: FPIN Warning: "
   1878					"Cnt %d Max %d: cg:%d Info:%u\n",
   1879					bwpcent, bw, pcent, s, fpincnt,
   1880					wfpinmax, cg, phba->cmf_active_info);
   1881		} else {
   1882			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
   1883					"6241 BW Threshold %lld%% (%lld): "
   1884					"CMF %lld%% %s: cg:%d Info:%u\n",
   1885					bwpcent, bw, pcent, s, cg,
   1886					phba->cmf_active_info);
   1887		}
   1888	} else if (info) {
   1889		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
   1890				"6246 Info Threshold %u\n", info);
   1891	}
   1892
   1893	/* Save BW change to be picked up during next timer interrupt */
   1894	phba->cmf_last_sync_bw = bw;
   1895out:
   1896	lpfc_sli_release_iocbq(phba, cmdiocb);
   1897}
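
       /*
        * For illustration: the percentage math above with hypothetical
        * numbers.  Assume cmf_link_byte_count is 1,000,000,000 bytes per
        * interval, the previous cmf_max_bytes_per_interval is 800,000,000
        * and the firmware reports bw == 750,000,000 (a "Decrease"):
        *
        *	slop    = 1,000,000,000 / 200                        =  5,000,000
        *	bwdif   = 800,000,000 - 750,000,000                  = 50,000,000
        *	pcent   = (50,000,000 * 100 + slop) / 1,000,000,000  =  5 percent
        *	bwpcent = (750,000,000 * 100 + slop) / 1,000,000,000 = 75 percent
        */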
   1898
   1899/**
   1900 * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
   1901 * @phba: Pointer to HBA context object.
   1902 * @ms:   ms to set in WQE interval, 0 means use init op
   1903 * @total: Total rcv bytes for this interval
   1904 *
   1905 * This routine is called every CMF timer interrupt. Its purpose is
   1906 * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
   1907 * that may indicate we have congestion (FPINs or Signals). Upon
   1908 * completion, the firmware will indicate any BW restrictions the
   1909 * driver may need to take.
   1910 **/
   1911int
   1912lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
   1913{
   1914	union lpfc_wqe128 *wqe;
   1915	struct lpfc_iocbq *sync_buf;
   1916	unsigned long iflags;
   1917	u32 ret_val;
   1918	u32 atot, wtot, max;
   1919
   1920	/* First address any alarm / warning activity */
   1921	atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
   1922	wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
   1923
   1924	/* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
   1925	if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
   1926	    phba->link_state == LPFC_LINK_DOWN)
   1927		return 0;
   1928
   1929	spin_lock_irqsave(&phba->hbalock, iflags);
   1930	sync_buf = __lpfc_sli_get_iocbq(phba);
   1931	if (!sync_buf) {
   1932		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
   1933				"6244 No available WQEs for CMF_SYNC_WQE\n");
   1934		ret_val = ENOMEM;
   1935		goto out_unlock;
   1936	}
   1937
   1938	wqe = &sync_buf->wqe;
   1939
   1940	/* WQEs are reused.  Clear stale data and set key fields to zero */
   1941	memset(wqe, 0, sizeof(*wqe));
   1942
   1943	/* If this is the very first CMF_SYNC_WQE, issue an init operation */
   1944	if (!ms) {
   1945		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
   1946				"6441 CMF Init %d - CMF_SYNC_WQE\n",
   1947				phba->fc_eventTag);
   1948		bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
   1949		bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
   1950		goto initpath;
   1951	}
   1952
   1953	bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
   1954	bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);
   1955
   1956	/* Check for alarms / warnings */
   1957	if (atot) {
   1958		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
   1959			/* We hit a Signal alarm condition */
   1960			bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
   1961		} else {
   1962			/* We hit a FPIN alarm condition */
   1963			bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
   1964		}
   1965	} else if (wtot) {
   1966		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
   1967		    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
   1968			/* We hit a Signal warning condition */
   1969			max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
   1970				lpfc_acqe_cgn_frequency;
   1971			bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
   1972			bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
   1973		} else {
   1974			/* We hit a FPIN warning condition */
   1975			bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
   1976			bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
   1977		}
   1978	}
   1979
   1980	/* Update total read blocks during previous timer interval */
   1981	wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);
   1982
   1983initpath:
   1984	bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
   1985	wqe->cmf_sync.event_tag = phba->fc_eventTag;
   1986	bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);
   1987
   1988	/* Setup reqtag to match the wqe completion. */
   1989	bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
   1990
   1991	bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
   1992
   1993	bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
   1994	bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
   1995	bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
   1996
   1997	sync_buf->vport = phba->pport;
   1998	sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
   1999	sync_buf->cmd_dmabuf = NULL;
   2000	sync_buf->rsp_dmabuf = NULL;
   2001	sync_buf->bpl_dmabuf = NULL;
   2002	sync_buf->sli4_xritag = NO_XRI;
   2003
   2004	sync_buf->cmd_flag |= LPFC_IO_CMF;
   2005	ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
   2006	if (ret_val)
   2007		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
   2008				"6214 Cannot issue CMF_SYNC_WQE: x%x\n",
   2009				ret_val);
   2010out_unlock:
   2011	spin_unlock_irqrestore(&phba->hbalock, iflags);
   2012	return ret_val;
   2013}
   2014
   2015/**
   2016 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
   2017 * @phba: Pointer to HBA context object.
   2018 * @pring: Pointer to driver SLI ring object.
   2019 *
   2020 * This function is called with hbalock held and the caller must post the
   2021 * iocb without releasing the lock. If the caller releases the lock,
   2022 * iocb slot returned by the function is not guaranteed to be available.
   2023 * The function returns pointer to the next available iocb slot if there
   2024 * is available slot in the ring, else it returns NULL.
   2025 * If the get index of the ring is ahead of the put index, the function
   2026 * will post an error attention event to the worker thread to take the
   2027 * HBA to offline state.
   2028 **/
   2029static IOCB_t *
   2030lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
   2031{
   2032	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
   2033	uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
   2034
   2035	lockdep_assert_held(&phba->hbalock);
   2036
   2037	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
   2038	   (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
   2039		pring->sli.sli3.next_cmdidx = 0;
   2040
   2041	if (unlikely(pring->sli.sli3.local_getidx ==
   2042		pring->sli.sli3.next_cmdidx)) {
   2043
   2044		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
   2045
   2046		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
   2047			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2048					"0315 Ring %d issue: portCmdGet %d "
   2049					"is bigger than cmd ring %d\n",
   2050					pring->ringno,
   2051					pring->sli.sli3.local_getidx,
   2052					max_cmd_idx);
   2053
   2054			phba->link_state = LPFC_HBA_ERROR;
   2055			/*
   2056			 * All error attention handlers are posted to
   2057			 * worker thread
   2058			 */
   2059			phba->work_ha |= HA_ERATT;
   2060			phba->work_hs = HS_FFER3;
   2061
   2062			lpfc_worker_wake_up(phba);
   2063
   2064			return NULL;
   2065		}
   2066
   2067		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
   2068			return NULL;
   2069	}
   2070
   2071	return lpfc_cmd_iocb(phba, pring);
   2072}
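
       /*
        * For illustration: the index handling above on a hypothetical ring
        * with numCiocb == 256.  If cmdidx == next_cmdidx == 255, next_cmdidx
        * is advanced and wraps to 0.  If local_getidx then equals
        * next_cmdidx, the cached get index is refreshed from the port's
        * cmdGetInx; if the two are still equal the ring is full and NULL is
        * returned, otherwise lpfc_cmd_iocb() returns the free command slot.
        */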
   2073
   2074/**
   2075 * lpfc_sli_next_iotag - Get an iotag for the iocb
   2076 * @phba: Pointer to HBA context object.
   2077 * @iocbq: Pointer to driver iocb object.
   2078 *
   2079 * This function gets an iotag for the iocb. If there is no unused iotag and
   2080 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
   2081 * array and assigns a new iotag.
   2082 * The function returns the allocated iotag if successful, else returns zero.
   2083 * Zero is not a valid iotag.
   2084 * The caller is not required to hold any lock.
   2085 **/
   2086uint16_t
   2087lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
   2088{
   2089	struct lpfc_iocbq **new_arr;
   2090	struct lpfc_iocbq **old_arr;
   2091	size_t new_len;
   2092	struct lpfc_sli *psli = &phba->sli;
   2093	uint16_t iotag;
   2094
   2095	spin_lock_irq(&phba->hbalock);
   2096	iotag = psli->last_iotag;
   2097	if (++iotag < psli->iocbq_lookup_len) {
   2098		psli->last_iotag = iotag;
   2099		psli->iocbq_lookup[iotag] = iocbq;
   2100		spin_unlock_irq(&phba->hbalock);
   2101		iocbq->iotag = iotag;
   2102		return iotag;
   2103	} else if (psli->iocbq_lookup_len < (0xffff
   2104					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
   2105		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
   2106		spin_unlock_irq(&phba->hbalock);
   2107		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
   2108				  GFP_KERNEL);
   2109		if (new_arr) {
   2110			spin_lock_irq(&phba->hbalock);
   2111			old_arr = psli->iocbq_lookup;
   2112			if (new_len <= psli->iocbq_lookup_len) {
   2113				/* highly improbable case */
   2114				kfree(new_arr);
   2115				iotag = psli->last_iotag;
   2116				if (++iotag < psli->iocbq_lookup_len) {
   2117					psli->last_iotag = iotag;
   2118					psli->iocbq_lookup[iotag] = iocbq;
   2119					spin_unlock_irq(&phba->hbalock);
   2120					iocbq->iotag = iotag;
   2121					return iotag;
   2122				}
   2123				spin_unlock_irq(&phba->hbalock);
   2124				return 0;
   2125			}
   2126			if (psli->iocbq_lookup)
   2127				memcpy(new_arr, old_arr,
   2128				       ((psli->last_iotag  + 1) *
   2129					sizeof (struct lpfc_iocbq *)));
   2130			psli->iocbq_lookup = new_arr;
   2131			psli->iocbq_lookup_len = new_len;
   2132			psli->last_iotag = iotag;
   2133			psli->iocbq_lookup[iotag] = iocbq;
   2134			spin_unlock_irq(&phba->hbalock);
   2135			iocbq->iotag = iotag;
   2136			kfree(old_arr);
   2137			return iotag;
   2138		}
   2139	} else
   2140		spin_unlock_irq(&phba->hbalock);
   2141
   2142	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
   2143			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
   2144			psli->last_iotag);
   2145
   2146	return 0;
   2147}
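
       /*
        * For illustration only: a minimal usage sketch, not driver code.
        * The iotag returned here travels with the command, and a completion
        * path can map it back through the lookup array; zero is never a
        * valid iotag.  The surrounding flow is assumed.
        *
        *	iotag = lpfc_sli_next_iotag(phba, iocbq);
        *	if (!iotag)
        *		return IOCB_ERROR;
        *
        *	... later, resolve the tag back to the command iocb ...
        *	if (iotag <= phba->sli.last_iotag)
        *		cmdiocbq = phba->sli.iocbq_lookup[iotag];
        */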
   2148
   2149/**
   2150 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
   2151 * @phba: Pointer to HBA context object.
   2152 * @pring: Pointer to driver SLI ring object.
   2153 * @iocb: Pointer to iocb slot in the ring.
   2154 * @nextiocb: Pointer to driver iocb object which needs to be
   2155 *            posted to firmware.
   2156 *
   2157 * This function is called to post a new iocb to the firmware. This
   2158 * function copies the new iocb to ring iocb slot and updates the
   2159 * ring pointers. It adds the new iocb to txcmplq if there is
   2160 * a completion call back for this iocb else the function will free the
   2161 * iocb object.  The hbalock is asserted held in the code path calling
   2162 * this routine.
   2163 **/
   2164static void
   2165lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
   2166		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
   2167{
   2168	/*
   2169	 * Set up an iotag
   2170	 */
   2171	nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;
   2172
   2173
   2174	if (pring->ringno == LPFC_ELS_RING) {
   2175		lpfc_debugfs_slow_ring_trc(phba,
   2176			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
   2177			*(((uint32_t *) &nextiocb->iocb) + 4),
   2178			*(((uint32_t *) &nextiocb->iocb) + 6),
   2179			*(((uint32_t *) &nextiocb->iocb) + 7));
   2180	}
   2181
   2182	/*
   2183	 * Issue iocb command to adapter
   2184	 */
   2185	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
   2186	wmb();
   2187	pring->stats.iocb_cmd++;
   2188
   2189	/*
   2190	 * If there is no completion routine to call, we can release the
   2191	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
   2192	 * that have no rsp ring completion, cmd_cmpl MUST be NULL.
   2193	 */
   2194	if (nextiocb->cmd_cmpl)
   2195		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
   2196	else
   2197		__lpfc_sli_release_iocbq(phba, nextiocb);
   2198
   2199	/*
   2200	 * Let the HBA know what IOCB slot will be the next one the
   2201	 * driver will put a command into.
   2202	 */
   2203	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
   2204	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
   2205}
   2206
   2207/**
   2208 * lpfc_sli_update_full_ring - Update the chip attention register
   2209 * @phba: Pointer to HBA context object.
   2210 * @pring: Pointer to driver SLI ring object.
   2211 *
   2212 * The caller is not required to hold any lock for calling this function.
   2213 * This function updates the chip attention bits for the ring to inform firmware
   2214 * that there is pending work to be done for this ring and requests an
   2215 * interrupt when there is space available in the ring. This function is
   2216 * called when the driver is unable to post more iocbs to the ring due
   2217 * to unavailability of space in the ring.
   2218 **/
   2219static void
   2220lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
   2221{
   2222	int ringno = pring->ringno;
   2223
   2224	pring->flag |= LPFC_CALL_RING_AVAILABLE;
   2225
   2226	wmb();
   2227
   2228	/*
   2229	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
   2230	 * The HBA will tell us when an IOCB entry is available.
   2231	 */
   2232	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
   2233	readl(phba->CAregaddr); /* flush */
   2234
   2235	pring->stats.iocb_cmd_full++;
   2236}
   2237
   2238/**
   2239 * lpfc_sli_update_ring - Update chip attention register
   2240 * @phba: Pointer to HBA context object.
   2241 * @pring: Pointer to driver SLI ring object.
   2242 *
   2243 * This function updates the chip attention register bit for the
   2244 * given ring to inform HBA that there is more work to be done
   2245 * in this ring. The caller is not required to hold any lock.
   2246 **/
   2247static void
   2248lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
   2249{
   2250	int ringno = pring->ringno;
   2251
   2252	/*
   2253	 * Tell the HBA that there is work to do in this ring.
   2254	 */
   2255	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
   2256		wmb();
   2257		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
   2258		readl(phba->CAregaddr); /* flush */
   2259	}
   2260}
   2261
   2262/**
   2263 * lpfc_sli_resume_iocb - Process iocbs in the txq
   2264 * @phba: Pointer to HBA context object.
   2265 * @pring: Pointer to driver SLI ring object.
   2266 *
   2267 * This function is called with hbalock held to post pending iocbs
   2268 * in the txq to the firmware. This function is called when the driver
   2269 * detects space available in the ring.
   2270 **/
   2271static void
   2272lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
   2273{
   2274	IOCB_t *iocb;
   2275	struct lpfc_iocbq *nextiocb;
   2276
   2277	lockdep_assert_held(&phba->hbalock);
   2278
   2279	/*
   2280	 * Check to see if:
   2281	 *  (a) there is anything on the txq to send
   2282	 *  (b) link is up
   2283	 *  (c) link attention events can be processed (fcp ring only)
   2284	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
   2285	 */
   2286
   2287	if (lpfc_is_link_up(phba) &&
   2288	    (!list_empty(&pring->txq)) &&
   2289	    (pring->ringno != LPFC_FCP_RING ||
   2290	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {
   2291
   2292		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
   2293		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
   2294			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
   2295
   2296		if (iocb)
   2297			lpfc_sli_update_ring(phba, pring);
   2298		else
   2299			lpfc_sli_update_full_ring(phba, pring);
   2300	}
   2301
   2302	return;
   2303}
   2304
   2305/**
   2306 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
   2307 * @phba: Pointer to HBA context object.
   2308 * @hbqno: HBQ number.
   2309 *
   2310 * This function is called with hbalock held to get the next
   2311 * available slot for the given HBQ. If there is a free slot
   2312 * available for the HBQ, it will return a pointer to the next available
   2313 * HBQ entry, else it will return NULL.
   2314 **/
   2315static struct lpfc_hbq_entry *
   2316lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
   2317{
   2318	struct hbq_s *hbqp = &phba->hbqs[hbqno];
   2319
   2320	lockdep_assert_held(&phba->hbalock);
   2321
   2322	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
   2323	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
   2324		hbqp->next_hbqPutIdx = 0;
   2325
   2326	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
   2327		uint32_t raw_index = phba->hbq_get[hbqno];
   2328		uint32_t getidx = le32_to_cpu(raw_index);
   2329
   2330		hbqp->local_hbqGetIdx = getidx;
   2331
   2332		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
   2333			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2334					"1802 HBQ %d: local_hbqGetIdx "
   2335					"%u is > than hbqp->entry_count %u\n",
   2336					hbqno, hbqp->local_hbqGetIdx,
   2337					hbqp->entry_count);
   2338
   2339			phba->link_state = LPFC_HBA_ERROR;
   2340			return NULL;
   2341		}
   2342
   2343		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
   2344			return NULL;
   2345	}
   2346
   2347	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
   2348			hbqp->hbqPutIdx;
   2349}
   2350
   2351/**
   2352 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
   2353 * @phba: Pointer to HBA context object.
   2354 *
   2355 * This function is called with no lock held to free all the
   2356 * hbq buffers while uninitializing the SLI interface. It also
   2357 * frees the HBQ buffers returned by the firmware but not yet
   2358 * processed by the upper layers.
   2359 **/
   2360void
   2361lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
   2362{
   2363	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
   2364	struct hbq_dmabuf *hbq_buf;
   2365	unsigned long flags;
   2366	int i, hbq_count;
   2367
   2368	hbq_count = lpfc_sli_hbq_count();
   2369	/* Return all memory used by all HBQs */
   2370	spin_lock_irqsave(&phba->hbalock, flags);
   2371	for (i = 0; i < hbq_count; ++i) {
   2372		list_for_each_entry_safe(dmabuf, next_dmabuf,
   2373				&phba->hbqs[i].hbq_buffer_list, list) {
   2374			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
   2375			list_del(&hbq_buf->dbuf.list);
   2376			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
   2377		}
   2378		phba->hbqs[i].buffer_count = 0;
   2379	}
   2380
   2381	/* Mark the HBQs not in use */
   2382	phba->hbq_in_use = 0;
   2383	spin_unlock_irqrestore(&phba->hbalock, flags);
   2384}
   2385
   2386/**
   2387 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
   2388 * @phba: Pointer to HBA context object.
   2389 * @hbqno: HBQ number.
   2390 * @hbq_buf: Pointer to HBQ buffer.
   2391 *
   2392 * This function is called with the hbalock held to post a
   2393 * hbq buffer to the firmware. If the function finds an empty
   2394 * slot in the HBQ, it will post the buffer. The function returns
   2395 * zero if it successfully posts the buffer, else it returns an
   2396 * error.
   2397 **/
   2398static int
   2399lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
   2400			 struct hbq_dmabuf *hbq_buf)
   2401{
   2402	lockdep_assert_held(&phba->hbalock);
   2403	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
   2404}
   2405
   2406/**
   2407 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
   2408 * @phba: Pointer to HBA context object.
   2409 * @hbqno: HBQ number.
   2410 * @hbq_buf: Pointer to HBQ buffer.
   2411 *
   2412 * This function is called with the hbalock held to post a hbq buffer to the
   2413 * firmware. If the function finds an empty slot in the HBQ, it will post the
   2414 * buffer and place it on the hbq_buffer_list. The function will return zero if
   2415 * it successfully posts the buffer, else it will return an error.
   2416 **/
   2417static int
   2418lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
   2419			    struct hbq_dmabuf *hbq_buf)
   2420{
   2421	struct lpfc_hbq_entry *hbqe;
   2422	dma_addr_t physaddr = hbq_buf->dbuf.phys;
   2423
   2424	lockdep_assert_held(&phba->hbalock);
   2425	/* Get next HBQ entry slot to use */
   2426	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
   2427	if (hbqe) {
   2428		struct hbq_s *hbqp = &phba->hbqs[hbqno];
   2429
   2430		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
   2431		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
   2432		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
   2433		hbqe->bde.tus.f.bdeFlags = 0;
   2434		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
   2435		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
   2436				/* Sync SLIM */
   2437		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
   2438		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
   2439				/* flush */
   2440		readl(phba->hbq_put + hbqno);
   2441		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
   2442		return 0;
   2443	} else
   2444		return -ENOMEM;
   2445}
   2446
   2447/**
   2448 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
   2449 * @phba: Pointer to HBA context object.
   2450 * @hbqno: HBQ number.
   2451 * @hbq_buf: Pointer to HBQ buffer.
   2452 *
   2453 * This function is called with the hbalock held to post an RQE to the SLI4
   2454 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
   2455 * the hbq_buffer_list and return zero, otherwise it will return an error.
   2456 **/
   2457static int
   2458lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
   2459			    struct hbq_dmabuf *hbq_buf)
   2460{
   2461	int rc;
   2462	struct lpfc_rqe hrqe;
   2463	struct lpfc_rqe drqe;
   2464	struct lpfc_queue *hrq;
   2465	struct lpfc_queue *drq;
   2466
   2467	if (hbqno != LPFC_ELS_HBQ)
   2468		return 1;
   2469	hrq = phba->sli4_hba.hdr_rq;
   2470	drq = phba->sli4_hba.dat_rq;
   2471
   2472	lockdep_assert_held(&phba->hbalock);
   2473	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
   2474	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
   2475	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
   2476	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
   2477	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
   2478	if (rc < 0)
   2479		return rc;
   2480	hbq_buf->tag = (rc | (hbqno << 16));
   2481	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
   2482	return 0;
   2483}
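
       /*
        * For illustration: the buffer tag encoding set here (and in
        * lpfc_sli_hbqbuf_fill_hbqs for the SLI3 path) and decoded by
        * lpfc_sli_hbqbuf_find().  The HBQ number occupies the upper 16 bits
        * and the per-queue index the lower 16 bits.  With a hypothetical
        * hbqno of 1 and index of 3:
        *
        *	tag          = 3 | (1 << 16) == 0x00010003
        *	tag >> 16    == 1      (recovers hbqno)
        *	tag & 0xffff == 3      (recovers the index)
        */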
   2484
   2485/* HBQ for ELS and CT traffic. */
   2486static struct lpfc_hbq_init lpfc_els_hbq = {
   2487	.rn = 1,
   2488	.entry_count = 256,
   2489	.mask_count = 0,
   2490	.profile = 0,
   2491	.ring_mask = (1 << LPFC_ELS_RING),
   2492	.buffer_count = 0,
   2493	.init_count = 40,
   2494	.add_count = 40,
   2495};
   2496
   2497/* Array of HBQs */
   2498struct lpfc_hbq_init *lpfc_hbq_defs[] = {
   2499	&lpfc_els_hbq,
   2500};
   2501
   2502/**
   2503 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
   2504 * @phba: Pointer to HBA context object.
   2505 * @hbqno: HBQ number.
   2506 * @count: Number of HBQ buffers to be posted.
   2507 *
   2508 * This function is called with no lock held to post more hbq buffers to the
   2509 * given HBQ. The function returns the number of HBQ buffers successfully
   2510 * posted.
   2511 **/
   2512static int
   2513lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
   2514{
   2515	uint32_t i, posted = 0;
   2516	unsigned long flags;
   2517	struct hbq_dmabuf *hbq_buffer;
   2518	LIST_HEAD(hbq_buf_list);
   2519	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
   2520		return 0;
   2521
   2522	if ((phba->hbqs[hbqno].buffer_count + count) >
   2523	    lpfc_hbq_defs[hbqno]->entry_count)
   2524		count = lpfc_hbq_defs[hbqno]->entry_count -
   2525					phba->hbqs[hbqno].buffer_count;
   2526	if (!count)
   2527		return 0;
   2528	/* Allocate HBQ entries */
   2529	for (i = 0; i < count; i++) {
   2530		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
   2531		if (!hbq_buffer)
   2532			break;
   2533		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
   2534	}
   2535	/* Check whether HBQ is still in use */
   2536	spin_lock_irqsave(&phba->hbalock, flags);
   2537	if (!phba->hbq_in_use)
   2538		goto err;
   2539	while (!list_empty(&hbq_buf_list)) {
   2540		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
   2541				 dbuf.list);
   2542		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
   2543				      (hbqno << 16));
   2544		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
   2545			phba->hbqs[hbqno].buffer_count++;
   2546			posted++;
   2547		} else
   2548			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
   2549	}
   2550	spin_unlock_irqrestore(&phba->hbalock, flags);
   2551	return posted;
   2552err:
   2553	spin_unlock_irqrestore(&phba->hbalock, flags);
   2554	while (!list_empty(&hbq_buf_list)) {
   2555		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
   2556				 dbuf.list);
   2557		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
   2558	}
   2559	return 0;
   2560}
   2561
   2562/**
   2563 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
   2564 * @phba: Pointer to HBA context object.
   2565 * @qno: HBQ number.
   2566 *
   2567 * This function posts more buffers to the HBQ. This function
   2568 * is called with no lock held. The function returns the number of HBQ entries
   2569 * successfully posted.
   2570 **/
   2571int
   2572lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
   2573{
   2574	if (phba->sli_rev == LPFC_SLI_REV4)
   2575		return 0;
   2576	else
   2577		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
   2578					 lpfc_hbq_defs[qno]->add_count);
   2579}
   2580
   2581/**
   2582 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
   2583 * @phba: Pointer to HBA context object.
   2584 * @qno:  HBQ queue number.
   2585 *
   2586 * This function is called from SLI initialization code path with
   2587 * no lock held to post initial HBQ buffers to firmware. The
   2588 * function returns the number of HBQ entries successfully posted.
   2589 **/
   2590static int
   2591lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
   2592{
   2593	if (phba->sli_rev == LPFC_SLI_REV4)
   2594		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
   2595					lpfc_hbq_defs[qno]->entry_count);
   2596	else
   2597		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
   2598					 lpfc_hbq_defs[qno]->init_count);
   2599}
   2600
   2601/*
   2602 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
   2603 *
   2604 * This function removes the first hbq buffer on an hbq list and returns a
   2605 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
   2606 **/
   2607static struct hbq_dmabuf *
   2608lpfc_sli_hbqbuf_get(struct list_head *rb_list)
   2609{
   2610	struct lpfc_dmabuf *d_buf;
   2611
   2612	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
   2613	if (!d_buf)
   2614		return NULL;
   2615	return container_of(d_buf, struct hbq_dmabuf, dbuf);
   2616}
   2617
   2618/**
   2619 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
   2620 * @phba: Pointer to HBA context object.
   2621 * @hrq: Pointer to the driver receive queue object.
   2622 *
   2623 * This function removes the first RQ buffer on an RQ buffer list and returns a
   2624 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
   2625 **/
   2626static struct rqb_dmabuf *
   2627lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
   2628{
   2629	struct lpfc_dmabuf *h_buf;
   2630	struct lpfc_rqb *rqbp;
   2631
   2632	rqbp = hrq->rqbp;
   2633	list_remove_head(&rqbp->rqb_buffer_list, h_buf,
   2634			 struct lpfc_dmabuf, list);
   2635	if (!h_buf)
   2636		return NULL;
   2637	rqbp->buffer_count--;
   2638	return container_of(h_buf, struct rqb_dmabuf, hbuf);
   2639}
   2640
   2641/**
   2642 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
   2643 * @phba: Pointer to HBA context object.
   2644 * @tag: Tag of the hbq buffer.
   2645 *
   2646 * This function searches for the hbq buffer associated with the given tag in
   2647 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
   2648 * otherwise it returns NULL.
   2649 **/
   2650static struct hbq_dmabuf *
   2651lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
   2652{
   2653	struct lpfc_dmabuf *d_buf;
   2654	struct hbq_dmabuf *hbq_buf;
   2655	uint32_t hbqno;
   2656
   2657	hbqno = tag >> 16;
   2658	if (hbqno >= LPFC_MAX_HBQS)
   2659		return NULL;
   2660
   2661	spin_lock_irq(&phba->hbalock);
   2662	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
   2663		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
   2664		if (hbq_buf->tag == tag) {
   2665			spin_unlock_irq(&phba->hbalock);
   2666			return hbq_buf;
   2667		}
   2668	}
   2669	spin_unlock_irq(&phba->hbalock);
   2670	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2671			"1803 Bad hbq tag. Data: x%x x%x\n",
   2672			tag, phba->hbqs[tag >> 16].buffer_count);
   2673	return NULL;
   2674}
   2675
   2676/**
   2677 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
   2678 * @phba: Pointer to HBA context object.
   2679 * @hbq_buffer: Pointer to HBQ buffer.
   2680 *
   2681 * This function is called with the hbalock held. This function gives back
   2682 * the hbq buffer to the firmware. If the HBQ does not have space to
   2683 * post the buffer, it will free the buffer.
   2684 **/
   2685void
   2686lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
   2687{
   2688	uint32_t hbqno;
   2689
   2690	if (hbq_buffer) {
   2691		hbqno = hbq_buffer->tag >> 16;
   2692		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
   2693			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
   2694	}
   2695}
   2696
   2697/**
   2698 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
   2699 * @mbxCommand: mailbox command code.
   2700 *
   2701 * This function is called by the mailbox event handler function to verify
   2702 * that the completed mailbox command is a legitimate mailbox command. If the
   2703 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
   2704 * and the mailbox event handler will take the HBA offline.
   2705 **/
   2706static int
   2707lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
   2708{
   2709	uint8_t ret;
   2710
   2711	switch (mbxCommand) {
   2712	case MBX_LOAD_SM:
   2713	case MBX_READ_NV:
   2714	case MBX_WRITE_NV:
   2715	case MBX_WRITE_VPARMS:
   2716	case MBX_RUN_BIU_DIAG:
   2717	case MBX_INIT_LINK:
   2718	case MBX_DOWN_LINK:
   2719	case MBX_CONFIG_LINK:
   2720	case MBX_CONFIG_RING:
   2721	case MBX_RESET_RING:
   2722	case MBX_READ_CONFIG:
   2723	case MBX_READ_RCONFIG:
   2724	case MBX_READ_SPARM:
   2725	case MBX_READ_STATUS:
   2726	case MBX_READ_RPI:
   2727	case MBX_READ_XRI:
   2728	case MBX_READ_REV:
   2729	case MBX_READ_LNK_STAT:
   2730	case MBX_REG_LOGIN:
   2731	case MBX_UNREG_LOGIN:
   2732	case MBX_CLEAR_LA:
   2733	case MBX_DUMP_MEMORY:
   2734	case MBX_DUMP_CONTEXT:
   2735	case MBX_RUN_DIAGS:
   2736	case MBX_RESTART:
   2737	case MBX_UPDATE_CFG:
   2738	case MBX_DOWN_LOAD:
   2739	case MBX_DEL_LD_ENTRY:
   2740	case MBX_RUN_PROGRAM:
   2741	case MBX_SET_MASK:
   2742	case MBX_SET_VARIABLE:
   2743	case MBX_UNREG_D_ID:
   2744	case MBX_KILL_BOARD:
   2745	case MBX_CONFIG_FARP:
   2746	case MBX_BEACON:
   2747	case MBX_LOAD_AREA:
   2748	case MBX_RUN_BIU_DIAG64:
   2749	case MBX_CONFIG_PORT:
   2750	case MBX_READ_SPARM64:
   2751	case MBX_READ_RPI64:
   2752	case MBX_REG_LOGIN64:
   2753	case MBX_READ_TOPOLOGY:
   2754	case MBX_WRITE_WWN:
   2755	case MBX_SET_DEBUG:
   2756	case MBX_LOAD_EXP_ROM:
   2757	case MBX_ASYNCEVT_ENABLE:
   2758	case MBX_REG_VPI:
   2759	case MBX_UNREG_VPI:
   2760	case MBX_HEARTBEAT:
   2761	case MBX_PORT_CAPABILITIES:
   2762	case MBX_PORT_IOV_CONTROL:
   2763	case MBX_SLI4_CONFIG:
   2764	case MBX_SLI4_REQ_FTRS:
   2765	case MBX_REG_FCFI:
   2766	case MBX_UNREG_FCFI:
   2767	case MBX_REG_VFI:
   2768	case MBX_UNREG_VFI:
   2769	case MBX_INIT_VPI:
   2770	case MBX_INIT_VFI:
   2771	case MBX_RESUME_RPI:
   2772	case MBX_READ_EVENT_LOG_STATUS:
   2773	case MBX_READ_EVENT_LOG:
   2774	case MBX_SECURITY_MGMT:
   2775	case MBX_AUTH_PORT:
   2776	case MBX_ACCESS_VDATA:
   2777		ret = mbxCommand;
   2778		break;
   2779	default:
   2780		ret = MBX_SHUTDOWN;
   2781		break;
   2782	}
   2783	return ret;
   2784}
   2785
   2786/**
   2787 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
   2788 * @phba: Pointer to HBA context object.
   2789 * @pmboxq: Pointer to mailbox command.
   2790 *
   2791 * This is the completion handler function for mailbox commands issued from
   2792 * the lpfc_sli_issue_mbox_wait function. This function is called by the
   2793 * mailbox event handler function with no lock held. This function
   2794 * will wake up the thread waiting on the completion pointed to by
   2795 * context3 of the mailbox.
   2796 **/
   2797void
   2798lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
   2799{
   2800	unsigned long drvr_flag;
   2801	struct completion *pmbox_done;
   2802
   2803	/*
   2804	 * If pmbox_done is empty, the driver thread gave up waiting and
   2805	 * continued running.
   2806	 */
   2807	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
   2808	spin_lock_irqsave(&phba->hbalock, drvr_flag);
   2809	pmbox_done = (struct completion *)pmboxq->context3;
   2810	if (pmbox_done)
   2811		complete(pmbox_done);
   2812	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
   2813	return;
   2814}
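
       /*
        * For illustration only: a minimal sketch of the waiting side that
        * pairs with this handler (the real synchronous path is
        * lpfc_sli_issue_mbox_wait); the timeout value is an assumption.
        *
        *	DECLARE_COMPLETION_ONSTACK(mbox_done);
        *
        *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
        *	pmboxq->context3 = &mbox_done;
        *	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
        *	if (rc == MBX_BUSY || rc == MBX_SUCCESS)
        *		wait_for_completion_timeout(&mbox_done,
        *					    msecs_to_jiffies(timeout * 1000));
        */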
   2815
   2816static void
   2817__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
   2818{
   2819	unsigned long iflags;
   2820
   2821	if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
   2822		lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
   2823		spin_lock_irqsave(&ndlp->lock, iflags);
   2824		ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
   2825		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
   2826		spin_unlock_irqrestore(&ndlp->lock, iflags);
   2827	}
   2828	ndlp->nlp_flag &= ~NLP_UNREG_INP;
   2829}
   2830
   2831void
   2832lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
   2833{
   2834	__lpfc_sli_rpi_release(vport, ndlp);
   2835}
   2836
   2837/**
   2838 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
   2839 * @phba: Pointer to HBA context object.
   2840 * @pmb: Pointer to mailbox object.
   2841 *
   2842 * This function is the default mailbox completion handler. It
   2843 * frees the memory resources associated with the completed mailbox
   2844 * command. If the completed command is a REG_LOGIN mailbox command,
   2845 * this function will issue an UNREG_LOGIN to reclaim the RPI.
   2846 **/
   2847void
   2848lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
   2849{
   2850	struct lpfc_vport  *vport = pmb->vport;
   2851	struct lpfc_nodelist *ndlp;
   2852	struct Scsi_Host *shost;
   2853	uint16_t rpi, vpi;
   2854	int rc;
   2855
   2856	/*
   2857	 * If a REG_LOGIN succeeded after the node was destroyed or the node
   2858	 * is in re-discovery, the driver needs to clean up the RPI.
   2859	 */
   2860	if (!(phba->pport->load_flag & FC_UNLOADING) &&
   2861	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
   2862	    !pmb->u.mb.mbxStatus) {
   2863		rpi = pmb->u.mb.un.varWords[0];
   2864		vpi = pmb->u.mb.un.varRegLogin.vpi;
   2865		if (phba->sli_rev == LPFC_SLI_REV4)
   2866			vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
   2867		lpfc_unreg_login(phba, vpi, rpi, pmb);
   2868		pmb->vport = vport;
   2869		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
   2870		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
   2871		if (rc != MBX_NOT_FINISHED)
   2872			return;
   2873	}
   2874
   2875	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
   2876		!(phba->pport->load_flag & FC_UNLOADING) &&
   2877		!pmb->u.mb.mbxStatus) {
   2878		shost = lpfc_shost_from_vport(vport);
   2879		spin_lock_irq(shost->host_lock);
   2880		vport->vpi_state |= LPFC_VPI_REGISTERED;
   2881		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
   2882		spin_unlock_irq(shost->host_lock);
   2883	}
   2884
   2885	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
   2886		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
   2887		lpfc_nlp_put(ndlp);
   2888	}
   2889
   2890	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
   2891		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
   2892
   2893		/* Check to see if there are any deferred events to process */
   2894		if (ndlp) {
   2895			lpfc_printf_vlog(
   2896				vport,
   2897				KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
   2898				"1438 UNREG cmpl deferred mbox x%x "
   2899				"on NPort x%x Data: x%x x%x x%px x%x x%x\n",
   2900				ndlp->nlp_rpi, ndlp->nlp_DID,
   2901				ndlp->nlp_flag, ndlp->nlp_defer_did,
   2902				ndlp, vport->load_flag, kref_read(&ndlp->kref));
   2903
   2904			if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
   2905			    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
   2906				ndlp->nlp_flag &= ~NLP_UNREG_INP;
   2907				ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
   2908				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
   2909			} else {
   2910				__lpfc_sli_rpi_release(vport, ndlp);
   2911			}
   2912
   2913			/* The unreg_login mailbox is complete and had a
   2914			 * reference that has to be released.  The PLOGI
   2915			 * got its own ref.
   2916			 */
   2917			lpfc_nlp_put(ndlp);
   2918			pmb->ctx_ndlp = NULL;
   2919		}
   2920	}
   2921
   2922	/* This nlp_put pairs with lpfc_sli4_resume_rpi */
   2923	if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
   2924		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
   2925		lpfc_nlp_put(ndlp);
   2926	}
   2927
   2928	/* Check security permission status on INIT_LINK mailbox command */
   2929	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
   2930	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
   2931		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2932				"2860 SLI authentication is required "
   2933				"for INIT_LINK but has not been done yet\n");
   2934
   2935	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
   2936		lpfc_sli4_mbox_cmd_free(phba, pmb);
   2937	else
   2938		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
   2939}
   2940 /**
   2941 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
   2942 * @phba: Pointer to HBA context object.
   2943 * @pmb: Pointer to mailbox object.
   2944 *
   2945 * This function is the unreg rpi mailbox completion handler. It
   2946 * frees the memory resources associated with the completed mailbox
   2947 * command. An additional reference is put on the ndlp to prevent
   2948 * lpfc_nlp_release from freeing the RPI bit in the bitmask before
   2949 * the unreg mailbox command completes; this routine puts that
   2950 * reference back.
   2951 *
   2952 **/
   2953void
   2954lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
   2955{
   2956	struct lpfc_vport  *vport = pmb->vport;
   2957	struct lpfc_nodelist *ndlp;
   2958
   2959	ndlp = pmb->ctx_ndlp;
   2960	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
   2961		if (phba->sli_rev == LPFC_SLI_REV4 &&
   2962		    (bf_get(lpfc_sli_intf_if_type,
   2963		     &phba->sli4_hba.sli_intf) >=
   2964		     LPFC_SLI_INTF_IF_TYPE_2)) {
   2965			if (ndlp) {
   2966				lpfc_printf_vlog(
   2967					 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
   2968					 "0010 UNREG_LOGIN vpi:%x "
   2969					 "rpi:%x DID:%x defer x%x flg x%x "
   2970					 "x%px\n",
   2971					 vport->vpi, ndlp->nlp_rpi,
   2972					 ndlp->nlp_DID, ndlp->nlp_defer_did,
   2973					 ndlp->nlp_flag,
   2974					 ndlp);
   2975				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
   2976
   2977				/* Check to see if there are any deferred
   2978				 * events to process
   2979				 */
   2980				if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
   2981				    (ndlp->nlp_defer_did !=
   2982				    NLP_EVT_NOTHING_PENDING)) {
   2983					lpfc_printf_vlog(
   2984						vport, KERN_INFO, LOG_DISCOVERY,
   2985						"4111 UNREG cmpl deferred "
   2986						"clr x%x on "
   2987						"NPort x%x Data: x%x x%px\n",
   2988						ndlp->nlp_rpi, ndlp->nlp_DID,
   2989						ndlp->nlp_defer_did, ndlp);
   2990					ndlp->nlp_flag &= ~NLP_UNREG_INP;
   2991					ndlp->nlp_defer_did =
   2992						NLP_EVT_NOTHING_PENDING;
   2993					lpfc_issue_els_plogi(
   2994						vport, ndlp->nlp_DID, 0);
   2995				} else {
   2996					__lpfc_sli_rpi_release(vport, ndlp);
   2997				}
   2998				lpfc_nlp_put(ndlp);
   2999			}
   3000		}
   3001	}
   3002
   3003	mempool_free(pmb, phba->mbox_mem_pool);
   3004}
   3005
   3006/**
   3007 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
   3008 * @phba: Pointer to HBA context object.
   3009 *
   3010 * This function is called with no lock held. This function processes all
   3011 * the completed mailbox commands and gives them to the upper layers. The interrupt
   3012 * service routine processes mailbox completion interrupt and adds completed
   3013 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
   3014 * Worker thread call lpfc_sli_handle_mb_event, which will return the
   3015 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
   3016 * function returns the mailbox commands to the upper layer by calling the
   3017 * completion handler function of each mailbox.
   3018 **/
   3019int
   3020lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
   3021{
   3022	MAILBOX_t *pmbox;
   3023	LPFC_MBOXQ_t *pmb;
   3024	int rc;
   3025	LIST_HEAD(cmplq);
   3026
   3027	phba->sli.slistat.mbox_event++;
   3028
    3029	/* Get all completed mailbox buffers into the cmplq */
   3030	spin_lock_irq(&phba->hbalock);
   3031	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
   3032	spin_unlock_irq(&phba->hbalock);
   3033
   3034	/* Get a Mailbox buffer to setup mailbox commands for callback */
   3035	do {
   3036		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
   3037		if (pmb == NULL)
   3038			break;
   3039
   3040		pmbox = &pmb->u.mb;
   3041
   3042		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
   3043			if (pmb->vport) {
   3044				lpfc_debugfs_disc_trc(pmb->vport,
   3045					LPFC_DISC_TRC_MBOX_VPORT,
   3046					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
   3047					(uint32_t)pmbox->mbxCommand,
   3048					pmbox->un.varWords[0],
   3049					pmbox->un.varWords[1]);
   3050			}
   3051			else {
   3052				lpfc_debugfs_disc_trc(phba->pport,
   3053					LPFC_DISC_TRC_MBOX,
   3054					"MBOX cmpl:       cmd:x%x mb:x%x x%x",
   3055					(uint32_t)pmbox->mbxCommand,
   3056					pmbox->un.varWords[0],
   3057					pmbox->un.varWords[1]);
   3058			}
   3059		}
   3060
   3061		/*
    3062		 * It is a fatal error if an unknown mbox command completes.
   3063		 */
   3064		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
   3065		    MBX_SHUTDOWN) {
   3066			/* Unknown mailbox command compl */
   3067			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3068					"(%d):0323 Unknown Mailbox command "
   3069					"x%x (x%x/x%x) Cmpl\n",
   3070					pmb->vport ? pmb->vport->vpi :
   3071					LPFC_VPORT_UNKNOWN,
   3072					pmbox->mbxCommand,
   3073					lpfc_sli_config_mbox_subsys_get(phba,
   3074									pmb),
   3075					lpfc_sli_config_mbox_opcode_get(phba,
   3076									pmb));
   3077			phba->link_state = LPFC_HBA_ERROR;
   3078			phba->work_hs = HS_FFER3;
   3079			lpfc_handle_eratt(phba);
   3080			continue;
   3081		}
   3082
   3083		if (pmbox->mbxStatus) {
   3084			phba->sli.slistat.mbox_stat_err++;
   3085			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
   3086				/* Mbox cmd cmpl error - RETRYing */
   3087				lpfc_printf_log(phba, KERN_INFO,
   3088					LOG_MBOX | LOG_SLI,
   3089					"(%d):0305 Mbox cmd cmpl "
   3090					"error - RETRYing Data: x%x "
   3091					"(x%x/x%x) x%x x%x x%x\n",
   3092					pmb->vport ? pmb->vport->vpi :
   3093					LPFC_VPORT_UNKNOWN,
   3094					pmbox->mbxCommand,
   3095					lpfc_sli_config_mbox_subsys_get(phba,
   3096									pmb),
   3097					lpfc_sli_config_mbox_opcode_get(phba,
   3098									pmb),
   3099					pmbox->mbxStatus,
   3100					pmbox->un.varWords[0],
   3101					pmb->vport ? pmb->vport->port_state :
   3102					LPFC_VPORT_UNKNOWN);
   3103				pmbox->mbxStatus = 0;
   3104				pmbox->mbxOwner = OWN_HOST;
   3105				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
   3106				if (rc != MBX_NOT_FINISHED)
   3107					continue;
   3108			}
   3109		}
   3110
   3111		/* Mailbox cmd <cmd> Cmpl <cmpl> */
   3112		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
   3113				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
   3114				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
   3115				"x%x x%x x%x\n",
   3116				pmb->vport ? pmb->vport->vpi : 0,
   3117				pmbox->mbxCommand,
   3118				lpfc_sli_config_mbox_subsys_get(phba, pmb),
   3119				lpfc_sli_config_mbox_opcode_get(phba, pmb),
   3120				pmb->mbox_cmpl,
   3121				*((uint32_t *) pmbox),
   3122				pmbox->un.varWords[0],
   3123				pmbox->un.varWords[1],
   3124				pmbox->un.varWords[2],
   3125				pmbox->un.varWords[3],
   3126				pmbox->un.varWords[4],
   3127				pmbox->un.varWords[5],
   3128				pmbox->un.varWords[6],
   3129				pmbox->un.varWords[7],
   3130				pmbox->un.varWords[8],
   3131				pmbox->un.varWords[9],
   3132				pmbox->un.varWords[10]);
   3133
   3134		if (pmb->mbox_cmpl)
    3135			pmb->mbox_cmpl(phba, pmb);
   3136	} while (1);
   3137	return 0;
   3138}
   3139
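/*
 * Editorial sketch (not part of the driver source): a minimal illustration,
 * using only the conventions visible above, of how a caller typically pairs
 * with lpfc_sli_handle_mb_event().  A mailbox command is issued with
 * MBX_NOWAIT together with a completion handler; the ISR later queues the
 * completed command on mboxq_cmpl and the worker thread invokes the handler
 * from lpfc_sli_handle_mb_event().  The helper names are hypothetical and
 * the command-specific mailbox setup is elided.
 */
#if 0	/* illustration only */
static void example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	/* Runs in worker-thread context via lpfc_sli_handle_mb_event() */
	mempool_free(pmb, phba->mbox_mem_pool);
}

static int example_issue_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	memset(pmb, 0, sizeof(*pmb));
	/* ... fill in the mailbox command words here ... */
	pmb->vport = phba->pport;
	pmb->mbox_cmpl = example_mbox_cmpl;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
#endif
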
   3140/**
   3141 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
   3142 * @phba: Pointer to HBA context object.
   3143 * @pring: Pointer to driver SLI ring object.
   3144 * @tag: buffer tag.
   3145 *
    3146 * This function is called with no lock held. When the QUE_BUFTAG_BIT is
    3147 * set in the tag, the buffer was posted for a particular exchange and
    3148 * the function returns it without posting a replacement buffer.
   3149 * If the buffer is for unsolicited ELS or CT traffic, this function
   3150 * returns the buffer and also posts another buffer to the firmware.
   3151 **/
   3152static struct lpfc_dmabuf *
   3153lpfc_sli_get_buff(struct lpfc_hba *phba,
   3154		  struct lpfc_sli_ring *pring,
   3155		  uint32_t tag)
   3156{
   3157	struct hbq_dmabuf *hbq_entry;
   3158
   3159	if (tag & QUE_BUFTAG_BIT)
   3160		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
   3161	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
   3162	if (!hbq_entry)
   3163		return NULL;
   3164	return &hbq_entry->dbuf;
   3165}
   3166
   3167/**
   3168 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
   3169 *                              containing a NVME LS request.
   3170 * @phba: pointer to lpfc hba data structure.
   3171 * @piocb: pointer to the iocbq struct representing the sequence starting
   3172 *        frame.
   3173 *
   3174 * This routine initially validates the NVME LS, validates there is a login
   3175 * with the port that sent the LS, and then calls the appropriate nvme host
   3176 * or target LS request handler.
   3177 **/
   3178static void
   3179lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
   3180{
   3181	struct lpfc_nodelist *ndlp;
   3182	struct lpfc_dmabuf *d_buf;
   3183	struct hbq_dmabuf *nvmebuf;
   3184	struct fc_frame_header *fc_hdr;
   3185	struct lpfc_async_xchg_ctx *axchg = NULL;
   3186	char *failwhy = NULL;
   3187	uint32_t oxid, sid, did, fctl, size;
   3188	int ret = 1;
   3189
   3190	d_buf = piocb->cmd_dmabuf;
   3191
   3192	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
   3193	fc_hdr = nvmebuf->hbuf.virt;
   3194	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
   3195	sid = sli4_sid_from_fc_hdr(fc_hdr);
   3196	did = sli4_did_from_fc_hdr(fc_hdr);
   3197	fctl = (fc_hdr->fh_f_ctl[0] << 16 |
   3198		fc_hdr->fh_f_ctl[1] << 8 |
   3199		fc_hdr->fh_f_ctl[2]);
   3200	size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
   3201
   3202	lpfc_nvmeio_data(phba, "NVME LS    RCV: xri x%x sz %d from %06x\n",
   3203			 oxid, size, sid);
   3204
   3205	if (phba->pport->load_flag & FC_UNLOADING) {
   3206		failwhy = "Driver Unloading";
   3207	} else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
   3208		failwhy = "NVME FC4 Disabled";
   3209	} else if (!phba->nvmet_support && !phba->pport->localport) {
   3210		failwhy = "No Localport";
   3211	} else if (phba->nvmet_support && !phba->targetport) {
   3212		failwhy = "No Targetport";
   3213	} else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
   3214		failwhy = "Bad NVME LS R_CTL";
   3215	} else if (unlikely((fctl & 0x00FF0000) !=
   3216			(FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
   3217		failwhy = "Bad NVME LS F_CTL";
   3218	} else {
   3219		axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
   3220		if (!axchg)
   3221			failwhy = "No CTX memory";
   3222	}
   3223
   3224	if (unlikely(failwhy)) {
   3225		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3226				"6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
   3227				sid, oxid, failwhy);
   3228		goto out_fail;
   3229	}
   3230
   3231	/* validate the source of the LS is logged in */
   3232	ndlp = lpfc_findnode_did(phba->pport, sid);
   3233	if (!ndlp ||
   3234	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
   3235	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
   3236		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
   3237				"6216 NVME Unsol rcv: No ndlp: "
   3238				"NPort_ID x%x oxid x%x\n",
   3239				sid, oxid);
   3240		goto out_fail;
   3241	}
   3242
   3243	axchg->phba = phba;
   3244	axchg->ndlp = ndlp;
   3245	axchg->size = size;
   3246	axchg->oxid = oxid;
   3247	axchg->sid = sid;
   3248	axchg->wqeq = NULL;
   3249	axchg->state = LPFC_NVME_STE_LS_RCV;
   3250	axchg->entry_cnt = 1;
   3251	axchg->rqb_buffer = (void *)nvmebuf;
   3252	axchg->hdwq = &phba->sli4_hba.hdwq[0];
   3253	axchg->payload = nvmebuf->dbuf.virt;
   3254	INIT_LIST_HEAD(&axchg->list);
   3255
   3256	if (phba->nvmet_support) {
   3257		ret = lpfc_nvmet_handle_lsreq(phba, axchg);
   3258		spin_lock_irq(&ndlp->lock);
   3259		if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
   3260			ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
   3261			spin_unlock_irq(&ndlp->lock);
   3262
   3263			/* This reference is a single occurrence to hold the
   3264			 * node valid until the nvmet transport calls
   3265			 * host_release.
   3266			 */
   3267			if (!lpfc_nlp_get(ndlp))
   3268				goto out_fail;
   3269
   3270			lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
   3271					"6206 NVMET unsol ls_req ndlp x%px "
   3272					"DID x%x xflags x%x refcnt %d\n",
   3273					ndlp, ndlp->nlp_DID,
   3274					ndlp->fc4_xpt_flags,
   3275					kref_read(&ndlp->kref));
   3276		} else {
   3277			spin_unlock_irq(&ndlp->lock);
   3278		}
   3279	} else {
   3280		ret = lpfc_nvme_handle_lsreq(phba, axchg);
   3281	}
   3282
   3283	/* if zero, LS was successfully handled. If non-zero, LS not handled */
   3284	if (!ret)
   3285		return;
   3286
   3287out_fail:
   3288	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3289			"6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
   3290			"NVMe%s handler failed %d\n",
   3291			did, sid, oxid,
   3292			(phba->nvmet_support) ? "T" : "I", ret);
   3293
   3294	/* recycle receive buffer */
   3295	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
   3296
   3297	/* If start of new exchange, abort it */
   3298	if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
   3299		ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
   3300
   3301	if (ret)
   3302		kfree(axchg);
   3303}
   3304
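/*
 * Editorial note (not part of the driver source): the F_CTL test above masks
 * bits 16-23, i.e. the first byte of the three-byte fh_f_ctl field, and
 * requires exactly FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT there.
 * A minimal sketch of the same check as a hypothetical helper:
 */
#if 0	/* illustration only */
static inline bool example_nvme_ls_fctl_ok(u32 fctl)
{
	return (fctl & 0x00FF0000) ==
	       (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT);
}
#endif
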
   3305/**
   3306 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
   3307 * @phba: Pointer to HBA context object.
   3308 * @pring: Pointer to driver SLI ring object.
   3309 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
   3310 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
   3311 * @fch_type: the type for the first frame of the sequence.
   3312 *
   3313 * This function is called with no lock held. This function uses the r_ctl and
   3314 * type of the received sequence to find the correct callback function to call
   3315 * to process the sequence.
   3316 **/
   3317static int
   3318lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
   3319			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
   3320			 uint32_t fch_type)
   3321{
   3322	int i;
   3323
   3324	switch (fch_type) {
   3325	case FC_TYPE_NVME:
   3326		lpfc_nvme_unsol_ls_handler(phba, saveq);
   3327		return 1;
   3328	default:
   3329		break;
   3330	}
   3331
    3332	/* Unsolicited Responses */
   3333	if (pring->prt[0].profile) {
   3334		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
   3335			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
   3336									saveq);
   3337		return 1;
   3338	}
   3339	/* We must search, based on rctl / type
   3340	   for the right routine */
   3341	for (i = 0; i < pring->num_mask; i++) {
   3342		if ((pring->prt[i].rctl == fch_r_ctl) &&
   3343		    (pring->prt[i].type == fch_type)) {
   3344			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
   3345				(pring->prt[i].lpfc_sli_rcv_unsol_event)
   3346						(phba, pring, saveq);
   3347			return 1;
   3348		}
   3349	}
   3350	return 0;
   3351}
   3352
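/*
 * Editorial sketch (not part of the driver source): the rctl/type dispatch
 * in lpfc_complete_unsol_iocb() walks the prt[] mask table that ring setup
 * fills in elsewhere in the driver.  A hypothetical entry routing
 * unsolicited ELS frames could look roughly like this; the handler
 * signature matches the calls made above.
 */
#if 0	/* illustration only */
static void example_rcv_els(struct lpfc_hba *phba,
			    struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	/* consume saveq->cmd_dmabuf / saveq->bpl_dmabuf here */
}

static void example_setup_els_mask(struct lpfc_sli_ring *pring)
{
	pring->num_mask = 1;
	/* profile 0: match on rctl/type; non-zero: prt[0] takes everything */
	pring->prt[0].profile = 0;
	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
	pring->prt[0].type = FC_TYPE_ELS;
	pring->prt[0].lpfc_sli_rcv_unsol_event = example_rcv_els;
}
#endif
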
   3353static void
   3354lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
   3355			struct lpfc_iocbq *saveq)
   3356{
   3357	IOCB_t *irsp;
   3358	union lpfc_wqe128 *wqe;
   3359	u16 i = 0;
   3360
   3361	irsp = &saveq->iocb;
   3362	wqe = &saveq->wqe;
   3363
   3364	/* Fill wcqe with the IOCB status fields */
   3365	bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
   3366	saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
   3367	saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
   3368	saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;
   3369
   3370	/* Source ID */
   3371	bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);
   3372
   3373	/* rx-id of the response frame */
   3374	bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);
   3375
   3376	/* ox-id of the frame */
   3377	bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
   3378	       irsp->unsli3.rcvsli3.ox_id);
   3379
   3380	/* DID */
   3381	bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
   3382	       irsp->un.rcvels.remoteID);
   3383
   3384	/* unsol data len */
   3385	for (i = 0; i < irsp->ulpBdeCount; i++) {
   3386		struct lpfc_hbq_entry *hbqe = NULL;
   3387
   3388		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
   3389			if (i == 0) {
   3390				hbqe = (struct lpfc_hbq_entry *)
   3391					&irsp->un.ulpWord[0];
   3392				saveq->wqe.gen_req.bde.tus.f.bdeSize =
   3393					hbqe->bde.tus.f.bdeSize;
   3394			} else if (i == 1) {
   3395				hbqe = (struct lpfc_hbq_entry *)
   3396					&irsp->unsli3.sli3Words[4];
   3397				saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
   3398			}
   3399		}
   3400	}
   3401}
   3402
   3403/**
   3404 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
   3405 * @phba: Pointer to HBA context object.
   3406 * @pring: Pointer to driver SLI ring object.
   3407 * @saveq: Pointer to the unsolicited iocb.
   3408 *
   3409 * This function is called with no lock held by the ring event handler
   3410 * when there is an unsolicited iocb posted to the response ring by the
    3411 * firmware. This function gets the buffers associated with the iocbs
   3412 * and calls the event handler for the ring. This function handles both
   3413 * qring buffers and hbq buffers.
    3414 * When the function returns 1, the caller can free the iocb object;
    3415 * otherwise, upper layer functions will free the iocb objects.
   3416 **/
   3417static int
   3418lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
   3419			    struct lpfc_iocbq *saveq)
   3420{
   3421	IOCB_t           * irsp;
   3422	WORD5            * w5p;
   3423	dma_addr_t	 paddr;
   3424	uint32_t           Rctl, Type;
   3425	struct lpfc_iocbq *iocbq;
   3426	struct lpfc_dmabuf *dmzbuf;
   3427
   3428	irsp = &saveq->iocb;
   3429	saveq->vport = phba->pport;
   3430
   3431	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
   3432		if (pring->lpfc_sli_rcv_async_status)
   3433			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
   3434		else
   3435			lpfc_printf_log(phba,
   3436					KERN_WARNING,
   3437					LOG_SLI,
   3438					"0316 Ring %d handler: unexpected "
   3439					"ASYNC_STATUS iocb received evt_code "
   3440					"0x%x\n",
   3441					pring->ringno,
   3442					irsp->un.asyncstat.evt_code);
   3443		return 1;
   3444	}
   3445
   3446	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
   3447	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
   3448		if (irsp->ulpBdeCount > 0) {
   3449			dmzbuf = lpfc_sli_get_buff(phba, pring,
   3450						   irsp->un.ulpWord[3]);
   3451			lpfc_in_buf_free(phba, dmzbuf);
   3452		}
   3453
   3454		if (irsp->ulpBdeCount > 1) {
   3455			dmzbuf = lpfc_sli_get_buff(phba, pring,
   3456						   irsp->unsli3.sli3Words[3]);
   3457			lpfc_in_buf_free(phba, dmzbuf);
   3458		}
   3459
   3460		if (irsp->ulpBdeCount > 2) {
   3461			dmzbuf = lpfc_sli_get_buff(phba, pring,
   3462						   irsp->unsli3.sli3Words[7]);
   3463			lpfc_in_buf_free(phba, dmzbuf);
   3464		}
   3465
   3466		return 1;
   3467	}
   3468
   3469	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
   3470		if (irsp->ulpBdeCount != 0) {
   3471			saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring,
   3472						irsp->un.ulpWord[3]);
   3473			if (!saveq->cmd_dmabuf)
   3474				lpfc_printf_log(phba,
   3475					KERN_ERR,
   3476					LOG_SLI,
   3477					"0341 Ring %d Cannot find buffer for "
   3478					"an unsolicited iocb. tag 0x%x\n",
   3479					pring->ringno,
   3480					irsp->un.ulpWord[3]);
   3481		}
   3482		if (irsp->ulpBdeCount == 2) {
   3483			saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring,
   3484						irsp->unsli3.sli3Words[7]);
   3485			if (!saveq->bpl_dmabuf)
   3486				lpfc_printf_log(phba,
   3487					KERN_ERR,
   3488					LOG_SLI,
   3489					"0342 Ring %d Cannot find buffer for an"
   3490					" unsolicited iocb. tag 0x%x\n",
   3491					pring->ringno,
   3492					irsp->unsli3.sli3Words[7]);
   3493		}
   3494		list_for_each_entry(iocbq, &saveq->list, list) {
   3495			irsp = &iocbq->iocb;
   3496			if (irsp->ulpBdeCount != 0) {
   3497				iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba,
   3498							pring,
   3499							irsp->un.ulpWord[3]);
   3500				if (!iocbq->cmd_dmabuf)
   3501					lpfc_printf_log(phba,
   3502						KERN_ERR,
   3503						LOG_SLI,
   3504						"0343 Ring %d Cannot find "
   3505						"buffer for an unsolicited iocb"
   3506						". tag 0x%x\n", pring->ringno,
   3507						irsp->un.ulpWord[3]);
   3508			}
   3509			if (irsp->ulpBdeCount == 2) {
   3510				iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba,
   3511						pring,
   3512						irsp->unsli3.sli3Words[7]);
   3513				if (!iocbq->bpl_dmabuf)
   3514					lpfc_printf_log(phba,
   3515						KERN_ERR,
   3516						LOG_SLI,
   3517						"0344 Ring %d Cannot find "
   3518						"buffer for an unsolicited "
   3519						"iocb. tag 0x%x\n",
   3520						pring->ringno,
   3521						irsp->unsli3.sli3Words[7]);
   3522			}
   3523		}
   3524	} else {
   3525		paddr = getPaddr(irsp->un.cont64[0].addrHigh,
   3526				 irsp->un.cont64[0].addrLow);
   3527		saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
   3528							     paddr);
   3529		if (irsp->ulpBdeCount == 2) {
   3530			paddr = getPaddr(irsp->un.cont64[1].addrHigh,
   3531					 irsp->un.cont64[1].addrLow);
   3532			saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
   3533								   pring,
   3534								   paddr);
   3535		}
   3536	}
   3537
   3538	if (irsp->ulpBdeCount != 0 &&
   3539	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
   3540	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
   3541		int found = 0;
   3542
   3543		/* search continue save q for same XRI */
   3544		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
   3545			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
   3546				saveq->iocb.unsli3.rcvsli3.ox_id) {
   3547				list_add_tail(&saveq->list, &iocbq->list);
   3548				found = 1;
   3549				break;
   3550			}
   3551		}
   3552		if (!found)
   3553			list_add_tail(&saveq->clist,
   3554				      &pring->iocb_continue_saveq);
   3555
   3556		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
   3557			list_del_init(&iocbq->clist);
   3558			saveq = iocbq;
   3559			irsp = &saveq->iocb;
   3560		} else {
   3561			return 0;
   3562		}
   3563	}
   3564	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
   3565	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
   3566	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
   3567		Rctl = FC_RCTL_ELS_REQ;
   3568		Type = FC_TYPE_ELS;
   3569	} else {
   3570		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
   3571		Rctl = w5p->hcsw.Rctl;
   3572		Type = w5p->hcsw.Type;
   3573
   3574		/* Firmware Workaround */
   3575		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
   3576			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
   3577			 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
   3578			Rctl = FC_RCTL_ELS_REQ;
   3579			Type = FC_TYPE_ELS;
   3580			w5p->hcsw.Rctl = Rctl;
   3581			w5p->hcsw.Type = Type;
   3582		}
   3583	}
   3584
   3585	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
   3586	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
   3587	    irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
   3588		if (irsp->unsli3.rcvsli3.vpi == 0xffff)
   3589			saveq->vport = phba->pport;
   3590		else
   3591			saveq->vport = lpfc_find_vport_by_vpid(phba,
   3592					       irsp->unsli3.rcvsli3.vpi);
   3593	}
   3594
   3595	/* Prepare WQE with Unsol frame */
   3596	lpfc_sli_prep_unsol_wqe(phba, saveq);
   3597
   3598	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
   3599		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
   3600				"0313 Ring %d handler: unexpected Rctl x%x "
   3601				"Type x%x received\n",
   3602				pring->ringno, Rctl, Type);
   3603
   3604	return 1;
   3605}
   3606
   3607/**
   3608 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
   3609 * @phba: Pointer to HBA context object.
   3610 * @pring: Pointer to driver SLI ring object.
   3611 * @prspiocb: Pointer to response iocb object.
   3612 *
   3613 * This function looks up the iocb_lookup table to get the command iocb
   3614 * corresponding to the given response iocb using the iotag of the
   3615 * response iocb. The driver calls this function with the hbalock held
   3616 * for SLI3 ports or the ring lock held for SLI4 ports.
   3617 * This function returns the command iocb object if it finds the command
   3618 * iocb else returns NULL.
   3619 **/
   3620static struct lpfc_iocbq *
   3621lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
   3622		      struct lpfc_sli_ring *pring,
   3623		      struct lpfc_iocbq *prspiocb)
   3624{
   3625	struct lpfc_iocbq *cmd_iocb = NULL;
   3626	u16 iotag;
   3627
   3628	if (phba->sli_rev == LPFC_SLI_REV4)
   3629		iotag = get_wqe_reqtag(prspiocb);
   3630	else
   3631		iotag = prspiocb->iocb.ulpIoTag;
   3632
   3633	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
   3634		cmd_iocb = phba->sli.iocbq_lookup[iotag];
   3635		if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
   3636			/* remove from txcmpl queue list */
   3637			list_del_init(&cmd_iocb->list);
   3638			cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
   3639			pring->txcmplq_cnt--;
   3640			return cmd_iocb;
   3641		}
   3642	}
   3643
   3644	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3645			"0317 iotag x%x is out of "
   3646			"range: max iotag x%x\n",
   3647			iotag, phba->sli.last_iotag);
   3648	return NULL;
   3649}
   3650
   3651/**
   3652 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
   3653 * @phba: Pointer to HBA context object.
   3654 * @pring: Pointer to driver SLI ring object.
   3655 * @iotag: IOCB tag.
   3656 *
   3657 * This function looks up the iocb_lookup table to get the command iocb
   3658 * corresponding to the given iotag. The driver calls this function with
   3659 * the ring lock held because this function is an SLI4 port only helper.
   3660 * This function returns the command iocb object if it finds the command
   3661 * iocb else returns NULL.
   3662 **/
   3663static struct lpfc_iocbq *
   3664lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
   3665			     struct lpfc_sli_ring *pring, uint16_t iotag)
   3666{
   3667	struct lpfc_iocbq *cmd_iocb = NULL;
   3668
   3669	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
   3670		cmd_iocb = phba->sli.iocbq_lookup[iotag];
   3671		if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
   3672			/* remove from txcmpl queue list */
   3673			list_del_init(&cmd_iocb->list);
   3674			cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
   3675			pring->txcmplq_cnt--;
   3676			return cmd_iocb;
   3677		}
   3678	}
   3679
   3680	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3681			"0372 iotag x%x lookup error: max iotag (x%x) "
   3682			"cmd_flag x%x\n",
   3683			iotag, phba->sli.last_iotag,
   3684			cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
   3685	return NULL;
   3686}
   3687
   3688/**
   3689 * lpfc_sli_process_sol_iocb - process solicited iocb completion
   3690 * @phba: Pointer to HBA context object.
   3691 * @pring: Pointer to driver SLI ring object.
   3692 * @saveq: Pointer to the response iocb to be processed.
   3693 *
   3694 * This function is called by the ring event handler for non-fcp
   3695 * rings when there is a new response iocb in the response ring.
   3696 * The caller is not required to hold any locks. This function
   3697 * gets the command iocb associated with the response iocb and
   3698 * calls the completion handler for the command iocb. If there
   3699 * is no completion handler, the function will free the resources
   3700 * associated with command iocb. If the response iocb is for
   3701 * an already aborted command iocb, the status of the completion
   3702 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
   3703 * This function always returns 1.
   3704 **/
   3705static int
   3706lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
   3707			  struct lpfc_iocbq *saveq)
   3708{
   3709	struct lpfc_iocbq *cmdiocbp;
   3710	unsigned long iflag;
   3711	u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
   3712
   3713	if (phba->sli_rev == LPFC_SLI_REV4)
   3714		spin_lock_irqsave(&pring->ring_lock, iflag);
   3715	else
   3716		spin_lock_irqsave(&phba->hbalock, iflag);
   3717	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
   3718	if (phba->sli_rev == LPFC_SLI_REV4)
   3719		spin_unlock_irqrestore(&pring->ring_lock, iflag);
   3720	else
   3721		spin_unlock_irqrestore(&phba->hbalock, iflag);
   3722
   3723	ulp_command = get_job_cmnd(phba, saveq);
   3724	ulp_status = get_job_ulpstatus(phba, saveq);
   3725	ulp_word4 = get_job_word4(phba, saveq);
   3726	ulp_context = get_job_ulpcontext(phba, saveq);
   3727	if (phba->sli_rev == LPFC_SLI_REV4)
   3728		iotag = get_wqe_reqtag(saveq);
   3729	else
   3730		iotag = saveq->iocb.ulpIoTag;
   3731
   3732	if (cmdiocbp) {
   3733		ulp_command = get_job_cmnd(phba, cmdiocbp);
   3734		if (cmdiocbp->cmd_cmpl) {
   3735			/*
    3736			 * If an ELS command failed, send an event to mgmt
   3737			 * application.
   3738			 */
   3739			if (ulp_status &&
   3740			     (pring->ringno == LPFC_ELS_RING) &&
   3741			     (ulp_command == CMD_ELS_REQUEST64_CR))
   3742				lpfc_send_els_failure_event(phba,
   3743					cmdiocbp, saveq);
   3744
   3745			/*
   3746			 * Post all ELS completions to the worker thread.
    3747			 * All others are passed to the completion callback.
   3748			 */
   3749			if (pring->ringno == LPFC_ELS_RING) {
   3750				if ((phba->sli_rev < LPFC_SLI_REV4) &&
   3751				    (cmdiocbp->cmd_flag &
   3752							LPFC_DRIVER_ABORTED)) {
   3753					spin_lock_irqsave(&phba->hbalock,
   3754							  iflag);
   3755					cmdiocbp->cmd_flag &=
   3756						~LPFC_DRIVER_ABORTED;
   3757					spin_unlock_irqrestore(&phba->hbalock,
   3758							       iflag);
   3759					saveq->iocb.ulpStatus =
   3760						IOSTAT_LOCAL_REJECT;
   3761					saveq->iocb.un.ulpWord[4] =
   3762						IOERR_SLI_ABORTED;
   3763
    3764					/* Firmware could still be DMAing the
    3765					 * payload, so don't free the data
    3766					 * buffer until after a heartbeat.
   3767					 */
   3768					spin_lock_irqsave(&phba->hbalock,
   3769							  iflag);
   3770					saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
   3771					spin_unlock_irqrestore(&phba->hbalock,
   3772							       iflag);
   3773				}
   3774				if (phba->sli_rev == LPFC_SLI_REV4) {
   3775					if (saveq->cmd_flag &
   3776					    LPFC_EXCHANGE_BUSY) {
   3777						/* Set cmdiocb flag for the
   3778						 * exchange busy so sgl (xri)
   3779						 * will not be released until
   3780						 * the abort xri is received
   3781						 * from hba.
   3782						 */
   3783						spin_lock_irqsave(
   3784							&phba->hbalock, iflag);
   3785						cmdiocbp->cmd_flag |=
   3786							LPFC_EXCHANGE_BUSY;
   3787						spin_unlock_irqrestore(
   3788							&phba->hbalock, iflag);
   3789					}
   3790					if (cmdiocbp->cmd_flag &
   3791					    LPFC_DRIVER_ABORTED) {
   3792						/*
   3793						 * Clear LPFC_DRIVER_ABORTED
   3794						 * bit in case it was driver
   3795						 * initiated abort.
   3796						 */
   3797						spin_lock_irqsave(
   3798							&phba->hbalock, iflag);
   3799						cmdiocbp->cmd_flag &=
   3800							~LPFC_DRIVER_ABORTED;
   3801						spin_unlock_irqrestore(
   3802							&phba->hbalock, iflag);
   3803						set_job_ulpstatus(cmdiocbp,
   3804								  IOSTAT_LOCAL_REJECT);
   3805						set_job_ulpword4(cmdiocbp,
   3806								 IOERR_ABORT_REQUESTED);
   3807						/*
   3808						 * For SLI4, irspiocb contains
    3809						 * NO_XRI in sli_xritag; it
    3810						 * shall not affect the sgl
    3811						 * (xri) release process.
   3812						 */
   3813						set_job_ulpstatus(saveq,
   3814								  IOSTAT_LOCAL_REJECT);
   3815						set_job_ulpword4(saveq,
   3816								 IOERR_SLI_ABORTED);
   3817						spin_lock_irqsave(
   3818							&phba->hbalock, iflag);
   3819						saveq->cmd_flag |=
   3820							LPFC_DELAY_MEM_FREE;
   3821						spin_unlock_irqrestore(
   3822							&phba->hbalock, iflag);
   3823					}
   3824				}
   3825			}
   3826			cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
   3827		} else
   3828			lpfc_sli_release_iocbq(phba, cmdiocbp);
   3829	} else {
   3830		/*
   3831		 * Unknown initiating command based on the response iotag.
   3832		 * This could be the case on the ELS ring because of
   3833		 * lpfc_els_abort().
   3834		 */
   3835		if (pring->ringno != LPFC_ELS_RING) {
   3836			/*
   3837			 * Ring <ringno> handler: unexpected completion IoTag
   3838			 * <IoTag>
   3839			 */
   3840			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
   3841					 "0322 Ring %d handler: "
   3842					 "unexpected completion IoTag x%x "
   3843					 "Data: x%x x%x x%x x%x\n",
   3844					 pring->ringno, iotag, ulp_status,
   3845					 ulp_word4, ulp_command, ulp_context);
   3846		}
   3847	}
   3848
   3849	return 1;
   3850}
   3851
   3852/**
   3853 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
   3854 * @phba: Pointer to HBA context object.
   3855 * @pring: Pointer to driver SLI ring object.
   3856 *
   3857 * This function is called from the iocb ring event handlers when
    3858 * the put pointer is ahead of the get pointer for a ring. It signals
    3859 * an error attention condition to the worker thread, and the worker
    3860 * thread will transition the HBA to the offline state.
   3861 **/
   3862static void
   3863lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
   3864{
   3865	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
   3866	/*
   3867	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
   3868	 * rsp ring <portRspMax>
   3869	 */
   3870	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   3871			"0312 Ring %d handler: portRspPut %d "
   3872			"is bigger than rsp ring %d\n",
   3873			pring->ringno, le32_to_cpu(pgp->rspPutInx),
   3874			pring->sli.sli3.numRiocb);
   3875
   3876	phba->link_state = LPFC_HBA_ERROR;
   3877
   3878	/*
   3879	 * All error attention handlers are posted to
   3880	 * worker thread
   3881	 */
   3882	phba->work_ha |= HA_ERATT;
   3883	phba->work_hs = HS_FFER3;
   3884
   3885	lpfc_worker_wake_up(phba);
   3886
   3887	return;
   3888}
   3889
   3890/**
   3891 * lpfc_poll_eratt - Error attention polling timer timeout handler
   3892 * @t: Context to fetch pointer to address of HBA context object from.
   3893 *
   3894 * This function is invoked by the Error Attention polling timer when the
   3895 * timer times out. It will check the SLI Error Attention register for
   3896 * possible attention events. If so, it will post an Error Attention event
   3897 * and wake up worker thread to process it. Otherwise, it will set up the
   3898 * Error Attention polling timer for the next poll.
   3899 **/
   3900void lpfc_poll_eratt(struct timer_list *t)
   3901{
   3902	struct lpfc_hba *phba;
   3903	uint32_t eratt = 0;
   3904	uint64_t sli_intr, cnt;
   3905
   3906	phba = from_timer(phba, t, eratt_poll);
   3907
   3908	/* Here we will also keep track of interrupts per sec of the hba */
   3909	sli_intr = phba->sli.slistat.sli_intr;
   3910
   3911	if (phba->sli.slistat.sli_prev_intr > sli_intr)
   3912		cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
   3913			sli_intr);
   3914	else
   3915		cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
   3916
   3917	/* 64-bit integer division not supported on 32-bit x86 - use do_div */
   3918	do_div(cnt, phba->eratt_poll_interval);
   3919	phba->sli.slistat.sli_ips = cnt;
   3920
   3921	phba->sli.slistat.sli_prev_intr = sli_intr;
   3922
   3923	/* Check chip HA register for error event */
   3924	eratt = lpfc_sli_check_eratt(phba);
   3925
   3926	if (eratt)
   3927		/* Tell the worker thread there is work to do */
   3928		lpfc_worker_wake_up(phba);
   3929	else
   3930		/* Restart the timer for next eratt poll */
   3931		mod_timer(&phba->eratt_poll,
   3932			  jiffies +
   3933			  msecs_to_jiffies(1000 * phba->eratt_poll_interval));
   3934	return;
   3935}
   3936
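/*
 * Editorial note (not part of the driver source): do_div() is used above
 * because a plain 64-by-32 division would pull in __udivdi3 on 32-bit
 * builds.  do_div(n, base) divides n in place and returns the remainder,
 * so the interrupts-per-second computation reduces to a sketch like:
 */
#if 0	/* illustration only */
static u64 example_interrupts_per_sec(u64 intr_delta, u32 poll_interval)
{
	u64 cnt = intr_delta;

	do_div(cnt, poll_interval);	/* cnt = intr_delta / poll_interval */
	return cnt;
}
#endif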
   3937
   3938/**
   3939 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
   3940 * @phba: Pointer to HBA context object.
   3941 * @pring: Pointer to driver SLI ring object.
   3942 * @mask: Host attention register mask for this ring.
   3943 *
   3944 * This function is called from the interrupt context when there is a ring
   3945 * event for the fcp ring. The caller does not hold any lock.
   3946 * The function processes each response iocb in the response ring until it
   3947 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
   3948 * LE bit set. The function will call the completion handler of the command iocb
   3949 * if the response iocb indicates a completion for a command iocb or it is
   3950 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
   3951 * function if this is an unsolicited iocb.
   3952 * This routine presumes LPFC_FCP_RING handling and doesn't bother
   3953 * to check it explicitly.
   3954 */
   3955int
   3956lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
   3957				struct lpfc_sli_ring *pring, uint32_t mask)
   3958{
   3959	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
   3960	IOCB_t *irsp = NULL;
   3961	IOCB_t *entry = NULL;
   3962	struct lpfc_iocbq *cmdiocbq = NULL;
   3963	struct lpfc_iocbq rspiocbq;
   3964	uint32_t status;
   3965	uint32_t portRspPut, portRspMax;
   3966	int rc = 1;
   3967	lpfc_iocb_type type;
   3968	unsigned long iflag;
   3969	uint32_t rsp_cmpl = 0;
   3970
   3971	spin_lock_irqsave(&phba->hbalock, iflag);
   3972	pring->stats.iocb_event++;
   3973
   3974	/*
   3975	 * The next available response entry should never exceed the maximum
   3976	 * entries.  If it does, treat it as an adapter hardware error.
   3977	 */
   3978	portRspMax = pring->sli.sli3.numRiocb;
   3979	portRspPut = le32_to_cpu(pgp->rspPutInx);
   3980	if (unlikely(portRspPut >= portRspMax)) {
   3981		lpfc_sli_rsp_pointers_error(phba, pring);
   3982		spin_unlock_irqrestore(&phba->hbalock, iflag);
   3983		return 1;
   3984	}
   3985	if (phba->fcp_ring_in_use) {
   3986		spin_unlock_irqrestore(&phba->hbalock, iflag);
   3987		return 1;
   3988	} else
   3989		phba->fcp_ring_in_use = 1;
   3990
   3991	rmb();
   3992	while (pring->sli.sli3.rspidx != portRspPut) {
   3993		/*
   3994		 * Fetch an entry off the ring and copy it into a local data
   3995		 * structure.  The copy involves a byte-swap since the
   3996		 * network byte order and pci byte orders are different.
   3997		 */
   3998		entry = lpfc_resp_iocb(phba, pring);
   3999		phba->last_completion_time = jiffies;
   4000
   4001		if (++pring->sli.sli3.rspidx >= portRspMax)
   4002			pring->sli.sli3.rspidx = 0;
   4003
   4004		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
   4005				      (uint32_t *) &rspiocbq.iocb,
   4006				      phba->iocb_rsp_size);
   4007		INIT_LIST_HEAD(&(rspiocbq.list));
   4008		irsp = &rspiocbq.iocb;
   4009
   4010		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
   4011		pring->stats.iocb_rsp++;
   4012		rsp_cmpl++;
   4013
   4014		if (unlikely(irsp->ulpStatus)) {
   4015			/*
    4016			 * If resource errors are reported from the HBA,
    4017			 * reduce the queue depths of the SCSI device.
   4018			 */
   4019			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
   4020			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
   4021			     IOERR_NO_RESOURCES)) {
   4022				spin_unlock_irqrestore(&phba->hbalock, iflag);
   4023				phba->lpfc_rampdown_queue_depth(phba);
   4024				spin_lock_irqsave(&phba->hbalock, iflag);
   4025			}
   4026
   4027			/* Rsp ring <ringno> error: IOCB */
   4028			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
   4029					"0336 Rsp Ring %d error: IOCB Data: "
   4030					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
   4031					pring->ringno,
   4032					irsp->un.ulpWord[0],
   4033					irsp->un.ulpWord[1],
   4034					irsp->un.ulpWord[2],
   4035					irsp->un.ulpWord[3],
   4036					irsp->un.ulpWord[4],
   4037					irsp->un.ulpWord[5],
   4038					*(uint32_t *)&irsp->un1,
   4039					*((uint32_t *)&irsp->un1 + 1));
   4040		}
   4041
   4042		switch (type) {
   4043		case LPFC_ABORT_IOCB:
   4044		case LPFC_SOL_IOCB:
   4045			/*
   4046			 * Idle exchange closed via ABTS from port.  No iocb
   4047			 * resources need to be recovered.
   4048			 */
   4049			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
   4050				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
   4051						"0333 IOCB cmd 0x%x"
   4052						" processed. Skipping"
   4053						" completion\n",
   4054						irsp->ulpCommand);
   4055				break;
   4056			}
   4057
   4058			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
   4059							 &rspiocbq);
   4060			if (unlikely(!cmdiocbq))
   4061				break;
   4062			if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
   4063				cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
   4064			if (cmdiocbq->cmd_cmpl) {
   4065				spin_unlock_irqrestore(&phba->hbalock, iflag);
   4066				cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
   4067				spin_lock_irqsave(&phba->hbalock, iflag);
   4068			}
   4069			break;
   4070		case LPFC_UNSOL_IOCB:
   4071			spin_unlock_irqrestore(&phba->hbalock, iflag);
   4072			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
   4073			spin_lock_irqsave(&phba->hbalock, iflag);
   4074			break;
   4075		default:
   4076			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
   4077				char adaptermsg[LPFC_MAX_ADPTMSG];
   4078				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
   4079				memcpy(&adaptermsg[0], (uint8_t *) irsp,
   4080				       MAX_MSG_DATA);
   4081				dev_warn(&((phba->pcidev)->dev),
   4082					 "lpfc%d: %s\n",
   4083					 phba->brd_no, adaptermsg);
   4084			} else {
   4085				/* Unknown IOCB command */
   4086				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   4087						"0334 Unknown IOCB command "
   4088						"Data: x%x, x%x x%x x%x x%x\n",
   4089						type, irsp->ulpCommand,
   4090						irsp->ulpStatus,
   4091						irsp->ulpIoTag,
   4092						irsp->ulpContext);
   4093			}
   4094			break;
   4095		}
   4096
   4097		/*
   4098		 * The response IOCB has been processed.  Update the ring
   4099		 * pointer in SLIM.  If the port response put pointer has not
   4100		 * been updated, sync the pgp->rspPutInx and fetch the new port
   4101		 * response put pointer.
   4102		 */
   4103		writel(pring->sli.sli3.rspidx,
   4104			&phba->host_gp[pring->ringno].rspGetInx);
   4105
   4106		if (pring->sli.sli3.rspidx == portRspPut)
   4107			portRspPut = le32_to_cpu(pgp->rspPutInx);
   4108	}
   4109
   4110	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
   4111		pring->stats.iocb_rsp_full++;
   4112		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
   4113		writel(status, phba->CAregaddr);
   4114		readl(phba->CAregaddr);
   4115	}
   4116	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
   4117		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
   4118		pring->stats.iocb_cmd_empty++;
   4119
   4120		/* Force update of the local copy of cmdGetInx */
   4121		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
   4122		lpfc_sli_resume_iocb(phba, pring);
   4123
   4124		if ((pring->lpfc_sli_cmd_available))
   4125			(pring->lpfc_sli_cmd_available) (phba, pring);
   4126
   4127	}
   4128
   4129	phba->fcp_ring_in_use = 0;
   4130	spin_unlock_irqrestore(&phba->hbalock, iflag);
   4131	return rc;
   4132}
   4133
   4134/**
   4135 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
   4136 * @phba: Pointer to HBA context object.
   4137 * @pring: Pointer to driver SLI ring object.
   4138 * @rspiocbp: Pointer to driver response IOCB object.
   4139 *
   4140 * This function is called from the worker thread when there is a slow-path
   4141 * response IOCB to process. This function chains all the response iocbs until
   4142 * seeing the iocb with the LE bit set. The function will call
   4143 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
   4144 * completion of a command iocb. The function will call the
   4145 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
   4146 * The function frees the resources or calls the completion handler if this
   4147 * iocb is an abort completion. The function returns NULL when the response
   4148 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
   4149 * this function shall chain the iocb on to the iocb_continueq and return the
   4150 * response iocb passed in.
   4151 **/
   4152static struct lpfc_iocbq *
   4153lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
   4154			struct lpfc_iocbq *rspiocbp)
   4155{
   4156	struct lpfc_iocbq *saveq;
   4157	struct lpfc_iocbq *cmdiocb;
   4158	struct lpfc_iocbq *next_iocb;
   4159	IOCB_t *irsp;
   4160	uint32_t free_saveq;
   4161	u8 cmd_type;
   4162	lpfc_iocb_type type;
   4163	unsigned long iflag;
   4164	u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
   4165	u32 ulp_word4 = get_job_word4(phba, rspiocbp);
   4166	u32 ulp_command = get_job_cmnd(phba, rspiocbp);
   4167	int rc;
   4168
   4169	spin_lock_irqsave(&phba->hbalock, iflag);
    4170	/* First add the response iocb to the continueq list */
   4171	list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
   4172	pring->iocb_continueq_cnt++;
   4173
   4174	/*
   4175	 * By default, the driver expects to free all resources
   4176	 * associated with this iocb completion.
   4177	 */
   4178	free_saveq = 1;
   4179	saveq = list_get_first(&pring->iocb_continueq,
   4180			       struct lpfc_iocbq, list);
   4181	list_del_init(&pring->iocb_continueq);
   4182	pring->iocb_continueq_cnt = 0;
   4183
   4184	pring->stats.iocb_rsp++;
   4185
   4186	/*
    4187	 * If resource errors are reported from the HBA, reduce
    4188	 * the queue depths of the SCSI device.
   4189	 */
   4190	if (ulp_status == IOSTAT_LOCAL_REJECT &&
   4191	    ((ulp_word4 & IOERR_PARAM_MASK) ==
   4192	     IOERR_NO_RESOURCES)) {
   4193		spin_unlock_irqrestore(&phba->hbalock, iflag);
   4194		phba->lpfc_rampdown_queue_depth(phba);
   4195		spin_lock_irqsave(&phba->hbalock, iflag);
   4196	}
   4197
   4198	if (ulp_status) {
   4199		/* Rsp ring <ringno> error: IOCB */
   4200		if (phba->sli_rev < LPFC_SLI_REV4) {
   4201			irsp = &rspiocbp->iocb;
   4202			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
   4203					"0328 Rsp Ring %d error: ulp_status x%x "
   4204					"IOCB Data: "
   4205					"x%08x x%08x x%08x x%08x "
   4206					"x%08x x%08x x%08x x%08x "
   4207					"x%08x x%08x x%08x x%08x "
   4208					"x%08x x%08x x%08x x%08x\n",
   4209					pring->ringno, ulp_status,
   4210					get_job_ulpword(rspiocbp, 0),
   4211					get_job_ulpword(rspiocbp, 1),
   4212					get_job_ulpword(rspiocbp, 2),
   4213					get_job_ulpword(rspiocbp, 3),
   4214					get_job_ulpword(rspiocbp, 4),
   4215					get_job_ulpword(rspiocbp, 5),
   4216					*(((uint32_t *)irsp) + 6),
   4217					*(((uint32_t *)irsp) + 7),
   4218					*(((uint32_t *)irsp) + 8),
   4219					*(((uint32_t *)irsp) + 9),
   4220					*(((uint32_t *)irsp) + 10),
   4221					*(((uint32_t *)irsp) + 11),
   4222					*(((uint32_t *)irsp) + 12),
   4223					*(((uint32_t *)irsp) + 13),
   4224					*(((uint32_t *)irsp) + 14),
   4225					*(((uint32_t *)irsp) + 15));
   4226		} else {
   4227			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
   4228					"0321 Rsp Ring %d error: "
   4229					"IOCB Data: "
   4230					"x%x x%x x%x x%x\n",
   4231					pring->ringno,
   4232					rspiocbp->wcqe_cmpl.word0,
   4233					rspiocbp->wcqe_cmpl.total_data_placed,
   4234					rspiocbp->wcqe_cmpl.parameter,
   4235					rspiocbp->wcqe_cmpl.word3);
   4236		}
   4237	}
   4238
   4239
   4240	/*
   4241	 * Fetch the iocb command type and call the correct completion
   4242	 * routine. Solicited and Unsolicited IOCBs on the ELS ring
   4243	 * get freed back to the lpfc_iocb_list by the discovery
   4244	 * kernel thread.
   4245	 */
   4246	cmd_type = ulp_command & CMD_IOCB_MASK;
   4247	type = lpfc_sli_iocb_cmd_type(cmd_type);
   4248	switch (type) {
   4249	case LPFC_SOL_IOCB:
   4250		spin_unlock_irqrestore(&phba->hbalock, iflag);
   4251		rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
   4252		spin_lock_irqsave(&phba->hbalock, iflag);
   4253		break;
   4254	case LPFC_UNSOL_IOCB:
   4255		spin_unlock_irqrestore(&phba->hbalock, iflag);
   4256		rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
   4257		spin_lock_irqsave(&phba->hbalock, iflag);
   4258		if (!rc)
   4259			free_saveq = 0;
   4260		break;
   4261	case LPFC_ABORT_IOCB:
   4262		cmdiocb = NULL;
   4263		if (ulp_command != CMD_XRI_ABORTED_CX)
   4264			cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
   4265							saveq);
   4266		if (cmdiocb) {
   4267			/* Call the specified completion routine */
   4268			if (cmdiocb->cmd_cmpl) {
   4269				spin_unlock_irqrestore(&phba->hbalock, iflag);
   4270				cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
   4271				spin_lock_irqsave(&phba->hbalock, iflag);
   4272			} else {
   4273				__lpfc_sli_release_iocbq(phba, cmdiocb);
   4274			}
   4275		}
   4276		break;
   4277	case LPFC_UNKNOWN_IOCB:
   4278		if (ulp_command == CMD_ADAPTER_MSG) {
   4279			char adaptermsg[LPFC_MAX_ADPTMSG];
   4280
   4281			memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
   4282			memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
   4283			       MAX_MSG_DATA);
   4284			dev_warn(&((phba->pcidev)->dev),
   4285				 "lpfc%d: %s\n",
   4286				 phba->brd_no, adaptermsg);
   4287		} else {
   4288			/* Unknown command */
   4289			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   4290					"0335 Unknown IOCB "
   4291					"command Data: x%x "
   4292					"x%x x%x x%x\n",
   4293					ulp_command,
   4294					ulp_status,
   4295					get_wqe_reqtag(rspiocbp),
   4296					get_job_ulpcontext(phba, rspiocbp));
   4297		}
   4298		break;
   4299	}
   4300
   4301	if (free_saveq) {
   4302		list_for_each_entry_safe(rspiocbp, next_iocb,
   4303					 &saveq->list, list) {
   4304			list_del_init(&rspiocbp->list);
   4305			__lpfc_sli_release_iocbq(phba, rspiocbp);
   4306		}
   4307		__lpfc_sli_release_iocbq(phba, saveq);
   4308	}
   4309	rspiocbp = NULL;
   4310	spin_unlock_irqrestore(&phba->hbalock, iflag);
   4311	return rspiocbp;
   4312}
   4313
   4314/**
   4315 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
   4316 * @phba: Pointer to HBA context object.
   4317 * @pring: Pointer to driver SLI ring object.
   4318 * @mask: Host attention register mask for this ring.
   4319 *
   4320 * This routine wraps the actual slow_ring event process routine from the
   4321 * API jump table function pointer from the lpfc_hba struct.
   4322 **/
   4323void
   4324lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
   4325				struct lpfc_sli_ring *pring, uint32_t mask)
   4326{
   4327	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
   4328}
   4329
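/*
 * Editorial sketch (not part of the driver source): the wrapper above only
 * dereferences a per-SLI-revision jump-table pointer kept in struct
 * lpfc_hba.  Driver setup code points it at one of the two static handlers
 * below, roughly along these lines:
 */
#if 0	/* illustration only */
	if (phba->sli_rev < LPFC_SLI_REV4)
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
	else
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
#endif
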
   4330/**
   4331 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
   4332 * @phba: Pointer to HBA context object.
   4333 * @pring: Pointer to driver SLI ring object.
   4334 * @mask: Host attention register mask for this ring.
   4335 *
   4336 * This function is called from the worker thread when there is a ring event
   4337 * for non-fcp rings. The caller does not hold any lock. The function will
    4338 * remove each response iocb in the response ring and call the handle
   4339 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
   4340 **/
   4341static void
   4342lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
   4343				   struct lpfc_sli_ring *pring, uint32_t mask)
   4344{
   4345	struct lpfc_pgp *pgp;
   4346	IOCB_t *entry;
   4347	IOCB_t *irsp = NULL;
   4348	struct lpfc_iocbq *rspiocbp = NULL;
   4349	uint32_t portRspPut, portRspMax;
   4350	unsigned long iflag;
   4351	uint32_t status;
   4352
   4353	pgp = &phba->port_gp[pring->ringno];
   4354	spin_lock_irqsave(&phba->hbalock, iflag);
   4355	pring->stats.iocb_event++;
   4356
   4357	/*
   4358	 * The next available response entry should never exceed the maximum
   4359	 * entries.  If it does, treat it as an adapter hardware error.
   4360	 */
   4361	portRspMax = pring->sli.sli3.numRiocb;
   4362	portRspPut = le32_to_cpu(pgp->rspPutInx);
   4363	if (portRspPut >= portRspMax) {
   4364		/*
   4365		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
   4366		 * rsp ring <portRspMax>
   4367		 */
   4368		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   4369				"0303 Ring %d handler: portRspPut %d "
   4370				"is bigger than rsp ring %d\n",
   4371				pring->ringno, portRspPut, portRspMax);
   4372
   4373		phba->link_state = LPFC_HBA_ERROR;
   4374		spin_unlock_irqrestore(&phba->hbalock, iflag);
   4375
   4376		phba->work_hs = HS_FFER3;
   4377		lpfc_handle_eratt(phba);
   4378
   4379		return;
   4380	}
   4381
   4382	rmb();
   4383	while (pring->sli.sli3.rspidx != portRspPut) {
   4384		/*
   4385		 * Build a completion list and call the appropriate handler.
   4386		 * The process is to get the next available response iocb, get
   4387		 * a free iocb from the list, copy the response data into the
   4388		 * free iocb, insert to the continuation list, and update the
   4389		 * next response index to slim.  This process makes response
    4390		 * iocbs in the ring available to DMA as fast as possible but
   4391		 * pays a penalty for a copy operation.  Since the iocb is
   4392		 * only 32 bytes, this penalty is considered small relative to
   4393		 * the PCI reads for register values and a slim write.  When
   4394		 * the ulpLe field is set, the entire Command has been
   4395		 * received.
   4396		 */
   4397		entry = lpfc_resp_iocb(phba, pring);
   4398
   4399		phba->last_completion_time = jiffies;
   4400		rspiocbp = __lpfc_sli_get_iocbq(phba);
   4401		if (rspiocbp == NULL) {
   4402			printk(KERN_ERR "%s: out of buffers! Failing "
   4403			       "completion.\n", __func__);
   4404			break;
   4405		}
   4406
   4407		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
   4408				      phba->iocb_rsp_size);
   4409		irsp = &rspiocbp->iocb;
   4410
   4411		if (++pring->sli.sli3.rspidx >= portRspMax)
   4412			pring->sli.sli3.rspidx = 0;
   4413
   4414		if (pring->ringno == LPFC_ELS_RING) {
   4415			lpfc_debugfs_slow_ring_trc(phba,
   4416			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
   4417				*(((uint32_t *) irsp) + 4),
   4418				*(((uint32_t *) irsp) + 6),
   4419				*(((uint32_t *) irsp) + 7));
   4420		}
   4421
   4422		writel(pring->sli.sli3.rspidx,
   4423			&phba->host_gp[pring->ringno].rspGetInx);
   4424
   4425		spin_unlock_irqrestore(&phba->hbalock, iflag);
   4426		/* Handle the response IOCB */
   4427		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
   4428		spin_lock_irqsave(&phba->hbalock, iflag);
   4429
   4430		/*
   4431		 * If the port response put pointer has not been updated, sync
    4432		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
   4433		 * response put pointer.
   4434		 */
   4435		if (pring->sli.sli3.rspidx == portRspPut) {
   4436			portRspPut = le32_to_cpu(pgp->rspPutInx);
   4437		}
   4438	} /* while (pring->sli.sli3.rspidx != portRspPut) */
   4439
   4440	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
   4441		/* At least one response entry has been freed */
   4442		pring->stats.iocb_rsp_full++;
   4443		/* SET RxRE_RSP in Chip Att register */
   4444		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
   4445		writel(status, phba->CAregaddr);
   4446		readl(phba->CAregaddr); /* flush */
   4447	}
   4448	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
   4449		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
   4450		pring->stats.iocb_cmd_empty++;
   4451
   4452		/* Force update of the local copy of cmdGetInx */
   4453		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
   4454		lpfc_sli_resume_iocb(phba, pring);
   4455
   4456		if ((pring->lpfc_sli_cmd_available))
   4457			(pring->lpfc_sli_cmd_available) (phba, pring);
   4458
   4459	}
   4460
   4461	spin_unlock_irqrestore(&phba->hbalock, iflag);
   4462	return;
   4463}
   4464
   4465/**
   4466 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
   4467 * @phba: Pointer to HBA context object.
   4468 * @pring: Pointer to driver SLI ring object.
   4469 * @mask: Host attention register mask for this ring.
   4470 *
   4471 * This function is called from the worker thread when there is a pending
   4472 * ELS response iocb on the driver internal slow-path response iocb worker
   4473 * queue. The caller does not hold any lock. The function will remove each
    4474 * response iocb from the response worker queue and call the handle
   4475 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
   4476 **/
   4477static void
   4478lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
   4479				   struct lpfc_sli_ring *pring, uint32_t mask)
   4480{
   4481	struct lpfc_iocbq *irspiocbq;
   4482	struct hbq_dmabuf *dmabuf;
   4483	struct lpfc_cq_event *cq_event;
   4484	unsigned long iflag;
   4485	int count = 0;
   4486
   4487	spin_lock_irqsave(&phba->hbalock, iflag);
   4488	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
   4489	spin_unlock_irqrestore(&phba->hbalock, iflag);
   4490	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
   4491		/* Get the response iocb from the head of work queue */
   4492		spin_lock_irqsave(&phba->hbalock, iflag);
   4493		list_remove_head(&phba->sli4_hba.sp_queue_event,
   4494				 cq_event, struct lpfc_cq_event, list);
   4495		spin_unlock_irqrestore(&phba->hbalock, iflag);
   4496
   4497		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
   4498		case CQE_CODE_COMPL_WQE:
   4499			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
   4500						 cq_event);
   4501			/* Translate ELS WCQE to response IOCBQ */
   4502			irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
   4503								      irspiocbq);
   4504			if (irspiocbq)
   4505				lpfc_sli_sp_handle_rspiocb(phba, pring,
   4506							   irspiocbq);
   4507			count++;
   4508			break;
   4509		case CQE_CODE_RECEIVE:
   4510		case CQE_CODE_RECEIVE_V1:
   4511			dmabuf = container_of(cq_event, struct hbq_dmabuf,
   4512					      cq_event);
   4513			lpfc_sli4_handle_received_buffer(phba, dmabuf);
   4514			count++;
   4515			break;
   4516		default:
   4517			break;
   4518		}
   4519
   4520		/* Limit the number of events to 64 to avoid soft lockups */
   4521		if (count == 64)
   4522			break;
   4523	}
   4524}
   4525
   4526/**
   4527 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
   4528 * @phba: Pointer to HBA context object.
   4529 * @pring: Pointer to driver SLI ring object.
   4530 *
   4531 * This function aborts all iocbs in the given ring and frees all the iocb
   4532 * objects in txq. This function issues an abort iocb for all the iocb commands
    4533 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
   4534 * the return of this function. The caller is not required to hold any locks.
   4535 **/
   4536void
   4537lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
   4538{
   4539	LIST_HEAD(tx_completions);
   4540	LIST_HEAD(txcmplq_completions);
   4541	struct lpfc_iocbq *iocb, *next_iocb;
   4542	int offline;
   4543
   4544	if (pring->ringno == LPFC_ELS_RING) {
   4545		lpfc_fabric_abort_hba(phba);
   4546	}
   4547	offline = pci_channel_offline(phba->pcidev);
   4548
   4549	/* Error everything on txq and txcmplq
   4550	 * First do the txq.
   4551	 */
   4552	if (phba->sli_rev >= LPFC_SLI_REV4) {
   4553		spin_lock_irq(&pring->ring_lock);
   4554		list_splice_init(&pring->txq, &tx_completions);
   4555		pring->txq_cnt = 0;
   4556
   4557		if (offline) {
   4558			list_splice_init(&pring->txcmplq,
   4559					 &txcmplq_completions);
   4560		} else {
   4561			/* Next issue ABTS for everything on the txcmplq */
   4562			list_for_each_entry_safe(iocb, next_iocb,
   4563						 &pring->txcmplq, list)
   4564				lpfc_sli_issue_abort_iotag(phba, pring,
   4565							   iocb, NULL);
   4566		}
   4567		spin_unlock_irq(&pring->ring_lock);
   4568	} else {
   4569		spin_lock_irq(&phba->hbalock);
   4570		list_splice_init(&pring->txq, &tx_completions);
   4571		pring->txq_cnt = 0;
   4572
   4573		if (offline) {
   4574			list_splice_init(&pring->txcmplq, &txcmplq_completions);
   4575		} else {
   4576			/* Next issue ABTS for everything on the txcmplq */
   4577			list_for_each_entry_safe(iocb, next_iocb,
   4578						 &pring->txcmplq, list)
   4579				lpfc_sli_issue_abort_iotag(phba, pring,
   4580							   iocb, NULL);
   4581		}
   4582		spin_unlock_irq(&phba->hbalock);
   4583	}
   4584
   4585	if (offline) {
   4586		/* Cancel all the IOCBs from the completions list */
   4587		lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
   4588				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
   4589	} else {
   4590		/* Make sure HBA is alive */
   4591		lpfc_issue_hb_tmo(phba);
   4592	}
   4593	/* Cancel all the IOCBs from the completions list */
   4594	lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
   4595			      IOERR_SLI_ABORTED);
   4596}
   4597
   4598/**
   4599 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
   4600 * @phba: Pointer to HBA context object.
   4601 *
   4602 * This function aborts all iocbs in FCP rings and frees all the iocb
   4603 * objects in txq. This function issues an abort iocb for all the iocb commands
    4604 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
   4605 * the return of this function. The caller is not required to hold any locks.
   4606 **/
   4607void
   4608lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
   4609{
   4610	struct lpfc_sli *psli = &phba->sli;
   4611	struct lpfc_sli_ring  *pring;
   4612	uint32_t i;
   4613
   4614	/* Look on all the FCP Rings for the iotag */
   4615	if (phba->sli_rev >= LPFC_SLI_REV4) {
   4616		for (i = 0; i < phba->cfg_hdw_queue; i++) {
   4617			pring = phba->sli4_hba.hdwq[i].io_wq->pring;
   4618			lpfc_sli_abort_iocb_ring(phba, pring);
   4619		}
   4620	} else {
   4621		pring = &psli->sli3_ring[LPFC_FCP_RING];
   4622		lpfc_sli_abort_iocb_ring(phba, pring);
   4623	}
   4624}
   4625
   4626/**
   4627 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
   4628 * @phba: Pointer to HBA context object.
   4629 *
   4630 * This function flushes all iocbs in the IO ring and frees all the iocb
   4631 * objects in txq and txcmplq. This function will not issue abort iocbs
    4632 * for the iocb commands in txcmplq; they will just be returned with
    4633 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
    4634 * slot has been permanently disabled.
   4635 **/
   4636void
   4637lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
   4638{
   4639	LIST_HEAD(txq);
   4640	LIST_HEAD(txcmplq);
   4641	struct lpfc_sli *psli = &phba->sli;
   4642	struct lpfc_sli_ring  *pring;
   4643	uint32_t i;
   4644	struct lpfc_iocbq *piocb, *next_iocb;
   4645
   4646	spin_lock_irq(&phba->hbalock);
   4647	/* Indicate the I/O queues are flushed */
   4648	phba->hba_flag |= HBA_IOQ_FLUSH;
   4649	spin_unlock_irq(&phba->hbalock);
   4650
   4651	/* Look on all the FCP Rings for the iotag */
   4652	if (phba->sli_rev >= LPFC_SLI_REV4) {
   4653		for (i = 0; i < phba->cfg_hdw_queue; i++) {
   4654			pring = phba->sli4_hba.hdwq[i].io_wq->pring;
   4655
   4656			spin_lock_irq(&pring->ring_lock);
   4657			/* Retrieve everything on txq */
   4658			list_splice_init(&pring->txq, &txq);
   4659			list_for_each_entry_safe(piocb, next_iocb,
   4660						 &pring->txcmplq, list)
   4661				piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
   4662			/* Retrieve everything on the txcmplq */
   4663			list_splice_init(&pring->txcmplq, &txcmplq);
   4664			pring->txq_cnt = 0;
   4665			pring->txcmplq_cnt = 0;
   4666			spin_unlock_irq(&pring->ring_lock);
   4667
   4668			/* Flush the txq */
   4669			lpfc_sli_cancel_iocbs(phba, &txq,
   4670					      IOSTAT_LOCAL_REJECT,
   4671					      IOERR_SLI_DOWN);
   4672			/* Flush the txcmplq */
   4673			lpfc_sli_cancel_iocbs(phba, &txcmplq,
   4674					      IOSTAT_LOCAL_REJECT,
   4675					      IOERR_SLI_DOWN);
   4676			if (unlikely(pci_channel_offline(phba->pcidev)))
   4677				lpfc_sli4_io_xri_aborted(phba, NULL, 0);
   4678		}
   4679	} else {
   4680		pring = &psli->sli3_ring[LPFC_FCP_RING];
   4681
   4682		spin_lock_irq(&phba->hbalock);
   4683		/* Retrieve everything on txq */
   4684		list_splice_init(&pring->txq, &txq);
   4685		list_for_each_entry_safe(piocb, next_iocb,
   4686					 &pring->txcmplq, list)
   4687			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
   4688		/* Retrieve everything on the txcmplq */
   4689		list_splice_init(&pring->txcmplq, &txcmplq);
   4690		pring->txq_cnt = 0;
   4691		pring->txcmplq_cnt = 0;
   4692		spin_unlock_irq(&phba->hbalock);
   4693
   4694		/* Flush the txq */
   4695		lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
   4696				      IOERR_SLI_DOWN);
   4697		/* Flush the txcmpq */
   4698		lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
   4699				      IOERR_SLI_DOWN);
   4700	}
   4701}
   4702
   4703/**
   4704 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
   4705 * @phba: Pointer to HBA context object.
   4706 * @mask: Bit mask to be checked.
   4707 *
    4708 * This function reads the host status register and compares it
    4709 * with the provided bit mask to check if the HBA completed
   4710 * the restart. This function will wait in a loop for the
   4711 * HBA to complete restart. If the HBA does not restart within
   4712 * 15 iterations, the function will reset the HBA again. The
    4713 * function returns 1 when the HBA fails to restart; otherwise it returns
   4714 * zero.
   4715 **/
   4716static int
   4717lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
   4718{
   4719	uint32_t status;
   4720	int i = 0;
   4721	int retval = 0;
   4722
   4723	/* Read the HBA Host Status Register */
   4724	if (lpfc_readl(phba->HSregaddr, &status))
   4725		return 1;
   4726
   4727	phba->hba_flag |= HBA_NEEDS_CFG_PORT;
   4728
   4729	/*
    4730	 * Check the status register every 10ms for 5 retries, then every
    4731	 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
    4732	 * check every 2.5 sec for 4 more.
    4733	 * Break out of the loop if errors occurred during init.
   4734	 */
   4735	while (((status & mask) != mask) &&
   4736	       !(status & HS_FFERM) &&
   4737	       i++ < 20) {
   4738
   4739		if (i <= 5)
   4740			msleep(10);
   4741		else if (i <= 10)
   4742			msleep(500);
   4743		else
   4744			msleep(2500);
   4745
   4746		if (i == 15) {
   4747				/* Do post */
   4748			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
   4749			lpfc_sli_brdrestart(phba);
   4750		}
   4751		/* Read the HBA Host Status Register */
   4752		if (lpfc_readl(phba->HSregaddr, &status)) {
   4753			retval = 1;
   4754			break;
   4755		}
   4756	}
   4757
   4758	/* Check to see if any errors occurred during init */
   4759	if ((status & HS_FFERM) || (i >= 20)) {
   4760		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   4761				"2751 Adapter failed to restart, "
   4762				"status reg x%x, FW Data: A8 x%x AC x%x\n",
   4763				status,
   4764				readl(phba->MBslimaddr + 0xa8),
   4765				readl(phba->MBslimaddr + 0xac));
   4766		phba->link_state = LPFC_HBA_ERROR;
   4767		retval = 1;
   4768	}
   4769
   4770	return retval;
   4771}
   4772
   4773/**
   4774 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
   4775 * @phba: Pointer to HBA context object.
   4776 * @mask: Bit mask to be checked.
   4777 *
    4778 * This function checks the host status register to see if the HBA is
    4779 * ready. This function will wait in a loop for the HBA to be ready.
    4780 * If the HBA is not ready, the function will reset the HBA PCI
    4781 * function again. The function returns 1 when the HBA fails to be ready;
    4782 * otherwise it returns zero.
   4783 **/
   4784static int
   4785lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
   4786{
   4787	uint32_t status;
   4788	int retval = 0;
   4789
   4790	/* Read the HBA Host Status Register */
   4791	status = lpfc_sli4_post_status_check(phba);
   4792
   4793	if (status) {
   4794		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
   4795		lpfc_sli_brdrestart(phba);
   4796		status = lpfc_sli4_post_status_check(phba);
   4797	}
   4798
   4799	/* Check to see if any errors occurred during init */
   4800	if (status) {
   4801		phba->link_state = LPFC_HBA_ERROR;
   4802		retval = 1;
   4803	} else
   4804		phba->sli4_hba.intr_enable = 0;
   4805
   4806	phba->hba_flag &= ~HBA_SETUP;
   4807	return retval;
   4808}
   4809
   4810/**
    4811 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
   4812 * @phba: Pointer to HBA context object.
   4813 * @mask: Bit mask to be checked.
   4814 *
    4815 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
    4816 * using the API jump table function pointer from the lpfc_hba struct.
   4817 **/
   4818int
   4819lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
   4820{
   4821	return phba->lpfc_sli_brdready(phba, mask);
   4822}
   4823
   4824#define BARRIER_TEST_PATTERN (0xdeadbeef)
   4825
   4826/**
   4827 * lpfc_reset_barrier - Make HBA ready for HBA reset
   4828 * @phba: Pointer to HBA context object.
   4829 *
    4830 * This function is called before resetting an HBA. It is called with the
    4831 * hbalock held and requests the HBA to quiesce DMAs before the reset.
   4832 **/
   4833void lpfc_reset_barrier(struct lpfc_hba *phba)
   4834{
   4835	uint32_t __iomem *resp_buf;
   4836	uint32_t __iomem *mbox_buf;
   4837	volatile struct MAILBOX_word0 mbox;
   4838	uint32_t hc_copy, ha_copy, resp_data;
   4839	int  i;
   4840	uint8_t hdrtype;
   4841
   4842	lockdep_assert_held(&phba->hbalock);
   4843
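	/* The DMA quiesce barrier applies only to multi-function
	 * Helios/Thor adapters.
	 */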
   4844	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
   4845	if (hdrtype != 0x80 ||
   4846	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
   4847	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
   4848		return;
   4849
   4850	/*
   4851	 * Tell the other part of the chip to suspend temporarily all
   4852	 * its DMA activity.
   4853	 */
   4854	resp_buf = phba->MBslimaddr;
   4855
   4856	/* Disable the error attention */
   4857	if (lpfc_readl(phba->HCregaddr, &hc_copy))
   4858		return;
   4859	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
   4860	readl(phba->HCregaddr); /* flush */
   4861	phba->link_flag |= LS_IGNORE_ERATT;
   4862
   4863	if (lpfc_readl(phba->HAregaddr, &ha_copy))
   4864		return;
   4865	if (ha_copy & HA_ERATT) {
   4866		/* Clear Chip error bit */
   4867		writel(HA_ERATT, phba->HAregaddr);
   4868		phba->pport->stopped = 1;
   4869	}
   4870
   4871	mbox.word0 = 0;
   4872	mbox.mbxCommand = MBX_KILL_BOARD;
   4873	mbox.mbxOwner = OWN_CHIP;
   4874
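	/* Write a test pattern to SLIM word 1 before issuing KILL_BOARD; the
	 * loop below polls word 1 for the one's complement of the pattern to
	 * detect that the chip has processed the barrier.
	 */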
   4875	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
   4876	mbox_buf = phba->MBslimaddr;
   4877	writel(mbox.word0, mbox_buf);
   4878
   4879	for (i = 0; i < 50; i++) {
   4880		if (lpfc_readl((resp_buf + 1), &resp_data))
   4881			return;
   4882		if (resp_data != ~(BARRIER_TEST_PATTERN))
   4883			mdelay(1);
   4884		else
   4885			break;
   4886	}
   4887	resp_data = 0;
   4888	if (lpfc_readl((resp_buf + 1), &resp_data))
   4889		return;
   4890	if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
   4891		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
   4892		    phba->pport->stopped)
   4893			goto restore_hc;
   4894		else
   4895			goto clear_errat;
   4896	}
   4897
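	/* The pattern was complemented; now wait for the chip to post the
	 * KILL_BOARD mailbox word back to SLIM with host ownership.
	 */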
   4898	mbox.mbxOwner = OWN_HOST;
   4899	resp_data = 0;
   4900	for (i = 0; i < 500; i++) {
   4901		if (lpfc_readl(resp_buf, &resp_data))
   4902			return;
   4903		if (resp_data != mbox.word0)
   4904			mdelay(1);
   4905		else
   4906			break;
   4907	}
   4908
   4909clear_errat:
   4910
   4911	while (++i < 500) {
   4912		if (lpfc_readl(phba->HAregaddr, &ha_copy))
   4913			return;
   4914		if (!(ha_copy & HA_ERATT))
   4915			mdelay(1);
   4916		else
   4917			break;
   4918	}
   4919
   4920	if (readl(phba->HAregaddr) & HA_ERATT) {
   4921		writel(HA_ERATT, phba->HAregaddr);
   4922		phba->pport->stopped = 1;
   4923	}
   4924
   4925restore_hc:
   4926	phba->link_flag &= ~LS_IGNORE_ERATT;
   4927	writel(hc_copy, phba->HCregaddr);
   4928	readl(phba->HCregaddr); /* flush */
   4929}
   4930
   4931/**
   4932 * lpfc_sli_brdkill - Issue a kill_board mailbox command
   4933 * @phba: Pointer to HBA context object.
   4934 *
    4935 * This function issues a kill_board mailbox command and waits for
    4936 * the error attention interrupt. This function is called to stop
    4937 * firmware processing. The caller is not required to hold any
    4938 * locks. This function calls the lpfc_hba_down_post function to free
    4939 * any pending commands after the kill. The function will return 1 when it
    4940 * fails to kill the board, else it will return 0.
   4941 **/
   4942int
   4943lpfc_sli_brdkill(struct lpfc_hba *phba)
   4944{
   4945	struct lpfc_sli *psli;
   4946	LPFC_MBOXQ_t *pmb;
   4947	uint32_t status;
   4948	uint32_t ha_copy;
   4949	int retval;
   4950	int i = 0;
   4951
   4952	psli = &phba->sli;
   4953
   4954	/* Kill HBA */
   4955	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
   4956			"0329 Kill HBA Data: x%x x%x\n",
   4957			phba->pport->port_state, psli->sli_flag);
   4958
   4959	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   4960	if (!pmb)
   4961		return 1;
   4962
   4963	/* Disable the error attention */
   4964	spin_lock_irq(&phba->hbalock);
   4965	if (lpfc_readl(phba->HCregaddr, &status)) {
   4966		spin_unlock_irq(&phba->hbalock);
   4967		mempool_free(pmb, phba->mbox_mem_pool);
   4968		return 1;
   4969	}
   4970	status &= ~HC_ERINT_ENA;
   4971	writel(status, phba->HCregaddr);
   4972	readl(phba->HCregaddr); /* flush */
   4973	phba->link_flag |= LS_IGNORE_ERATT;
   4974	spin_unlock_irq(&phba->hbalock);
   4975
   4976	lpfc_kill_board(phba, pmb);
   4977	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
   4978	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
   4979
   4980	if (retval != MBX_SUCCESS) {
   4981		if (retval != MBX_BUSY)
   4982			mempool_free(pmb, phba->mbox_mem_pool);
   4983		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   4984				"2752 KILL_BOARD command failed retval %d\n",
   4985				retval);
   4986		spin_lock_irq(&phba->hbalock);
   4987		phba->link_flag &= ~LS_IGNORE_ERATT;
   4988		spin_unlock_irq(&phba->hbalock);
   4989		return 1;
   4990	}
   4991
   4992	spin_lock_irq(&phba->hbalock);
   4993	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
   4994	spin_unlock_irq(&phba->hbalock);
   4995
   4996	mempool_free(pmb, phba->mbox_mem_pool);
   4997
   4998	/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
   4999	 * attention every 100ms for 3 seconds. If we don't get ERATT after
   5000	 * 3 seconds we still set HBA_ERROR state because the status of the
   5001	 * board is now undefined.
   5002	 */
   5003	if (lpfc_readl(phba->HAregaddr, &ha_copy))
   5004		return 1;
   5005	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
   5006		mdelay(100);
   5007		if (lpfc_readl(phba->HAregaddr, &ha_copy))
   5008			return 1;
   5009	}
   5010
   5011	del_timer_sync(&psli->mbox_tmo);
   5012	if (ha_copy & HA_ERATT) {
   5013		writel(HA_ERATT, phba->HAregaddr);
   5014		phba->pport->stopped = 1;
   5015	}
   5016	spin_lock_irq(&phba->hbalock);
   5017	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
   5018	psli->mbox_active = NULL;
   5019	phba->link_flag &= ~LS_IGNORE_ERATT;
   5020	spin_unlock_irq(&phba->hbalock);
   5021
   5022	lpfc_hba_down_post(phba);
   5023	phba->link_state = LPFC_HBA_ERROR;
   5024
   5025	return ha_copy & HA_ERATT ? 0 : 1;
   5026}
   5027
   5028/**
   5029 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
   5030 * @phba: Pointer to HBA context object.
   5031 *
   5032 * This function resets the HBA by writing HC_INITFF to the control
   5033 * register. After the HBA resets, this function resets all the iocb ring
   5034 * indices. This function disables PCI layer parity checking during
   5035 * the reset.
   5036 * This function returns 0 always.
   5037 * The caller is not required to hold any locks.
   5038 **/
   5039int
   5040lpfc_sli_brdreset(struct lpfc_hba *phba)
   5041{
   5042	struct lpfc_sli *psli;
   5043	struct lpfc_sli_ring *pring;
   5044	uint16_t cfg_value;
   5045	int i;
   5046
   5047	psli = &phba->sli;
   5048
   5049	/* Reset HBA */
   5050	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
   5051			"0325 Reset HBA Data: x%x x%x\n",
   5052			(phba->pport) ? phba->pport->port_state : 0,
   5053			psli->sli_flag);
   5054
   5055	/* perform board reset */
   5056	phba->fc_eventTag = 0;
   5057	phba->link_events = 0;
   5058	phba->hba_flag |= HBA_NEEDS_CFG_PORT;
   5059	if (phba->pport) {
   5060		phba->pport->fc_myDID = 0;
   5061		phba->pport->fc_prevDID = 0;
   5062	}
   5063
   5064	/* Turn off parity checking and serr during the physical reset */
   5065	if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
   5066		return -EIO;
   5067
   5068	pci_write_config_word(phba->pcidev, PCI_COMMAND,
   5069			      (cfg_value &
   5070			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
   5071
   5072	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
   5073
   5074	/* Now toggle INITFF bit in the Host Control Register */
   5075	writel(HC_INITFF, phba->HCregaddr);
   5076	mdelay(1);
   5077	readl(phba->HCregaddr); /* flush */
   5078	writel(0, phba->HCregaddr);
   5079	readl(phba->HCregaddr); /* flush */
   5080
   5081	/* Restore PCI cmd register */
   5082	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
   5083
   5084	/* Initialize relevant SLI info */
   5085	for (i = 0; i < psli->num_rings; i++) {
   5086		pring = &psli->sli3_ring[i];
   5087		pring->flag = 0;
   5088		pring->sli.sli3.rspidx = 0;
   5089		pring->sli.sli3.next_cmdidx  = 0;
   5090		pring->sli.sli3.local_getidx = 0;
   5091		pring->sli.sli3.cmdidx = 0;
   5092		pring->missbufcnt = 0;
   5093	}
   5094
   5095	phba->link_state = LPFC_WARM_START;
   5096	return 0;
   5097}
   5098
   5099/**
   5100 * lpfc_sli4_brdreset - Reset a sli-4 HBA
   5101 * @phba: Pointer to HBA context object.
   5102 *
   5103 * This function resets a SLI4 HBA. This function disables PCI layer parity
    5104 * checking while it resets the device. The caller is not required to hold
   5105 * any locks.
   5106 *
   5107 * This function returns 0 on success else returns negative error code.
   5108 **/
   5109int
   5110lpfc_sli4_brdreset(struct lpfc_hba *phba)
   5111{
   5112	struct lpfc_sli *psli = &phba->sli;
   5113	uint16_t cfg_value;
   5114	int rc = 0;
   5115
   5116	/* Reset HBA */
   5117	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
   5118			"0295 Reset HBA Data: x%x x%x x%x\n",
   5119			phba->pport->port_state, psli->sli_flag,
   5120			phba->hba_flag);
   5121
   5122	/* perform board reset */
   5123	phba->fc_eventTag = 0;
   5124	phba->link_events = 0;
   5125	phba->pport->fc_myDID = 0;
   5126	phba->pport->fc_prevDID = 0;
   5127	phba->hba_flag &= ~HBA_SETUP;
   5128
   5129	spin_lock_irq(&phba->hbalock);
   5130	psli->sli_flag &= ~(LPFC_PROCESS_LA);
   5131	phba->fcf.fcf_flag = 0;
   5132	spin_unlock_irq(&phba->hbalock);
   5133
   5134	/* Now physically reset the device */
   5135	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
   5136			"0389 Performing PCI function reset!\n");
   5137
   5138	/* Turn off parity checking and serr during the physical reset */
   5139	if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
   5140		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
   5141				"3205 PCI read Config failed\n");
   5142		return -EIO;
   5143	}
   5144
   5145	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
   5146			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
   5147
   5148	/* Perform FCoE PCI function reset before freeing queue memory */
   5149	rc = lpfc_pci_function_reset(phba);
   5150
   5151	/* Restore PCI cmd register */
   5152	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
   5153
   5154	return rc;
   5155}
   5156
   5157/**
   5158 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
   5159 * @phba: Pointer to HBA context object.
   5160 *
   5161 * This function is called in the SLI initialization code path to
   5162 * restart the HBA. The caller is not required to hold any lock.
   5163 * This function writes MBX_RESTART mailbox command to the SLIM and
   5164 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
   5165 * function to free any pending commands. The function enables
   5166 * POST only during the first initialization. The function returns zero.
    5167 * The function does not guarantee completion of the MBX_RESTART mailbox
    5168 * command before this function returns.
   5169 **/
   5170static int
   5171lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
   5172{
   5173	volatile struct MAILBOX_word0 mb;
   5174	struct lpfc_sli *psli;
   5175	void __iomem *to_slim;
   5176	uint32_t hba_aer_enabled;
   5177
   5178	spin_lock_irq(&phba->hbalock);
   5179
   5180	/* Take PCIe device Advanced Error Reporting (AER) state */
   5181	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
   5182
   5183	psli = &phba->sli;
   5184
   5185	/* Restart HBA */
   5186	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
   5187			"0337 Restart HBA Data: x%x x%x\n",
   5188			(phba->pport) ? phba->pport->port_state : 0,
   5189			psli->sli_flag);
   5190
   5191	mb.word0 = 0;
   5192	mb.mbxCommand = MBX_RESTART;
   5193	mb.mbxHc = 1;
   5194
   5195	lpfc_reset_barrier(phba);
   5196
   5197	to_slim = phba->MBslimaddr;
   5198	writel(mb.word0, to_slim);
   5199	readl(to_slim); /* flush */
   5200
   5201	/* Only skip post after fc_ffinit is completed */
   5202	if (phba->pport && phba->pport->port_state)
   5203		mb.word0 = 1;	/* This is really setting up word1 */
   5204	else
   5205		mb.word0 = 0;	/* This is really setting up word1 */
   5206	to_slim = phba->MBslimaddr + sizeof (uint32_t);
   5207	writel(mb.word0, to_slim);
   5208	readl(to_slim); /* flush */
   5209
   5210	lpfc_sli_brdreset(phba);
   5211	if (phba->pport)
   5212		phba->pport->stopped = 0;
   5213	phba->link_state = LPFC_INIT_START;
   5214	phba->hba_flag = 0;
   5215	spin_unlock_irq(&phba->hbalock);
   5216
   5217	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
   5218	psli->stats_start = ktime_get_seconds();
   5219
   5220	/* Give the INITFF and Post time to settle. */
   5221	mdelay(100);
   5222
   5223	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
   5224	if (hba_aer_enabled)
   5225		pci_disable_pcie_error_reporting(phba->pcidev);
   5226
   5227	lpfc_hba_down_post(phba);
   5228
   5229	return 0;
   5230}
   5231
   5232/**
   5233 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
   5234 * @phba: Pointer to HBA context object.
   5235 *
   5236 * This function is called in the SLI initialization code path to restart
   5237 * a SLI4 HBA. The caller is not required to hold any lock.
   5238 * At the end of the function, it calls lpfc_hba_down_post function to
   5239 * free any pending commands.
   5240 **/
   5241static int
   5242lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
   5243{
   5244	struct lpfc_sli *psli = &phba->sli;
   5245	uint32_t hba_aer_enabled;
   5246	int rc;
   5247
   5248	/* Restart HBA */
   5249	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
   5250			"0296 Restart HBA Data: x%x x%x\n",
   5251			phba->pport->port_state, psli->sli_flag);
   5252
   5253	/* Take PCIe device Advanced Error Reporting (AER) state */
   5254	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
   5255
   5256	rc = lpfc_sli4_brdreset(phba);
   5257	if (rc) {
   5258		phba->link_state = LPFC_HBA_ERROR;
   5259		goto hba_down_queue;
   5260	}
   5261
   5262	spin_lock_irq(&phba->hbalock);
   5263	phba->pport->stopped = 0;
   5264	phba->link_state = LPFC_INIT_START;
   5265	phba->hba_flag = 0;
   5266	phba->sli4_hba.fawwpn_flag = 0;
   5267	spin_unlock_irq(&phba->hbalock);
   5268
   5269	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
   5270	psli->stats_start = ktime_get_seconds();
   5271
   5272	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
   5273	if (hba_aer_enabled)
   5274		pci_disable_pcie_error_reporting(phba->pcidev);
   5275
   5276hba_down_queue:
   5277	lpfc_hba_down_post(phba);
   5278	lpfc_sli4_queue_destroy(phba);
   5279
   5280	return rc;
   5281}
   5282
   5283/**
   5284 * lpfc_sli_brdrestart - Wrapper func for restarting hba
   5285 * @phba: Pointer to HBA context object.
   5286 *
    5287 * This routine wraps the actual SLI3 or SLI4 hba restart routine using the
    5288 * API jump table function pointer from the lpfc_hba struct.
   5289**/
   5290int
   5291lpfc_sli_brdrestart(struct lpfc_hba *phba)
   5292{
   5293	return phba->lpfc_sli_brdrestart(phba);
   5294}
   5295
   5296/**
   5297 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
   5298 * @phba: Pointer to HBA context object.
   5299 *
   5300 * This function is called after a HBA restart to wait for successful
   5301 * restart of the HBA. Successful restart of the HBA is indicated by
   5302 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
    5303 * iterations, the function will restart the HBA again. The function returns
    5304 * zero if the HBA restarts successfully, else it returns a negative error code.
   5305 **/
   5306int
   5307lpfc_sli_chipset_init(struct lpfc_hba *phba)
   5308{
   5309	uint32_t status, i = 0;
   5310
   5311	/* Read the HBA Host Status Register */
   5312	if (lpfc_readl(phba->HSregaddr, &status))
   5313		return -EIO;
   5314
   5315	/* Check status register to see what current state is */
   5316	i = 0;
   5317	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
   5318
    5319		/* Check every 10ms for 10 retries, then every 100ms for 90
    5320		 * retries, then every 1 sec for 50 retries, for a total of
    5321		 * ~60 seconds, before resetting the board again and checking
    5322		 * every 1 sec for 50 more retries. The up-to-60-second wait for
    5323		 * board ready is required for the Falcon FIPS zeroization to
    5324		 * complete; any board reset in between would restart the
    5325		 * zeroization and further delay board readiness.
   5326		 */
   5327		if (i++ >= 200) {
   5328			/* Adapter failed to init, timeout, status reg
   5329			   <status> */
   5330			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   5331					"0436 Adapter failed to init, "
   5332					"timeout, status reg x%x, "
   5333					"FW Data: A8 x%x AC x%x\n", status,
   5334					readl(phba->MBslimaddr + 0xa8),
   5335					readl(phba->MBslimaddr + 0xac));
   5336			phba->link_state = LPFC_HBA_ERROR;
   5337			return -ETIMEDOUT;
   5338		}
   5339
   5340		/* Check to see if any errors occurred during init */
   5341		if (status & HS_FFERM) {
   5342			/* ERROR: During chipset initialization */
   5343			/* Adapter failed to init, chipset, status reg
   5344			   <status> */
   5345			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   5346					"0437 Adapter failed to init, "
   5347					"chipset, status reg x%x, "
   5348					"FW Data: A8 x%x AC x%x\n", status,
   5349					readl(phba->MBslimaddr + 0xa8),
   5350					readl(phba->MBslimaddr + 0xac));
   5351			phba->link_state = LPFC_HBA_ERROR;
   5352			return -EIO;
   5353		}
   5354
   5355		if (i <= 10)
   5356			msleep(10);
   5357		else if (i <= 100)
   5358			msleep(100);
   5359		else
   5360			msleep(1000);
   5361
   5362		if (i == 150) {
   5363			/* Do post */
   5364			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
   5365			lpfc_sli_brdrestart(phba);
   5366		}
   5367		/* Read the HBA Host Status Register */
   5368		if (lpfc_readl(phba->HSregaddr, &status))
   5369			return -EIO;
   5370	}
   5371
   5372	/* Check to see if any errors occurred during init */
   5373	if (status & HS_FFERM) {
   5374		/* ERROR: During chipset initialization */
   5375		/* Adapter failed to init, chipset, status reg <status> */
   5376		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   5377				"0438 Adapter failed to init, chipset, "
   5378				"status reg x%x, "
   5379				"FW Data: A8 x%x AC x%x\n", status,
   5380				readl(phba->MBslimaddr + 0xa8),
   5381				readl(phba->MBslimaddr + 0xac));
   5382		phba->link_state = LPFC_HBA_ERROR;
   5383		return -EIO;
   5384	}
   5385
   5386	phba->hba_flag |= HBA_NEEDS_CFG_PORT;
   5387
   5388	/* Clear all interrupt enable conditions */
   5389	writel(0, phba->HCregaddr);
   5390	readl(phba->HCregaddr); /* flush */
   5391
   5392	/* setup host attn register */
   5393	writel(0xffffffff, phba->HAregaddr);
   5394	readl(phba->HAregaddr); /* flush */
   5395	return 0;
   5396}
   5397
   5398/**
   5399 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
   5400 *
   5401 * This function calculates and returns the number of HBQs required to be
   5402 * configured.
   5403 **/
   5404int
   5405lpfc_sli_hbq_count(void)
   5406{
   5407	return ARRAY_SIZE(lpfc_hbq_defs);
   5408}
   5409
   5410/**
   5411 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
   5412 *
   5413 * This function adds the number of hbq entries in every HBQ to get
   5414 * the total number of hbq entries required for the HBA and returns
   5415 * the total count.
   5416 **/
   5417static int
   5418lpfc_sli_hbq_entry_count(void)
   5419{
   5420	int  hbq_count = lpfc_sli_hbq_count();
   5421	int  count = 0;
   5422	int  i;
   5423
   5424	for (i = 0; i < hbq_count; ++i)
   5425		count += lpfc_hbq_defs[i]->entry_count;
   5426	return count;
   5427}
   5428
   5429/**
   5430 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
   5431 *
   5432 * This function calculates amount of memory required for all hbq entries
   5433 * to be configured and returns the total memory required.
   5434 **/
   5435int
   5436lpfc_sli_hbq_size(void)
   5437{
   5438	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
   5439}
   5440
   5441/**
   5442 * lpfc_sli_hbq_setup - configure and initialize HBQs
   5443 * @phba: Pointer to HBA context object.
   5444 *
   5445 * This function is called during the SLI initialization to configure
   5446 * all the HBQs and post buffers to the HBQ. The caller is not
   5447 * required to hold any locks. This function will return zero if successful
   5448 * else it will return negative error code.
   5449 **/
   5450static int
   5451lpfc_sli_hbq_setup(struct lpfc_hba *phba)
   5452{
   5453	int  hbq_count = lpfc_sli_hbq_count();
   5454	LPFC_MBOXQ_t *pmb;
   5455	MAILBOX_t *pmbox;
   5456	uint32_t hbqno;
   5457	uint32_t hbq_entry_index;
   5458
    5459	/* Get a Mailbox buffer to set up mailbox
    5460	 * commands for HBA initialization
    5461	 */
   5462	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   5463
   5464	if (!pmb)
   5465		return -ENOMEM;
   5466
   5467	pmbox = &pmb->u.mb;
   5468
   5469	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
   5470	phba->link_state = LPFC_INIT_MBX_CMDS;
   5471	phba->hbq_in_use = 1;
   5472
   5473	hbq_entry_index = 0;
   5474	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
   5475		phba->hbqs[hbqno].next_hbqPutIdx = 0;
   5476		phba->hbqs[hbqno].hbqPutIdx      = 0;
   5477		phba->hbqs[hbqno].local_hbqGetIdx   = 0;
   5478		phba->hbqs[hbqno].entry_count =
   5479			lpfc_hbq_defs[hbqno]->entry_count;
   5480		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
   5481			hbq_entry_index, pmb);
   5482		hbq_entry_index += phba->hbqs[hbqno].entry_count;
   5483
   5484		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
   5485			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
   5486			   mbxStatus <status>, ring <num> */
   5487
   5488			lpfc_printf_log(phba, KERN_ERR,
   5489					LOG_SLI | LOG_VPORT,
   5490					"1805 Adapter failed to init. "
   5491					"Data: x%x x%x x%x\n",
   5492					pmbox->mbxCommand,
   5493					pmbox->mbxStatus, hbqno);
   5494
   5495			phba->link_state = LPFC_HBA_ERROR;
   5496			mempool_free(pmb, phba->mbox_mem_pool);
   5497			return -ENXIO;
   5498		}
   5499	}
   5500	phba->hbq_count = hbq_count;
   5501
   5502	mempool_free(pmb, phba->mbox_mem_pool);
   5503
   5504	/* Initially populate or replenish the HBQs */
   5505	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
   5506		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
   5507	return 0;
   5508}
   5509
   5510/**
   5511 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
   5512 * @phba: Pointer to HBA context object.
   5513 *
    5514 * This function is called during SLI initialization to configure the
    5515 * ELS HBQ and post receive buffers to it. The caller is not
    5516 * required to hold any locks. This function will return zero if successful
    5517 * else it will return a negative error code.
   5518 **/
   5519static int
   5520lpfc_sli4_rb_setup(struct lpfc_hba *phba)
   5521{
   5522	phba->hbq_in_use = 1;
    5523	/*
    5524	 * Specific case when MDS diagnostics are enabled and supported.
    5525	 * The receive buffer count is halved to manage the incoming
    5526	 * traffic.
    5527	 */
   5528	if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
   5529		phba->hbqs[LPFC_ELS_HBQ].entry_count =
   5530			lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
   5531	else
   5532		phba->hbqs[LPFC_ELS_HBQ].entry_count =
   5533			lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
   5534	phba->hbq_count = 1;
   5535	lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
   5536	/* Initially populate or replenish the HBQs */
   5537	return 0;
   5538}
   5539
   5540/**
   5541 * lpfc_sli_config_port - Issue config port mailbox command
   5542 * @phba: Pointer to HBA context object.
   5543 * @sli_mode: sli mode - 2/3
   5544 *
   5545 * This function is called by the sli initialization code path
   5546 * to issue config_port mailbox command. This function restarts the
   5547 * HBA firmware and issues a config_port mailbox command to configure
   5548 * the SLI interface in the sli mode specified by sli_mode
   5549 * variable. The caller is not required to hold any locks.
   5550 * The function returns 0 if successful, else returns negative error
   5551 * code.
   5552 **/
   5553int
   5554lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
   5555{
   5556	LPFC_MBOXQ_t *pmb;
   5557	uint32_t resetcount = 0, rc = 0, done = 0;
   5558
   5559	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   5560	if (!pmb) {
   5561		phba->link_state = LPFC_HBA_ERROR;
   5562		return -ENOMEM;
   5563	}
   5564
   5565	phba->sli_rev = sli_mode;
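	/* Attempt the restart + CONFIG_PORT sequence at most twice */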
   5566	while (resetcount < 2 && !done) {
   5567		spin_lock_irq(&phba->hbalock);
   5568		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
   5569		spin_unlock_irq(&phba->hbalock);
   5570		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
   5571		lpfc_sli_brdrestart(phba);
   5572		rc = lpfc_sli_chipset_init(phba);
   5573		if (rc)
   5574			break;
   5575
   5576		spin_lock_irq(&phba->hbalock);
   5577		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
   5578		spin_unlock_irq(&phba->hbalock);
   5579		resetcount++;
   5580
   5581		/* Call pre CONFIG_PORT mailbox command initialization.  A
   5582		 * value of 0 means the call was successful.  Any other
   5583		 * nonzero value is a failure, but if ERESTART is returned,
   5584		 * the driver may reset the HBA and try again.
   5585		 */
   5586		rc = lpfc_config_port_prep(phba);
   5587		if (rc == -ERESTART) {
   5588			phba->link_state = LPFC_LINK_UNKNOWN;
   5589			continue;
   5590		} else if (rc)
   5591			break;
   5592
   5593		phba->link_state = LPFC_INIT_MBX_CMDS;
   5594		lpfc_config_port(phba, pmb);
   5595		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
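		/* Clear the SLI3 feature options; they are re-enabled below
		 * based on what the CONFIG_PORT response granted.
		 */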
   5596		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
   5597					LPFC_SLI3_HBQ_ENABLED |
   5598					LPFC_SLI3_CRP_ENABLED |
   5599					LPFC_SLI3_DSS_ENABLED);
   5600		if (rc != MBX_SUCCESS) {
   5601			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   5602				"0442 Adapter failed to init, mbxCmd x%x "
   5603				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
   5604				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
   5605			spin_lock_irq(&phba->hbalock);
   5606			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
   5607			spin_unlock_irq(&phba->hbalock);
   5608			rc = -ENXIO;
   5609		} else {
   5610			/* Allow asynchronous mailbox command to go through */
   5611			spin_lock_irq(&phba->hbalock);
   5612			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
   5613			spin_unlock_irq(&phba->hbalock);
   5614			done = 1;
   5615
   5616			if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
   5617			    (pmb->u.mb.un.varCfgPort.gasabt == 0))
   5618				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
   5619					"3110 Port did not grant ASABT\n");
   5620		}
   5621	}
   5622	if (!done) {
   5623		rc = -EINVAL;
   5624		goto do_prep_failed;
   5625	}
   5626	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
   5627		if (!pmb->u.mb.un.varCfgPort.cMA) {
   5628			rc = -ENXIO;
   5629			goto do_prep_failed;
   5630		}
   5631		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
   5632			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
   5633			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
   5634			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
   5635				phba->max_vpi : phba->max_vports;
   5636
   5637		} else
   5638			phba->max_vpi = 0;
   5639		if (pmb->u.mb.un.varCfgPort.gerbm)
   5640			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
   5641		if (pmb->u.mb.un.varCfgPort.gcrp)
   5642			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
   5643
   5644		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
   5645		phba->port_gp = phba->mbox->us.s3_pgp.port;
   5646
   5647		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
   5648			if (pmb->u.mb.un.varCfgPort.gbg == 0) {
   5649				phba->cfg_enable_bg = 0;
   5650				phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
   5651				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   5652						"0443 Adapter did not grant "
   5653						"BlockGuard\n");
   5654			}
   5655		}
   5656	} else {
   5657		phba->hbq_get = NULL;
   5658		phba->port_gp = phba->mbox->us.s2.port;
   5659		phba->max_vpi = 0;
   5660	}
   5661do_prep_failed:
   5662	mempool_free(pmb, phba->mbox_mem_pool);
   5663	return rc;
   5664}
   5665
   5666
   5667/**
   5668 * lpfc_sli_hba_setup - SLI initialization function
   5669 * @phba: Pointer to HBA context object.
   5670 *
   5671 * This function is the main SLI initialization function. This function
   5672 * is called by the HBA initialization code, HBA reset code and HBA
   5673 * error attention handler code. Caller is not required to hold any
   5674 * locks. This function issues config_port mailbox command to configure
   5675 * the SLI, setup iocb rings and HBQ rings. In the end the function
   5676 * calls the config_port_post function to issue init_link mailbox
   5677 * command and to start the discovery. The function will return zero
   5678 * if successful, else it will return negative error code.
   5679 **/
   5680int
   5681lpfc_sli_hba_setup(struct lpfc_hba *phba)
   5682{
   5683	uint32_t rc;
   5684	int  i;
   5685	int longs;
   5686
   5687	/* Enable ISR already does config_port because of config_msi mbx */
   5688	if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
   5689		rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
   5690		if (rc)
   5691			return -EIO;
   5692		phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
   5693	}
   5694	phba->fcp_embed_io = 0;	/* SLI4 FC support only */
   5695
   5696	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
   5697	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
   5698		rc = pci_enable_pcie_error_reporting(phba->pcidev);
   5699		if (!rc) {
   5700			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
   5701					"2709 This device supports "
   5702					"Advanced Error Reporting (AER)\n");
   5703			spin_lock_irq(&phba->hbalock);
   5704			phba->hba_flag |= HBA_AER_ENABLED;
   5705			spin_unlock_irq(&phba->hbalock);
   5706		} else {
   5707			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
   5708					"2708 This device does not support "
   5709					"Advanced Error Reporting (AER): %d\n",
   5710					rc);
   5711			phba->cfg_aer_support = 0;
   5712		}
   5713	}
   5714
   5715	if (phba->sli_rev == 3) {
   5716		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
   5717		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
   5718	} else {
   5719		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
   5720		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
   5721		phba->sli3_options = 0;
   5722	}
   5723
   5724	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
   5725			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
   5726			phba->sli_rev, phba->max_vpi);
   5727	rc = lpfc_sli_ring_map(phba);
   5728
   5729	if (rc)
   5730		goto lpfc_sli_hba_setup_error;
   5731
   5732	/* Initialize VPIs. */
   5733	if (phba->sli_rev == LPFC_SLI_REV3) {
   5734		/*
   5735		 * The VPI bitmask and physical ID array are allocated
   5736		 * and initialized once only - at driver load.  A port
   5737		 * reset doesn't need to reinitialize this memory.
   5738		 */
   5739		if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
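			/* Enough longs to hold one bit for each VPI (0..max_vpi) */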
   5740			longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
   5741			phba->vpi_bmask = kcalloc(longs,
   5742						  sizeof(unsigned long),
   5743						  GFP_KERNEL);
   5744			if (!phba->vpi_bmask) {
   5745				rc = -ENOMEM;
   5746				goto lpfc_sli_hba_setup_error;
   5747			}
   5748
   5749			phba->vpi_ids = kcalloc(phba->max_vpi + 1,
   5750						sizeof(uint16_t),
   5751						GFP_KERNEL);
   5752			if (!phba->vpi_ids) {
   5753				kfree(phba->vpi_bmask);
   5754				rc = -ENOMEM;
   5755				goto lpfc_sli_hba_setup_error;
   5756			}
   5757			for (i = 0; i < phba->max_vpi; i++)
   5758				phba->vpi_ids[i] = i;
   5759		}
   5760	}
   5761
   5762	/* Init HBQs */
   5763	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
   5764		rc = lpfc_sli_hbq_setup(phba);
   5765		if (rc)
   5766			goto lpfc_sli_hba_setup_error;
   5767	}
   5768	spin_lock_irq(&phba->hbalock);
   5769	phba->sli.sli_flag |= LPFC_PROCESS_LA;
   5770	spin_unlock_irq(&phba->hbalock);
   5771
   5772	rc = lpfc_config_port_post(phba);
   5773	if (rc)
   5774		goto lpfc_sli_hba_setup_error;
   5775
   5776	return rc;
   5777
   5778lpfc_sli_hba_setup_error:
   5779	phba->link_state = LPFC_HBA_ERROR;
   5780	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   5781			"0445 Firmware initialization failed\n");
   5782	return rc;
   5783}
   5784
   5785/**
   5786 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
   5787 * @phba: Pointer to HBA context object.
   5788 *
    5789 * This function issues a dump mailbox command to read config region
    5790 * 23, parses the records in the region, and populates the driver
    5791 * data structure.
   5792 **/
   5793static int
   5794lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
   5795{
   5796	LPFC_MBOXQ_t *mboxq;
   5797	struct lpfc_dmabuf *mp;
   5798	struct lpfc_mqe *mqe;
   5799	uint32_t data_length;
   5800	int rc;
   5801
   5802	/* Program the default value of vlan_id and fc_map */
   5803	phba->valid_vlan = 0;
   5804	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
   5805	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
   5806	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
   5807
   5808	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   5809	if (!mboxq)
   5810		return -ENOMEM;
   5811
   5812	mqe = &mboxq->u.mqe;
   5813	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
   5814		rc = -ENOMEM;
   5815		goto out_free_mboxq;
   5816	}
   5817
   5818	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
   5819	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
   5820
   5821	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
   5822			"(%d):2571 Mailbox cmd x%x Status x%x "
   5823			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
   5824			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
   5825			"CQ: x%x x%x x%x x%x\n",
   5826			mboxq->vport ? mboxq->vport->vpi : 0,
   5827			bf_get(lpfc_mqe_command, mqe),
   5828			bf_get(lpfc_mqe_status, mqe),
   5829			mqe->un.mb_words[0], mqe->un.mb_words[1],
   5830			mqe->un.mb_words[2], mqe->un.mb_words[3],
   5831			mqe->un.mb_words[4], mqe->un.mb_words[5],
   5832			mqe->un.mb_words[6], mqe->un.mb_words[7],
   5833			mqe->un.mb_words[8], mqe->un.mb_words[9],
   5834			mqe->un.mb_words[10], mqe->un.mb_words[11],
   5835			mqe->un.mb_words[12], mqe->un.mb_words[13],
   5836			mqe->un.mb_words[14], mqe->un.mb_words[15],
   5837			mqe->un.mb_words[16], mqe->un.mb_words[50],
   5838			mboxq->mcqe.word0,
   5839			mboxq->mcqe.mcqe_tag0, 	mboxq->mcqe.mcqe_tag1,
   5840			mboxq->mcqe.trailer);
   5841
   5842	if (rc) {
   5843		rc = -EIO;
   5844		goto out_free_mboxq;
   5845	}
   5846	data_length = mqe->un.mb_words[5];
   5847	if (data_length > DMP_RGN23_SIZE) {
   5848		rc = -EIO;
   5849		goto out_free_mboxq;
   5850	}
   5851
   5852	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
   5853	rc = 0;
   5854
   5855out_free_mboxq:
   5856	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
   5857	return rc;
   5858}
   5859
   5860/**
   5861 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
   5862 * @phba: pointer to lpfc hba data structure.
   5863 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
   5864 * @vpd: pointer to the memory to hold resulting port vpd data.
   5865 * @vpd_size: On input, the number of bytes allocated to @vpd.
   5866 *	      On output, the number of data bytes in @vpd.
   5867 *
   5868 * This routine executes a READ_REV SLI4 mailbox command.  In
   5869 * addition, this routine gets the port vpd data.
   5870 *
   5871 * Return codes
   5872 * 	0 - successful
    5873 * 	-ENOMEM - could not allocate memory.
   5874 **/
   5875static int
   5876lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
   5877		    uint8_t *vpd, uint32_t *vpd_size)
   5878{
   5879	int rc = 0;
   5880	uint32_t dma_size;
   5881	struct lpfc_dmabuf *dmabuf;
   5882	struct lpfc_mqe *mqe;
   5883
   5884	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
   5885	if (!dmabuf)
   5886		return -ENOMEM;
   5887
   5888	/*
   5889	 * Get a DMA buffer for the vpd data resulting from the READ_REV
   5890	 * mailbox command.
   5891	 */
   5892	dma_size = *vpd_size;
   5893	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
   5894					  &dmabuf->phys, GFP_KERNEL);
   5895	if (!dmabuf->virt) {
   5896		kfree(dmabuf);
   5897		return -ENOMEM;
   5898	}
   5899
   5900	/*
   5901	 * The SLI4 implementation of READ_REV conflicts at word1,
   5902	 * bits 31:16 and SLI4 adds vpd functionality not present
   5903	 * in SLI3.  This code corrects the conflicts.
   5904	 */
   5905	lpfc_read_rev(phba, mboxq);
   5906	mqe = &mboxq->u.mqe;
   5907	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
   5908	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
   5909	mqe->un.read_rev.word1 &= 0x0000FFFF;
   5910	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
   5911	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
   5912
   5913	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
   5914	if (rc) {
   5915		dma_free_coherent(&phba->pcidev->dev, dma_size,
   5916				  dmabuf->virt, dmabuf->phys);
   5917		kfree(dmabuf);
   5918		return -EIO;
   5919	}
   5920
   5921	/*
   5922	 * The available vpd length cannot be bigger than the
   5923	 * DMA buffer passed to the port.  Catch the less than
   5924	 * case and update the caller's size.
   5925	 */
   5926	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
   5927		*vpd_size = mqe->un.read_rev.avail_vpd_len;
   5928
   5929	memcpy(vpd, dmabuf->virt, *vpd_size);
   5930
   5931	dma_free_coherent(&phba->pcidev->dev, dma_size,
   5932			  dmabuf->virt, dmabuf->phys);
   5933	kfree(dmabuf);
   5934	return 0;
   5935}
   5936
   5937/**
   5938 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
   5939 * @phba: pointer to lpfc hba data structure.
   5940 *
    5941 * This routine retrieves the SLI4 device controller attributes of the
    5942 * controller this PCI function is attached to.
   5943 *
   5944 * Return codes
   5945 *      0 - successful
   5946 *      otherwise - failed to retrieve controller attributes
   5947 **/
   5948static int
   5949lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
   5950{
   5951	LPFC_MBOXQ_t *mboxq;
   5952	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
   5953	struct lpfc_controller_attribute *cntl_attr;
   5954	void *virtaddr = NULL;
   5955	uint32_t alloclen, reqlen;
   5956	uint32_t shdr_status, shdr_add_status;
   5957	union lpfc_sli4_cfg_shdr *shdr;
   5958	int rc;
   5959
   5960	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   5961	if (!mboxq)
   5962		return -ENOMEM;
   5963
   5964	/* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
   5965	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
   5966	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
   5967			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
   5968			LPFC_SLI4_MBX_NEMBED);
   5969
   5970	if (alloclen < reqlen) {
   5971		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   5972				"3084 Allocated DMA memory size (%d) is "
   5973				"less than the requested DMA memory size "
   5974				"(%d)\n", alloclen, reqlen);
   5975		rc = -ENOMEM;
   5976		goto out_free_mboxq;
   5977	}
   5978	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
   5979	virtaddr = mboxq->sge_array->addr[0];
   5980	mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
   5981	shdr = &mbx_cntl_attr->cfg_shdr;
   5982	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
   5983	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
   5984	if (shdr_status || shdr_add_status || rc) {
   5985		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
   5986				"3085 Mailbox x%x (x%x/x%x) failed, "
   5987				"rc:x%x, status:x%x, add_status:x%x\n",
   5988				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
   5989				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
   5990				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
   5991				rc, shdr_status, shdr_add_status);
   5992		rc = -ENXIO;
   5993		goto out_free_mboxq;
   5994	}
   5995
   5996	cntl_attr = &mbx_cntl_attr->cntl_attr;
   5997	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
   5998	phba->sli4_hba.lnk_info.lnk_tp =
   5999		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
   6000	phba->sli4_hba.lnk_info.lnk_no =
   6001		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
   6002	phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
   6003	phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
   6004
   6005	memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
   6006	strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
   6007		sizeof(phba->BIOSVersion));
   6008
   6009	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
   6010			"3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
   6011			"flash_id: x%02x, asic_rev: x%02x\n",
   6012			phba->sli4_hba.lnk_info.lnk_tp,
   6013			phba->sli4_hba.lnk_info.lnk_no,
   6014			phba->BIOSVersion, phba->sli4_hba.flash_id,
   6015			phba->sli4_hba.asic_rev);
   6016out_free_mboxq:
   6017	if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
   6018		lpfc_sli4_mbox_cmd_free(phba, mboxq);
   6019	else
   6020		mempool_free(mboxq, phba->mbox_mem_pool);
   6021	return rc;
   6022}
   6023
   6024/**
   6025 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
   6026 * @phba: pointer to lpfc hba data structure.
   6027 *
    6028 * This routine retrieves the SLI4 device physical port name of the port
    6029 * this PCI function is attached to.
   6030 *
   6031 * Return codes
   6032 *      0 - successful
   6033 *      otherwise - failed to retrieve physical port name
   6034 **/
   6035static int
   6036lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
   6037{
   6038	LPFC_MBOXQ_t *mboxq;
   6039	struct lpfc_mbx_get_port_name *get_port_name;
   6040	uint32_t shdr_status, shdr_add_status;
   6041	union lpfc_sli4_cfg_shdr *shdr;
   6042	char cport_name = 0;
   6043	int rc;
   6044
   6045	/* We assume nothing at this point */
   6046	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
   6047	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
   6048
   6049	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   6050	if (!mboxq)
   6051		return -ENOMEM;
   6052	/* obtain link type and link number via READ_CONFIG */
   6053	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
   6054	lpfc_sli4_read_config(phba);
   6055	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
   6056		goto retrieve_ppname;
   6057
   6058	/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
   6059	rc = lpfc_sli4_get_ctl_attr(phba);
   6060	if (rc)
   6061		goto out_free_mboxq;
   6062
   6063retrieve_ppname:
   6064	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
   6065		LPFC_MBOX_OPCODE_GET_PORT_NAME,
   6066		sizeof(struct lpfc_mbx_get_port_name) -
   6067		sizeof(struct lpfc_sli4_cfg_mhdr),
   6068		LPFC_SLI4_MBX_EMBED);
   6069	get_port_name = &mboxq->u.mqe.un.get_port_name;
   6070	shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
   6071	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
   6072	bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
   6073		phba->sli4_hba.lnk_info.lnk_tp);
   6074	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
   6075	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
   6076	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
   6077	if (shdr_status || shdr_add_status || rc) {
   6078		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
   6079				"3087 Mailbox x%x (x%x/x%x) failed: "
   6080				"rc:x%x, status:x%x, add_status:x%x\n",
   6081				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
   6082				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
   6083				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
   6084				rc, shdr_status, shdr_add_status);
   6085		rc = -ENXIO;
   6086		goto out_free_mboxq;
   6087	}
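	/* Each link number selects one byte of the returned port name field */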
   6088	switch (phba->sli4_hba.lnk_info.lnk_no) {
   6089	case LPFC_LINK_NUMBER_0:
   6090		cport_name = bf_get(lpfc_mbx_get_port_name_name0,
   6091				&get_port_name->u.response);
   6092		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
   6093		break;
   6094	case LPFC_LINK_NUMBER_1:
   6095		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
   6096				&get_port_name->u.response);
   6097		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
   6098		break;
   6099	case LPFC_LINK_NUMBER_2:
   6100		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
   6101				&get_port_name->u.response);
   6102		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
   6103		break;
   6104	case LPFC_LINK_NUMBER_3:
   6105		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
   6106				&get_port_name->u.response);
   6107		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
   6108		break;
   6109	default:
   6110		break;
   6111	}
   6112
   6113	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
   6114		phba->Port[0] = cport_name;
   6115		phba->Port[1] = '\0';
   6116		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
   6117				"3091 SLI get port name: %s\n", phba->Port);
   6118	}
   6119
   6120out_free_mboxq:
   6121	if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
   6122		lpfc_sli4_mbox_cmd_free(phba, mboxq);
   6123	else
   6124		mempool_free(mboxq, phba->mbox_mem_pool);
   6125	return rc;
   6126}
   6127
   6128/**
   6129 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
   6130 * @phba: pointer to lpfc hba data structure.
   6131 *
   6132 * This routine is called to explicitly arm the SLI4 device's completion and
    6133	 * event queues.
   6134 **/
   6135static void
   6136lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
   6137{
   6138	int qidx;
   6139	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
   6140	struct lpfc_sli4_hdw_queue *qp;
   6141	struct lpfc_queue *eq;
   6142
   6143	sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
   6144	sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
   6145	if (sli4_hba->nvmels_cq)
   6146		sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
   6147					   LPFC_QUEUE_REARM);
   6148
   6149	if (sli4_hba->hdwq) {
   6150		/* Loop thru all Hardware Queues */
   6151		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
   6152			qp = &sli4_hba->hdwq[qidx];
   6153			/* ARM the corresponding CQ */
   6154			sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
   6155						LPFC_QUEUE_REARM);
   6156		}
   6157
   6158		/* Loop thru all IRQ vectors */
   6159		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
   6160			eq = sli4_hba->hba_eq_hdl[qidx].eq;
   6161			/* ARM the corresponding EQ */
   6162			sli4_hba->sli4_write_eq_db(phba, eq,
   6163						   0, LPFC_QUEUE_REARM);
   6164		}
   6165	}
   6166
   6167	if (phba->nvmet_support) {
   6168		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
   6169			sli4_hba->sli4_write_cq_db(phba,
   6170				sli4_hba->nvmet_cqset[qidx], 0,
   6171				LPFC_QUEUE_REARM);
   6172		}
   6173	}
   6174}
   6175
   6176/**
   6177 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
   6178 * @phba: Pointer to HBA context object.
   6179 * @type: The resource extent type.
   6180 * @extnt_count: buffer to hold port available extent count.
   6181 * @extnt_size: buffer to hold element count per extent.
   6182 *
    6183	 * This function calls the port and retrieves the number of available
   6184 * extents and their size for a particular extent type.
   6185 *
   6186 * Returns: 0 if successful.  Nonzero otherwise.
   6187 **/
   6188int
   6189lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
   6190			       uint16_t *extnt_count, uint16_t *extnt_size)
   6191{
   6192	int rc = 0;
   6193	uint32_t length;
   6194	uint32_t mbox_tmo;
   6195	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
   6196	LPFC_MBOXQ_t *mbox;
   6197
   6198	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   6199	if (!mbox)
   6200		return -ENOMEM;
   6201
   6202	/* Find out how many extents are available for this resource type */
   6203	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
   6204		  sizeof(struct lpfc_sli4_cfg_mhdr));
   6205	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
   6206			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
   6207			 length, LPFC_SLI4_MBX_EMBED);
   6208
   6209	/* Send an extents count of 0 - the GET doesn't use it. */
   6210	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
   6211					LPFC_SLI4_MBX_EMBED);
   6212	if (unlikely(rc)) {
   6213		rc = -EIO;
   6214		goto err_exit;
   6215	}
   6216
   6217	if (!phba->sli4_hba.intr_enable)
   6218		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
   6219	else {
   6220		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
   6221		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
   6222	}
   6223	if (unlikely(rc)) {
   6224		rc = -EIO;
   6225		goto err_exit;
   6226	}
   6227
   6228	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
   6229	if (bf_get(lpfc_mbox_hdr_status,
   6230		   &rsrc_info->header.cfg_shdr.response)) {
   6231		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   6232				"2930 Failed to get resource extents "
   6233				"Status 0x%x Add'l Status 0x%x\n",
   6234				bf_get(lpfc_mbox_hdr_status,
   6235				       &rsrc_info->header.cfg_shdr.response),
   6236				bf_get(lpfc_mbox_hdr_add_status,
   6237				       &rsrc_info->header.cfg_shdr.response));
   6238		rc = -EIO;
   6239		goto err_exit;
   6240	}
   6241
   6242	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
   6243			      &rsrc_info->u.rsp);
   6244	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
   6245			     &rsrc_info->u.rsp);
   6246
   6247	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
   6248			"3162 Retrieved extents type-%d from port: count:%d, "
   6249			"size:%d\n", type, *extnt_count, *extnt_size);
   6250
   6251err_exit:
   6252	mempool_free(mbox, phba->mbox_mem_pool);
   6253	return rc;
   6254}
   6255
   6256/**
   6257 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
   6258 * @phba: Pointer to HBA context object.
   6259 * @type: The extent type to check.
   6260 *
   6261 * This function reads the current available extents from the port and checks
   6262 * if the extent count or extent size has changed since the last access.
    6263	 * Callers use this routine after a port reset to determine whether an
    6264	 * extent reprovisioning requirement exists.
   6265 *
   6266 * Returns:
   6267 *   -Error: error indicates problem.
   6268 *   1: Extent count or size has changed.
   6269 *   0: No changes.
   6270 **/
   6271static int
   6272lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
   6273{
   6274	uint16_t curr_ext_cnt, rsrc_ext_cnt;
   6275	uint16_t size_diff, rsrc_ext_size;
   6276	int rc = 0;
   6277	struct lpfc_rsrc_blks *rsrc_entry;
   6278	struct list_head *rsrc_blk_list = NULL;
   6279
   6280	size_diff = 0;
   6281	curr_ext_cnt = 0;
   6282	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
   6283					    &rsrc_ext_cnt,
   6284					    &rsrc_ext_size);
   6285	if (unlikely(rc))
   6286		return -EIO;
   6287
   6288	switch (type) {
   6289	case LPFC_RSC_TYPE_FCOE_RPI:
   6290		rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
   6291		break;
   6292	case LPFC_RSC_TYPE_FCOE_VPI:
   6293		rsrc_blk_list = &phba->lpfc_vpi_blk_list;
   6294		break;
   6295	case LPFC_RSC_TYPE_FCOE_XRI:
   6296		rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
   6297		break;
   6298	case LPFC_RSC_TYPE_FCOE_VFI:
   6299		rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
   6300		break;
   6301	default:
   6302		break;
   6303	}
   6304
   6305	list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
   6306		curr_ext_cnt++;
   6307		if (rsrc_entry->rsrc_size != rsrc_ext_size)
   6308			size_diff++;
   6309	}
   6310
   6311	if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
   6312		rc = 1;
   6313
   6314	return rc;
   6315}
   6316
   6317/**
    6318	 * lpfc_sli4_cfg_post_extnts - Issue a mailbox command to allocate resource extents.
   6319 * @phba: Pointer to HBA context object.
   6320 * @extnt_cnt: number of available extents.
   6321 * @type: the extent type (rpi, xri, vfi, vpi).
   6322 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
   6323 * @mbox: pointer to the caller's allocated mailbox structure.
   6324 *
   6325 * This function executes the extents allocation request.  It also
   6326 * takes care of the amount of memory needed to allocate or get the
   6327 * allocated extents. It is the caller's responsibility to evaluate
   6328 * the response.
   6329 *
   6330 * Returns:
   6331 *   -Error:  Error value describes the condition found.
   6332 *   0: if successful
   6333 **/
   6334static int
   6335lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
   6336			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
   6337{
   6338	int rc = 0;
   6339	uint32_t req_len;
   6340	uint32_t emb_len;
   6341	uint32_t alloc_len, mbox_tmo;
   6342
   6343	/* Calculate the total requested length of the dma memory */
   6344	req_len = extnt_cnt * sizeof(uint16_t);
   6345
   6346	/*
   6347	 * Calculate the size of an embedded mailbox.  The uint32_t
    6348	 * accounts for the extents-specific word.
   6349	 */
   6350	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
   6351		sizeof(uint32_t);
   6352
   6353	/*
   6354	 * Presume the allocation and response will fit into an embedded
   6355	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
   6356	 */
   6357	*emb = LPFC_SLI4_MBX_EMBED;
   6358	if (req_len > emb_len) {
   6359		req_len = extnt_cnt * sizeof(uint16_t) +
   6360			sizeof(union lpfc_sli4_cfg_shdr) +
   6361			sizeof(uint32_t);
   6362		*emb = LPFC_SLI4_MBX_NEMBED;
   6363	}
   6364
   6365	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
   6366				     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
   6367				     req_len, *emb);
   6368	if (alloc_len < req_len) {
   6369		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   6370			"2982 Allocated DMA memory size (x%x) is "
   6371			"less than the requested DMA memory "
   6372			"size (x%x)\n", alloc_len, req_len);
   6373		return -ENOMEM;
   6374	}
   6375	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
   6376	if (unlikely(rc))
   6377		return -EIO;
   6378
   6379	if (!phba->sli4_hba.intr_enable)
   6380		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
   6381	else {
   6382		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
   6383		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
   6384	}
   6385
   6386	if (unlikely(rc))
   6387		rc = -EIO;
   6388	return rc;
   6389}
   6390
   6391/**
   6392 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
   6393 * @phba: Pointer to HBA context object.
   6394 * @type:  The resource extent type to allocate.
   6395 *
   6396 * This function allocates the number of elements for the specified
   6397 * resource type.
   6398 **/
   6399static int
   6400lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
   6401{
   6402	bool emb = false;
   6403	uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
   6404	uint16_t rsrc_id, rsrc_start, j, k;
   6405	uint16_t *ids;
   6406	int i, rc;
   6407	unsigned long longs;
   6408	unsigned long *bmask;
   6409	struct lpfc_rsrc_blks *rsrc_blks;
   6410	LPFC_MBOXQ_t *mbox;
   6411	uint32_t length;
   6412	struct lpfc_id_range *id_array = NULL;
   6413	void *virtaddr = NULL;
   6414	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
   6415	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
   6416	struct list_head *ext_blk_list;
   6417
   6418	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
   6419					    &rsrc_cnt,
   6420					    &rsrc_size);
   6421	if (unlikely(rc))
   6422		return -EIO;
   6423
   6424	if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
   6425		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   6426			"3009 No available Resource Extents "
   6427			"for resource type 0x%x: Count: 0x%x, "
   6428			"Size 0x%x\n", type, rsrc_cnt,
   6429			rsrc_size);
   6430		return -ENOMEM;
   6431	}
   6432
   6433	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
   6434			"2903 Post resource extents type-0x%x: "
   6435			"count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
   6436
   6437	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   6438	if (!mbox)
   6439		return -ENOMEM;
   6440
   6441	rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
   6442	if (unlikely(rc)) {
   6443		rc = -EIO;
   6444		goto err_exit;
   6445	}
   6446
   6447	/*
   6448	 * Figure out where the response is located.  Then get local pointers
    6449	 * to the response data.  The port does not guarantee to respond to
    6450	 * the full extent count requested, so update the local variable with
    6451	 * the count actually allocated by the port.
   6452	 */
   6453	if (emb == LPFC_SLI4_MBX_EMBED) {
   6454		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
   6455		id_array = &rsrc_ext->u.rsp.id[0];
   6456		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
   6457	} else {
   6458		virtaddr = mbox->sge_array->addr[0];
   6459		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
   6460		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
   6461		id_array = &n_rsrc->id;
   6462	}
   6463
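	/*
	 * One bit tracks each resource id, so size the bitmask in whole
	 * unsigned longs, rounding the total id count up.
	 */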
   6464	longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
   6465	rsrc_id_cnt = rsrc_cnt * rsrc_size;
   6466
   6467	/*
   6468	 * Based on the resource size and count, correct the base and max
   6469	 * resource values.
   6470	 */
   6471	length = sizeof(struct lpfc_rsrc_blks);
   6472	switch (type) {
   6473	case LPFC_RSC_TYPE_FCOE_RPI:
   6474		phba->sli4_hba.rpi_bmask = kcalloc(longs,
   6475						   sizeof(unsigned long),
   6476						   GFP_KERNEL);
   6477		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
   6478			rc = -ENOMEM;
   6479			goto err_exit;
   6480		}
   6481		phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
   6482						 sizeof(uint16_t),
   6483						 GFP_KERNEL);
   6484		if (unlikely(!phba->sli4_hba.rpi_ids)) {
   6485			kfree(phba->sli4_hba.rpi_bmask);
   6486			rc = -ENOMEM;
   6487			goto err_exit;
   6488		}
   6489
   6490		/*
   6491		 * The next_rpi was initialized with the maximum available
   6492		 * count but the port may allocate a smaller number.  Catch
   6493		 * that case and update the next_rpi.
   6494		 */
   6495		phba->sli4_hba.next_rpi = rsrc_id_cnt;
   6496
   6497		/* Initialize local ptrs for common extent processing later. */
   6498		bmask = phba->sli4_hba.rpi_bmask;
   6499		ids = phba->sli4_hba.rpi_ids;
   6500		ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
   6501		break;
   6502	case LPFC_RSC_TYPE_FCOE_VPI:
   6503		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
   6504					  GFP_KERNEL);
   6505		if (unlikely(!phba->vpi_bmask)) {
   6506			rc = -ENOMEM;
   6507			goto err_exit;
   6508		}
   6509		phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
   6510					 GFP_KERNEL);
   6511		if (unlikely(!phba->vpi_ids)) {
   6512			kfree(phba->vpi_bmask);
   6513			rc = -ENOMEM;
   6514			goto err_exit;
   6515		}
   6516
   6517		/* Initialize local ptrs for common extent processing later. */
   6518		bmask = phba->vpi_bmask;
   6519		ids = phba->vpi_ids;
   6520		ext_blk_list = &phba->lpfc_vpi_blk_list;
   6521		break;
   6522	case LPFC_RSC_TYPE_FCOE_XRI:
   6523		phba->sli4_hba.xri_bmask = kcalloc(longs,
   6524						   sizeof(unsigned long),
   6525						   GFP_KERNEL);
   6526		if (unlikely(!phba->sli4_hba.xri_bmask)) {
   6527			rc = -ENOMEM;
   6528			goto err_exit;
   6529		}
   6530		phba->sli4_hba.max_cfg_param.xri_used = 0;
   6531		phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
   6532						 sizeof(uint16_t),
   6533						 GFP_KERNEL);
   6534		if (unlikely(!phba->sli4_hba.xri_ids)) {
   6535			kfree(phba->sli4_hba.xri_bmask);
   6536			rc = -ENOMEM;
   6537			goto err_exit;
   6538		}
   6539
   6540		/* Initialize local ptrs for common extent processing later. */
   6541		bmask = phba->sli4_hba.xri_bmask;
   6542		ids = phba->sli4_hba.xri_ids;
   6543		ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
   6544		break;
   6545	case LPFC_RSC_TYPE_FCOE_VFI:
   6546		phba->sli4_hba.vfi_bmask = kcalloc(longs,
   6547						   sizeof(unsigned long),
   6548						   GFP_KERNEL);
   6549		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
   6550			rc = -ENOMEM;
   6551			goto err_exit;
   6552		}
   6553		phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
   6554						 sizeof(uint16_t),
   6555						 GFP_KERNEL);
   6556		if (unlikely(!phba->sli4_hba.vfi_ids)) {
   6557			kfree(phba->sli4_hba.vfi_bmask);
   6558			rc = -ENOMEM;
   6559			goto err_exit;
   6560		}
   6561
   6562		/* Initialize local ptrs for common extent processing later. */
   6563		bmask = phba->sli4_hba.vfi_bmask;
   6564		ids = phba->sli4_hba.vfi_ids;
   6565		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
   6566		break;
   6567	default:
   6568		/* Unsupported Opcode.  Fail call. */
   6569		id_array = NULL;
   6570		bmask = NULL;
   6571		ids = NULL;
   6572		ext_blk_list = NULL;
   6573		goto err_exit;
   6574	}
   6575
   6576	/*
   6577	 * Complete initializing the extent configuration with the
   6578	 * allocated ids assigned to this function.  The bitmask serves
   6579	 * as an index into the array and manages the available ids.  The
   6580	 * array just stores the ids communicated to the port via the wqes.
   6581	 */
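	/*
	 * Each lpfc_id_range word packs two 16-bit starting ids
	 * (word4_0 and word4_1), so the word index k advances only
	 * after every second extent processed below.
	 */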
   6582	for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
   6583		if ((i % 2) == 0)
   6584			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
   6585					 &id_array[k]);
   6586		else
   6587			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
   6588					 &id_array[k]);
   6589
   6590		rsrc_blks = kzalloc(length, GFP_KERNEL);
   6591		if (unlikely(!rsrc_blks)) {
   6592			rc = -ENOMEM;
   6593			kfree(bmask);
   6594			kfree(ids);
   6595			goto err_exit;
   6596		}
   6597		rsrc_blks->rsrc_start = rsrc_id;
   6598		rsrc_blks->rsrc_size = rsrc_size;
   6599		list_add_tail(&rsrc_blks->list, ext_blk_list);
   6600		rsrc_start = rsrc_id;
   6601		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
   6602			phba->sli4_hba.io_xri_start = rsrc_start +
   6603				lpfc_sli4_get_iocb_cnt(phba);
   6604		}
   6605
   6606		while (rsrc_id < (rsrc_start + rsrc_size)) {
   6607			ids[j] = rsrc_id;
   6608			rsrc_id++;
   6609			j++;
   6610		}
   6611		/* Entire word processed.  Get next word.*/
   6612		if ((i % 2) == 1)
   6613			k++;
   6614	}
   6615 err_exit:
   6616	lpfc_sli4_mbox_cmd_free(phba, mbox);
   6617	return rc;
   6618}
   6619
   6620
   6621
   6622/**
   6623 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
   6624 * @phba: Pointer to HBA context object.
   6625 * @type: the extent's type.
   6626 *
   6627 * This function deallocates all extents of a particular resource type.
   6628 * SLI4 does not allow for deallocating a particular extent range.  It
   6629 * is the caller's responsibility to release all kernel memory resources.
   6630 **/
   6631static int
   6632lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
   6633{
   6634	int rc;
   6635	uint32_t length, mbox_tmo = 0;
   6636	LPFC_MBOXQ_t *mbox;
   6637	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
   6638	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
   6639
   6640	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   6641	if (!mbox)
   6642		return -ENOMEM;
   6643
   6644	/*
   6645	 * This function sends an embedded mailbox because it only sends the
    6646	 * resource type.  All extents of this type are released by the
   6647	 * port.
   6648	 */
   6649	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
   6650		  sizeof(struct lpfc_sli4_cfg_mhdr));
   6651	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
   6652			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
   6653			 length, LPFC_SLI4_MBX_EMBED);
   6654
   6655	/* Send an extents count of 0 - the dealloc doesn't use it. */
   6656	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
   6657					LPFC_SLI4_MBX_EMBED);
   6658	if (unlikely(rc)) {
   6659		rc = -EIO;
   6660		goto out_free_mbox;
   6661	}
   6662	if (!phba->sli4_hba.intr_enable)
   6663		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
   6664	else {
   6665		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
   6666		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
   6667	}
   6668	if (unlikely(rc)) {
   6669		rc = -EIO;
   6670		goto out_free_mbox;
   6671	}
   6672
   6673	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
   6674	if (bf_get(lpfc_mbox_hdr_status,
   6675		   &dealloc_rsrc->header.cfg_shdr.response)) {
   6676		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   6677				"2919 Failed to release resource extents "
   6678				"for type %d - Status 0x%x Add'l Status 0x%x. "
   6679				"Resource memory not released.\n",
   6680				type,
   6681				bf_get(lpfc_mbox_hdr_status,
   6682				    &dealloc_rsrc->header.cfg_shdr.response),
   6683				bf_get(lpfc_mbox_hdr_add_status,
   6684				    &dealloc_rsrc->header.cfg_shdr.response));
   6685		rc = -EIO;
   6686		goto out_free_mbox;
   6687	}
   6688
   6689	/* Release kernel memory resources for the specific type. */
   6690	switch (type) {
   6691	case LPFC_RSC_TYPE_FCOE_VPI:
   6692		kfree(phba->vpi_bmask);
   6693		kfree(phba->vpi_ids);
   6694		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
   6695		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
   6696				    &phba->lpfc_vpi_blk_list, list) {
   6697			list_del_init(&rsrc_blk->list);
   6698			kfree(rsrc_blk);
   6699		}
   6700		phba->sli4_hba.max_cfg_param.vpi_used = 0;
   6701		break;
   6702	case LPFC_RSC_TYPE_FCOE_XRI:
   6703		kfree(phba->sli4_hba.xri_bmask);
   6704		kfree(phba->sli4_hba.xri_ids);
   6705		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
   6706				    &phba->sli4_hba.lpfc_xri_blk_list, list) {
   6707			list_del_init(&rsrc_blk->list);
   6708			kfree(rsrc_blk);
   6709		}
   6710		break;
   6711	case LPFC_RSC_TYPE_FCOE_VFI:
   6712		kfree(phba->sli4_hba.vfi_bmask);
   6713		kfree(phba->sli4_hba.vfi_ids);
   6714		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
   6715		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
   6716				    &phba->sli4_hba.lpfc_vfi_blk_list, list) {
   6717			list_del_init(&rsrc_blk->list);
   6718			kfree(rsrc_blk);
   6719		}
   6720		break;
   6721	case LPFC_RSC_TYPE_FCOE_RPI:
   6722		/* RPI bitmask and physical id array are cleaned up earlier. */
   6723		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
   6724				    &phba->sli4_hba.lpfc_rpi_blk_list, list) {
   6725			list_del_init(&rsrc_blk->list);
   6726			kfree(rsrc_blk);
   6727		}
   6728		break;
   6729	default:
   6730		break;
   6731	}
   6732
   6733	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
   6734
   6735 out_free_mbox:
   6736	mempool_free(mbox, phba->mbox_mem_pool);
   6737	return rc;
   6738}
   6739
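/**
 * lpfc_set_features - Prepare a SET_FEATURES mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to the caller's allocated mailbox structure.
 * @feature: the LPFC_SET_* feature to configure.
 *
 * Builds @mbox as an embedded SLI4 SET_FEATURES command for the requested
 * feature.  The caller is responsible for issuing the mailbox command.
 **/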
   6740static void
   6741lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
   6742		  uint32_t feature)
   6743{
   6744	uint32_t len;
   6745	u32 sig_freq = 0;
   6746
   6747	len = sizeof(struct lpfc_mbx_set_feature) -
   6748		sizeof(struct lpfc_sli4_cfg_mhdr);
   6749	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
   6750			 LPFC_MBOX_OPCODE_SET_FEATURES, len,
   6751			 LPFC_SLI4_MBX_EMBED);
   6752
   6753	switch (feature) {
   6754	case LPFC_SET_UE_RECOVERY:
   6755		bf_set(lpfc_mbx_set_feature_UER,
   6756		       &mbox->u.mqe.un.set_feature, 1);
   6757		mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
   6758		mbox->u.mqe.un.set_feature.param_len = 8;
   6759		break;
   6760	case LPFC_SET_MDS_DIAGS:
   6761		bf_set(lpfc_mbx_set_feature_mds,
   6762		       &mbox->u.mqe.un.set_feature, 1);
   6763		bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
   6764		       &mbox->u.mqe.un.set_feature, 1);
   6765		mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
   6766		mbox->u.mqe.un.set_feature.param_len = 8;
   6767		break;
   6768	case LPFC_SET_CGN_SIGNAL:
   6769		if (phba->cmf_active_mode == LPFC_CFG_OFF)
   6770			sig_freq = 0;
   6771		else
   6772			sig_freq = phba->cgn_sig_freq;
   6773
   6774		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
   6775			bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
   6776			       &mbox->u.mqe.un.set_feature, sig_freq);
   6777			bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
   6778			       &mbox->u.mqe.un.set_feature, sig_freq);
   6779		}
   6780
   6781		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
   6782			bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
   6783			       &mbox->u.mqe.un.set_feature, sig_freq);
   6784
   6785		if (phba->cmf_active_mode == LPFC_CFG_OFF ||
   6786		    phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
   6787			sig_freq = 0;
   6788		else
   6789			sig_freq = lpfc_acqe_cgn_frequency;
   6790
   6791		bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
   6792		       &mbox->u.mqe.un.set_feature, sig_freq);
   6793
   6794		mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
   6795		mbox->u.mqe.un.set_feature.param_len = 12;
   6796		break;
   6797	case LPFC_SET_DUAL_DUMP:
   6798		bf_set(lpfc_mbx_set_feature_dd,
   6799		       &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
   6800		bf_set(lpfc_mbx_set_feature_ddquery,
   6801		       &mbox->u.mqe.un.set_feature, 0);
   6802		mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
   6803		mbox->u.mqe.un.set_feature.param_len = 4;
   6804		break;
   6805	case LPFC_SET_ENABLE_MI:
   6806		mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
   6807		mbox->u.mqe.un.set_feature.param_len = 4;
   6808		bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
   6809		       phba->pport->cfg_lun_queue_depth);
   6810		bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
   6811		       phba->sli4_hba.pc_sli4_params.mi_ver);
   6812		break;
   6813	case LPFC_SET_ENABLE_CMF:
   6814		bf_set(lpfc_mbx_set_feature_dd, &mbox->u.mqe.un.set_feature, 1);
   6815		mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
   6816		mbox->u.mqe.un.set_feature.param_len = 4;
   6817		bf_set(lpfc_mbx_set_feature_cmf,
   6818		       &mbox->u.mqe.un.set_feature, 1);
   6819		break;
   6820	}
   6821	return;
   6822}
   6823
   6824/**
   6825 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
   6826 * @phba: Pointer to HBA context object.
   6827 *
   6828 * Disable FW logging into host memory on the adapter. To
   6829 * be done before reading logs from the host memory.
   6830 **/
   6831void
   6832lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
   6833{
   6834	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
   6835
   6836	spin_lock_irq(&phba->hbalock);
   6837	ras_fwlog->state = INACTIVE;
   6838	spin_unlock_irq(&phba->hbalock);
   6839
   6840	/* Disable FW logging to host memory */
   6841	writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
   6842	       phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
   6843
   6844	/* Wait 10ms for firmware to stop using DMA buffer */
   6845	usleep_range(10 * 1000, 20 * 1000);
   6846}
   6847
   6848/**
   6849 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
   6850 * @phba: Pointer to HBA context object.
   6851 *
   6852 * This function is called to free memory allocated for RAS FW logging
   6853 * support in the driver.
   6854 **/
   6855void
   6856lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
   6857{
   6858	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
   6859	struct lpfc_dmabuf *dmabuf, *next;
   6860
   6861	if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
   6862		list_for_each_entry_safe(dmabuf, next,
   6863				    &ras_fwlog->fwlog_buff_list,
   6864				    list) {
   6865			list_del(&dmabuf->list);
   6866			dma_free_coherent(&phba->pcidev->dev,
   6867					  LPFC_RAS_MAX_ENTRY_SIZE,
   6868					  dmabuf->virt, dmabuf->phys);
   6869			kfree(dmabuf);
   6870		}
   6871	}
   6872
   6873	if (ras_fwlog->lwpd.virt) {
   6874		dma_free_coherent(&phba->pcidev->dev,
   6875				  sizeof(uint32_t) * 2,
   6876				  ras_fwlog->lwpd.virt,
   6877				  ras_fwlog->lwpd.phys);
   6878		ras_fwlog->lwpd.virt = NULL;
   6879	}
   6880
   6881	spin_lock_irq(&phba->hbalock);
   6882	ras_fwlog->state = INACTIVE;
   6883	spin_unlock_irq(&phba->hbalock);
   6884}
   6885
   6886/**
    6887	 * lpfc_sli4_ras_dma_alloc: Allocate DMA memory for FW logging support
   6888 * @phba: Pointer to HBA context object.
   6889 * @fwlog_buff_count: Count of buffers to be created.
   6890 *
    6891	 * This routine allocates DMA memory for the Log Write Position Data (LWPD)
    6892	 * and for the buffers that are posted to the adapter for FW log updates.
    6893	 * The buffer count is calculated from the module param ras_fwlog_buffsize;
    6894	 * the size of each buffer posted to the FW is 64K.
   6895 **/
   6896
   6897static int
   6898lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
   6899			uint32_t fwlog_buff_count)
   6900{
   6901	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
   6902	struct lpfc_dmabuf *dmabuf;
   6903	int rc = 0, i = 0;
   6904
   6905	/* Initialize List */
   6906	INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
   6907
   6908	/* Allocate memory for the LWPD */
   6909	ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
   6910					    sizeof(uint32_t) * 2,
   6911					    &ras_fwlog->lwpd.phys,
   6912					    GFP_KERNEL);
   6913	if (!ras_fwlog->lwpd.virt) {
   6914		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   6915				"6185 LWPD Memory Alloc Failed\n");
   6916
   6917		return -ENOMEM;
   6918	}
   6919
   6920	ras_fwlog->fw_buffcount = fwlog_buff_count;
   6921	for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
   6922		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
   6923				 GFP_KERNEL);
   6924		if (!dmabuf) {
   6925			rc = -ENOMEM;
   6926			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
   6927					"6186 Memory Alloc failed FW logging");
   6928			goto free_mem;
   6929		}
   6930
   6931		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
   6932						  LPFC_RAS_MAX_ENTRY_SIZE,
   6933						  &dmabuf->phys, GFP_KERNEL);
   6934		if (!dmabuf->virt) {
   6935			kfree(dmabuf);
   6936			rc = -ENOMEM;
   6937			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
   6938					"6187 DMA Alloc Failed FW logging");
   6939			goto free_mem;
   6940		}
   6941		dmabuf->buffer_tag = i;
   6942		list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
   6943	}
   6944
   6945free_mem:
   6946	if (rc)
   6947		lpfc_sli4_ras_dma_free(phba);
   6948
   6949	return rc;
   6950}
   6951
   6952/**
   6953 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
   6954 * @phba: pointer to lpfc hba data structure.
   6955 * @pmb: pointer to the driver internal queue element for mailbox command.
   6956 *
   6957 * Completion handler for driver's RAS MBX command to the device.
   6958 **/
   6959static void
   6960lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
   6961{
   6962	MAILBOX_t *mb;
   6963	union lpfc_sli4_cfg_shdr *shdr;
   6964	uint32_t shdr_status, shdr_add_status;
   6965	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
   6966
   6967	mb = &pmb->u.mb;
   6968
   6969	shdr = (union lpfc_sli4_cfg_shdr *)
   6970		&pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
   6971	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
   6972	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
   6973
   6974	if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
   6975		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   6976				"6188 FW LOG mailbox "
   6977				"completed with status x%x add_status x%x,"
   6978				" mbx status x%x\n",
   6979				shdr_status, shdr_add_status, mb->mbxStatus);
   6980
   6981		ras_fwlog->ras_hwsupport = false;
   6982		goto disable_ras;
   6983	}
   6984
   6985	spin_lock_irq(&phba->hbalock);
   6986	ras_fwlog->state = ACTIVE;
   6987	spin_unlock_irq(&phba->hbalock);
   6988	mempool_free(pmb, phba->mbox_mem_pool);
   6989
   6990	return;
   6991
   6992disable_ras:
   6993	/* Free RAS DMA memory */
   6994	lpfc_sli4_ras_dma_free(phba);
   6995	mempool_free(pmb, phba->mbox_mem_pool);
   6996}
   6997
   6998/**
   6999 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
   7000 * @phba: pointer to lpfc hba data structure.
   7001 * @fwlog_level: Logging verbosity level.
   7002 * @fwlog_enable: Enable/Disable logging.
   7003 *
   7004 * Initialize memory and post mailbox command to enable FW logging in host
   7005 * memory.
   7006 **/
   7007int
   7008lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
   7009			 uint32_t fwlog_level,
   7010			 uint32_t fwlog_enable)
   7011{
   7012	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
   7013	struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
   7014	struct lpfc_dmabuf *dmabuf;
   7015	LPFC_MBOXQ_t *mbox;
   7016	uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
   7017	int rc = 0;
   7018
   7019	spin_lock_irq(&phba->hbalock);
   7020	ras_fwlog->state = INACTIVE;
   7021	spin_unlock_irq(&phba->hbalock);
   7022
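	/*
	 * cfg_ras_fwlog_buffsize is given in units of
	 * LPFC_RAS_MIN_BUFF_POST_SIZE; convert it to the number of
	 * LPFC_RAS_MAX_ENTRY_SIZE buffers to post to the firmware.
	 */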
   7023	fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
   7024			  phba->cfg_ras_fwlog_buffsize);
   7025	fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
   7026
   7027	/*
    7028	 * If re-enabling FW logging support, reuse the earlier allocated
    7029	 * DMA buffers when posting the MBX command.
    7030	 */
   7031	if (!ras_fwlog->lwpd.virt) {
   7032		rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
   7033		if (rc) {
   7034			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
   7035					"6189 FW Log Memory Allocation Failed");
   7036			return rc;
   7037		}
   7038	}
   7039
   7040	/* Setup Mailbox command */
   7041	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   7042	if (!mbox) {
   7043		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   7044				"6190 RAS MBX Alloc Failed");
   7045		rc = -ENOMEM;
   7046		goto mem_free;
   7047	}
   7048
   7049	ras_fwlog->fw_loglevel = fwlog_level;
   7050	len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
   7051		sizeof(struct lpfc_sli4_cfg_mhdr));
   7052
   7053	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
   7054			 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
   7055			 len, LPFC_SLI4_MBX_EMBED);
   7056
   7057	mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
   7058	bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
   7059	       fwlog_enable);
   7060	bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
   7061	       ras_fwlog->fw_loglevel);
   7062	bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
   7063	       ras_fwlog->fw_buffcount);
   7064	bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
   7065	       LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
   7066
   7067	/* Update DMA buffer address */
   7068	list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
   7069		memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
   7070
   7071		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
   7072			putPaddrLow(dmabuf->phys);
   7073
   7074		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
   7075			putPaddrHigh(dmabuf->phys);
   7076	}
   7077
    7078	/* Update LWPD address */
   7079	mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
   7080	mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
   7081
   7082	spin_lock_irq(&phba->hbalock);
   7083	ras_fwlog->state = REG_INPROGRESS;
   7084	spin_unlock_irq(&phba->hbalock);
   7085	mbox->vport = phba->pport;
   7086	mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
   7087
   7088	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
   7089
   7090	if (rc == MBX_NOT_FINISHED) {
   7091		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   7092				"6191 FW-Log Mailbox failed. "
   7093				"status %d mbxStatus : x%x", rc,
   7094				bf_get(lpfc_mqe_status, &mbox->u.mqe));
   7095		mempool_free(mbox, phba->mbox_mem_pool);
   7096		rc = -EIO;
   7097		goto mem_free;
   7098	} else
   7099		rc = 0;
   7100mem_free:
   7101	if (rc)
   7102		lpfc_sli4_ras_dma_free(phba);
   7103
   7104	return rc;
   7105}
   7106
   7107/**
   7108 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
   7109 * @phba: Pointer to HBA context object.
   7110 *
   7111 * Check if RAS is supported on the adapter and initialize it.
   7112 **/
   7113void
   7114lpfc_sli4_ras_setup(struct lpfc_hba *phba)
   7115{
    7116	/* Check whether RAS FW logging needs to be enabled */
   7117	if (lpfc_check_fwlog_support(phba))
   7118		return;
   7119
   7120	lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
   7121				 LPFC_RAS_ENABLE_LOGGING);
   7122}
   7123
   7124/**
   7125 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
   7126 * @phba: Pointer to HBA context object.
   7127 *
   7128 * This function allocates all SLI4 resource identifiers.
   7129 **/
   7130int
   7131lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
   7132{
   7133	int i, rc, error = 0;
   7134	uint16_t count, base;
   7135	unsigned long longs;
   7136
   7137	if (!phba->sli4_hba.rpi_hdrs_in_use)
   7138		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
   7139	if (phba->sli4_hba.extents_in_use) {
   7140		/*
   7141		 * The port supports resource extents. The XRI, VPI, VFI, RPI
   7142		 * resource extent count must be read and allocated before
   7143		 * provisioning the resource id arrays.
   7144		 */
   7145		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
   7146		    LPFC_IDX_RSRC_RDY) {
   7147			/*
   7148			 * Extent-based resources are set - the driver could
   7149			 * be in a port reset. Figure out if any corrective
   7150			 * actions need to be taken.
   7151			 */
   7152			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
   7153						 LPFC_RSC_TYPE_FCOE_VFI);
   7154			if (rc != 0)
   7155				error++;
   7156			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
   7157						 LPFC_RSC_TYPE_FCOE_VPI);
   7158			if (rc != 0)
   7159				error++;
   7160			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
   7161						 LPFC_RSC_TYPE_FCOE_XRI);
   7162			if (rc != 0)
   7163				error++;
   7164			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
   7165						 LPFC_RSC_TYPE_FCOE_RPI);
   7166			if (rc != 0)
   7167				error++;
   7168
   7169			/*
   7170			 * It's possible that the number of resources
   7171			 * provided to this port instance changed between
   7172			 * resets.  Detect this condition and reallocate
   7173			 * resources.  Otherwise, there is no action.
   7174			 */
   7175			if (error) {
   7176				lpfc_printf_log(phba, KERN_INFO,
   7177						LOG_MBOX | LOG_INIT,
   7178						"2931 Detected extent resource "
   7179						"change.  Reallocating all "
   7180						"extents.\n");
   7181				rc = lpfc_sli4_dealloc_extent(phba,
   7182						 LPFC_RSC_TYPE_FCOE_VFI);
   7183				rc = lpfc_sli4_dealloc_extent(phba,
   7184						 LPFC_RSC_TYPE_FCOE_VPI);
   7185				rc = lpfc_sli4_dealloc_extent(phba,
   7186						 LPFC_RSC_TYPE_FCOE_XRI);
   7187				rc = lpfc_sli4_dealloc_extent(phba,
   7188						 LPFC_RSC_TYPE_FCOE_RPI);
   7189			} else
   7190				return 0;
   7191		}
   7192
   7193		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
   7194		if (unlikely(rc))
   7195			goto err_exit;
   7196
   7197		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
   7198		if (unlikely(rc))
   7199			goto err_exit;
   7200
   7201		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
   7202		if (unlikely(rc))
   7203			goto err_exit;
   7204
   7205		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
   7206		if (unlikely(rc))
   7207			goto err_exit;
   7208		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
   7209		       LPFC_IDX_RSRC_RDY);
   7210		return rc;
   7211	} else {
   7212		/*
   7213		 * The port does not support resource extents.  The XRI, VPI,
   7214		 * VFI, RPI resource ids were determined from READ_CONFIG.
   7215		 * Just allocate the bitmasks and provision the resource id
   7216		 * arrays.  If a port reset is active, the resources don't
   7217		 * need any action - just exit.
   7218		 */
   7219		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
   7220		    LPFC_IDX_RSRC_RDY) {
   7221			lpfc_sli4_dealloc_resource_identifiers(phba);
   7222			lpfc_sli4_remove_rpis(phba);
   7223		}
   7224		/* RPIs. */
   7225		count = phba->sli4_hba.max_cfg_param.max_rpi;
   7226		if (count <= 0) {
   7227			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   7228					"3279 Invalid provisioning of "
   7229					"rpi:%d\n", count);
   7230			rc = -EINVAL;
   7231			goto err_exit;
   7232		}
   7233		base = phba->sli4_hba.max_cfg_param.rpi_base;
   7234		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
   7235		phba->sli4_hba.rpi_bmask = kcalloc(longs,
   7236						   sizeof(unsigned long),
   7237						   GFP_KERNEL);
   7238		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
   7239			rc = -ENOMEM;
   7240			goto err_exit;
   7241		}
   7242		phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
   7243						 GFP_KERNEL);
   7244		if (unlikely(!phba->sli4_hba.rpi_ids)) {
   7245			rc = -ENOMEM;
   7246			goto free_rpi_bmask;
   7247		}
   7248
   7249		for (i = 0; i < count; i++)
   7250			phba->sli4_hba.rpi_ids[i] = base + i;
   7251
   7252		/* VPIs. */
   7253		count = phba->sli4_hba.max_cfg_param.max_vpi;
   7254		if (count <= 0) {
   7255			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   7256					"3280 Invalid provisioning of "
   7257					"vpi:%d\n", count);
   7258			rc = -EINVAL;
   7259			goto free_rpi_ids;
   7260		}
   7261		base = phba->sli4_hba.max_cfg_param.vpi_base;
   7262		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
   7263		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
   7264					  GFP_KERNEL);
   7265		if (unlikely(!phba->vpi_bmask)) {
   7266			rc = -ENOMEM;
   7267			goto free_rpi_ids;
   7268		}
   7269		phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
   7270					GFP_KERNEL);
   7271		if (unlikely(!phba->vpi_ids)) {
   7272			rc = -ENOMEM;
   7273			goto free_vpi_bmask;
   7274		}
   7275
   7276		for (i = 0; i < count; i++)
   7277			phba->vpi_ids[i] = base + i;
   7278
   7279		/* XRIs. */
   7280		count = phba->sli4_hba.max_cfg_param.max_xri;
   7281		if (count <= 0) {
   7282			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   7283					"3281 Invalid provisioning of "
   7284					"xri:%d\n", count);
   7285			rc = -EINVAL;
   7286			goto free_vpi_ids;
   7287		}
   7288		base = phba->sli4_hba.max_cfg_param.xri_base;
   7289		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
   7290		phba->sli4_hba.xri_bmask = kcalloc(longs,
   7291						   sizeof(unsigned long),
   7292						   GFP_KERNEL);
   7293		if (unlikely(!phba->sli4_hba.xri_bmask)) {
   7294			rc = -ENOMEM;
   7295			goto free_vpi_ids;
   7296		}
   7297		phba->sli4_hba.max_cfg_param.xri_used = 0;
   7298		phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
   7299						 GFP_KERNEL);
   7300		if (unlikely(!phba->sli4_hba.xri_ids)) {
   7301			rc = -ENOMEM;
   7302			goto free_xri_bmask;
   7303		}
   7304
   7305		for (i = 0; i < count; i++)
   7306			phba->sli4_hba.xri_ids[i] = base + i;
   7307
   7308		/* VFIs. */
   7309		count = phba->sli4_hba.max_cfg_param.max_vfi;
   7310		if (count <= 0) {
   7311			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   7312					"3282 Invalid provisioning of "
   7313					"vfi:%d\n", count);
   7314			rc = -EINVAL;
   7315			goto free_xri_ids;
   7316		}
   7317		base = phba->sli4_hba.max_cfg_param.vfi_base;
   7318		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
   7319		phba->sli4_hba.vfi_bmask = kcalloc(longs,
   7320						   sizeof(unsigned long),
   7321						   GFP_KERNEL);
   7322		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
   7323			rc = -ENOMEM;
   7324			goto free_xri_ids;
   7325		}
   7326		phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
   7327						 GFP_KERNEL);
   7328		if (unlikely(!phba->sli4_hba.vfi_ids)) {
   7329			rc = -ENOMEM;
   7330			goto free_vfi_bmask;
   7331		}
   7332
   7333		for (i = 0; i < count; i++)
   7334			phba->sli4_hba.vfi_ids[i] = base + i;
   7335
   7336		/*
   7337		 * Mark all resources ready.  An HBA reset doesn't need
   7338		 * to reset the initialization.
   7339		 */
   7340		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
   7341		       LPFC_IDX_RSRC_RDY);
   7342		return 0;
   7343	}
   7344
   7345 free_vfi_bmask:
   7346	kfree(phba->sli4_hba.vfi_bmask);
   7347	phba->sli4_hba.vfi_bmask = NULL;
   7348 free_xri_ids:
   7349	kfree(phba->sli4_hba.xri_ids);
   7350	phba->sli4_hba.xri_ids = NULL;
   7351 free_xri_bmask:
   7352	kfree(phba->sli4_hba.xri_bmask);
   7353	phba->sli4_hba.xri_bmask = NULL;
   7354 free_vpi_ids:
   7355	kfree(phba->vpi_ids);
   7356	phba->vpi_ids = NULL;
   7357 free_vpi_bmask:
   7358	kfree(phba->vpi_bmask);
   7359	phba->vpi_bmask = NULL;
   7360 free_rpi_ids:
   7361	kfree(phba->sli4_hba.rpi_ids);
   7362	phba->sli4_hba.rpi_ids = NULL;
   7363 free_rpi_bmask:
   7364	kfree(phba->sli4_hba.rpi_bmask);
   7365	phba->sli4_hba.rpi_bmask = NULL;
   7366 err_exit:
   7367	return rc;
   7368}
   7369
   7370/**
   7371 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
   7372 * @phba: Pointer to HBA context object.
   7373 *
    7374	 * This function deallocates all SLI4 resource identifiers previously
    7375	 * allocated by the driver.
   7376 **/
   7377int
   7378lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
   7379{
   7380	if (phba->sli4_hba.extents_in_use) {
   7381		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
   7382		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
   7383		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
   7384		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
   7385	} else {
   7386		kfree(phba->vpi_bmask);
   7387		phba->sli4_hba.max_cfg_param.vpi_used = 0;
   7388		kfree(phba->vpi_ids);
   7389		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
   7390		kfree(phba->sli4_hba.xri_bmask);
   7391		kfree(phba->sli4_hba.xri_ids);
   7392		kfree(phba->sli4_hba.vfi_bmask);
   7393		kfree(phba->sli4_hba.vfi_ids);
   7394		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
   7395		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
   7396	}
   7397
   7398	return 0;
   7399}
   7400
   7401/**
   7402 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
   7403 * @phba: Pointer to HBA context object.
   7404 * @type: The resource extent type.
   7405 * @extnt_cnt: buffer to hold port extent count response
   7406 * @extnt_size: buffer to hold port extent size response.
   7407 *
   7408 * This function calls the port to read the host allocated extents
   7409 * for a particular type.
   7410 **/
   7411int
   7412lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
   7413			       uint16_t *extnt_cnt, uint16_t *extnt_size)
   7414{
   7415	bool emb;
   7416	int rc = 0;
   7417	uint16_t curr_blks = 0;
   7418	uint32_t req_len, emb_len;
   7419	uint32_t alloc_len, mbox_tmo;
   7420	struct list_head *blk_list_head;
   7421	struct lpfc_rsrc_blks *rsrc_blk;
   7422	LPFC_MBOXQ_t *mbox;
   7423	void *virtaddr = NULL;
   7424	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
   7425	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
   7426	union  lpfc_sli4_cfg_shdr *shdr;
   7427
   7428	switch (type) {
   7429	case LPFC_RSC_TYPE_FCOE_VPI:
   7430		blk_list_head = &phba->lpfc_vpi_blk_list;
   7431		break;
   7432	case LPFC_RSC_TYPE_FCOE_XRI:
   7433		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
   7434		break;
   7435	case LPFC_RSC_TYPE_FCOE_VFI:
   7436		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
   7437		break;
   7438	case LPFC_RSC_TYPE_FCOE_RPI:
   7439		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
   7440		break;
   7441	default:
   7442		return -EIO;
   7443	}
   7444
    7445	/* Count the number of extents currently allocated for this type. */
   7446	list_for_each_entry(rsrc_blk, blk_list_head, list) {
   7447		if (curr_blks == 0) {
   7448			/*
   7449			 * The GET_ALLOCATED mailbox does not return the size,
   7450			 * just the count.  The size should be just the size
   7451			 * stored in the current allocated block and all sizes
   7452			 * for an extent type are the same so set the return
   7453			 * value now.
   7454			 */
   7455			*extnt_size = rsrc_blk->rsrc_size;
   7456		}
   7457		curr_blks++;
   7458	}
   7459
   7460	/*
   7461	 * Calculate the size of an embedded mailbox.  The uint32_t
    7462	 * accounts for the extents-specific word.
   7463	 */
   7464	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
   7465		sizeof(uint32_t);
   7466
   7467	/*
   7468	 * Presume the allocation and response will fit into an embedded
   7469	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
   7470	 */
   7471	emb = LPFC_SLI4_MBX_EMBED;
   7472	req_len = emb_len;
   7473	if (req_len > emb_len) {
   7474		req_len = curr_blks * sizeof(uint16_t) +
   7475			sizeof(union lpfc_sli4_cfg_shdr) +
   7476			sizeof(uint32_t);
   7477		emb = LPFC_SLI4_MBX_NEMBED;
   7478	}
   7479
   7480	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   7481	if (!mbox)
   7482		return -ENOMEM;
   7483	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
   7484
   7485	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
   7486				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
   7487				     req_len, emb);
   7488	if (alloc_len < req_len) {
   7489		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   7490			"2983 Allocated DMA memory size (x%x) is "
   7491			"less than the requested DMA memory "
   7492			"size (x%x)\n", alloc_len, req_len);
   7493		rc = -ENOMEM;
   7494		goto err_exit;
   7495	}
   7496	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
   7497	if (unlikely(rc)) {
   7498		rc = -EIO;
   7499		goto err_exit;
   7500	}
   7501
   7502	if (!phba->sli4_hba.intr_enable)
   7503		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
   7504	else {
   7505		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
   7506		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
   7507	}
   7508
   7509	if (unlikely(rc)) {
   7510		rc = -EIO;
   7511		goto err_exit;
   7512	}
   7513
   7514	/*
   7515	 * Figure out where the response is located.  Then get local pointers
   7516	 * to the response data.  The port does not guarantee to respond to
   7517	 * all extents counts request so update the local variable with the
   7518	 * allocated count from the port.
   7519	 */
   7520	if (emb == LPFC_SLI4_MBX_EMBED) {
   7521		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
   7522		shdr = &rsrc_ext->header.cfg_shdr;
   7523		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
   7524	} else {
   7525		virtaddr = mbox->sge_array->addr[0];
   7526		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
   7527		shdr = &n_rsrc->cfg_shdr;
   7528		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
   7529	}
   7530
   7531	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
   7532		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   7533			"2984 Failed to read allocated resources "
   7534			"for type %d - Status 0x%x Add'l Status 0x%x.\n",
   7535			type,
   7536			bf_get(lpfc_mbox_hdr_status, &shdr->response),
   7537			bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
   7538		rc = -EIO;
   7539		goto err_exit;
   7540	}
   7541 err_exit:
   7542	lpfc_sli4_mbox_cmd_free(phba, mbox);
   7543	return rc;
   7544}
   7545
   7546/**
   7547 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
   7548 * @phba: pointer to lpfc hba data structure.
   7549 * @sgl_list: linked link of sgl buffers to post
   7550 * @cnt: number of linked list buffers
   7551 *
   7552 * This routine walks the list of buffers that have been allocated and
    7553	 * reposts them to the port by using SGL block post. This is needed after a
    7554	 * pci_function_reset/warm_start or start. It attempts to construct blocks
    7555	 * of buffer sgls that contain contiguous xris and uses the non-embedded
    7556	 * SGL block post mailbox commands to post them to the port. Any single
    7557	 * buffer sgl with a non-contiguous xri, if present, is posted with the
    7558	 * embedded SGL post mailbox command.
   7559 *
   7560 * Returns: 0 = success, non-zero failure.
   7561 **/
   7562static int
   7563lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
   7564			  struct list_head *sgl_list, int cnt)
   7565{
   7566	struct lpfc_sglq *sglq_entry = NULL;
   7567	struct lpfc_sglq *sglq_entry_next = NULL;
   7568	struct lpfc_sglq *sglq_entry_first = NULL;
   7569	int status, total_cnt;
   7570	int post_cnt = 0, num_posted = 0, block_cnt = 0;
   7571	int last_xritag = NO_XRI;
   7572	LIST_HEAD(prep_sgl_list);
   7573	LIST_HEAD(blck_sgl_list);
   7574	LIST_HEAD(allc_sgl_list);
   7575	LIST_HEAD(post_sgl_list);
   7576	LIST_HEAD(free_sgl_list);
   7577
   7578	spin_lock_irq(&phba->hbalock);
   7579	spin_lock(&phba->sli4_hba.sgl_list_lock);
   7580	list_splice_init(sgl_list, &allc_sgl_list);
   7581	spin_unlock(&phba->sli4_hba.sgl_list_lock);
   7582	spin_unlock_irq(&phba->hbalock);
   7583
   7584	total_cnt = cnt;
   7585	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
   7586				 &allc_sgl_list, list) {
   7587		list_del_init(&sglq_entry->list);
   7588		block_cnt++;
   7589		if ((last_xritag != NO_XRI) &&
   7590		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
   7591			/* a hole in xri block, form a sgl posting block */
   7592			list_splice_init(&prep_sgl_list, &blck_sgl_list);
   7593			post_cnt = block_cnt - 1;
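			/* the current sgl starts the next block, hence the -1 */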
   7594			/* prepare list for next posting block */
   7595			list_add_tail(&sglq_entry->list, &prep_sgl_list);
   7596			block_cnt = 1;
   7597		} else {
   7598			/* prepare list for next posting block */
   7599			list_add_tail(&sglq_entry->list, &prep_sgl_list);
   7600			/* enough sgls for non-embed sgl mbox command */
   7601			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
   7602				list_splice_init(&prep_sgl_list,
   7603						 &blck_sgl_list);
   7604				post_cnt = block_cnt;
   7605				block_cnt = 0;
   7606			}
   7607		}
   7608		num_posted++;
   7609
   7610		/* keep track of last sgl's xritag */
   7611		last_xritag = sglq_entry->sli4_xritag;
   7612
   7613		/* end of repost sgl list condition for buffers */
   7614		if (num_posted == total_cnt) {
   7615			if (post_cnt == 0) {
   7616				list_splice_init(&prep_sgl_list,
   7617						 &blck_sgl_list);
   7618				post_cnt = block_cnt;
   7619			} else if (block_cnt == 1) {
   7620				status = lpfc_sli4_post_sgl(phba,
   7621						sglq_entry->phys, 0,
   7622						sglq_entry->sli4_xritag);
   7623				if (!status) {
   7624					/* successful, put sgl to posted list */
   7625					list_add_tail(&sglq_entry->list,
   7626						      &post_sgl_list);
   7627				} else {
   7628					/* Failure, put sgl to free list */
   7629					lpfc_printf_log(phba, KERN_WARNING,
   7630						LOG_SLI,
   7631						"3159 Failed to post "
   7632						"sgl, xritag:x%x\n",
   7633						sglq_entry->sli4_xritag);
   7634					list_add_tail(&sglq_entry->list,
   7635						      &free_sgl_list);
   7636					total_cnt--;
   7637				}
   7638			}
   7639		}
   7640
    7641	/* continue until a non-embedded page worth of sgls is gathered */
   7642		if (post_cnt == 0)
   7643			continue;
   7644
   7645		/* post the buffer list sgls as a block */
   7646		status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
   7647						 post_cnt);
   7648
   7649		if (!status) {
   7650			/* success, put sgl list to posted sgl list */
   7651			list_splice_init(&blck_sgl_list, &post_sgl_list);
   7652		} else {
   7653			/* Failure, put sgl list to free sgl list */
   7654			sglq_entry_first = list_first_entry(&blck_sgl_list,
   7655							    struct lpfc_sglq,
   7656							    list);
   7657			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
   7658					"3160 Failed to post sgl-list, "
   7659					"xritag:x%x-x%x\n",
   7660					sglq_entry_first->sli4_xritag,
   7661					(sglq_entry_first->sli4_xritag +
   7662					 post_cnt - 1));
   7663			list_splice_init(&blck_sgl_list, &free_sgl_list);
   7664			total_cnt -= post_cnt;
   7665		}
   7666
    7667	/* don't reset xritag due to hole in xri block */
   7668		if (block_cnt == 0)
   7669			last_xritag = NO_XRI;
   7670
   7671		/* reset sgl post count for next round of posting */
   7672		post_cnt = 0;
   7673	}
   7674
   7675	/* free the sgls failed to post */
   7676	lpfc_free_sgl_list(phba, &free_sgl_list);
   7677
   7678	/* push sgls posted to the available list */
   7679	if (!list_empty(&post_sgl_list)) {
   7680		spin_lock_irq(&phba->hbalock);
   7681		spin_lock(&phba->sli4_hba.sgl_list_lock);
   7682		list_splice_init(&post_sgl_list, sgl_list);
   7683		spin_unlock(&phba->sli4_hba.sgl_list_lock);
   7684		spin_unlock_irq(&phba->hbalock);
   7685	} else {
   7686		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   7687				"3161 Failure to post sgl to port.\n");
   7688		return -EIO;
   7689	}
   7690
   7691	/* return the number of XRIs actually posted */
   7692	return total_cnt;
   7693}
   7694
   7695/**
   7696 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
   7697 * @phba: pointer to lpfc hba data structure.
   7698 *
   7699 * This routine walks the list of nvme buffers that have been allocated and
    7700	 * reposts them to the port by using SGL block post. This is needed after a
   7701 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
   7702 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
   7703 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
   7704 *
   7705 * Returns: 0 = success, non-zero failure.
   7706 **/
   7707static int
   7708lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
   7709{
   7710	LIST_HEAD(post_nblist);
   7711	int num_posted, rc = 0;
   7712
    7713	/* get all NVME buffers that need reposting onto a local list */
   7714	lpfc_io_buf_flush(phba, &post_nblist);
   7715
   7716	/* post the list of nvme buffer sgls to port if available */
   7717	if (!list_empty(&post_nblist)) {
   7718		num_posted = lpfc_sli4_post_io_sgl_list(
   7719			phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
   7720		/* failed to post any nvme buffer, return error */
   7721		if (num_posted == 0)
   7722			rc = -EIO;
   7723	}
   7724	return rc;
   7725}
   7726
   7727static void
   7728lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
   7729{
   7730	uint32_t len;
   7731
   7732	len = sizeof(struct lpfc_mbx_set_host_data) -
   7733		sizeof(struct lpfc_sli4_cfg_mhdr);
   7734	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
   7735			 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
   7736			 LPFC_SLI4_MBX_EMBED);
   7737
   7738	mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
   7739	mbox->u.mqe.un.set_host_data.param_len =
   7740					LPFC_HOST_OS_DRIVER_VERSION_SIZE;
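	/* Illustrative only: the string written below has the form
	 * "Linux FC v<LPFC_DRIVER_VERSION>" (or "Linux FCoE v..." when the
	 * port is in FCoE mode); the exact version comes from the
	 * LPFC_DRIVER_VERSION macro.
	 */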
   7741	snprintf(mbox->u.mqe.un.set_host_data.un.data,
   7742		 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
   7743		 "Linux %s v"LPFC_DRIVER_VERSION,
   7744		 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
   7745}
   7746
   7747int
   7748lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
   7749		    struct lpfc_queue *drq, int count, int idx)
   7750{
   7751	int rc, i;
   7752	struct lpfc_rqe hrqe;
   7753	struct lpfc_rqe drqe;
   7754	struct lpfc_rqb *rqbp;
   7755	unsigned long flags;
   7756	struct rqb_dmabuf *rqb_buffer;
   7757	LIST_HEAD(rqb_buf_list);
   7758
   7759	rqbp = hrq->rqbp;
   7760	for (i = 0; i < count; i++) {
   7761		spin_lock_irqsave(&phba->hbalock, flags);
    7762		/* If the RQ is already full, don't bother */
   7763		if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
   7764			spin_unlock_irqrestore(&phba->hbalock, flags);
   7765			break;
   7766		}
   7767		spin_unlock_irqrestore(&phba->hbalock, flags);
   7768
   7769		rqb_buffer = rqbp->rqb_alloc_buffer(phba);
   7770		if (!rqb_buffer)
   7771			break;
   7772		rqb_buffer->hrq = hrq;
   7773		rqb_buffer->drq = drq;
   7774		rqb_buffer->idx = idx;
   7775		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
   7776	}
   7777
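	/* Post the buffers gathered above to the header/data RQ pair,
	 * moving each one onto the RQ's buffer list on success or freeing
	 * it back to the pool if the hardware post fails.
	 */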
   7778	spin_lock_irqsave(&phba->hbalock, flags);
   7779	while (!list_empty(&rqb_buf_list)) {
   7780		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
   7781				 hbuf.list);
   7782
   7783		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
   7784		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
   7785		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
   7786		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
   7787		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
   7788		if (rc < 0) {
   7789			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   7790					"6421 Cannot post to HRQ %d: %x %x %x "
   7791					"DRQ %x %x\n",
   7792					hrq->queue_id,
   7793					hrq->host_index,
   7794					hrq->hba_index,
   7795					hrq->entry_count,
   7796					drq->host_index,
   7797					drq->hba_index);
   7798			rqbp->rqb_free_buffer(phba, rqb_buffer);
   7799		} else {
   7800			list_add_tail(&rqb_buffer->hbuf.list,
   7801				      &rqbp->rqb_buffer_list);
   7802			rqbp->buffer_count++;
   7803		}
   7804	}
   7805	spin_unlock_irqrestore(&phba->hbalock, flags);
   7806	return 1;
   7807}
   7808
   7809static void
   7810lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
   7811{
   7812	struct lpfc_vport *vport = pmb->vport;
   7813	union lpfc_sli4_cfg_shdr *shdr;
   7814	u32 shdr_status, shdr_add_status;
   7815	u32 sig, acqe;
   7816
    7817	/* Two outcomes. (1) Set features was successful and EDC negotiation
    7818	 * is done. (2) Mailbox failed, so fall back to FPIN support only.
   7819	 */
   7820	shdr = (union lpfc_sli4_cfg_shdr *)
   7821		&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
   7822	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
   7823	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
   7824	if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
   7825		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
   7826				"2516 CGN SET_FEATURE mbox failed with "
   7827				"status x%x add_status x%x, mbx status x%x "
   7828				"Reset Congestion to FPINs only\n",
   7829				shdr_status, shdr_add_status,
   7830				pmb->u.mb.mbxStatus);
   7831		/* If there is a mbox error, move on to RDF */
   7832		phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
   7833		phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
   7834		goto out;
   7835	}
   7836
   7837	/* Zero out Congestion Signal ACQE counter */
   7838	phba->cgn_acqe_cnt = 0;
   7839
   7840	acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
   7841		      &pmb->u.mqe.un.set_feature);
   7842	sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
   7843		     &pmb->u.mqe.un.set_feature);
   7844	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
   7845			"4620 SET_FEATURES Success: Freq: %ds %dms "
   7846			" Reg: x%x x%x\n", acqe, sig,
   7847			phba->cgn_reg_signal, phba->cgn_reg_fpin);
   7848out:
   7849	mempool_free(pmb, phba->mbox_mem_pool);
   7850
   7851	/* Register for FPIN events from the fabric now that the
   7852	 * EDC common_set_features has completed.
   7853	 */
   7854	lpfc_issue_els_rdf(vport, 0);
   7855}
   7856
   7857int
   7858lpfc_config_cgn_signal(struct lpfc_hba *phba)
   7859{
   7860	LPFC_MBOXQ_t *mboxq;
   7861	u32 rc;
   7862
   7863	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   7864	if (!mboxq)
   7865		goto out_rdf;
   7866
   7867	lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
   7868	mboxq->vport = phba->pport;
   7869	mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;
   7870
   7871	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
   7872			"4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
   7873			"Reg: x%x x%x\n",
   7874			phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
   7875			phba->cgn_reg_signal, phba->cgn_reg_fpin);
   7876
   7877	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
   7878	if (rc == MBX_NOT_FINISHED)
   7879		goto out;
   7880	return 0;
   7881
   7882out:
   7883	mempool_free(mboxq, phba->mbox_mem_pool);
   7884out_rdf:
   7885	/* If there is a mbox error, move on to RDF */
   7886	phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
   7887	phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
   7888	lpfc_issue_els_rdf(phba->pport, 0);
   7889	return -EIO;
   7890}
   7891
   7892/**
   7893 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
   7894 * @phba: pointer to lpfc hba data structure.
   7895 *
   7896 * This routine initializes the per-cq idle_stat to dynamically dictate
   7897 * polling decisions.
   7898 *
   7899 * Return codes:
   7900 *   None
   7901 **/
   7902static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
   7903{
   7904	int i;
   7905	struct lpfc_sli4_hdw_queue *hdwq;
   7906	struct lpfc_queue *cq;
   7907	struct lpfc_idle_stat *idle_stat;
   7908	u64 wall;
   7909
   7910	for_each_present_cpu(i) {
   7911		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
   7912		cq = hdwq->io_cq;
   7913
   7914		/* Skip if we've already handled this cq's primary CPU */
   7915		if (cq->chann != i)
   7916			continue;
   7917
   7918		idle_stat = &phba->sli4_hba.idle_stat[i];
   7919
   7920		idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
   7921		idle_stat->prev_wall = wall;
   7922
   7923		if (phba->nvmet_support ||
   7924		    phba->cmf_active_mode != LPFC_CFG_OFF)
   7925			cq->poll_mode = LPFC_QUEUE_WORK;
   7926		else
   7927			cq->poll_mode = LPFC_IRQ_POLL;
   7928	}
   7929
   7930	if (!phba->nvmet_support)
   7931		schedule_delayed_work(&phba->idle_stat_delay_work,
   7932				      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
   7933}
   7934
   7935static void lpfc_sli4_dip(struct lpfc_hba *phba)
   7936{
   7937	uint32_t if_type;
   7938
   7939	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
   7940	if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
   7941	    if_type == LPFC_SLI_INTF_IF_TYPE_6) {
   7942		struct lpfc_register reg_data;
   7943
   7944		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
   7945			       &reg_data.word0))
   7946			return;
   7947
   7948		if (bf_get(lpfc_sliport_status_dip, &reg_data))
   7949			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
   7950					"2904 Firmware Dump Image Present"
   7951					" on Adapter");
   7952	}
   7953}
   7954
   7955/**
    7956 * lpfc_cmf_setup - Initialize Congestion Management (CMF) and MI support
   7957 * @phba: Pointer to HBA context object.
   7958 *
   7959 * This is called from HBA setup during driver load or when the HBA
    7960 * comes online. This does all the initialization to support CMF and MI.
   7961 **/
   7962static int
   7963lpfc_cmf_setup(struct lpfc_hba *phba)
   7964{
   7965	LPFC_MBOXQ_t *mboxq;
   7966	struct lpfc_dmabuf *mp;
   7967	struct lpfc_pc_sli4_params *sli4_params;
   7968	int rc, cmf, mi_ver;
   7969
   7970	rc = lpfc_sli4_refresh_params(phba);
   7971	if (unlikely(rc))
   7972		return rc;
   7973
   7974	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   7975	if (!mboxq)
   7976		return -ENOMEM;
   7977
   7978	sli4_params = &phba->sli4_hba.pc_sli4_params;
   7979
   7980	/* Always try to enable MI feature if we can */
   7981	if (sli4_params->mi_ver) {
   7982		lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
   7983		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
   7984		mi_ver = bf_get(lpfc_mbx_set_feature_mi,
   7985				 &mboxq->u.mqe.un.set_feature);
   7986
   7987		if (rc == MBX_SUCCESS) {
   7988			if (mi_ver) {
   7989				lpfc_printf_log(phba,
   7990						KERN_WARNING, LOG_CGN_MGMT,
   7991						"6215 MI is enabled\n");
   7992				sli4_params->mi_ver = mi_ver;
   7993			} else {
   7994				lpfc_printf_log(phba,
   7995						KERN_WARNING, LOG_CGN_MGMT,
   7996						"6338 MI is disabled\n");
   7997				sli4_params->mi_ver = 0;
   7998			}
   7999		} else {
   8000			/* mi_ver is already set from GET_SLI4_PARAMETERS */
   8001			lpfc_printf_log(phba, KERN_INFO,
   8002					LOG_CGN_MGMT | LOG_INIT,
   8003					"6245 Enable MI Mailbox x%x (x%x/x%x) "
   8004					"failed, rc:x%x mi:x%x\n",
   8005					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
   8006					lpfc_sli_config_mbox_subsys_get
   8007						(phba, mboxq),
   8008					lpfc_sli_config_mbox_opcode_get
   8009						(phba, mboxq),
   8010					rc, sli4_params->mi_ver);
   8011		}
   8012	} else {
   8013		lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
   8014				"6217 MI is disabled\n");
   8015	}
   8016
   8017	/* Ensure FDMI is enabled for MI if enable_mi is set */
   8018	if (sli4_params->mi_ver)
   8019		phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
   8020
   8021	/* Always try to enable CMF feature if we can */
   8022	if (sli4_params->cmf) {
   8023		lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
   8024		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
   8025		cmf = bf_get(lpfc_mbx_set_feature_cmf,
   8026			     &mboxq->u.mqe.un.set_feature);
   8027		if (rc == MBX_SUCCESS && cmf) {
   8028			lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
   8029					"6218 CMF is enabled: mode %d\n",
   8030					phba->cmf_active_mode);
   8031		} else {
   8032			lpfc_printf_log(phba, KERN_WARNING,
   8033					LOG_CGN_MGMT | LOG_INIT,
   8034					"6219 Enable CMF Mailbox x%x (x%x/x%x) "
   8035					"failed, rc:x%x dd:x%x\n",
   8036					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
   8037					lpfc_sli_config_mbox_subsys_get
   8038						(phba, mboxq),
   8039					lpfc_sli_config_mbox_opcode_get
   8040						(phba, mboxq),
   8041					rc, cmf);
   8042			sli4_params->cmf = 0;
   8043			phba->cmf_active_mode = LPFC_CFG_OFF;
   8044			goto no_cmf;
   8045		}
   8046
   8047		/* Allocate Congestion Information Buffer */
   8048		if (!phba->cgn_i) {
   8049			mp = kmalloc(sizeof(*mp), GFP_KERNEL);
   8050			if (mp)
   8051				mp->virt = dma_alloc_coherent
   8052						(&phba->pcidev->dev,
   8053						sizeof(struct lpfc_cgn_info),
   8054						&mp->phys, GFP_KERNEL);
   8055			if (!mp || !mp->virt) {
   8056				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
   8057						"2640 Failed to alloc memory "
   8058						"for Congestion Info\n");
   8059				kfree(mp);
   8060				sli4_params->cmf = 0;
   8061				phba->cmf_active_mode = LPFC_CFG_OFF;
   8062				goto no_cmf;
   8063			}
   8064			phba->cgn_i = mp;
   8065
   8066			/* initialize congestion buffer info */
   8067			lpfc_init_congestion_buf(phba);
   8068			lpfc_init_congestion_stat(phba);
   8069
   8070			/* Zero out Congestion Signal counters */
   8071			atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
   8072			atomic64_set(&phba->cgn_acqe_stat.warn, 0);
   8073		}
   8074
   8075		rc = lpfc_sli4_cgn_params_read(phba);
   8076		if (rc < 0) {
   8077			lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
   8078					"6242 Error reading Cgn Params (%d)\n",
   8079					rc);
   8080			/* Ensure CGN Mode is off */
   8081			sli4_params->cmf = 0;
   8082		} else if (!rc) {
   8083			lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
   8084					"6243 CGN Event empty object.\n");
   8085			/* Ensure CGN Mode is off */
   8086			sli4_params->cmf = 0;
   8087		}
   8088	} else {
   8089no_cmf:
   8090		lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
   8091				"6220 CMF is disabled\n");
   8092	}
   8093
   8094	/* Only register congestion buffer with firmware if BOTH
   8095	 * CMF and E2E are enabled.
   8096	 */
   8097	if (sli4_params->cmf && sli4_params->mi_ver) {
   8098		rc = lpfc_reg_congestion_buf(phba);
   8099		if (rc) {
   8100			dma_free_coherent(&phba->pcidev->dev,
   8101					  sizeof(struct lpfc_cgn_info),
   8102					  phba->cgn_i->virt, phba->cgn_i->phys);
   8103			kfree(phba->cgn_i);
   8104			phba->cgn_i = NULL;
   8105			/* Ensure CGN Mode is off */
   8106			phba->cmf_active_mode = LPFC_CFG_OFF;
   8107			return 0;
   8108		}
   8109	}
   8110	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
   8111			"6470 Setup MI version %d CMF %d mode %d\n",
   8112			sli4_params->mi_ver, sli4_params->cmf,
   8113			phba->cmf_active_mode);
   8114
   8115	mempool_free(mboxq, phba->mbox_mem_pool);
   8116
   8117	/* Initialize atomic counters */
   8118	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
   8119	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
   8120	atomic_set(&phba->cgn_sync_alarm_cnt, 0);
   8121	atomic_set(&phba->cgn_sync_warn_cnt, 0);
   8122	atomic_set(&phba->cgn_driver_evt_cnt, 0);
   8123	atomic_set(&phba->cgn_latency_evt_cnt, 0);
   8124	atomic64_set(&phba->cgn_latency_evt, 0);
   8125
   8126	phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
   8127
   8128	/* Allocate RX Monitor Buffer */
   8129	if (!phba->rxtable) {
   8130		phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY,
   8131					      sizeof(struct rxtable_entry),
   8132					      GFP_KERNEL);
   8133		if (!phba->rxtable) {
   8134			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
   8135					"2644 Failed to alloc memory "
   8136					"for RX Monitor Buffer\n");
   8137			return -ENOMEM;
   8138		}
   8139	}
   8140	atomic_set(&phba->rxtable_idx_head, 0);
   8141	atomic_set(&phba->rxtable_idx_tail, 0);
   8142	return 0;
   8143}
   8144
   8145static int
   8146lpfc_set_host_tm(struct lpfc_hba *phba)
   8147{
   8148	LPFC_MBOXQ_t *mboxq;
   8149	uint32_t len, rc;
   8150	struct timespec64 cur_time;
   8151	struct tm broken;
   8152	uint32_t month, day, year;
   8153	uint32_t hour, minute, second;
   8154	struct lpfc_mbx_set_host_date_time *tm;
   8155
   8156	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   8157	if (!mboxq)
   8158		return -ENOMEM;
   8159
   8160	len = sizeof(struct lpfc_mbx_set_host_data) -
   8161		sizeof(struct lpfc_sli4_cfg_mhdr);
   8162	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
   8163			 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
   8164			 LPFC_SLI4_MBX_EMBED);
   8165
   8166	mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
   8167	mboxq->u.mqe.un.set_host_data.param_len =
   8168			sizeof(struct lpfc_mbx_set_host_date_time);
   8169	tm = &mboxq->u.mqe.un.set_host_data.un.tm;
   8170	ktime_get_real_ts64(&cur_time);
   8171	time64_to_tm(cur_time.tv_sec, 0, &broken);
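	/* time64_to_tm() yields a 0-based month and years since 1900; the
	 * adjustments below produce a 1-based month and a two-digit year
	 * relative to 2000 (e.g. tm_year 122 -> 22 for 2022).
	 */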
   8172	month = broken.tm_mon + 1;
   8173	day = broken.tm_mday;
   8174	year = broken.tm_year - 100;
   8175	hour = broken.tm_hour;
   8176	minute = broken.tm_min;
   8177	second = broken.tm_sec;
   8178	bf_set(lpfc_mbx_set_host_month, tm, month);
   8179	bf_set(lpfc_mbx_set_host_day, tm, day);
   8180	bf_set(lpfc_mbx_set_host_year, tm, year);
   8181	bf_set(lpfc_mbx_set_host_hour, tm, hour);
   8182	bf_set(lpfc_mbx_set_host_min, tm, minute);
   8183	bf_set(lpfc_mbx_set_host_sec, tm, second);
   8184
   8185	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
   8186	mempool_free(mboxq, phba->mbox_mem_pool);
   8187	return rc;
   8188}
   8189
   8190/**
   8191 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
   8192 * @phba: Pointer to HBA context object.
   8193 *
   8194 * This function is the main SLI4 device initialization PCI function. This
   8195 * function is called by the HBA initialization code, HBA reset code and
   8196 * HBA error attention handler code. Caller is not required to hold any
   8197 * locks.
   8198 **/
   8199int
   8200lpfc_sli4_hba_setup(struct lpfc_hba *phba)
   8201{
   8202	int rc, i, cnt, len, dd;
   8203	LPFC_MBOXQ_t *mboxq;
   8204	struct lpfc_mqe *mqe;
   8205	uint8_t *vpd;
   8206	uint32_t vpd_size;
   8207	uint32_t ftr_rsp = 0;
   8208	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
   8209	struct lpfc_vport *vport = phba->pport;
   8210	struct lpfc_dmabuf *mp;
   8211	struct lpfc_rqb *rqbp;
   8212	u32 flg;
   8213
   8214	/* Perform a PCI function reset to start from clean */
   8215	rc = lpfc_pci_function_reset(phba);
   8216	if (unlikely(rc))
   8217		return -ENODEV;
   8218
    8219	/* Check the HBA Host Status Register for readiness */
   8220	rc = lpfc_sli4_post_status_check(phba);
   8221	if (unlikely(rc))
   8222		return -ENODEV;
   8223	else {
   8224		spin_lock_irq(&phba->hbalock);
   8225		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
   8226		flg = phba->sli.sli_flag;
   8227		spin_unlock_irq(&phba->hbalock);
   8228		/* Allow a little time after setting SLI_ACTIVE for any polled
   8229		 * MBX commands to complete via BSG.
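		 * The loop below polls up to 50 times at 20 ms intervals,
		 * i.e. for roughly one second in total.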
   8230		 */
   8231		for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
   8232			msleep(20);
   8233			spin_lock_irq(&phba->hbalock);
   8234			flg = phba->sli.sli_flag;
   8235			spin_unlock_irq(&phba->hbalock);
   8236		}
   8237	}
   8238
   8239	lpfc_sli4_dip(phba);
   8240
   8241	/*
   8242	 * Allocate a single mailbox container for initializing the
   8243	 * port.
   8244	 */
   8245	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   8246	if (!mboxq)
   8247		return -ENOMEM;
   8248
   8249	/* Issue READ_REV to collect vpd and FW information. */
   8250	vpd_size = SLI4_PAGE_SIZE;
   8251	vpd = kzalloc(vpd_size, GFP_KERNEL);
   8252	if (!vpd) {
   8253		rc = -ENOMEM;
   8254		goto out_free_mbox;
   8255	}
   8256
   8257	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
   8258	if (unlikely(rc)) {
   8259		kfree(vpd);
   8260		goto out_free_mbox;
   8261	}
   8262
   8263	mqe = &mboxq->u.mqe;
   8264	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
   8265	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
   8266		phba->hba_flag |= HBA_FCOE_MODE;
   8267		phba->fcp_embed_io = 0;	/* SLI4 FC support only */
   8268	} else {
   8269		phba->hba_flag &= ~HBA_FCOE_MODE;
   8270	}
   8271
   8272	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
   8273		LPFC_DCBX_CEE_MODE)
   8274		phba->hba_flag |= HBA_FIP_SUPPORT;
   8275	else
   8276		phba->hba_flag &= ~HBA_FIP_SUPPORT;
   8277
   8278	phba->hba_flag &= ~HBA_IOQ_FLUSH;
   8279
   8280	if (phba->sli_rev != LPFC_SLI_REV4) {
   8281		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   8282			"0376 READ_REV Error. SLI Level %d "
   8283			"FCoE enabled %d\n",
   8284			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
   8285		rc = -EIO;
   8286		kfree(vpd);
   8287		goto out_free_mbox;
   8288	}
   8289
   8290	rc = lpfc_set_host_tm(phba);
   8291	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
   8292			"6468 Set host date / time: Status x%x:\n", rc);
   8293
   8294	/*
    8295	 * Continue initialization with default values even if the driver
    8296	 * failed to read the FCoE parameter config regions; only read the
    8297	 * parameters if the board is FCoE.
   8298	 */
   8299	if (phba->hba_flag & HBA_FCOE_MODE &&
   8300	    lpfc_sli4_read_fcoe_params(phba))
   8301		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
   8302			"2570 Failed to read FCoE parameters\n");
   8303
   8304	/*
    8305	 * Retrieve the sli4 device physical port name; failure to do so
    8306	 * is considered non-fatal.
   8307	 */
   8308	rc = lpfc_sli4_retrieve_pport_name(phba);
   8309	if (!rc)
   8310		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
   8311				"3080 Successful retrieving SLI4 device "
   8312				"physical port name: %s.\n", phba->Port);
   8313
   8314	rc = lpfc_sli4_get_ctl_attr(phba);
   8315	if (!rc)
   8316		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
   8317				"8351 Successful retrieving SLI4 device "
   8318				"CTL ATTR\n");
   8319
   8320	/*
   8321	 * Evaluate the read rev and vpd data. Populate the driver
   8322	 * state with the results. If this routine fails, the failure
   8323	 * is not fatal as the driver will use generic values.
   8324	 */
   8325	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
   8326	if (unlikely(!rc)) {
   8327		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   8328				"0377 Error %d parsing vpd. "
   8329				"Using defaults.\n", rc);
   8330		rc = 0;
   8331	}
   8332	kfree(vpd);
   8333
   8334	/* Save information as VPD data */
   8335	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
   8336	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
   8337
   8338	/*
    8339	 * This is because the first G7 ASIC doesn't support the standard
    8340	 * 0x5a NVME cmd descriptor type/subtype.
   8341	 */
   8342	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
   8343			LPFC_SLI_INTF_IF_TYPE_6) &&
   8344	    (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
   8345	    (phba->vpd.rev.smRev == 0) &&
   8346	    (phba->cfg_nvme_embed_cmd == 1))
   8347		phba->cfg_nvme_embed_cmd = 0;
   8348
   8349	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
   8350	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
   8351					 &mqe->un.read_rev);
   8352	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
   8353				       &mqe->un.read_rev);
   8354	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
   8355					    &mqe->un.read_rev);
   8356	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
   8357					   &mqe->un.read_rev);
   8358	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
   8359	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
   8360	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
   8361	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
   8362	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
   8363	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
   8364	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
   8365			"(%d):0380 READ_REV Status x%x "
   8366			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
   8367			mboxq->vport ? mboxq->vport->vpi : 0,
   8368			bf_get(lpfc_mqe_status, mqe),
   8369			phba->vpd.rev.opFwName,
   8370			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
   8371			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
   8372
   8373	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
   8374	    LPFC_SLI_INTF_IF_TYPE_0) {
   8375		lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
   8376		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
   8377		if (rc == MBX_SUCCESS) {
   8378			phba->hba_flag |= HBA_RECOVERABLE_UE;
   8379			/* Set 1Sec interval to detect UE */
   8380			phba->eratt_poll_interval = 1;
   8381			phba->sli4_hba.ue_to_sr = bf_get(
   8382					lpfc_mbx_set_feature_UESR,
   8383					&mboxq->u.mqe.un.set_feature);
   8384			phba->sli4_hba.ue_to_rp = bf_get(
   8385					lpfc_mbx_set_feature_UERP,
   8386					&mboxq->u.mqe.un.set_feature);
   8387		}
   8388	}
   8389
   8390	if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
   8391		/* Enable MDS Diagnostics only if the SLI Port supports it */
   8392		lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
   8393		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
   8394		if (rc != MBX_SUCCESS)
   8395			phba->mds_diags_support = 0;
   8396	}
   8397
   8398	/*
   8399	 * Discover the port's supported feature set and match it against the
   8400	 * hosts requests.
   8401	 */
   8402	lpfc_request_features(phba, mboxq);
   8403	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
   8404	if (unlikely(rc)) {
   8405		rc = -EIO;
   8406		goto out_free_mbox;
   8407	}
   8408
   8409	/* Disable VMID if app header is not supported */
   8410	if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
   8411						  &mqe->un.req_ftrs))) {
   8412		bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
   8413		phba->cfg_vmid_app_header = 0;
   8414		lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
   8415				"1242 vmid feature not supported\n");
   8416	}
   8417
   8418	/*
   8419	 * The port must support FCP initiator mode as this is the
   8420	 * only mode running in the host.
   8421	 */
   8422	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
   8423		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
   8424				"0378 No support for fcpi mode.\n");
   8425		ftr_rsp++;
   8426	}
   8427
   8428	/* Performance Hints are ONLY for FCoE */
   8429	if (phba->hba_flag & HBA_FCOE_MODE) {
   8430		if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
   8431			phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
   8432		else
   8433			phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
   8434	}
   8435
   8436	/*
   8437	 * If the port cannot support the host's requested features
   8438	 * then turn off the global config parameters to disable the
   8439	 * feature in the driver.  This is not a fatal error.
   8440	 */
   8441	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
   8442		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
   8443			phba->cfg_enable_bg = 0;
   8444			phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
   8445			ftr_rsp++;
   8446		}
   8447	}
   8448
   8449	if (phba->max_vpi && phba->cfg_enable_npiv &&
   8450	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
   8451		ftr_rsp++;
   8452
   8453	if (ftr_rsp) {
   8454		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
   8455				"0379 Feature Mismatch Data: x%08x %08x "
   8456				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
   8457				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
   8458				phba->cfg_enable_npiv, phba->max_vpi);
   8459		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
   8460			phba->cfg_enable_bg = 0;
   8461		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
   8462			phba->cfg_enable_npiv = 0;
   8463	}
   8464
   8465	/* These SLI3 features are assumed in SLI4 */
   8466	spin_lock_irq(&phba->hbalock);
   8467	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
   8468	spin_unlock_irq(&phba->hbalock);
   8469
   8470	/* Always try to enable dual dump feature if we can */
   8471	lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
   8472	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
   8473	dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
   8474	if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
   8475		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
   8476				"6448 Dual Dump is enabled\n");
   8477	else
   8478		lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
   8479				"6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
   8480				"rc:x%x dd:x%x\n",
   8481				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
   8482				lpfc_sli_config_mbox_subsys_get(
   8483					phba, mboxq),
   8484				lpfc_sli_config_mbox_opcode_get(
   8485					phba, mboxq),
   8486				rc, dd);
   8487	/*
   8488	 * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
    8489	 * calls depend on these resources to complete port setup.
   8490	 */
   8491	rc = lpfc_sli4_alloc_resource_identifiers(phba);
   8492	if (rc) {
   8493		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   8494				"2920 Failed to alloc Resource IDs "
   8495				"rc = x%x\n", rc);
   8496		goto out_free_mbox;
   8497	}
   8498
   8499	lpfc_set_host_data(phba, mboxq);
   8500
   8501	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
   8502	if (rc) {
   8503		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
   8504				"2134 Failed to set host os driver version %x",
   8505				rc);
   8506	}
   8507
   8508	/* Read the port's service parameters. */
   8509	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
   8510	if (rc) {
   8511		phba->link_state = LPFC_HBA_ERROR;
   8512		rc = -ENOMEM;
   8513		goto out_free_mbox;
   8514	}
   8515
   8516	mboxq->vport = vport;
   8517	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
   8518	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
   8519	if (rc == MBX_SUCCESS) {
   8520		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
   8521		rc = 0;
   8522	}
   8523
   8524	/*
   8525	 * This memory was allocated by the lpfc_read_sparam routine but is
   8526	 * no longer needed.  It is released and ctx_buf NULLed to prevent
   8527	 * unintended pointer access as the mbox is reused.
   8528	 */
   8529	lpfc_mbuf_free(phba, mp->virt, mp->phys);
   8530	kfree(mp);
   8531	mboxq->ctx_buf = NULL;
   8532	if (unlikely(rc)) {
   8533		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   8534				"0382 READ_SPARAM command failed "
   8535				"status %d, mbxStatus x%x\n",
   8536				rc, bf_get(lpfc_mqe_status, mqe));
   8537		phba->link_state = LPFC_HBA_ERROR;
   8538		rc = -EIO;
   8539		goto out_free_mbox;
   8540	}
   8541
   8542	lpfc_update_vport_wwn(vport);
   8543
   8544	/* Update the fc_host data structures with new wwn. */
   8545	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
   8546	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
   8547
   8548	/* Create all the SLI4 queues */
   8549	rc = lpfc_sli4_queue_create(phba);
   8550	if (rc) {
   8551		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   8552				"3089 Failed to allocate queues\n");
   8553		rc = -ENODEV;
   8554		goto out_free_mbox;
   8555	}
   8556	/* Set up all the queues to the device */
   8557	rc = lpfc_sli4_queue_setup(phba);
   8558	if (unlikely(rc)) {
   8559		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   8560				"0381 Error %d during queue setup.\n ", rc);
   8561		goto out_stop_timers;
   8562	}
   8563	/* Initialize the driver internal SLI layer lists. */
   8564	lpfc_sli4_setup(phba);
   8565	lpfc_sli4_queue_init(phba);
   8566
   8567	/* update host els xri-sgl sizes and mappings */
   8568	rc = lpfc_sli4_els_sgl_update(phba);
   8569	if (unlikely(rc)) {
   8570		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   8571				"1400 Failed to update xri-sgl size and "
   8572				"mapping: %d\n", rc);
   8573		goto out_destroy_queue;
   8574	}
   8575
   8576	/* register the els sgl pool to the port */
   8577	rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
   8578				       phba->sli4_hba.els_xri_cnt);
   8579	if (unlikely(rc < 0)) {
   8580		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   8581				"0582 Error %d during els sgl post "
   8582				"operation\n", rc);
   8583		rc = -ENODEV;
   8584		goto out_destroy_queue;
   8585	}
   8586	phba->sli4_hba.els_xri_cnt = rc;
   8587
   8588	if (phba->nvmet_support) {
   8589		/* update host nvmet xri-sgl sizes and mappings */
   8590		rc = lpfc_sli4_nvmet_sgl_update(phba);
   8591		if (unlikely(rc)) {
   8592			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   8593					"6308 Failed to update nvmet-sgl size "
   8594					"and mapping: %d\n", rc);
   8595			goto out_destroy_queue;
   8596		}
   8597
   8598		/* register the nvmet sgl pool to the port */
   8599		rc = lpfc_sli4_repost_sgl_list(
   8600			phba,
   8601			&phba->sli4_hba.lpfc_nvmet_sgl_list,
   8602			phba->sli4_hba.nvmet_xri_cnt);
   8603		if (unlikely(rc < 0)) {
   8604			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   8605					"3117 Error %d during nvmet "
   8606					"sgl post\n", rc);
   8607			rc = -ENODEV;
   8608			goto out_destroy_queue;
   8609		}
   8610		phba->sli4_hba.nvmet_xri_cnt = rc;
   8611
   8612		/* We allocate an iocbq for every receive context SGL.
   8613		 * The additional allocation is for abort and ls handling.
   8614		 */
   8615		cnt = phba->sli4_hba.nvmet_xri_cnt +
   8616			phba->sli4_hba.max_cfg_param.max_xri;
   8617	} else {
   8618		/* update host common xri-sgl sizes and mappings */
   8619		rc = lpfc_sli4_io_sgl_update(phba);
   8620		if (unlikely(rc)) {
   8621			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   8622					"6082 Failed to update nvme-sgl size "
   8623					"and mapping: %d\n", rc);
   8624			goto out_destroy_queue;
   8625		}
   8626
   8627		/* register the allocated common sgl pool to the port */
   8628		rc = lpfc_sli4_repost_io_sgl_list(phba);
   8629		if (unlikely(rc)) {
   8630			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   8631					"6116 Error %d during nvme sgl post "
   8632					"operation\n", rc);
   8633			/* Some NVME buffers were moved to abort nvme list */
   8634			/* A pci function reset will repost them */
   8635			rc = -ENODEV;
   8636			goto out_destroy_queue;
   8637		}
   8638		/* Each lpfc_io_buf job structure has an iocbq element.
   8639		 * This cnt provides for abort, els, ct and ls requests.
   8640		 */
   8641		cnt = phba->sli4_hba.max_cfg_param.max_xri;
   8642	}
   8643
   8644	if (!phba->sli.iocbq_lookup) {
   8645		/* Initialize and populate the iocb list per host */
   8646		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
   8647				"2821 initialize iocb list with %d entries\n",
   8648				cnt);
   8649		rc = lpfc_init_iocb_list(phba, cnt);
   8650		if (rc) {
   8651			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   8652					"1413 Failed to init iocb list.\n");
   8653			goto out_destroy_queue;
   8654		}
   8655	}
   8656
   8657	if (phba->nvmet_support)
   8658		lpfc_nvmet_create_targetport(phba);
   8659
   8660	if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
   8661		/* Post initial buffers to all RQs created */
   8662		for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
   8663			rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
   8664			INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
   8665			rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
   8666			rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
   8667			rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
   8668			rqbp->buffer_count = 0;
   8669
   8670			lpfc_post_rq_buffer(
   8671				phba, phba->sli4_hba.nvmet_mrq_hdr[i],
   8672				phba->sli4_hba.nvmet_mrq_data[i],
   8673				phba->cfg_nvmet_mrq_post, i);
   8674		}
   8675	}
   8676
   8677	/* Post the rpi header region to the device. */
   8678	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
   8679	if (unlikely(rc)) {
   8680		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   8681				"0393 Error %d during rpi post operation\n",
   8682				rc);
   8683		rc = -ENODEV;
   8684		goto out_free_iocblist;
   8685	}
   8686	lpfc_sli4_node_prep(phba);
   8687
   8688	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
   8689		if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
   8690			/*
   8691			 * The FC Port needs to register FCFI (index 0)
   8692			 */
   8693			lpfc_reg_fcfi(phba, mboxq);
   8694			mboxq->vport = phba->pport;
   8695			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
   8696			if (rc != MBX_SUCCESS)
   8697				goto out_unset_queue;
   8698			rc = 0;
   8699			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
   8700						&mboxq->u.mqe.un.reg_fcfi);
   8701		} else {
   8702			/* We are a NVME Target mode with MRQ > 1 */
   8703
   8704			/* First register the FCFI */
   8705			lpfc_reg_fcfi_mrq(phba, mboxq, 0);
   8706			mboxq->vport = phba->pport;
   8707			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
   8708			if (rc != MBX_SUCCESS)
   8709				goto out_unset_queue;
   8710			rc = 0;
   8711			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
   8712						&mboxq->u.mqe.un.reg_fcfi_mrq);
   8713
   8714			/* Next register the MRQs */
   8715			lpfc_reg_fcfi_mrq(phba, mboxq, 1);
   8716			mboxq->vport = phba->pport;
   8717			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
   8718			if (rc != MBX_SUCCESS)
   8719				goto out_unset_queue;
   8720			rc = 0;
   8721		}
   8722		/* Check if the port is configured to be disabled */
   8723		lpfc_sli_read_link_ste(phba);
   8724	}
   8725
   8726	/* Don't post more new bufs if repost already recovered
   8727	 * the nvme sgls.
   8728	 */
   8729	if (phba->nvmet_support == 0) {
   8730		if (phba->sli4_hba.io_xri_cnt == 0) {
   8731			len = lpfc_new_io_buf(
   8732					      phba, phba->sli4_hba.io_xri_max);
   8733			if (len == 0) {
   8734				rc = -ENOMEM;
   8735				goto out_unset_queue;
   8736			}
   8737
   8738			if (phba->cfg_xri_rebalancing)
   8739				lpfc_create_multixri_pools(phba);
   8740		}
   8741	} else {
   8742		phba->cfg_xri_rebalancing = 0;
   8743	}
   8744
   8745	/* Allow asynchronous mailbox command to go through */
   8746	spin_lock_irq(&phba->hbalock);
   8747	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
   8748	spin_unlock_irq(&phba->hbalock);
   8749
   8750	/* Post receive buffers to the device */
   8751	lpfc_sli4_rb_setup(phba);
   8752
   8753	/* Reset HBA FCF states after HBA reset */
   8754	phba->fcf.fcf_flag = 0;
   8755	phba->fcf.current_rec.flag = 0;
   8756
   8757	/* Start the ELS watchdog timer */
   8758	mod_timer(&vport->els_tmofunc,
   8759		  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
   8760
   8761	/* Start heart beat timer */
   8762	mod_timer(&phba->hb_tmofunc,
   8763		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
   8764	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
   8765	phba->last_completion_time = jiffies;
   8766
   8767	/* start eq_delay heartbeat */
   8768	if (phba->cfg_auto_imax)
   8769		queue_delayed_work(phba->wq, &phba->eq_delay_work,
   8770				   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
   8771
   8772	/* start per phba idle_stat_delay heartbeat */
   8773	lpfc_init_idle_stat_hb(phba);
   8774
   8775	/* Start error attention (ERATT) polling timer */
   8776	mod_timer(&phba->eratt_poll,
   8777		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
   8778
   8779	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
   8780	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
   8781		rc = pci_enable_pcie_error_reporting(phba->pcidev);
   8782		if (!rc) {
   8783			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
   8784					"2829 This device supports "
   8785					"Advanced Error Reporting (AER)\n");
   8786			spin_lock_irq(&phba->hbalock);
   8787			phba->hba_flag |= HBA_AER_ENABLED;
   8788			spin_unlock_irq(&phba->hbalock);
   8789		} else {
   8790			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
   8791					"2830 This device does not support "
   8792					"Advanced Error Reporting (AER)\n");
   8793			phba->cfg_aer_support = 0;
   8794		}
   8795		rc = 0;
   8796	}
   8797
   8798	/*
   8799	 * The port is ready, set the host's link state to LINK_DOWN
   8800	 * in preparation for link interrupts.
   8801	 */
   8802	spin_lock_irq(&phba->hbalock);
   8803	phba->link_state = LPFC_LINK_DOWN;
   8804
   8805	/* Check if physical ports are trunked */
   8806	if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
   8807		phba->trunk_link.link0.state = LPFC_LINK_DOWN;
   8808	if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
   8809		phba->trunk_link.link1.state = LPFC_LINK_DOWN;
   8810	if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
   8811		phba->trunk_link.link2.state = LPFC_LINK_DOWN;
   8812	if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
   8813		phba->trunk_link.link3.state = LPFC_LINK_DOWN;
   8814	spin_unlock_irq(&phba->hbalock);
   8815
   8816	/* Arm the CQs and then EQs on device */
   8817	lpfc_sli4_arm_cqeq_intr(phba);
   8818
   8819	/* Indicate device interrupt mode */
   8820	phba->sli4_hba.intr_enable = 1;
   8821
   8822	/* Setup CMF after HBA is initialized */
   8823	lpfc_cmf_setup(phba);
   8824
   8825	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
   8826	    (phba->hba_flag & LINK_DISABLED)) {
   8827		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   8828				"3103 Adapter Link is disabled.\n");
   8829		lpfc_down_link(phba, mboxq);
   8830		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
   8831		if (rc != MBX_SUCCESS) {
   8832			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   8833					"3104 Adapter failed to issue "
   8834					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
   8835			goto out_io_buff_free;
   8836		}
   8837	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
   8838		/* don't perform init_link on SLI4 FC port loopback test */
   8839		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
   8840			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
   8841			if (rc)
   8842				goto out_io_buff_free;
   8843		}
   8844	}
   8845	mempool_free(mboxq, phba->mbox_mem_pool);
   8846
   8847	/* Enable RAS FW log support */
   8848	lpfc_sli4_ras_setup(phba);
   8849
   8850	phba->hba_flag |= HBA_SETUP;
   8851	return rc;
   8852
   8853out_io_buff_free:
   8854	/* Free allocated IO Buffers */
   8855	lpfc_io_free(phba);
   8856out_unset_queue:
   8857	/* Unset all the queues set up in this routine when error out */
   8858	lpfc_sli4_queue_unset(phba);
   8859out_free_iocblist:
   8860	lpfc_free_iocb_list(phba);
   8861out_destroy_queue:
   8862	lpfc_sli4_queue_destroy(phba);
   8863out_stop_timers:
   8864	lpfc_stop_hba_timers(phba);
   8865out_free_mbox:
   8866	mempool_free(mboxq, phba->mbox_mem_pool);
   8867	return rc;
   8868}
   8869
   8870/**
   8871 * lpfc_mbox_timeout - Timeout call back function for mbox timer
   8872 * @t: Context to fetch pointer to hba structure from.
   8873 *
   8874 * This is the callback function for mailbox timer. The mailbox
   8875 * timer is armed when a new mailbox command is issued and the timer
    8876 * is deleted when the mailbox completes. The function is called by
   8877 * the kernel timer code when a mailbox does not complete within
   8878 * expected time. This function wakes up the worker thread to
   8879 * process the mailbox timeout and returns. All the processing is
   8880 * done by the worker thread function lpfc_mbox_timeout_handler.
   8881 **/
   8882void
   8883lpfc_mbox_timeout(struct timer_list *t)
   8884{
   8885	struct lpfc_hba  *phba = from_timer(phba, t, sli.mbox_tmo);
   8886	unsigned long iflag;
   8887	uint32_t tmo_posted;
   8888
   8889	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
   8890	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
   8891	if (!tmo_posted)
   8892		phba->pport->work_port_events |= WORKER_MBOX_TMO;
   8893	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
   8894
   8895	if (!tmo_posted)
   8896		lpfc_worker_wake_up(phba);
   8897	return;
   8898}
   8899
   8900/**
   8901 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
   8902 *                                    are pending
   8903 * @phba: Pointer to HBA context object.
   8904 *
   8905 * This function checks if any mailbox completions are present on the mailbox
   8906 * completion queue.
   8907 **/
   8908static bool
   8909lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
   8910{
   8911
   8912	uint32_t idx;
   8913	struct lpfc_queue *mcq;
   8914	struct lpfc_mcqe *mcqe;
   8915	bool pending_completions = false;
   8916	uint8_t	qe_valid;
   8917
   8918	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
   8919		return false;
   8920
   8921	/* Check for completions on mailbox completion queue */
   8922
   8923	mcq = phba->sli4_hba.mbx_cq;
   8924	idx = mcq->hba_index;
   8925	qe_valid = mcq->qe_valid;
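	/* Consume entries by comparing each CQE's valid bit against the
	 * queue's current phase (qe_valid); on CQ-AV capable ports the
	 * expected phase flips each time the index wraps back to zero.
	 */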
   8926	while (bf_get_le32(lpfc_cqe_valid,
   8927	       (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
   8928		mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
   8929		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
   8930		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
   8931			pending_completions = true;
   8932			break;
   8933		}
   8934		idx = (idx + 1) % mcq->entry_count;
   8935		if (mcq->hba_index == idx)
   8936			break;
   8937
   8938		/* if the index wrapped around, toggle the valid bit */
   8939		if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
   8940			qe_valid = (qe_valid) ? 0 : 1;
   8941	}
   8942	return pending_completions;
   8943
   8944}
   8945
   8946/**
   8947 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
   8948 *					      that were missed.
   8949 * @phba: Pointer to HBA context object.
   8950 *
    8951 * For sli4, it is possible to miss an interrupt. As such, mbox completions
    8952 * may be missed, causing erroneous mailbox timeouts to occur. This function
   8953 * checks to see if mbox completions are on the mailbox completion queue
   8954 * and will process all the completions associated with the eq for the
   8955 * mailbox completion queue.
   8956 **/
   8957static bool
   8958lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
   8959{
   8960	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
   8961	uint32_t eqidx;
   8962	struct lpfc_queue *fpeq = NULL;
   8963	struct lpfc_queue *eq;
   8964	bool mbox_pending;
   8965
   8966	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
   8967		return false;
   8968
   8969	/* Find the EQ associated with the mbox CQ */
   8970	if (sli4_hba->hdwq) {
   8971		for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
   8972			eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
   8973			if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
   8974				fpeq = eq;
   8975				break;
   8976			}
   8977		}
   8978	}
   8979	if (!fpeq)
   8980		return false;
   8981
   8982	/* Turn off interrupts from this EQ */
   8983
   8984	sli4_hba->sli4_eq_clr_intr(fpeq);
   8985
   8986	/* Check to see if a mbox completion is pending */
   8987
   8988	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
   8989
   8990	/*
   8991	 * If a mbox completion is pending, process all the events on EQ
   8992	 * associated with the mbox completion queue (this could include
   8993	 * mailbox commands, async events, els commands, receive queue data
   8994	 * and fcp commands)
   8995	 */
   8996
   8997	if (mbox_pending)
   8998		/* process and rearm the EQ */
   8999		lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
   9000	else
   9001		/* Always clear and re-arm the EQ */
   9002		sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
   9003
   9004	return mbox_pending;
   9005
   9006}
   9007
   9008/**
   9009 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
   9010 * @phba: Pointer to HBA context object.
   9011 *
   9012 * This function is called from worker thread when a mailbox command times out.
   9013 * The caller is not required to hold any locks. This function will reset the
   9014 * HBA and recover all the pending commands.
   9015 **/
   9016void
   9017lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
   9018{
   9019	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
   9020	MAILBOX_t *mb = NULL;
   9021
   9022	struct lpfc_sli *psli = &phba->sli;
   9023
   9024	/* If the mailbox completed, process the completion */
   9025	lpfc_sli4_process_missed_mbox_completions(phba);
   9026
   9027	if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
   9028		return;
   9029
   9030	if (pmbox != NULL)
   9031		mb = &pmbox->u.mb;
   9032	/* Check the pmbox pointer first.  There is a race condition
   9033	 * between the mbox timeout handler getting executed in the
   9034	 * worklist and the mailbox actually completing. When this
   9035	 * race condition occurs, the mbox_active will be NULL.
   9036	 */
   9037	spin_lock_irq(&phba->hbalock);
   9038	if (pmbox == NULL) {
   9039		lpfc_printf_log(phba, KERN_WARNING,
   9040				LOG_MBOX | LOG_SLI,
   9041				"0353 Active Mailbox cleared - mailbox timeout "
   9042				"exiting\n");
   9043		spin_unlock_irq(&phba->hbalock);
   9044		return;
   9045	}
   9046
   9047	/* Mbox cmd <mbxCommand> timeout */
   9048	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   9049			"0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
   9050			mb->mbxCommand,
   9051			phba->pport->port_state,
   9052			phba->sli.sli_flag,
   9053			phba->sli.mbox_active);
   9054	spin_unlock_irq(&phba->hbalock);
   9055
   9056	/* Setting state unknown so lpfc_sli_abort_iocb_ring
   9057	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
   9058	 * it to fail all outstanding SCSI IO.
   9059	 */
   9060	spin_lock_irq(&phba->pport->work_port_lock);
   9061	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
   9062	spin_unlock_irq(&phba->pport->work_port_lock);
   9063	spin_lock_irq(&phba->hbalock);
   9064	phba->link_state = LPFC_LINK_UNKNOWN;
   9065	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
   9066	spin_unlock_irq(&phba->hbalock);
   9067
   9068	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   9069			"0345 Resetting board due to mailbox timeout\n");
   9070
   9071	/* Reset the HBA device */
   9072	lpfc_reset_hba(phba);
   9073}
   9074
   9075/**
   9076 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
   9077 * @phba: Pointer to HBA context object.
   9078 * @pmbox: Pointer to mailbox object.
   9079 * @flag: Flag indicating how the mailbox need to be processed.
   9080 *
   9081 * This function is called by discovery code and HBA management code
   9082 * to submit a mailbox command to firmware with SLI-3 interface spec. This
   9083 * function gets the hbalock to protect the data structures.
   9084 * The mailbox command can be submitted in polling mode, in which case
   9085 * this function will wait in a polling loop for the completion of the
   9086 * mailbox.
    9087 * If the mailbox is submitted in no_wait mode (not polling), the
    9088 * function will submit the command and return immediately without waiting
    9089 * for the mailbox completion. The no_wait mode is supported only when the HBA
   9090 * is in SLI2/SLI3 mode - interrupts are enabled.
   9091 * The SLI interface allows only one mailbox pending at a time. If the
   9092 * mailbox is issued in polling mode and there is already a mailbox
   9093 * pending, then the function will return an error. If the mailbox is issued
   9094 * in NO_WAIT mode and there is a mailbox pending already, the function
   9095 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
    9096 * The sli layer owns the mailbox object until the completion of the mailbox
    9097 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
   9098 * return codes the caller owns the mailbox command after the return of
   9099 * the function.
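 *
 * Illustrative polled-mode usage (a sketch modeled on callers elsewhere in
 * this file, e.g. lpfc_set_host_tm; not a verbatim excerpt):
 *
 *   rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *   mempool_free(mboxq, phba->mbox_mem_pool);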
   9100 **/
   9101static int
   9102lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
   9103		       uint32_t flag)
   9104{
   9105	MAILBOX_t *mbx;
   9106	struct lpfc_sli *psli = &phba->sli;
   9107	uint32_t status, evtctr;
   9108	uint32_t ha_copy, hc_copy;
   9109	int i;
   9110	unsigned long timeout;
   9111	unsigned long drvr_flag = 0;
   9112	uint32_t word0, ldata;
   9113	void __iomem *to_slim;
   9114	int processing_queue = 0;
   9115
   9116	spin_lock_irqsave(&phba->hbalock, drvr_flag);
   9117	if (!pmbox) {
   9118		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
   9119		/* processing mbox queue from intr_handler */
   9120		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
   9121			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
   9122			return MBX_SUCCESS;
   9123		}
   9124		processing_queue = 1;
   9125		pmbox = lpfc_mbox_get(phba);
   9126		if (!pmbox) {
   9127			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
   9128			return MBX_SUCCESS;
   9129		}
   9130	}
   9131
   9132	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
   9133		pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
    9134		if (!pmbox->vport) {
   9135			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
   9136			lpfc_printf_log(phba, KERN_ERR,
   9137					LOG_MBOX | LOG_VPORT,
   9138					"1806 Mbox x%x failed. No vport\n",
   9139					pmbox->u.mb.mbxCommand);
   9140			dump_stack();
   9141			goto out_not_finished;
   9142		}
   9143	}
   9144
   9145	/* If the PCI channel is in offline state, do not post mbox. */
   9146	if (unlikely(pci_channel_offline(phba->pcidev))) {
   9147		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
   9148		goto out_not_finished;
   9149	}
   9150
   9151	/* If HBA has a deferred error attention, fail the iocb. */
   9152	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
   9153		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
   9154		goto out_not_finished;
   9155	}
   9156
   9157	psli = &phba->sli;
   9158
   9159	mbx = &pmbox->u.mb;
   9160	status = MBX_SUCCESS;
   9161
   9162	if (phba->link_state == LPFC_HBA_ERROR) {
   9163		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
   9164
   9165		/* Mbox command <mbxCommand> cannot issue */
   9166		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   9167				"(%d):0311 Mailbox command x%x cannot "
   9168				"issue Data: x%x x%x\n",
   9169				pmbox->vport ? pmbox->vport->vpi : 0,
   9170				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
   9171		goto out_not_finished;
   9172	}
   9173
   9174	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
   9175		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
   9176			!(hc_copy & HC_MBINT_ENA)) {
   9177			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
   9178			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   9179				"(%d):2528 Mailbox command x%x cannot "
   9180				"issue Data: x%x x%x\n",
   9181				pmbox->vport ? pmbox->vport->vpi : 0,
   9182				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
   9183			goto out_not_finished;
   9184		}
   9185	}
   9186
   9187	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
   9188		/* Polling for a mbox command when another one is already active
   9189		 * is not allowed in SLI. Also, the driver must have established
   9190		 * SLI2 mode to queue and process multiple mbox commands.
   9191		 */
   9192
   9193		if (flag & MBX_POLL) {
   9194			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
   9195
   9196			/* Mbox command <mbxCommand> cannot issue */
   9197			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   9198					"(%d):2529 Mailbox command x%x "
   9199					"cannot issue Data: x%x x%x\n",
   9200					pmbox->vport ? pmbox->vport->vpi : 0,
   9201					pmbox->u.mb.mbxCommand,
   9202					psli->sli_flag, flag);
   9203			goto out_not_finished;
   9204		}
   9205
   9206		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
   9207			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
   9208			/* Mbox command <mbxCommand> cannot issue */
   9209			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   9210					"(%d):2530 Mailbox command x%x "
   9211					"cannot issue Data: x%x x%x\n",
   9212					pmbox->vport ? pmbox->vport->vpi : 0,
   9213					pmbox->u.mb.mbxCommand,
   9214					psli->sli_flag, flag);
   9215			goto out_not_finished;
   9216		}
   9217
   9218		/* Another mailbox command is still being processed, queue this
   9219		 * command to be processed later.
   9220		 */
   9221		lpfc_mbox_put(phba, pmbox);
   9222
   9223		/* Mbox cmd issue - BUSY */
   9224		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
   9225				"(%d):0308 Mbox cmd issue - BUSY Data: "
   9226				"x%x x%x x%x x%x\n",
   9227				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
   9228				mbx->mbxCommand,
   9229				phba->pport ? phba->pport->port_state : 0xff,
   9230				psli->sli_flag, flag);
   9231
   9232		psli->slistat.mbox_busy++;
   9233		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
   9234
   9235		if (pmbox->vport) {
   9236			lpfc_debugfs_disc_trc(pmbox->vport,
   9237				LPFC_DISC_TRC_MBOX_VPORT,
   9238				"MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
   9239				(uint32_t)mbx->mbxCommand,
   9240				mbx->un.varWords[0], mbx->un.varWords[1]);
   9241		}
   9242		else {
   9243			lpfc_debugfs_disc_trc(phba->pport,
   9244				LPFC_DISC_TRC_MBOX,
   9245				"MBOX Bsy:        cmd:x%x mb:x%x x%x",
   9246				(uint32_t)mbx->mbxCommand,
   9247				mbx->un.varWords[0], mbx->un.varWords[1]);
   9248		}
   9249
   9250		return MBX_BUSY;
   9251	}
   9252
   9253	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
   9254
   9255	/* If we are not polling, we MUST be in SLI2 mode */
   9256	if (flag != MBX_POLL) {
   9257		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
   9258		    (mbx->mbxCommand != MBX_KILL_BOARD)) {
   9259			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
   9260			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
   9261			/* Mbox command <mbxCommand> cannot issue */
   9262			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   9263					"(%d):2531 Mailbox command x%x "
   9264					"cannot issue Data: x%x x%x\n",
   9265					pmbox->vport ? pmbox->vport->vpi : 0,
   9266					pmbox->u.mb.mbxCommand,
   9267					psli->sli_flag, flag);
   9268			goto out_not_finished;
   9269		}
   9270		/* timeout active mbox command */
   9271		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
   9272					   1000);
   9273		mod_timer(&psli->mbox_tmo, jiffies + timeout);
   9274	}
   9275
   9276	/* Mailbox cmd <cmd> issue */
   9277	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
   9278			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
   9279			"x%x\n",
   9280			pmbox->vport ? pmbox->vport->vpi : 0,
   9281			mbx->mbxCommand,
   9282			phba->pport ? phba->pport->port_state : 0xff,
   9283			psli->sli_flag, flag);
   9284
   9285	if (mbx->mbxCommand != MBX_HEARTBEAT) {
   9286		if (pmbox->vport) {
   9287			lpfc_debugfs_disc_trc(pmbox->vport,
   9288				LPFC_DISC_TRC_MBOX_VPORT,
   9289				"MBOX Send vport: cmd:x%x mb:x%x x%x",
   9290				(uint32_t)mbx->mbxCommand,
   9291				mbx->un.varWords[0], mbx->un.varWords[1]);
   9292		}
   9293		else {
   9294			lpfc_debugfs_disc_trc(phba->pport,
   9295				LPFC_DISC_TRC_MBOX,
   9296				"MBOX Send:       cmd:x%x mb:x%x x%x",
   9297				(uint32_t)mbx->mbxCommand,
   9298				mbx->un.varWords[0], mbx->un.varWords[1]);
   9299		}
   9300	}
   9301
   9302	psli->slistat.mbox_cmd++;
   9303	evtctr = psli->slistat.mbox_event;
   9304
   9305	/* next set own bit for the adapter and copy over command word */
   9306	mbx->mbxOwner = OWN_CHIP;
   9307
   9308	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
   9309		/* Populate mbox extension offset word. */
   9310		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
   9311			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
   9312				= (uint8_t *)phba->mbox_ext
   9313				  - (uint8_t *)phba->mbox;
   9314		}
   9315
   9316		/* Copy the mailbox extension data */
   9317		if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
   9318			lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
   9319					      (uint8_t *)phba->mbox_ext,
   9320					      pmbox->in_ext_byte_len);
   9321		}
   9322		/* Copy command data to host SLIM area */
   9323		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
   9324	} else {
   9325		/* Populate mbox extension offset word. */
   9326		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
   9327			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
   9328				= MAILBOX_HBA_EXT_OFFSET;
   9329
   9330		/* Copy the mailbox extension data */
   9331		if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
   9332			lpfc_memcpy_to_slim(phba->MBslimaddr +
   9333				MAILBOX_HBA_EXT_OFFSET,
   9334				pmbox->ctx_buf, pmbox->in_ext_byte_len);
   9335
   9336		if (mbx->mbxCommand == MBX_CONFIG_PORT)
   9337			/* copy command data into host mbox for cmpl */
   9338			lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
   9339					      MAILBOX_CMD_SIZE);
   9340
   9341		/* First copy mbox command data to HBA SLIM, skip past first
   9342		   word */
   9343		to_slim = phba->MBslimaddr + sizeof (uint32_t);
   9344		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
   9345			    MAILBOX_CMD_SIZE - sizeof (uint32_t));
   9346
   9347		/* Next copy over first word, with mbxOwner set */
   9348		ldata = *((uint32_t *)mbx);
   9349		to_slim = phba->MBslimaddr;
   9350		writel(ldata, to_slim);
   9351		readl(to_slim); /* flush */
   9352
   9353		if (mbx->mbxCommand == MBX_CONFIG_PORT)
   9354			/* switch over to host mailbox */
   9355			psli->sli_flag |= LPFC_SLI_ACTIVE;
   9356	}
   9357
   9358	wmb();
   9359
   9360	switch (flag) {
   9361	case MBX_NOWAIT:
   9362		/* Set up reference to mailbox command */
   9363		psli->mbox_active = pmbox;
   9364		/* Interrupt board to do it */
   9365		writel(CA_MBATT, phba->CAregaddr);
   9366		readl(phba->CAregaddr); /* flush */
   9367		/* Don't wait for it to finish, just return */
   9368		break;
   9369
   9370	case MBX_POLL:
   9371		/* Set up null reference to mailbox command */
   9372		psli->mbox_active = NULL;
   9373		/* Interrupt board to do it */
   9374		writel(CA_MBATT, phba->CAregaddr);
   9375		readl(phba->CAregaddr); /* flush */
   9376
   9377		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
   9378			/* First read mbox status word */
   9379			word0 = *((uint32_t *)phba->mbox);
   9380			word0 = le32_to_cpu(word0);
   9381		} else {
   9382			/* First read mbox status word */
   9383			if (lpfc_readl(phba->MBslimaddr, &word0)) {
   9384				spin_unlock_irqrestore(&phba->hbalock,
   9385						       drvr_flag);
   9386				goto out_not_finished;
   9387			}
   9388		}
   9389
   9390		/* Read the HBA Host Attention Register */
   9391		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
   9392			spin_unlock_irqrestore(&phba->hbalock,
   9393						       drvr_flag);
   9394			goto out_not_finished;
   9395		}
   9396		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
   9397							1000) + jiffies;
   9398		i = 0;
   9399		/* Wait for command to complete */
   9400		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
   9401		       (!(ha_copy & HA_MBATT) &&
   9402			(phba->link_state > LPFC_WARM_START))) {
   9403			if (time_after(jiffies, timeout)) {
   9404				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
   9405				spin_unlock_irqrestore(&phba->hbalock,
   9406						       drvr_flag);
   9407				goto out_not_finished;
   9408			}
   9409
   9410			/* Check if we took a mbox interrupt while we were
   9411			   polling */
   9412			if (((word0 & OWN_CHIP) != OWN_CHIP)
   9413			    && (evtctr != psli->slistat.mbox_event))
   9414				break;
   9415
   9416			if (i++ > 10) {
   9417				spin_unlock_irqrestore(&phba->hbalock,
   9418						       drvr_flag);
   9419				msleep(1);
   9420				spin_lock_irqsave(&phba->hbalock, drvr_flag);
   9421			}
   9422
   9423			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
   9424				/* First copy command data */
   9425				word0 = *((uint32_t *)phba->mbox);
   9426				word0 = le32_to_cpu(word0);
   9427				if (mbx->mbxCommand == MBX_CONFIG_PORT) {
   9428					MAILBOX_t *slimmb;
   9429					uint32_t slimword0;
   9430					/* Check real SLIM for any errors */
   9431					slimword0 = readl(phba->MBslimaddr);
   9432					slimmb = (MAILBOX_t *)&slimword0;
   9433					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
   9434					    && slimmb->mbxStatus) {
   9435						psli->sli_flag &=
   9436						    ~LPFC_SLI_ACTIVE;
   9437						word0 = slimword0;
   9438					}
   9439				}
   9440			} else {
   9441				/* First copy command data */
   9442				word0 = readl(phba->MBslimaddr);
   9443			}
   9444			/* Read the HBA Host Attention Register */
   9445			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
   9446				spin_unlock_irqrestore(&phba->hbalock,
   9447						       drvr_flag);
   9448				goto out_not_finished;
   9449			}
   9450		}
   9451
   9452		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
   9453			/* copy results back to user */
   9454			lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
   9455						MAILBOX_CMD_SIZE);
   9456			/* Copy the mailbox extension data */
   9457			if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
   9458				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
   9459						      pmbox->ctx_buf,
   9460						      pmbox->out_ext_byte_len);
   9461			}
   9462		} else {
   9463			/* First copy command data */
   9464			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
   9465						MAILBOX_CMD_SIZE);
   9466			/* Copy the mailbox extension data */
   9467			if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
   9468				lpfc_memcpy_from_slim(
   9469					pmbox->ctx_buf,
   9470					phba->MBslimaddr +
   9471					MAILBOX_HBA_EXT_OFFSET,
   9472					pmbox->out_ext_byte_len);
   9473			}
   9474		}
   9475
   9476		writel(HA_MBATT, phba->HAregaddr);
   9477		readl(phba->HAregaddr); /* flush */
   9478
   9479		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
   9480		status = mbx->mbxStatus;
   9481	}
   9482
   9483	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
   9484	return status;
   9485
   9486out_not_finished:
   9487	if (processing_queue) {
   9488		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
   9489		lpfc_mbox_cmpl_put(phba, pmbox);
   9490	}
   9491	return MBX_NOT_FINISHED;
   9492}
   9493
   9494/**
   9495 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
   9496 * @phba: Pointer to HBA context object.
   9497 *
   9498 * The function blocks the posting of SLI4 asynchronous mailbox commands from
   9499 * the driver internal pending mailbox queue. It will then try to wait out the
   9500 * possible outstanding mailbox command before returning.
   9501 *
   9502 * Returns:
   9503 * 	0 - the outstanding mailbox command completed or none was active.
   9504 * 	1 - the wait for the outstanding mailbox command timed out.
   9505 **/
   9506static int
   9507lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
   9508{
   9509	struct lpfc_sli *psli = &phba->sli;
   9510	LPFC_MBOXQ_t *mboxq;
   9511	int rc = 0;
   9512	unsigned long timeout = 0;
   9513	u32 sli_flag;
   9514	u8 cmd, subsys, opcode;
   9515
   9516	/* Mark the asynchronous mailbox command posting as blocked */
   9517	spin_lock_irq(&phba->hbalock);
   9518	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
   9519	/* Determine how long we might wait for the active mailbox
   9520	 * command to be gracefully completed by firmware.
   9521	 */
   9522	if (phba->sli.mbox_active)
   9523		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
   9524						phba->sli.mbox_active) *
   9525						1000) + jiffies;
   9526	spin_unlock_irq(&phba->hbalock);
   9527
   9528	/* Make sure the mailbox is really active */
   9529	if (timeout)
   9530		lpfc_sli4_process_missed_mbox_completions(phba);
   9531
   9532	/* Wait for the outstanding mailbox command to complete */
   9533	while (phba->sli.mbox_active) {
   9534		/* Check active mailbox complete status every 2ms */
   9535		msleep(2);
   9536		if (time_after(jiffies, timeout)) {
   9537			/* Timeout, mark the outstanding cmd not complete */
   9538
   9539		/* Sanity check that sli.mbox_active has not completed or
   9540		 * been cancelled from another context during the last 2ms
   9541		 * sleep, so take hbalock to be sure before logging.
   9542			 */
   9543			spin_lock_irq(&phba->hbalock);
   9544			if (phba->sli.mbox_active) {
   9545				mboxq = phba->sli.mbox_active;
   9546				cmd = mboxq->u.mb.mbxCommand;
   9547				subsys = lpfc_sli_config_mbox_subsys_get(phba,
   9548									 mboxq);
   9549				opcode = lpfc_sli_config_mbox_opcode_get(phba,
   9550									 mboxq);
   9551				sli_flag = psli->sli_flag;
   9552				spin_unlock_irq(&phba->hbalock);
   9553				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   9554						"2352 Mailbox command x%x "
   9555						"(x%x/x%x) sli_flag x%x could "
   9556						"not complete\n",
   9557						cmd, subsys, opcode,
   9558						sli_flag);
   9559			} else {
   9560				spin_unlock_irq(&phba->hbalock);
   9561			}
   9562
   9563			rc = 1;
   9564			break;
   9565		}
   9566	}
   9567
   9568	/* Cannot cleanly block async mailbox command, fail it */
   9569	if (rc) {
   9570		spin_lock_irq(&phba->hbalock);
   9571		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
   9572		spin_unlock_irq(&phba->hbalock);
   9573	}
   9574	return rc;
   9575}
   9576
   9577/**
   9578 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
   9579 * @phba: Pointer to HBA context object.
   9580 *
   9581 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
   9582 * commands from the driver internal pending mailbox queue. It makes sure
   9583 * that there is no outstanding mailbox command before resuming posting of
   9584 * asynchronous mailbox commands. If, for any reason, there is an
   9585 * outstanding mailbox command, it will try to wait it out before resuming
   9586 * asynchronous mailbox command posting.
   9587 **/
   9588static void
   9589lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
   9590{
   9591	struct lpfc_sli *psli = &phba->sli;
   9592
   9593	spin_lock_irq(&phba->hbalock);
   9594	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
   9595		/* Asynchronous mailbox posting is not blocked, do nothing */
   9596		spin_unlock_irq(&phba->hbalock);
   9597		return;
   9598	}
   9599
   9600	/* The outstanding synchronous mailbox command is guaranteed to complete,
   9601	 * either successfully or by timing out; on a timeout the outstanding
   9602	 * command is always removed, so just unblock posting of async
   9603	 * mailbox commands and resume.
   9604	 */
   9605	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
   9606	spin_unlock_irq(&phba->hbalock);
   9607
   9608	/* wake up worker thread to post asynchronous mailbox command */
   9609	lpfc_worker_wake_up(phba);
   9610}
   9611
   9612/**
   9613 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
   9614 * @phba: Pointer to HBA context object.
   9615 * @mboxq: Pointer to mailbox object.
   9616 *
   9617 * The function waits for the bootstrap mailbox register ready bit from
   9618 * the port for up to the regular mailbox command timeout value.
   9619 * Returns:
   9620 *      0 - no timeout on waiting for bootstrap mailbox register ready.
   9621 *      MBXERR_ERROR - wait for bootstrap mailbox register timed out.
   9622 **/
   9623static int
   9624lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
   9625{
   9626	uint32_t db_ready;
   9627	unsigned long timeout;
   9628	struct lpfc_register bmbx_reg;
   9629
   9630	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
   9631				   * 1000) + jiffies;
   9632
   9633	do {
   9634		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
   9635		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
   9636		if (!db_ready)
   9637			mdelay(2);
   9638
   9639		if (time_after(jiffies, timeout))
   9640			return MBXERR_ERROR;
   9641	} while (!db_ready);
   9642
   9643	return 0;
   9644}
   9645
   9646/**
   9647 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
   9648 * @phba: Pointer to HBA context object.
   9649 * @mboxq: Pointer to mailbox object.
   9650 *
   9651 * The function posts a mailbox to the port.  The mailbox is expected
   9652 * to be completely filled in and ready for the port to operate on it.
   9653 * This routine executes a synchronous completion operation on the
   9654 * mailbox by polling for its completion.
   9655 *
   9656 * The caller must not be holding any locks when calling this routine.
   9657 *
   9658 * Returns:
   9659 *	MBX_SUCCESS - mailbox posted successfully
   9660 *	Any of the MBX error values.
   9661 **/
   9662static int
   9663lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
   9664{
   9665	int rc = MBX_SUCCESS;
   9666	unsigned long iflag;
   9667	uint32_t mcqe_status;
   9668	uint32_t mbx_cmnd;
   9669	struct lpfc_sli *psli = &phba->sli;
   9670	struct lpfc_mqe *mb = &mboxq->u.mqe;
   9671	struct lpfc_bmbx_create *mbox_rgn;
   9672	struct dma_address *dma_address;
   9673
   9674	/*
   9675	 * Only one mailbox can be active to the bootstrap mailbox region
   9676	 * at a time and there is no queueing provided.
   9677	 */
   9678	spin_lock_irqsave(&phba->hbalock, iflag);
   9679	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
   9680		spin_unlock_irqrestore(&phba->hbalock, iflag);
   9681		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   9682				"(%d):2532 Mailbox command x%x (x%x/x%x) "
   9683				"cannot issue Data: x%x x%x\n",
   9684				mboxq->vport ? mboxq->vport->vpi : 0,
   9685				mboxq->u.mb.mbxCommand,
   9686				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
   9687				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
   9688				psli->sli_flag, MBX_POLL);
   9689		return MBXERR_ERROR;
   9690	}
   9691	/* The server grabs the token and owns it until release */
   9692	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
   9693	phba->sli.mbox_active = mboxq;
   9694	spin_unlock_irqrestore(&phba->hbalock, iflag);
   9695
   9696	/* wait for the bootstrap mbox register to become ready */
   9697	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
   9698	if (rc)
   9699		goto exit;
   9700	/*
   9701	 * Initialize the bootstrap memory region to avoid stale data areas
   9702	 * in the mailbox post.  Then copy the caller's mailbox contents to
   9703	 * the bmbx mailbox region.
   9704	 */
   9705	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
   9706	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
   9707	lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
   9708			       sizeof(struct lpfc_mqe));
   9709
   9710	/* Post the high mailbox dma address to the port and wait for ready. */
   9711	dma_address = &phba->sli4_hba.bmbx.dma_address;
   9712	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
   9713
   9714	/* wait for bootstrap mbox register for hi-address write done */
   9715	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
   9716	if (rc)
   9717		goto exit;
   9718
   9719	/* Post the low mailbox dma address to the port. */
   9720	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
   9721
   9722	/* wait for bootstrap mbox register for low address write done */
   9723	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
   9724	if (rc)
   9725		goto exit;
   9726
   9727	/*
   9728	 * Read the CQ to ensure the mailbox has completed.
   9729	 * If so, update the mailbox status so that the upper layers
   9730	 * can complete the request normally.
   9731	 */
   9732	lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
   9733			       sizeof(struct lpfc_mqe));
   9734	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
   9735	lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
   9736			       sizeof(struct lpfc_mcqe));
   9737	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
   9738	/*
   9739	 * When the CQE status indicates a failure and the mailbox status
   9740	 * indicates success then copy the CQE status into the mailbox status
   9741	 * (and prefix it with x4000).
   9742	 */
   9743	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
   9744		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
   9745			bf_set(lpfc_mqe_status, mb,
   9746			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
   9747		rc = MBXERR_ERROR;
   9748	} else
   9749		lpfc_sli4_swap_str(phba, mboxq);
   9750
   9751	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
   9752			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
   9753			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
   9754			" x%x x%x CQ: x%x x%x x%x x%x\n",
   9755			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
   9756			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
   9757			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
   9758			bf_get(lpfc_mqe_status, mb),
   9759			mb->un.mb_words[0], mb->un.mb_words[1],
   9760			mb->un.mb_words[2], mb->un.mb_words[3],
   9761			mb->un.mb_words[4], mb->un.mb_words[5],
   9762			mb->un.mb_words[6], mb->un.mb_words[7],
   9763			mb->un.mb_words[8], mb->un.mb_words[9],
   9764			mb->un.mb_words[10], mb->un.mb_words[11],
   9765			mb->un.mb_words[12], mboxq->mcqe.word0,
   9766			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
   9767			mboxq->mcqe.trailer);
   9768exit:
   9769	/* We are holding the token, no need for the lock when releasing it */
   9770	spin_lock_irqsave(&phba->hbalock, iflag);
   9771	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
   9772	phba->sli.mbox_active = NULL;
   9773	spin_unlock_irqrestore(&phba->hbalock, iflag);
   9774	return rc;
   9775}
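
/*
 * Illustrative sketch only, not part of the upstream driver: the usual way
 * the block/unblock pair and the bootstrap-mailbox post above fit together
 * when a caller needs a synchronous mailbox while interrupt driven
 * (asynchronous) mailbox processing is enabled.  It mirrors the MBX_POLL
 * path of lpfc_sli_issue_mbox_s4() below; __maybe_unused is only there
 * because this sketch has no caller.
 */
static int __maybe_unused
lpfc_example_sync_mbox_post(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;

	/* Stop the worker thread from posting queued async commands and
	 * wait out any mailbox command that is already active.
	 */
	rc = lpfc_sli4_async_mbox_block(phba);
	if (rc)
		return MBXERR_ERROR;

	/* Post synchronously through the bootstrap mailbox region */
	rc = lpfc_sli4_post_sync_mbox(phba, mboxq);

	/* Resume async posting and wake the worker thread */
	lpfc_sli4_async_mbox_unblock(phba);

	return rc;
}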
   9776
   9777/**
   9778 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
   9779 * @phba: Pointer to HBA context object.
   9780 * @mboxq: Pointer to mailbox object.
   9781 * @flag: Flag indicating how the mailbox need to be processed.
   9782 *
   9783 * This function is called by discovery code and HBA management code to submit
   9784 * a mailbox command to firmware with SLI-4 interface spec.
   9785 *
   9786 * Whatever the return code, the caller owns the mailbox command after the
   9787 * function returns.
   9788 **/
   9789static int
   9790lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
   9791		       uint32_t flag)
   9792{
   9793	struct lpfc_sli *psli = &phba->sli;
   9794	unsigned long iflags;
   9795	int rc;
   9796
   9797	/* dump from issue mailbox command if setup */
   9798	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
   9799
   9800	rc = lpfc_mbox_dev_check(phba);
   9801	if (unlikely(rc)) {
   9802		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   9803				"(%d):2544 Mailbox command x%x (x%x/x%x) "
   9804				"cannot issue Data: x%x x%x\n",
   9805				mboxq->vport ? mboxq->vport->vpi : 0,
   9806				mboxq->u.mb.mbxCommand,
   9807				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
   9808				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
   9809				psli->sli_flag, flag);
   9810		goto out_not_finished;
   9811	}
   9812
   9813	/* Detect polling mode and jump to a handler */
   9814	if (!phba->sli4_hba.intr_enable) {
   9815		if (flag == MBX_POLL)
   9816			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
   9817		else
   9818			rc = -EIO;
   9819		if (rc != MBX_SUCCESS)
   9820			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
   9821					"(%d):2541 Mailbox command x%x "
   9822					"(x%x/x%x) failure: "
   9823					"mqe_sta: x%x mcqe_sta: x%x/x%x "
   9824					"Data: x%x x%x\n",
   9825					mboxq->vport ? mboxq->vport->vpi : 0,
   9826					mboxq->u.mb.mbxCommand,
   9827					lpfc_sli_config_mbox_subsys_get(phba,
   9828									mboxq),
   9829					lpfc_sli_config_mbox_opcode_get(phba,
   9830									mboxq),
   9831					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
   9832					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
   9833					bf_get(lpfc_mcqe_ext_status,
   9834					       &mboxq->mcqe),
   9835					psli->sli_flag, flag);
   9836		return rc;
   9837	} else if (flag == MBX_POLL) {
   9838		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
   9839				"(%d):2542 Try to issue mailbox command "
   9840				"x%x (x%x/x%x) synchronously ahead of async "
   9841				"mailbox command queue: x%x x%x\n",
   9842				mboxq->vport ? mboxq->vport->vpi : 0,
   9843				mboxq->u.mb.mbxCommand,
   9844				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
   9845				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
   9846				psli->sli_flag, flag);
   9847		/* Try to block the asynchronous mailbox posting */
   9848		rc = lpfc_sli4_async_mbox_block(phba);
   9849		if (!rc) {
   9850			/* Successfully blocked, now issue sync mbox cmd */
   9851			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
   9852			if (rc != MBX_SUCCESS)
   9853				lpfc_printf_log(phba, KERN_WARNING,
   9854					LOG_MBOX | LOG_SLI,
   9855					"(%d):2597 Sync Mailbox command "
   9856					"x%x (x%x/x%x) failure: "
   9857					"mqe_sta: x%x mcqe_sta: x%x/x%x "
   9858					"Data: x%x x%x\n",
   9859					mboxq->vport ? mboxq->vport->vpi : 0,
   9860					mboxq->u.mb.mbxCommand,
   9861					lpfc_sli_config_mbox_subsys_get(phba,
   9862									mboxq),
   9863					lpfc_sli_config_mbox_opcode_get(phba,
   9864									mboxq),
   9865					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
   9866					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
   9867					bf_get(lpfc_mcqe_ext_status,
   9868					       &mboxq->mcqe),
   9869					psli->sli_flag, flag);
   9870			/* Unblock the async mailbox posting afterward */
   9871			lpfc_sli4_async_mbox_unblock(phba);
   9872		}
   9873		return rc;
   9874	}
   9875
   9876	/* Now, interrupt mode asynchronous mailbox command */
   9877	rc = lpfc_mbox_cmd_check(phba, mboxq);
   9878	if (rc) {
   9879		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   9880				"(%d):2543 Mailbox command x%x (x%x/x%x) "
   9881				"cannot issue Data: x%x x%x\n",
   9882				mboxq->vport ? mboxq->vport->vpi : 0,
   9883				mboxq->u.mb.mbxCommand,
   9884				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
   9885				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
   9886				psli->sli_flag, flag);
   9887		goto out_not_finished;
   9888	}
   9889
   9890	/* Put the mailbox command to the driver internal FIFO */
   9891	psli->slistat.mbox_busy++;
   9892	spin_lock_irqsave(&phba->hbalock, iflags);
   9893	lpfc_mbox_put(phba, mboxq);
   9894	spin_unlock_irqrestore(&phba->hbalock, iflags);
   9895	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
   9896			"(%d):0354 Mbox cmd issue - Enqueue Data: "
   9897			"x%x (x%x/x%x) x%x x%x x%x\n",
   9898			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
   9899			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
   9900			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
   9901			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
   9902			phba->pport->port_state,
   9903			psli->sli_flag, MBX_NOWAIT);
   9904	/* Wake up worker thread to transport mailbox command from head */
   9905	lpfc_worker_wake_up(phba);
   9906
   9907	return MBX_BUSY;
   9908
   9909out_not_finished:
   9910	return MBX_NOT_FINISHED;
   9911}
   9912
   9913/**
   9914 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
   9915 * @phba: Pointer to HBA context object.
   9916 *
   9917 * This function is called by the worker thread to send a mailbox command to
   9918 * SLI4 HBA firmware.
   9919 *
   9920 **/
   9921int
   9922lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
   9923{
   9924	struct lpfc_sli *psli = &phba->sli;
   9925	LPFC_MBOXQ_t *mboxq;
   9926	int rc = MBX_SUCCESS;
   9927	unsigned long iflags;
   9928	struct lpfc_mqe *mqe;
   9929	uint32_t mbx_cmnd;
   9930
   9931	/* Check interrupt mode before posting async mailbox command */
   9932	if (unlikely(!phba->sli4_hba.intr_enable))
   9933		return MBX_NOT_FINISHED;
   9934
   9935	/* Check for mailbox command service token */
   9936	spin_lock_irqsave(&phba->hbalock, iflags);
   9937	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
   9938		spin_unlock_irqrestore(&phba->hbalock, iflags);
   9939		return MBX_NOT_FINISHED;
   9940	}
   9941	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
   9942		spin_unlock_irqrestore(&phba->hbalock, iflags);
   9943		return MBX_NOT_FINISHED;
   9944	}
   9945	if (unlikely(phba->sli.mbox_active)) {
   9946		spin_unlock_irqrestore(&phba->hbalock, iflags);
   9947		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   9948				"0384 There is pending active mailbox cmd\n");
   9949		return MBX_NOT_FINISHED;
   9950	}
   9951	/* Take the mailbox command service token */
   9952	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
   9953
   9954	/* Get the next mailbox command from head of queue */
   9955	mboxq = lpfc_mbox_get(phba);
   9956
   9957	/* If no more mailbox command waiting for post, we're done */
   9958	if (!mboxq) {
   9959		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
   9960		spin_unlock_irqrestore(&phba->hbalock, iflags);
   9961		return MBX_SUCCESS;
   9962	}
   9963	phba->sli.mbox_active = mboxq;
   9964	spin_unlock_irqrestore(&phba->hbalock, iflags);
   9965
   9966	/* Check device readiness for posting mailbox command */
   9967	rc = lpfc_mbox_dev_check(phba);
   9968	if (unlikely(rc))
   9969		/* Driver clean routine will clean up pending mailbox */
   9970		goto out_not_finished;
   9971
   9972	/* Prepare the mbox command to be posted */
   9973	mqe = &mboxq->u.mqe;
   9974	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
   9975
   9976	/* Start timer for the mbox_tmo and log some mailbox post messages */
   9977	mod_timer(&psli->mbox_tmo, (jiffies +
   9978		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
   9979
   9980	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
   9981			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
   9982			"x%x x%x\n",
   9983			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
   9984			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
   9985			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
   9986			phba->pport->port_state, psli->sli_flag);
   9987
   9988	if (mbx_cmnd != MBX_HEARTBEAT) {
   9989		if (mboxq->vport) {
   9990			lpfc_debugfs_disc_trc(mboxq->vport,
   9991				LPFC_DISC_TRC_MBOX_VPORT,
   9992				"MBOX Send vport: cmd:x%x mb:x%x x%x",
   9993				mbx_cmnd, mqe->un.mb_words[0],
   9994				mqe->un.mb_words[1]);
   9995		} else {
   9996			lpfc_debugfs_disc_trc(phba->pport,
   9997				LPFC_DISC_TRC_MBOX,
   9998				"MBOX Send: cmd:x%x mb:x%x x%x",
   9999				mbx_cmnd, mqe->un.mb_words[0],
  10000				mqe->un.mb_words[1]);
  10001		}
  10002	}
  10003	psli->slistat.mbox_cmd++;
  10004
  10005	/* Post the mailbox command to the port */
  10006	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
  10007	if (rc != MBX_SUCCESS) {
  10008		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  10009				"(%d):2533 Mailbox command x%x (x%x/x%x) "
  10010				"cannot issue Data: x%x x%x\n",
  10011				mboxq->vport ? mboxq->vport->vpi : 0,
  10012				mboxq->u.mb.mbxCommand,
  10013				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
  10014				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
  10015				psli->sli_flag, MBX_NOWAIT);
  10016		goto out_not_finished;
  10017	}
  10018
  10019	return rc;
  10020
  10021out_not_finished:
  10022	spin_lock_irqsave(&phba->hbalock, iflags);
  10023	if (phba->sli.mbox_active) {
  10024		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
  10025		__lpfc_mbox_cmpl_put(phba, mboxq);
  10026		/* Release the token */
  10027		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
  10028		phba->sli.mbox_active = NULL;
  10029	}
  10030	spin_unlock_irqrestore(&phba->hbalock, iflags);
  10031
  10032	return MBX_NOT_FINISHED;
  10033}
  10034
  10035/**
  10036 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
  10037 * @phba: Pointer to HBA context object.
  10038 * @pmbox: Pointer to mailbox object.
  10039 * @flag: Flag indicating how the mailbox need to be processed.
  10040 *
  10041 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine, using
  10042 * the API jump table function pointer from the lpfc_hba struct.
  10043 *
  10044 * Whatever the return code, the caller owns the mailbox command after the
  10045 * function returns.
  10046 **/
  10047int
  10048lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
  10049{
  10050	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
  10051}
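
/*
 * Illustrative sketch only, not part of the upstream driver: issuing a
 * mailbox command in polling mode through the jump-table wrapper above.
 * The mempool_alloc()/mempool_free() calls on phba->mbox_mem_pool are an
 * assumption about how callers elsewhere in the driver obtain LPFC_MBOXQ_t
 * objects; they are not defined in this file.
 */
static int __maybe_unused
lpfc_example_poll_heartbeat(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	memset(pmb, 0, sizeof(*pmb));
	pmb->u.mb.mbxCommand = MBX_HEARTBEAT;
	pmb->vport = phba->pport;

	/* MBX_POLL does not return until the command completes or fails;
	 * the call dispatches to lpfc_sli_issue_mbox_s3() or _s4() through
	 * the function pointer set up in lpfc_mbox_api_table_setup().
	 */
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

	mempool_free(pmb, phba->mbox_mem_pool);
	return rc == MBX_SUCCESS ? 0 : -EIO;
}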
  10052
  10053/**
  10054 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
  10055 * @phba: The hba struct for which this call is being executed.
  10056 * @dev_grp: The HBA PCI-Device group number.
  10057 *
  10058 * This routine sets up the mbox interface API function jump table in @phba
  10059 * struct.
  10060 * Returns: 0 - success, -ENODEV - failure.
  10061 **/
  10062int
  10063lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
  10064{
  10065
  10066	switch (dev_grp) {
  10067	case LPFC_PCI_DEV_LP:
  10068		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
  10069		phba->lpfc_sli_handle_slow_ring_event =
  10070				lpfc_sli_handle_slow_ring_event_s3;
  10071		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
  10072		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
  10073		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
  10074		break;
  10075	case LPFC_PCI_DEV_OC:
  10076		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
  10077		phba->lpfc_sli_handle_slow_ring_event =
  10078				lpfc_sli_handle_slow_ring_event_s4;
  10079		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
  10080		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
  10081		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
  10082		break;
  10083	default:
  10084		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10085				"1420 Invalid HBA PCI-device group: 0x%x\n",
  10086				dev_grp);
  10087		return -ENODEV;
  10088	}
  10089	return 0;
  10090}
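
/*
 * Illustrative sketch only, not part of the upstream driver: a probe path
 * would select the jump table once, before any mailbox traffic.  Where
 * dev_grp actually comes from is an assumption here; this file only deals
 * with the two handled groups, LPFC_PCI_DEV_LP and LPFC_PCI_DEV_OC.
 */
static int __maybe_unused
lpfc_example_setup_api(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return rc;	/* -ENODEV for an unknown PCI device group */

	/* From here on lpfc_sli_issue_mbox() dispatches to _s3 or _s4 */
	return 0;
}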
  10091
  10092/**
  10093 * __lpfc_sli_ringtx_put - Add an iocb to the txq
  10094 * @phba: Pointer to HBA context object.
  10095 * @pring: Pointer to driver SLI ring object.
  10096 * @piocb: Pointer to address of newly added command iocb.
  10097 *
  10098 * This function is called with hbalock held for SLI3 ports or
  10099 * the ring lock held for SLI4 ports to add a command
  10100 * iocb to the txq when the SLI layer cannot submit the command iocb
  10101 * to the ring.
  10102 **/
  10103void
  10104__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  10105		    struct lpfc_iocbq *piocb)
  10106{
  10107	if (phba->sli_rev == LPFC_SLI_REV4)
  10108		lockdep_assert_held(&pring->ring_lock);
  10109	else
  10110		lockdep_assert_held(&phba->hbalock);
  10111	/* Insert the caller's iocb in the txq tail for later processing. */
  10112	list_add_tail(&piocb->list, &pring->txq);
  10113}
  10114
  10115/**
  10116 * lpfc_sli_next_iocb - Get the next iocb in the txq
  10117 * @phba: Pointer to HBA context object.
  10118 * @pring: Pointer to driver SLI ring object.
  10119 * @piocb: Pointer to address of newly added command iocb.
  10120 *
  10121 * This function is called with hbalock held before a new
  10122 * iocb is submitted to the firmware. It checks the txq so that
  10123 * any iocbs already queued there are flushed to the firmware
  10124 * before new iocbs are submitted.
  10125 * If there are iocbs in the txq which need to be submitted
  10126 * to firmware, lpfc_sli_next_iocb returns the first element
  10127 * of the txq after dequeuing it from the txq.
  10128 * If there is no iocb in the txq then the function will return
  10129 * *piocb and set *piocb to NULL. The caller needs to check
  10130 * *piocb to find if there are more commands in the txq.
  10131 **/
  10132static struct lpfc_iocbq *
  10133lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  10134		   struct lpfc_iocbq **piocb)
  10135{
  10136	struct lpfc_iocbq * nextiocb;
  10137
  10138	lockdep_assert_held(&phba->hbalock);
  10139
  10140	nextiocb = lpfc_sli_ringtx_get(phba, pring);
  10141	if (!nextiocb) {
  10142		nextiocb = *piocb;
  10143		*piocb = NULL;
  10144	}
  10145
  10146	return nextiocb;
  10147}
  10148
  10149/**
  10150 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
  10151 * @phba: Pointer to HBA context object.
  10152 * @ring_number: SLI ring number to issue iocb on.
  10153 * @piocb: Pointer to command iocb.
  10154 * @flag: Flag indicating if this command can be put into txq.
  10155 *
  10156 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
  10157 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
  10158 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
  10159 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
  10160 * this function allows only iocbs for posting buffers. This function finds
  10161 * next available slot in the command ring and posts the command to the
  10162 * available slot and writes the port attention register to request HBA start
  10163 * processing new iocb. If there is no slot available in the ring and
  10164 * SLI_IOCB_RET_IOCB is not set in flag, the new iocb is added to the txq;
  10165 * otherwise the function returns IOCB_BUSY.
  10166 *
  10167 * This function is called with hbalock held. The function will return success
  10168 * after it successfully submits the iocb to firmware or after adding to the
  10169 * txq.
  10170 **/
  10171static int
  10172__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
  10173		    struct lpfc_iocbq *piocb, uint32_t flag)
  10174{
  10175	struct lpfc_iocbq *nextiocb;
  10176	IOCB_t *iocb;
  10177	struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
  10178
  10179	lockdep_assert_held(&phba->hbalock);
  10180
  10181	if (piocb->cmd_cmpl && (!piocb->vport) &&
  10182	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
  10183	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
  10184		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  10185				"1807 IOCB x%x failed. No vport\n",
  10186				piocb->iocb.ulpCommand);
  10187		dump_stack();
  10188		return IOCB_ERROR;
  10189	}
  10190
  10191
  10192	/* If the PCI channel is in offline state, do not post iocbs. */
  10193	if (unlikely(pci_channel_offline(phba->pcidev)))
  10194		return IOCB_ERROR;
  10195
  10196	/* If HBA has a deferred error attention, fail the iocb. */
  10197	if (unlikely(phba->hba_flag & DEFER_ERATT))
  10198		return IOCB_ERROR;
  10199
  10200	/*
  10201	 * We should never get an IOCB if we are in a < LINK_DOWN state
  10202	 */
  10203	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
  10204		return IOCB_ERROR;
  10205
  10206	/*
  10207	 * Check to see if we are blocking IOCB processing because of a
  10208	 * outstanding event.
  10209	 */
  10210	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
  10211		goto iocb_busy;
  10212
  10213	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
  10214		/*
  10215		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
  10216		 * can be issued if the link is not up.
  10217		 */
  10218		switch (piocb->iocb.ulpCommand) {
  10219		case CMD_GEN_REQUEST64_CR:
  10220		case CMD_GEN_REQUEST64_CX:
  10221			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
  10222				(piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
  10223					FC_RCTL_DD_UNSOL_CMD) ||
  10224				(piocb->iocb.un.genreq64.w5.hcsw.Type !=
  10225					MENLO_TRANSPORT_TYPE))
  10226
  10227				goto iocb_busy;
  10228			break;
  10229		case CMD_QUE_RING_BUF_CN:
  10230		case CMD_QUE_RING_BUF64_CN:
  10231			/*
  10232			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
  10233			 * completion, cmd_cmpl MUST be 0.
  10234			 */
  10235			if (piocb->cmd_cmpl)
  10236				piocb->cmd_cmpl = NULL;
  10237			fallthrough;
  10238		case CMD_CREATE_XRI_CR:
  10239		case CMD_CLOSE_XRI_CN:
  10240		case CMD_CLOSE_XRI_CX:
  10241			break;
  10242		default:
  10243			goto iocb_busy;
  10244		}
  10245
  10246	/*
  10247	 * For FCP commands, we must be in a state where we can process link
  10248	 * attention events.
  10249	 */
  10250	} else if (unlikely(pring->ringno == LPFC_FCP_RING &&
  10251			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
  10252		goto iocb_busy;
  10253	}
  10254
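	/* Drain iocbs already queued on the txq first; lpfc_sli_next_iocb()
	 * hands out the caller's iocb last and NULLs piocb once it has been
	 * consumed, so a piocb that is still non-NULL after this loop means
	 * the new command could not be placed on the ring.
	 */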
  10255	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
  10256	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
  10257		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
  10258
  10259	if (iocb)
  10260		lpfc_sli_update_ring(phba, pring);
  10261	else
  10262		lpfc_sli_update_full_ring(phba, pring);
  10263
  10264	if (!piocb)
  10265		return IOCB_SUCCESS;
  10266
  10267	goto out_busy;
  10268
  10269 iocb_busy:
  10270	pring->stats.iocb_cmd_delay++;
  10271
  10272 out_busy:
  10273
  10274	if (!(flag & SLI_IOCB_RET_IOCB)) {
  10275		__lpfc_sli_ringtx_put(phba, pring, piocb);
  10276		return IOCB_SUCCESS;
  10277	}
  10278
  10279	return IOCB_BUSY;
  10280}
  10281
  10282/**
  10283 * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
  10284 * @phba: Pointer to HBA context object.
  10285 * @ring_number: SLI ring number to issue wqe on.
  10286 * @piocb: Pointer to command iocb.
  10287 * @flag: Flag indicating if this command can be put into txq.
  10288 *
  10289 * __lpfc_sli_issue_fcp_io_s3 is a wrapper function to invoke the lockless
  10290 * function to send an iocb command to an HBA with SLI-3 interface spec.
  10291 *
  10292 * This function takes the hbalock before invoking the lockless version.
  10293 * The function will return success after it successfully submits the iocb to
  10294 * firmware or after adding to the txq.
  10295 **/
  10296static int
  10297__lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
  10298			   struct lpfc_iocbq *piocb, uint32_t flag)
  10299{
  10300	unsigned long iflags;
  10301	int rc;
  10302
  10303	spin_lock_irqsave(&phba->hbalock, iflags);
  10304	rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
  10305	spin_unlock_irqrestore(&phba->hbalock, iflags);
  10306
  10307	return rc;
  10308}
  10309
  10310/**
  10311 * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
  10312 * @phba: Pointer to HBA context object.
  10313 * @ring_number: SLI ring number to issue wqe on.
  10314 * @piocb: Pointer to command iocb.
  10315 * @flag: Flag indicating if this command can be put into txq.
  10316 *
  10317 * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
  10318 * a wqe command to an HBA with SLI-4 interface spec.
  10319 *
  10320 * This function is a lockless version. The function will return success
  10321 * after it successfully submits the wqe to firmware or after adding to the
  10322 * txq.
  10323 **/
  10324static int
  10325__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
  10326			   struct lpfc_iocbq *piocb, uint32_t flag)
  10327{
  10328	int rc;
  10329	struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;
  10330
  10331	lpfc_prep_embed_io(phba, lpfc_cmd);
  10332	rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
  10333	return rc;
  10334}
  10335
  10336void
  10337lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
  10338{
  10339	struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
  10340	union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
  10341	struct sli4_sge *sgl;
  10342
  10343	/* 128 byte wqe support here */
  10344	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
  10345
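	/* Two WQE layouts are built here: with fcp_embed_io the FCP_CMND
	 * payload is copied inline into WQE words 22-29 behind an immediate
	 * BDE, otherwise words 0-2 carry a 64-bit BDE pointing at the
	 * external FCP_CMND SGE described by dma_sgl.
	 */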
  10346	if (phba->fcp_embed_io) {
  10347		struct fcp_cmnd *fcp_cmnd;
  10348		u32 *ptr;
  10349
  10350		fcp_cmnd = lpfc_cmd->fcp_cmnd;
  10351
  10352		/* Word 0-2 - FCP_CMND */
  10353		wqe->generic.bde.tus.f.bdeFlags =
  10354			BUFF_TYPE_BDE_IMMED;
  10355		wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
  10356		wqe->generic.bde.addrHigh = 0;
  10357		wqe->generic.bde.addrLow =  88;  /* Word 22 */
  10358
  10359		bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
  10360		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
  10361
  10362		/* Word 22-29  FCP CMND Payload */
  10363		ptr = &wqe->words[22];
  10364		memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
  10365	} else {
  10366		/* Word 0-2 - Inline BDE */
  10367		wqe->generic.bde.tus.f.bdeFlags =  BUFF_TYPE_BDE_64;
  10368		wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
  10369		wqe->generic.bde.addrHigh = sgl->addr_hi;
  10370		wqe->generic.bde.addrLow =  sgl->addr_lo;
  10371
  10372		/* Word 10 */
  10373		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
  10374		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
  10375	}
  10376
  10377	/* add the VMID tags as per switch response */
  10378	if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
  10379		if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
  10380			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
  10381			bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
  10382					(piocb->vmid_tag.cs_ctl_vmid));
  10383		} else if (phba->cfg_vmid_app_header) {
  10384			bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
  10385			bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
  10386			wqe->words[31] = piocb->vmid_tag.app_id;
  10387		}
  10388	}
  10389}
  10390
  10391/**
  10392 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
  10393 * @phba: Pointer to HBA context object.
  10394 * @ring_number: SLI ring number to issue iocb on.
  10395 * @piocb: Pointer to command iocb.
  10396 * @flag: Flag indicating if this command can be put into txq.
  10397 *
  10398 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
  10399 * an iocb command to an HBA with SLI-4 interface spec.
  10400 *
  10401 * This function is called with ringlock held. The function will return success
  10402 * after it successfully submits the iocb to firmware or after adding to the
  10403 * txq.
  10404 **/
  10405static int
  10406__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
  10407			 struct lpfc_iocbq *piocb, uint32_t flag)
  10408{
  10409	struct lpfc_sglq *sglq;
  10410	union lpfc_wqe128 *wqe;
  10411	struct lpfc_queue *wq;
  10412	struct lpfc_sli_ring *pring;
  10413	u32 ulp_command = get_job_cmnd(phba, piocb);
  10414
  10415	/* Get the WQ */
  10416	if ((piocb->cmd_flag & LPFC_IO_FCP) ||
  10417	    (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
  10418		wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
  10419	} else {
  10420		wq = phba->sli4_hba.els_wq;
  10421	}
  10422
  10423	/* Get corresponding ring */
  10424	pring = wq->pring;
  10425
  10426	/*
  10427	 * The WQE can be either 64 or 128 bytes.
  10428	 */
  10429
  10430	lockdep_assert_held(&pring->ring_lock);
  10431	wqe = &piocb->wqe;
  10432	if (piocb->sli4_xritag == NO_XRI) {
  10433		if (ulp_command == CMD_ABORT_XRI_CX)
  10434			sglq = NULL;
  10435		else {
  10436			sglq = __lpfc_sli_get_els_sglq(phba, piocb);
  10437			if (!sglq) {
  10438				if (!(flag & SLI_IOCB_RET_IOCB)) {
  10439					__lpfc_sli_ringtx_put(phba,
  10440							pring,
  10441							piocb);
  10442					return IOCB_SUCCESS;
  10443				} else {
  10444					return IOCB_BUSY;
  10445				}
  10446			}
  10447		}
  10448	} else if (piocb->cmd_flag &  LPFC_IO_FCP) {
  10449		/* These IO's already have an XRI and a mapped sgl. */
  10450		sglq = NULL;
  10451	}
  10452	else {
  10453		/*
  10454		 * This is a continuation of a command (CX), so this
  10455		 * sglq is on the active list
  10456		 */
  10457		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
  10458		if (!sglq)
  10459			return IOCB_ERROR;
  10460	}
  10461
  10462	if (sglq) {
  10463		piocb->sli4_lxritag = sglq->sli4_lxritag;
  10464		piocb->sli4_xritag = sglq->sli4_xritag;
  10465
  10466		/* ABTS sent by initiator to CT exchange, the
  10467		 * RX_ID field will be filled with the newly
  10468		 * allocated responder XRI.
  10469		 */
  10470		if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
  10471		    piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
  10472			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
  10473			       piocb->sli4_xritag);
  10474
  10475		bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
  10476		       piocb->sli4_xritag);
  10477
  10478		if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
  10479			return IOCB_ERROR;
  10480	}
  10481
  10482	if (lpfc_sli4_wq_put(wq, wqe))
  10483		return IOCB_ERROR;
  10484
  10485	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
  10486
  10487	return 0;
  10488}
  10489
  10490/*
  10491 * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
  10492 *
  10493 * This routine wraps the actual fcp i/o function for issuing a WQE for SLI-4
  10494 * or an IOCB for SLI-3, using the function
  10495 * pointer from the lpfc_hba struct.
  10496 *
  10497 * Return codes:
  10498 * IOCB_ERROR - Error
  10499 * IOCB_SUCCESS - Success
  10500 * IOCB_BUSY - Busy
  10501 **/
  10502int
  10503lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
  10504		      struct lpfc_iocbq *piocb, uint32_t flag)
  10505{
  10506	return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
  10507}
  10508
  10509/*
  10510 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
  10511 *
  10512 * This routine wraps the actual lockless version of the IOCB issuing
  10513 * function, using the pointer from the lpfc_hba struct.
  10514 *
  10515 * Return codes:
  10516 * IOCB_ERROR - Error
  10517 * IOCB_SUCCESS - Success
  10518 * IOCB_BUSY - Busy
  10519 **/
  10520int
  10521__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
  10522		struct lpfc_iocbq *piocb, uint32_t flag)
  10523{
  10524	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
  10525}
  10526
  10527static void
  10528__lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
  10529			       struct lpfc_vport *vport,
  10530			       struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
  10531			       u32 elscmd, u8 tmo, u8 expect_rsp)
  10532{
  10533	struct lpfc_hba *phba = vport->phba;
  10534	IOCB_t *cmd;
  10535
  10536	cmd = &cmdiocbq->iocb;
  10537	memset(cmd, 0, sizeof(*cmd));
  10538
  10539	cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
  10540	cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys);
  10541	cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  10542
  10543	if (expect_rsp) {
  10544		cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
  10545		cmd->un.elsreq64.remoteID = did; /* DID */
  10546		cmd->ulpCommand = CMD_ELS_REQUEST64_CR;
  10547		cmd->ulpTimeout = tmo;
  10548	} else {
  10549		cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
  10550		cmd->un.genreq64.xmit_els_remoteID = did; /* DID */
  10551		cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
  10552	}
  10553	cmd->ulpBdeCount = 1;
  10554	cmd->ulpLe = 1;
  10555	cmd->ulpClass = CLASS3;
  10556
  10557	/* If we have NPIV enabled, we want to send ELS traffic by VPI. */
  10558	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
  10559		if (expect_rsp) {
  10560			cmd->un.elsreq64.myID = vport->fc_myDID;
  10561
  10562			/* For ELS_REQUEST64_CR, use the VPI by default */
  10563			cmd->ulpContext = phba->vpi_ids[vport->vpi];
  10564		}
  10565
  10566		cmd->ulpCt_h = 0;
  10567		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
  10568		if (elscmd == ELS_CMD_ECHO)
  10569			cmd->ulpCt_l = 0; /* context = invalid RPI */
  10570		else
  10571			cmd->ulpCt_l = 1; /* context = VPI */
  10572	}
  10573}
  10574
  10575static void
  10576__lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
  10577			       struct lpfc_vport *vport,
  10578			       struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
  10579			       u32 elscmd, u8 tmo, u8 expect_rsp)
  10580{
  10581	struct lpfc_hba  *phba = vport->phba;
  10582	union lpfc_wqe128 *wqe;
  10583	struct ulp_bde64_le *bde;
  10584	u8 els_id;
  10585
  10586	wqe = &cmdiocbq->wqe;
  10587	memset(wqe, 0, sizeof(*wqe));
  10588
  10589	/* Word 0 - 2 BDE */
  10590	bde = (struct ulp_bde64_le *)&wqe->generic.bde;
  10591	bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
  10592	bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
  10593	bde->type_size = cpu_to_le32(cmd_size);
  10594	bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
  10595
  10596	if (expect_rsp) {
  10597		bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);
  10598
  10599		/* Transfer length */
  10600		wqe->els_req.payload_len = cmd_size;
  10601		wqe->els_req.max_response_payload_len = FCELSSIZE;
  10602
  10603		/* DID */
  10604		bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
  10605
  10606		/* Word 11 - ELS_ID */
  10607		switch (elscmd) {
  10608		case ELS_CMD_PLOGI:
  10609			els_id = LPFC_ELS_ID_PLOGI;
  10610			break;
  10611		case ELS_CMD_FLOGI:
  10612			els_id = LPFC_ELS_ID_FLOGI;
  10613			break;
  10614		case ELS_CMD_LOGO:
  10615			els_id = LPFC_ELS_ID_LOGO;
  10616			break;
  10617		case ELS_CMD_FDISC:
  10618			if (!vport->fc_myDID) {
  10619				els_id = LPFC_ELS_ID_FDISC;
  10620				break;
  10621			}
  10622			fallthrough;
  10623		default:
  10624			els_id = LPFC_ELS_ID_DEFAULT;
  10625			break;
  10626		}
  10627
  10628		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
  10629	} else {
  10630		/* DID */
  10631		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);
  10632
  10633		/* Transfer length */
  10634		wqe->xmit_els_rsp.response_payload_len = cmd_size;
  10635
  10636		bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
  10637		       CMD_XMIT_ELS_RSP64_WQE);
  10638	}
  10639
  10640	bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
  10641	bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag);
  10642	bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
  10643
  10644	/* If we have NPIV enabled, we want to send ELS traffic by VPI.
  10645	 * For SLI4, since the driver controls VPIs we also want to include
  10646	 * all ELS pt2pt protocol traffic as well.
  10647	 */
  10648	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
  10649	    (vport->fc_flag & FC_PT2PT)) {
  10650		if (expect_rsp) {
  10651			bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
  10652
  10653			/* For ELS_REQUEST64_WQE, use the VPI by default */
  10654			bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
  10655			       phba->vpi_ids[vport->vpi]);
  10656		}
  10657
  10658		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
  10659		if (elscmd == ELS_CMD_ECHO)
  10660			bf_set(wqe_ct, &wqe->generic.wqe_com, 0);
  10661		else
  10662			bf_set(wqe_ct, &wqe->generic.wqe_com, 1);
  10663	}
  10664}
  10665
  10666void
  10667lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
  10668			  struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
  10669			  u16 cmd_size, u32 did, u32 elscmd, u8 tmo,
  10670			  u8 expect_rsp)
  10671{
  10672	phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did,
  10673					  elscmd, tmo, expect_rsp);
  10674}
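
/*
 * Illustrative sketch only, not part of the upstream driver: pairing the
 * prep routine above with the iocb issue path.  The caller is assumed to
 * have already allocated cmdiocbq and built the buffer list bmp holding the
 * ELS payload; lpfc_sli_issue_iocb() and LPFC_ELS_RING come from elsewhere
 * in the driver and are used here on that assumption.
 */
static int __maybe_unused
lpfc_example_send_plogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocbq,
			struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did, u8 tmo)
{
	struct lpfc_hba *phba = vport->phba;

	/* expect_rsp = 1 selects the ELS_REQUEST64 form of the command */
	lpfc_sli_prep_els_req_rsp(phba, cmdiocbq, vport, bmp, cmd_size, did,
				  ELS_CMD_PLOGI, tmo, 1);

	/* Queue it on the ELS ring; the caller still owns it on error */
	return lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
}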
  10675
  10676static void
  10677__lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
  10678			   u16 rpi, u32 num_entry, u8 tmo)
  10679{
  10680	IOCB_t *cmd;
  10681
  10682	cmd = &cmdiocbq->iocb;
  10683	memset(cmd, 0, sizeof(*cmd));
  10684
  10685	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
  10686	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
  10687	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  10688	cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64);
  10689
  10690	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
  10691	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
  10692	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
  10693
  10694	cmd->ulpContext = rpi;
  10695	cmd->ulpClass = CLASS3;
  10696	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
  10697	cmd->ulpBdeCount = 1;
  10698	cmd->ulpLe = 1;
  10699	cmd->ulpOwner = OWN_CHIP;
  10700	cmd->ulpTimeout = tmo;
  10701}
  10702
  10703static void
  10704__lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
  10705			   u16 rpi, u32 num_entry, u8 tmo)
  10706{
  10707	union lpfc_wqe128 *cmdwqe;
  10708	struct ulp_bde64_le *bde, *bpl;
  10709	u32 xmit_len = 0, total_len = 0, size, type, i;
  10710
  10711	cmdwqe = &cmdiocbq->wqe;
  10712	memset(cmdwqe, 0, sizeof(*cmdwqe));
  10713
  10714	/* Calculate total_len and xmit_len */
  10715	bpl = (struct ulp_bde64_le *)bmp->virt;
  10716	for (i = 0; i < num_entry; i++) {
  10717		size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
  10718		total_len += size;
  10719	}
  10720	for (i = 0; i < num_entry; i++) {
  10721		size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
  10722		type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK;
  10723		if (type != ULP_BDE64_TYPE_BDE_64)
  10724			break;
  10725		xmit_len += size;
  10726	}
  10727
  10728	/* Words 0 - 2 */
  10729	bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
  10730	bde->addr_low = bpl->addr_low;
  10731	bde->addr_high = bpl->addr_high;
  10732	bde->type_size = cpu_to_le32(xmit_len);
  10733	bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
  10734
  10735	/* Word 3 */
  10736	cmdwqe->gen_req.request_payload_len = xmit_len;
  10737
  10738	/* Word 5 */
  10739	bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT);
  10740	bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
  10741	bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1);
  10742	bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1);
  10743
  10744	/* Word 6 */
  10745	bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi);
  10746
  10747	/* Word 7 */
  10748	bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo);
  10749	bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3);
  10750	bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR);
  10751	bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI);
  10752
  10753	/* Word 12 */
  10754	cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len;
  10755}
  10756
  10757void
  10758lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
  10759		      struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo)
  10760{
  10761	phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
  10762}
  10763
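        /* __lpfc_sli_prep_xmit_seq64_s3 - Build an XMIT_SEQUENCE64 iocb for an
         * SLI-3 port.  The BDL points at the BPL in @bmp, the sequence is
         * described by @rctl, @last_seq and FC_TYPE_CT, and the context and
         * command are chosen from @cr_cx_cmd (_CR uses @rpi, _CX uses @ox_id).
         */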
  10764static void
  10765__lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq,
  10766			      struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
  10767			      u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
  10768{
  10769	IOCB_t *icmd;
  10770
  10771	icmd = &cmdiocbq->iocb;
  10772	memset(icmd, 0, sizeof(*icmd));
  10773
  10774	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
  10775	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
  10776	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  10777	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
  10778	icmd->un.xseq64.w5.hcsw.Fctl = LA;
  10779	if (last_seq)
  10780		icmd->un.xseq64.w5.hcsw.Fctl |= LS;
  10781	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
  10782	icmd->un.xseq64.w5.hcsw.Rctl = rctl;
  10783	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
  10784
  10785	icmd->ulpBdeCount = 1;
  10786	icmd->ulpLe = 1;
  10787	icmd->ulpClass = CLASS3;
  10788
  10789	switch (cr_cx_cmd) {
  10790	case CMD_XMIT_SEQUENCE64_CR:
  10791		icmd->ulpContext = rpi;
  10792		icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
  10793		break;
  10794	case CMD_XMIT_SEQUENCE64_CX:
  10795		icmd->ulpContext = ox_id;
  10796		icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
  10797		break;
  10798	default:
  10799		break;
  10800	}
  10801}
  10802
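        /* __lpfc_sli_prep_xmit_seq64_s4 - Build an XMIT_SEQUENCE64 WQE for an
         * SLI-4 port.  Copies the first BDE from @bmp into words 0-2, sets the
         * sequence control bits, RPI context and received OX_ID, and uses
         * @full_size as the transmit length for libdfc/loopback requests.
         */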
  10803static void
  10804__lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
  10805			      struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
  10806			      u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
  10807{
  10808	union lpfc_wqe128 *wqe;
  10809	struct ulp_bde64 *bpl;
  10810
  10811	wqe = &cmdiocbq->wqe;
  10812	memset(wqe, 0, sizeof(*wqe));
  10813
  10814	/* Words 0 - 2 */
  10815	bpl = (struct ulp_bde64 *)bmp->virt;
  10816	wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
  10817	wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
  10818	wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
  10819
  10820	/* Word 5 */
  10821	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
  10822	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1);
  10823	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
  10824	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl);
  10825	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT);
  10826
  10827	/* Word 6 */
  10828	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi);
  10829
  10830	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
  10831	       CMD_XMIT_SEQUENCE64_WQE);
  10832
  10833	/* Word 7 */
  10834	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
  10835
  10836	/* Word 9 */
  10837	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
  10838
  10839	/* Word 12 */
  10840	if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK))
  10841		wqe->xmit_sequence.xmit_len = full_size;
  10842	else
  10843		wqe->xmit_sequence.xmit_len =
  10844			wqe->xmit_sequence.bde.tus.f.bdeSize;
  10845}
  10846
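        /* lpfc_sli_prep_xmit_seq64 - Prepare an XMIT_SEQUENCE64 command through
         * the SLI-rev specific entry in the jump table.
         */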
  10847void
  10848lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
  10849			 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
  10850			 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
  10851{
  10852	phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry,
  10853					 rctl, last_seq, cr_cx_cmd);
  10854}
  10855
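        /* __lpfc_sli_prep_abort_xri_s3 - Build an abort iocb for an SLI-3 port.
         * Issues CMD_CLOSE_XRI_CN when @ia is set, otherwise an ABTS abort via
         * CMD_ABORT_XRI_CN using the caller's @ulp_class.
         */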
  10856static void
  10857__lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
  10858			     u16 iotag, u8 ulp_class, u16 cqid, bool ia)
  10859{
  10860	IOCB_t *icmd = NULL;
  10861
  10862	icmd = &cmdiocbq->iocb;
  10863	memset(icmd, 0, sizeof(*icmd));
  10864
  10865	/* Word 5 */
  10866	icmd->un.acxri.abortContextTag = ulp_context;
  10867	icmd->un.acxri.abortIoTag = iotag;
  10868
  10869	if (ia) {
  10870		/* Word 7 */
  10871		icmd->ulpCommand = CMD_CLOSE_XRI_CN;
  10872	} else {
  10873		/* Word 3 */
  10874		icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
  10875
  10876		/* Word 7 */
  10877		icmd->ulpClass = ulp_class;
  10878		icmd->ulpCommand = CMD_ABORT_XRI_CN;
  10879	}
  10880
  10881	/* Word 7 */
  10882	icmd->ulpLe = 1;
  10883}
  10884
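        /* __lpfc_sli_prep_abort_xri_s4 - Build an ABORT_XRI WQE for an SLI-4
         * port.  Sets the abort criteria to the XRI tag, propagates @ia, and
         * fills in the abort tag, request tag, QOSD bit and completion queue id.
         */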
  10885static void
  10886__lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
  10887			     u16 iotag, u8 ulp_class, u16 cqid, bool ia)
  10888{
  10889	union lpfc_wqe128 *wqe;
  10890
  10891	wqe = &cmdiocbq->wqe;
  10892	memset(wqe, 0, sizeof(*wqe));
  10893
  10894	/* Word 3 */
  10895	bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
  10896	if (ia)
  10897		bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
  10898	else
  10899		bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
  10900
  10901	/* Word 7 */
  10902	bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE);
  10903
  10904	/* Word 8 */
  10905	wqe->abort_cmd.wqe_com.abort_tag = ulp_context;
  10906
  10907	/* Word 9 */
  10908	bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag);
  10909
  10910	/* Word 10 */
  10911	bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
  10912
  10913	/* Word 11 */
  10914	bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
  10915	bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
  10916}
  10917
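        /* lpfc_sli_prep_abort_xri - Prepare an abort request through the
         * SLI-rev specific entry in the jump table.
         */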
  10918void
  10919lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
  10920			u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
  10921			bool ia)
  10922{
  10923	phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
  10924					cqid, ia);
  10925}
  10926
  10927/**
  10928 * lpfc_sli_api_table_setup - Set up sli api function jump table
  10929 * @phba: The hba struct for which this call is being executed.
  10930 * @dev_grp: The HBA PCI-Device group number.
  10931 *
  10932 * This routine sets up the SLI interface API function jump table in @phba
  10933 * struct.
  10934 * Returns: 0 - success, -ENODEV - failure.
  10935 **/
  10936int
  10937lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
  10938{
  10939
  10940	switch (dev_grp) {
  10941	case LPFC_PCI_DEV_LP:
  10942		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
  10943		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
  10944		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
  10945		phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
  10946		phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
  10947		phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
  10948		phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
  10949		break;
  10950	case LPFC_PCI_DEV_OC:
  10951		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
  10952		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
  10953		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
  10954		phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
  10955		phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
  10956		phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
  10957		phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
  10958		break;
  10959	default:
  10960		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10961				"1419 Invalid HBA PCI-device group: 0x%x\n",
  10962				dev_grp);
  10963		return -ENODEV;
  10964	}
  10965	return 0;
  10966}
  10967
  10968/**
  10969 * lpfc_sli4_calc_ring - Calculates which ring to use
  10970 * @phba: Pointer to HBA context object.
  10971 * @piocb: Pointer to command iocb.
  10972 *
   10973 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
   10974 * hba_wqidx; thus we need to calculate the corresponding ring.
   10975 * Since ABORTS must go on the same WQ as the command they are
   10976 * aborting, we use the command's hba_wqidx.
  10977 */
  10978struct lpfc_sli_ring *
  10979lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
  10980{
  10981	struct lpfc_io_buf *lpfc_cmd;
  10982
  10983	if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
  10984		if (unlikely(!phba->sli4_hba.hdwq))
  10985			return NULL;
  10986		/*
   10987		 * For an abort iocb, hba_wqidx should already
   10988		 * be set up based on which work queue we used.
  10989		 */
  10990		if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
  10991			lpfc_cmd = piocb->io_buf;
  10992			piocb->hba_wqidx = lpfc_cmd->hdwq_no;
  10993		}
  10994		return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
  10995	} else {
  10996		if (unlikely(!phba->sli4_hba.els_wq))
  10997			return NULL;
  10998		piocb->hba_wqidx = 0;
  10999		return phba->sli4_hba.els_wq->pring;
  11000	}
  11001}
  11002
  11003/**
  11004 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
  11005 * @phba: Pointer to HBA context object.
  11006 * @ring_number: Ring number
  11007 * @piocb: Pointer to command iocb.
  11008 * @flag: Flag indicating if this command can be put into txq.
  11009 *
   11010 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb. It
   11011 * takes the appropriate lock (the ring_lock for SLI4, otherwise the
   11012 * hbalock), calls __lpfc_sli_issue_iocb, and returns the error code
   11013 * returned by __lpfc_sli_issue_iocb. This wrapper is used by
   11014 * functions which do not hold the hbalock.
  11015 **/
  11016int
  11017lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
  11018		    struct lpfc_iocbq *piocb, uint32_t flag)
  11019{
  11020	struct lpfc_sli_ring *pring;
  11021	struct lpfc_queue *eq;
  11022	unsigned long iflags;
  11023	int rc;
  11024
  11025	/* If the PCI channel is in offline state, do not post iocbs. */
  11026	if (unlikely(pci_channel_offline(phba->pcidev)))
  11027		return IOCB_ERROR;
  11028
  11029	if (phba->sli_rev == LPFC_SLI_REV4) {
  11030		lpfc_sli_prep_wqe(phba, piocb);
  11031
  11032		eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
  11033
  11034		pring = lpfc_sli4_calc_ring(phba, piocb);
  11035		if (unlikely(pring == NULL))
  11036			return IOCB_ERROR;
  11037
  11038		spin_lock_irqsave(&pring->ring_lock, iflags);
  11039		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
  11040		spin_unlock_irqrestore(&pring->ring_lock, iflags);
  11041
  11042		lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
  11043	} else {
  11044		/* For now, SLI2/3 will still use hbalock */
  11045		spin_lock_irqsave(&phba->hbalock, iflags);
  11046		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
  11047		spin_unlock_irqrestore(&phba->hbalock, iflags);
  11048	}
  11049	return rc;
  11050}
  11051
  11052/**
  11053 * lpfc_extra_ring_setup - Extra ring setup function
  11054 * @phba: Pointer to HBA context object.
  11055 *
   11056 * This function is called while the driver attaches to the
   11057 * HBA to set up the extra ring. The extra ring is used
   11058 * only when the driver needs to support target mode functionality
   11059 * or IP over FC functionality.
  11060 *
  11061 * This function is called with no lock held. SLI3 only.
  11062 **/
  11063static int
  11064lpfc_extra_ring_setup( struct lpfc_hba *phba)
  11065{
  11066	struct lpfc_sli *psli;
  11067	struct lpfc_sli_ring *pring;
  11068
  11069	psli = &phba->sli;
  11070
  11071	/* Adjust cmd/rsp ring iocb entries more evenly */
  11072
  11073	/* Take some away from the FCP ring */
  11074	pring = &psli->sli3_ring[LPFC_FCP_RING];
  11075	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
  11076	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
  11077	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
  11078	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
  11079
  11080	/* and give them to the extra ring */
  11081	pring = &psli->sli3_ring[LPFC_EXTRA_RING];
  11082
  11083	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
  11084	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
  11085	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
  11086	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
  11087
  11088	/* Setup default profile for this ring */
  11089	pring->iotag_max = 4096;
  11090	pring->num_mask = 1;
  11091	pring->prt[0].profile = 0;      /* Mask 0 */
  11092	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
  11093	pring->prt[0].type = phba->cfg_multi_ring_type;
  11094	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
  11095	return 0;
  11096}
  11097
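        /* lpfc_sli_post_recovery_event - Queue an LPFC_EVT_RECOVER_PORT event
         * for @ndlp on the driver worker thread.  A node reference is taken and
         * held until the queued work is processed, then the worker is woken up.
         */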
  11098static void
  11099lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
  11100			     struct lpfc_nodelist *ndlp)
  11101{
  11102	unsigned long iflags;
  11103	struct lpfc_work_evt  *evtp = &ndlp->recovery_evt;
  11104
  11105	spin_lock_irqsave(&phba->hbalock, iflags);
  11106	if (!list_empty(&evtp->evt_listp)) {
  11107		spin_unlock_irqrestore(&phba->hbalock, iflags);
  11108		return;
  11109	}
  11110
  11111	/* Incrementing the reference count until the queued work is done. */
  11112	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
  11113	if (!evtp->evt_arg1) {
  11114		spin_unlock_irqrestore(&phba->hbalock, iflags);
  11115		return;
  11116	}
  11117	evtp->evt = LPFC_EVT_RECOVER_PORT;
  11118	list_add_tail(&evtp->evt_listp, &phba->work_list);
  11119	spin_unlock_irqrestore(&phba->hbalock, iflags);
  11120
  11121	lpfc_worker_wake_up(phba);
  11122}
  11123
  11124/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
  11125 * @phba: Pointer to HBA context object.
  11126 * @iocbq: Pointer to iocb object.
  11127 *
  11128 * The async_event handler calls this routine when it receives
  11129 * an ASYNC_STATUS_CN event from the port.  The port generates
  11130 * this event when an Abort Sequence request to an rport fails
  11131 * twice in succession.  The abort could be originated by the
  11132 * driver or by the port.  The ABTS could have been for an ELS
  11133 * or FCP IO.  The port only generates this event when an ABTS
  11134 * fails to complete after one retry.
  11135 */
  11136static void
  11137lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
  11138			  struct lpfc_iocbq *iocbq)
  11139{
  11140	struct lpfc_nodelist *ndlp = NULL;
  11141	uint16_t rpi = 0, vpi = 0;
  11142	struct lpfc_vport *vport = NULL;
  11143
  11144	/* The rpi in the ulpContext is vport-sensitive. */
  11145	vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
  11146	rpi = iocbq->iocb.ulpContext;
  11147
  11148	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  11149			"3092 Port generated ABTS async event "
  11150			"on vpi %d rpi %d status 0x%x\n",
  11151			vpi, rpi, iocbq->iocb.ulpStatus);
  11152
  11153	vport = lpfc_find_vport_by_vpid(phba, vpi);
  11154	if (!vport)
  11155		goto err_exit;
  11156	ndlp = lpfc_findnode_rpi(vport, rpi);
  11157	if (!ndlp)
  11158		goto err_exit;
  11159
  11160	if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
  11161		lpfc_sli_abts_recover_port(vport, ndlp);
  11162	return;
  11163
  11164 err_exit:
  11165	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  11166			"3095 Event Context not found, no "
  11167			"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
  11168			vpi, rpi, iocbq->iocb.ulpStatus,
  11169			iocbq->iocb.ulpContext);
  11170}
  11171
  11172/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
  11173 * @phba: pointer to HBA context object.
  11174 * @ndlp: nodelist pointer for the impacted rport.
  11175 * @axri: pointer to the wcqe containing the failed exchange.
  11176 *
  11177 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
  11178 * port.  The port generates this event when an abort exchange request to an
  11179 * rport fails twice in succession with no reply.  The abort could be originated
  11180 * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
  11181 */
  11182void
  11183lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
  11184			   struct lpfc_nodelist *ndlp,
  11185			   struct sli4_wcqe_xri_aborted *axri)
  11186{
  11187	uint32_t ext_status = 0;
  11188
  11189	if (!ndlp) {
  11190		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  11191				"3115 Node Context not found, driver "
  11192				"ignoring abts err event\n");
  11193		return;
  11194	}
  11195
  11196	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  11197			"3116 Port generated FCP XRI ABORT event on "
  11198			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
  11199			ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
  11200			bf_get(lpfc_wcqe_xa_xri, axri),
  11201			bf_get(lpfc_wcqe_xa_status, axri),
  11202			axri->parameter);
  11203
  11204	/*
  11205	 * Catch the ABTS protocol failure case.  Older OCe FW releases returned
  11206	 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
  11207	 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
  11208	 */
  11209	ext_status = axri->parameter & IOERR_PARAM_MASK;
  11210	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
  11211	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
  11212		lpfc_sli_post_recovery_event(phba, ndlp);
  11213}
  11214
  11215/**
  11216 * lpfc_sli_async_event_handler - ASYNC iocb handler function
  11217 * @phba: Pointer to HBA context object.
  11218 * @pring: Pointer to driver SLI ring object.
  11219 * @iocbq: Pointer to iocb object.
  11220 *
  11221 * This function is called by the slow ring event handler
  11222 * function when there is an ASYNC event iocb in the ring.
  11223 * This function is called with no lock held.
  11224 * Currently this function handles only temperature related
  11225 * ASYNC events. The function decodes the temperature sensor
  11226 * event message and posts events for the management applications.
  11227 **/
  11228static void
  11229lpfc_sli_async_event_handler(struct lpfc_hba * phba,
  11230	struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
  11231{
  11232	IOCB_t *icmd;
  11233	uint16_t evt_code;
  11234	struct temp_event temp_event_data;
  11235	struct Scsi_Host *shost;
  11236	uint32_t *iocb_w;
  11237
  11238	icmd = &iocbq->iocb;
  11239	evt_code = icmd->un.asyncstat.evt_code;
  11240
  11241	switch (evt_code) {
  11242	case ASYNC_TEMP_WARN:
  11243	case ASYNC_TEMP_SAFE:
  11244		temp_event_data.data = (uint32_t) icmd->ulpContext;
  11245		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
  11246		if (evt_code == ASYNC_TEMP_WARN) {
  11247			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
  11248			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  11249				"0347 Adapter is very hot, please take "
  11250				"corrective action. temperature : %d Celsius\n",
  11251				(uint32_t) icmd->ulpContext);
  11252		} else {
  11253			temp_event_data.event_code = LPFC_NORMAL_TEMP;
  11254			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  11255				"0340 Adapter temperature is OK now. "
  11256				"temperature : %d Celsius\n",
  11257				(uint32_t) icmd->ulpContext);
  11258		}
  11259
  11260		/* Send temperature change event to applications */
  11261		shost = lpfc_shost_from_vport(phba->pport);
  11262		fc_host_post_vendor_event(shost, fc_get_event_number(),
  11263			sizeof(temp_event_data), (char *) &temp_event_data,
  11264			LPFC_NL_VENDOR_ID);
  11265		break;
  11266	case ASYNC_STATUS_CN:
  11267		lpfc_sli_abts_err_handler(phba, iocbq);
  11268		break;
  11269	default:
  11270		iocb_w = (uint32_t *) icmd;
  11271		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  11272			"0346 Ring %d handler: unexpected ASYNC_STATUS"
  11273			" evt_code 0x%x\n"
  11274			"W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
  11275			"W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
  11276			"W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
  11277			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
  11278			pring->ringno, icmd->un.asyncstat.evt_code,
  11279			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
  11280			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
  11281			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
  11282			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
  11283
  11284		break;
  11285	}
  11286}
  11287
  11288
  11289/**
  11290 * lpfc_sli4_setup - SLI ring setup function
  11291 * @phba: Pointer to HBA context object.
  11292 *
   11293 * lpfc_sli4_setup sets up the unsolicited receive masks (ELS and CT)
   11294 * for the ELS work queue ring of the SLI-4 interface. This function is
   11295 * called while the driver attaches to the HBA and before the
   11296 * interrupts are enabled, so there is no need for locking.
  11297 *
  11298 * This function always returns 0.
  11299 **/
  11300int
  11301lpfc_sli4_setup(struct lpfc_hba *phba)
  11302{
  11303	struct lpfc_sli_ring *pring;
  11304
  11305	pring = phba->sli4_hba.els_wq->pring;
  11306	pring->num_mask = LPFC_MAX_RING_MASK;
  11307	pring->prt[0].profile = 0;	/* Mask 0 */
  11308	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
  11309	pring->prt[0].type = FC_TYPE_ELS;
  11310	pring->prt[0].lpfc_sli_rcv_unsol_event =
  11311	    lpfc_els_unsol_event;
  11312	pring->prt[1].profile = 0;	/* Mask 1 */
  11313	pring->prt[1].rctl = FC_RCTL_ELS_REP;
  11314	pring->prt[1].type = FC_TYPE_ELS;
  11315	pring->prt[1].lpfc_sli_rcv_unsol_event =
  11316	    lpfc_els_unsol_event;
  11317	pring->prt[2].profile = 0;	/* Mask 2 */
  11318	/* NameServer Inquiry */
  11319	pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
  11320	/* NameServer */
  11321	pring->prt[2].type = FC_TYPE_CT;
  11322	pring->prt[2].lpfc_sli_rcv_unsol_event =
  11323	    lpfc_ct_unsol_event;
  11324	pring->prt[3].profile = 0;	/* Mask 3 */
  11325	/* NameServer response */
  11326	pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
  11327	/* NameServer */
  11328	pring->prt[3].type = FC_TYPE_CT;
  11329	pring->prt[3].lpfc_sli_rcv_unsol_event =
  11330	    lpfc_ct_unsol_event;
  11331	return 0;
  11332}
  11333
  11334/**
  11335 * lpfc_sli_setup - SLI ring setup function
  11336 * @phba: Pointer to HBA context object.
  11337 *
   11338 * lpfc_sli_setup sets up the rings of the SLI interface with the
   11339 * number of iocbs per ring and the iotags. This function is
   11340 * called while the driver attaches to the HBA and before the
   11341 * interrupts are enabled, so there is no need for locking.
  11342 *
  11343 * This function always returns 0. SLI3 only.
  11344 **/
  11345int
  11346lpfc_sli_setup(struct lpfc_hba *phba)
  11347{
  11348	int i, totiocbsize = 0;
  11349	struct lpfc_sli *psli = &phba->sli;
  11350	struct lpfc_sli_ring *pring;
  11351
  11352	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
  11353	psli->sli_flag = 0;
  11354
  11355	psli->iocbq_lookup = NULL;
  11356	psli->iocbq_lookup_len = 0;
  11357	psli->last_iotag = 0;
  11358
  11359	for (i = 0; i < psli->num_rings; i++) {
  11360		pring = &psli->sli3_ring[i];
  11361		switch (i) {
  11362		case LPFC_FCP_RING:	/* ring 0 - FCP */
  11363			/* numCiocb and numRiocb are used in config_port */
  11364			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
  11365			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
  11366			pring->sli.sli3.numCiocb +=
  11367				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
  11368			pring->sli.sli3.numRiocb +=
  11369				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
  11370			pring->sli.sli3.numCiocb +=
  11371				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
  11372			pring->sli.sli3.numRiocb +=
  11373				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
  11374			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
  11375							SLI3_IOCB_CMD_SIZE :
  11376							SLI2_IOCB_CMD_SIZE;
  11377			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
  11378							SLI3_IOCB_RSP_SIZE :
  11379							SLI2_IOCB_RSP_SIZE;
  11380			pring->iotag_ctr = 0;
  11381			pring->iotag_max =
  11382			    (phba->cfg_hba_queue_depth * 2);
  11383			pring->fast_iotag = pring->iotag_max;
  11384			pring->num_mask = 0;
  11385			break;
  11386		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
  11387			/* numCiocb and numRiocb are used in config_port */
  11388			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
  11389			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
  11390			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
  11391							SLI3_IOCB_CMD_SIZE :
  11392							SLI2_IOCB_CMD_SIZE;
  11393			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
  11394							SLI3_IOCB_RSP_SIZE :
  11395							SLI2_IOCB_RSP_SIZE;
  11396			pring->iotag_max = phba->cfg_hba_queue_depth;
  11397			pring->num_mask = 0;
  11398			break;
  11399		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
  11400			/* numCiocb and numRiocb are used in config_port */
  11401			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
  11402			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
  11403			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
  11404							SLI3_IOCB_CMD_SIZE :
  11405							SLI2_IOCB_CMD_SIZE;
  11406			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
  11407							SLI3_IOCB_RSP_SIZE :
  11408							SLI2_IOCB_RSP_SIZE;
  11409			pring->fast_iotag = 0;
  11410			pring->iotag_ctr = 0;
  11411			pring->iotag_max = 4096;
  11412			pring->lpfc_sli_rcv_async_status =
  11413				lpfc_sli_async_event_handler;
  11414			pring->num_mask = LPFC_MAX_RING_MASK;
  11415			pring->prt[0].profile = 0;	/* Mask 0 */
  11416			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
  11417			pring->prt[0].type = FC_TYPE_ELS;
  11418			pring->prt[0].lpfc_sli_rcv_unsol_event =
  11419			    lpfc_els_unsol_event;
  11420			pring->prt[1].profile = 0;	/* Mask 1 */
  11421			pring->prt[1].rctl = FC_RCTL_ELS_REP;
  11422			pring->prt[1].type = FC_TYPE_ELS;
  11423			pring->prt[1].lpfc_sli_rcv_unsol_event =
  11424			    lpfc_els_unsol_event;
  11425			pring->prt[2].profile = 0;	/* Mask 2 */
  11426			/* NameServer Inquiry */
  11427			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
  11428			/* NameServer */
  11429			pring->prt[2].type = FC_TYPE_CT;
  11430			pring->prt[2].lpfc_sli_rcv_unsol_event =
  11431			    lpfc_ct_unsol_event;
  11432			pring->prt[3].profile = 0;	/* Mask 3 */
  11433			/* NameServer response */
  11434			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
  11435			/* NameServer */
  11436			pring->prt[3].type = FC_TYPE_CT;
  11437			pring->prt[3].lpfc_sli_rcv_unsol_event =
  11438			    lpfc_ct_unsol_event;
  11439			break;
  11440		}
  11441		totiocbsize += (pring->sli.sli3.numCiocb *
  11442			pring->sli.sli3.sizeCiocb) +
  11443			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
  11444	}
  11445	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
  11446		/* Too many cmd / rsp ring entries in SLI2 SLIM */
  11447		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
  11448		       "SLI2 SLIM Data: x%x x%lx\n",
  11449		       phba->brd_no, totiocbsize,
  11450		       (unsigned long) MAX_SLIM_IOCB_SIZE);
  11451	}
  11452	if (phba->cfg_multi_ring_support == 2)
  11453		lpfc_extra_ring_setup(phba);
  11454
  11455	return 0;
  11456}
  11457
  11458/**
  11459 * lpfc_sli4_queue_init - Queue initialization function
  11460 * @phba: Pointer to HBA context object.
  11461 *
  11462 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
  11463 * ring. This function also initializes ring indices of each ring.
  11464 * This function is called during the initialization of the SLI
  11465 * interface of an HBA.
   11466 * This function is called with no lock held and does not return
   11467 * a value.
  11468 **/
  11469void
  11470lpfc_sli4_queue_init(struct lpfc_hba *phba)
  11471{
  11472	struct lpfc_sli *psli;
  11473	struct lpfc_sli_ring *pring;
  11474	int i;
  11475
  11476	psli = &phba->sli;
  11477	spin_lock_irq(&phba->hbalock);
  11478	INIT_LIST_HEAD(&psli->mboxq);
  11479	INIT_LIST_HEAD(&psli->mboxq_cmpl);
   11480	/* Initialize list headers for txq and txcmplq as doubly linked lists */
  11481	for (i = 0; i < phba->cfg_hdw_queue; i++) {
  11482		pring = phba->sli4_hba.hdwq[i].io_wq->pring;
  11483		pring->flag = 0;
  11484		pring->ringno = LPFC_FCP_RING;
  11485		pring->txcmplq_cnt = 0;
  11486		INIT_LIST_HEAD(&pring->txq);
  11487		INIT_LIST_HEAD(&pring->txcmplq);
  11488		INIT_LIST_HEAD(&pring->iocb_continueq);
  11489		spin_lock_init(&pring->ring_lock);
  11490	}
  11491	pring = phba->sli4_hba.els_wq->pring;
  11492	pring->flag = 0;
  11493	pring->ringno = LPFC_ELS_RING;
  11494	pring->txcmplq_cnt = 0;
  11495	INIT_LIST_HEAD(&pring->txq);
  11496	INIT_LIST_HEAD(&pring->txcmplq);
  11497	INIT_LIST_HEAD(&pring->iocb_continueq);
  11498	spin_lock_init(&pring->ring_lock);
  11499
  11500	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
  11501		pring = phba->sli4_hba.nvmels_wq->pring;
  11502		pring->flag = 0;
  11503		pring->ringno = LPFC_ELS_RING;
  11504		pring->txcmplq_cnt = 0;
  11505		INIT_LIST_HEAD(&pring->txq);
  11506		INIT_LIST_HEAD(&pring->txcmplq);
  11507		INIT_LIST_HEAD(&pring->iocb_continueq);
  11508		spin_lock_init(&pring->ring_lock);
  11509	}
  11510
  11511	spin_unlock_irq(&phba->hbalock);
  11512}
  11513
  11514/**
  11515 * lpfc_sli_queue_init - Queue initialization function
  11516 * @phba: Pointer to HBA context object.
  11517 *
  11518 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
  11519 * ring. This function also initializes ring indices of each ring.
  11520 * This function is called during the initialization of the SLI
  11521 * interface of an HBA.
   11522 * This function is called with no lock held and does not return
   11523 * a value.
  11524 **/
  11525void
  11526lpfc_sli_queue_init(struct lpfc_hba *phba)
  11527{
  11528	struct lpfc_sli *psli;
  11529	struct lpfc_sli_ring *pring;
  11530	int i;
  11531
  11532	psli = &phba->sli;
  11533	spin_lock_irq(&phba->hbalock);
  11534	INIT_LIST_HEAD(&psli->mboxq);
  11535	INIT_LIST_HEAD(&psli->mboxq_cmpl);
   11536	/* Initialize list headers for txq and txcmplq as doubly linked lists */
  11537	for (i = 0; i < psli->num_rings; i++) {
  11538		pring = &psli->sli3_ring[i];
  11539		pring->ringno = i;
  11540		pring->sli.sli3.next_cmdidx  = 0;
  11541		pring->sli.sli3.local_getidx = 0;
  11542		pring->sli.sli3.cmdidx = 0;
  11543		INIT_LIST_HEAD(&pring->iocb_continueq);
  11544		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
  11545		INIT_LIST_HEAD(&pring->postbufq);
  11546		pring->flag = 0;
  11547		INIT_LIST_HEAD(&pring->txq);
  11548		INIT_LIST_HEAD(&pring->txcmplq);
  11549		spin_lock_init(&pring->ring_lock);
  11550	}
  11551	spin_unlock_irq(&phba->hbalock);
  11552}
  11553
  11554/**
  11555 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
  11556 * @phba: Pointer to HBA context object.
  11557 *
  11558 * This routine flushes the mailbox command subsystem. It will unconditionally
  11559 * flush all the mailbox commands in the three possible stages in the mailbox
  11560 * command sub-system: pending mailbox command queue; the outstanding mailbox
   11561 * command; and completed mailbox command queue. It is the caller's responsibility
  11562 * to make sure that the driver is in the proper state to flush the mailbox
  11563 * command sub-system. Namely, the posting of mailbox commands into the
  11564 * pending mailbox command queue from the various clients must be stopped;
   11565 * either the HBA is in a state in which it will never work on the outstanding
   11566 * mailbox command (such as in EEH or ERATT conditions), or the outstanding
  11567 * mailbox command has been completed.
  11568 **/
  11569static void
  11570lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
  11571{
  11572	LIST_HEAD(completions);
  11573	struct lpfc_sli *psli = &phba->sli;
  11574	LPFC_MBOXQ_t *pmb;
  11575	unsigned long iflag;
  11576
  11577	/* Disable softirqs, including timers from obtaining phba->hbalock */
  11578	local_bh_disable();
  11579
  11580	/* Flush all the mailbox commands in the mbox system */
  11581	spin_lock_irqsave(&phba->hbalock, iflag);
  11582
  11583	/* The pending mailbox command queue */
  11584	list_splice_init(&phba->sli.mboxq, &completions);
  11585	/* The outstanding active mailbox command */
  11586	if (psli->mbox_active) {
  11587		list_add_tail(&psli->mbox_active->list, &completions);
  11588		psli->mbox_active = NULL;
  11589		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
  11590	}
  11591	/* The completed mailbox command queue */
  11592	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
  11593	spin_unlock_irqrestore(&phba->hbalock, iflag);
  11594
  11595	/* Enable softirqs again, done with phba->hbalock */
  11596	local_bh_enable();
  11597
  11598	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
  11599	while (!list_empty(&completions)) {
  11600		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
  11601		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
  11602		if (pmb->mbox_cmpl)
  11603			pmb->mbox_cmpl(phba, pmb);
  11604	}
  11605}
  11606
  11607/**
  11608 * lpfc_sli_host_down - Vport cleanup function
  11609 * @vport: Pointer to virtual port object.
  11610 *
  11611 * lpfc_sli_host_down is called to clean up the resources
  11612 * associated with a vport before destroying virtual
  11613 * port data structures.
   11614 * This function does the following operations:
  11615 * - Free discovery resources associated with this virtual
  11616 *   port.
  11617 * - Free iocbs associated with this virtual port in
  11618 *   the txq.
  11619 * - Send abort for all iocb commands associated with this
  11620 *   vport in txcmplq.
  11621 *
  11622 * This function is called with no lock held and always returns 1.
  11623 **/
  11624int
  11625lpfc_sli_host_down(struct lpfc_vport *vport)
  11626{
  11627	LIST_HEAD(completions);
  11628	struct lpfc_hba *phba = vport->phba;
  11629	struct lpfc_sli *psli = &phba->sli;
  11630	struct lpfc_queue *qp = NULL;
  11631	struct lpfc_sli_ring *pring;
  11632	struct lpfc_iocbq *iocb, *next_iocb;
  11633	int i;
  11634	unsigned long flags = 0;
  11635	uint16_t prev_pring_flag;
  11636
  11637	lpfc_cleanup_discovery_resources(vport);
  11638
  11639	spin_lock_irqsave(&phba->hbalock, flags);
  11640
  11641	/*
  11642	 * Error everything on the txq since these iocbs
  11643	 * have not been given to the FW yet.
  11644	 * Also issue ABTS for everything on the txcmplq
  11645	 */
  11646	if (phba->sli_rev != LPFC_SLI_REV4) {
  11647		for (i = 0; i < psli->num_rings; i++) {
  11648			pring = &psli->sli3_ring[i];
  11649			prev_pring_flag = pring->flag;
  11650			/* Only slow rings */
  11651			if (pring->ringno == LPFC_ELS_RING) {
  11652				pring->flag |= LPFC_DEFERRED_RING_EVENT;
  11653				/* Set the lpfc data pending flag */
  11654				set_bit(LPFC_DATA_READY, &phba->data_flags);
  11655			}
  11656			list_for_each_entry_safe(iocb, next_iocb,
  11657						 &pring->txq, list) {
  11658				if (iocb->vport != vport)
  11659					continue;
  11660				list_move_tail(&iocb->list, &completions);
  11661			}
  11662			list_for_each_entry_safe(iocb, next_iocb,
  11663						 &pring->txcmplq, list) {
  11664				if (iocb->vport != vport)
  11665					continue;
  11666				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
  11667							   NULL);
  11668			}
  11669			pring->flag = prev_pring_flag;
  11670		}
  11671	} else {
  11672		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
  11673			pring = qp->pring;
  11674			if (!pring)
  11675				continue;
  11676			if (pring == phba->sli4_hba.els_wq->pring) {
  11677				pring->flag |= LPFC_DEFERRED_RING_EVENT;
  11678				/* Set the lpfc data pending flag */
  11679				set_bit(LPFC_DATA_READY, &phba->data_flags);
  11680			}
  11681			prev_pring_flag = pring->flag;
  11682			spin_lock(&pring->ring_lock);
  11683			list_for_each_entry_safe(iocb, next_iocb,
  11684						 &pring->txq, list) {
  11685				if (iocb->vport != vport)
  11686					continue;
  11687				list_move_tail(&iocb->list, &completions);
  11688			}
  11689			spin_unlock(&pring->ring_lock);
  11690			list_for_each_entry_safe(iocb, next_iocb,
  11691						 &pring->txcmplq, list) {
  11692				if (iocb->vport != vport)
  11693					continue;
  11694				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
  11695							   NULL);
  11696			}
  11697			pring->flag = prev_pring_flag;
  11698		}
  11699	}
  11700	spin_unlock_irqrestore(&phba->hbalock, flags);
  11701
  11702	/* Make sure HBA is alive */
  11703	lpfc_issue_hb_tmo(phba);
  11704
  11705	/* Cancel all the IOCBs from the completions list */
  11706	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
  11707			      IOERR_SLI_DOWN);
  11708	return 1;
  11709}
  11710
  11711/**
  11712 * lpfc_sli_hba_down - Resource cleanup function for the HBA
  11713 * @phba: Pointer to HBA context object.
  11714 *
   11715 * This function cleans up all iocbs, buffers, and mailbox commands
  11716 * while shutting down the HBA. This function is called with no
  11717 * lock held and always returns 1.
  11718 * This function does the following to cleanup driver resources:
  11719 * - Free discovery resources for each virtual port
  11720 * - Cleanup any pending fabric iocbs
  11721 * - Iterate through the iocb txq and free each entry
  11722 *   in the list.
  11723 * - Free up any buffer posted to the HBA
  11724 * - Free mailbox commands in the mailbox queue.
  11725 **/
  11726int
  11727lpfc_sli_hba_down(struct lpfc_hba *phba)
  11728{
  11729	LIST_HEAD(completions);
  11730	struct lpfc_sli *psli = &phba->sli;
  11731	struct lpfc_queue *qp = NULL;
  11732	struct lpfc_sli_ring *pring;
  11733	struct lpfc_dmabuf *buf_ptr;
  11734	unsigned long flags = 0;
  11735	int i;
  11736
  11737	/* Shutdown the mailbox command sub-system */
  11738	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
  11739
  11740	lpfc_hba_down_prep(phba);
  11741
  11742	/* Disable softirqs, including timers from obtaining phba->hbalock */
  11743	local_bh_disable();
  11744
  11745	lpfc_fabric_abort_hba(phba);
  11746
  11747	spin_lock_irqsave(&phba->hbalock, flags);
  11748
  11749	/*
  11750	 * Error everything on the txq since these iocbs
  11751	 * have not been given to the FW yet.
  11752	 */
  11753	if (phba->sli_rev != LPFC_SLI_REV4) {
  11754		for (i = 0; i < psli->num_rings; i++) {
  11755			pring = &psli->sli3_ring[i];
  11756			/* Only slow rings */
  11757			if (pring->ringno == LPFC_ELS_RING) {
  11758				pring->flag |= LPFC_DEFERRED_RING_EVENT;
  11759				/* Set the lpfc data pending flag */
  11760				set_bit(LPFC_DATA_READY, &phba->data_flags);
  11761			}
  11762			list_splice_init(&pring->txq, &completions);
  11763		}
  11764	} else {
  11765		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
  11766			pring = qp->pring;
  11767			if (!pring)
  11768				continue;
  11769			spin_lock(&pring->ring_lock);
  11770			list_splice_init(&pring->txq, &completions);
  11771			spin_unlock(&pring->ring_lock);
  11772			if (pring == phba->sli4_hba.els_wq->pring) {
  11773				pring->flag |= LPFC_DEFERRED_RING_EVENT;
  11774				/* Set the lpfc data pending flag */
  11775				set_bit(LPFC_DATA_READY, &phba->data_flags);
  11776			}
  11777		}
  11778	}
  11779	spin_unlock_irqrestore(&phba->hbalock, flags);
  11780
  11781	/* Cancel all the IOCBs from the completions list */
  11782	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
  11783			      IOERR_SLI_DOWN);
  11784
  11785	spin_lock_irqsave(&phba->hbalock, flags);
  11786	list_splice_init(&phba->elsbuf, &completions);
  11787	phba->elsbuf_cnt = 0;
  11788	phba->elsbuf_prev_cnt = 0;
  11789	spin_unlock_irqrestore(&phba->hbalock, flags);
  11790
  11791	while (!list_empty(&completions)) {
  11792		list_remove_head(&completions, buf_ptr,
  11793			struct lpfc_dmabuf, list);
  11794		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
  11795		kfree(buf_ptr);
  11796	}
  11797
  11798	/* Enable softirqs again, done with phba->hbalock */
  11799	local_bh_enable();
  11800
  11801	/* Return any active mbox cmds */
  11802	del_timer_sync(&psli->mbox_tmo);
  11803
  11804	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
  11805	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
  11806	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
  11807
  11808	return 1;
  11809}
  11810
  11811/**
  11812 * lpfc_sli_pcimem_bcopy - SLI memory copy function
  11813 * @srcp: Source memory pointer.
  11814 * @destp: Destination memory pointer.
   11815 * @cnt: Number of bytes to be copied.
  11816 *
  11817 * This function is used for copying data between driver memory
  11818 * and the SLI memory. This function also changes the endianness
  11819 * of each word if native endianness is different from SLI
  11820 * endianness. This function can be called with or without
  11821 * lock.
  11822 **/
  11823void
  11824lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
  11825{
  11826	uint32_t *src = srcp;
  11827	uint32_t *dest = destp;
  11828	uint32_t ldata;
  11829	int i;
  11830
  11831	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
  11832		ldata = *src;
  11833		ldata = le32_to_cpu(ldata);
  11834		*dest = ldata;
  11835		src++;
  11836		dest++;
  11837	}
  11838}
  11839
  11840
  11841/**
  11842 * lpfc_sli_bemem_bcopy - SLI memory copy function
  11843 * @srcp: Source memory pointer.
  11844 * @destp: Destination memory pointer.
   11845 * @cnt: Number of bytes to be copied.
   11846 *
   11847 * This function is used for copying data from a data structure
   11848 * with big endian representation to local endianness.
  11849 * This function can be called with or without lock.
  11850 **/
  11851void
  11852lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
  11853{
  11854	uint32_t *src = srcp;
  11855	uint32_t *dest = destp;
  11856	uint32_t ldata;
  11857	int i;
  11858
  11859	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
  11860		ldata = *src;
  11861		ldata = be32_to_cpu(ldata);
  11862		*dest = ldata;
  11863		src++;
  11864		dest++;
  11865	}
  11866}
  11867
  11868/**
  11869 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
  11870 * @phba: Pointer to HBA context object.
  11871 * @pring: Pointer to driver SLI ring object.
  11872 * @mp: Pointer to driver buffer object.
  11873 *
  11874 * This function is called with no lock held.
   11875 * It always returns zero after adding the buffer to the postbufq
  11876 * buffer list.
  11877 **/
  11878int
  11879lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  11880			 struct lpfc_dmabuf *mp)
  11881{
  11882	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
  11883	   later */
  11884	spin_lock_irq(&phba->hbalock);
  11885	list_add_tail(&mp->list, &pring->postbufq);
  11886	pring->postbufq_cnt++;
  11887	spin_unlock_irq(&phba->hbalock);
  11888	return 0;
  11889}
  11890
  11891/**
  11892 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
  11893 * @phba: Pointer to HBA context object.
  11894 *
  11895 * When HBQ is enabled, buffers are searched based on tags. This function
   11896 * allocates a tag for a buffer posted using a CMD_QUE_XRI64_CX iocb. The
   11897 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
   11898 * does not conflict with tags of buffers posted for unsolicited events.
  11899 * The function returns the allocated tag. The function is called with
  11900 * no locks held.
  11901 **/
  11902uint32_t
  11903lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
  11904{
  11905	spin_lock_irq(&phba->hbalock);
  11906	phba->buffer_tag_count++;
  11907	/*
   11908	 * Always set the QUE_BUFTAG_BIT to distinguish the tag from
   11909	 * a tag assigned by HBQ.
  11910	 */
  11911	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
  11912	spin_unlock_irq(&phba->hbalock);
  11913	return phba->buffer_tag_count;
  11914}
  11915
  11916/**
  11917 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
  11918 * @phba: Pointer to HBA context object.
  11919 * @pring: Pointer to driver SLI ring object.
  11920 * @tag: Buffer tag.
  11921 *
  11922 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
   11923 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
  11924 * iocb is posted to the response ring with the tag of the buffer.
  11925 * This function searches the pring->postbufq list using the tag
   11926 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
   11927 * iocb. If the buffer is found, the lpfc_dmabuf object of the
   11928 * buffer is returned to the caller; otherwise NULL is returned.
  11929 * This function is called with no lock held.
  11930 **/
  11931struct lpfc_dmabuf *
  11932lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  11933			uint32_t tag)
  11934{
  11935	struct lpfc_dmabuf *mp, *next_mp;
  11936	struct list_head *slp = &pring->postbufq;
  11937
  11938	/* Search postbufq, from the beginning, looking for a match on tag */
  11939	spin_lock_irq(&phba->hbalock);
  11940	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
  11941		if (mp->buffer_tag == tag) {
  11942			list_del_init(&mp->list);
  11943			pring->postbufq_cnt--;
  11944			spin_unlock_irq(&phba->hbalock);
  11945			return mp;
  11946		}
  11947	}
  11948
  11949	spin_unlock_irq(&phba->hbalock);
  11950	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  11951			"0402 Cannot find virtual addr for buffer tag on "
  11952			"ring %d Data x%lx x%px x%px x%x\n",
  11953			pring->ringno, (unsigned long) tag,
  11954			slp->next, slp->prev, pring->postbufq_cnt);
  11955
  11956	return NULL;
  11957}
  11958
  11959/**
  11960 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
  11961 * @phba: Pointer to HBA context object.
  11962 * @pring: Pointer to driver SLI ring object.
  11963 * @phys: DMA address of the buffer.
  11964 *
  11965 * This function searches the buffer list using the dma_address
   11966 * of the unsolicited event to find the driver's lpfc_dmabuf object
   11967 * corresponding to the dma_address. The function returns the
   11968 * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
  11969 * This function is called by the ct and els unsolicited event
  11970 * handlers to get the buffer associated with the unsolicited
  11971 * event.
  11972 *
  11973 * This function is called with no lock held.
  11974 **/
  11975struct lpfc_dmabuf *
  11976lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  11977			 dma_addr_t phys)
  11978{
  11979	struct lpfc_dmabuf *mp, *next_mp;
  11980	struct list_head *slp = &pring->postbufq;
  11981
  11982	/* Search postbufq, from the beginning, looking for a match on phys */
  11983	spin_lock_irq(&phba->hbalock);
  11984	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
  11985		if (mp->phys == phys) {
  11986			list_del_init(&mp->list);
  11987			pring->postbufq_cnt--;
  11988			spin_unlock_irq(&phba->hbalock);
  11989			return mp;
  11990		}
  11991	}
  11992
  11993	spin_unlock_irq(&phba->hbalock);
  11994	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  11995			"0410 Cannot find virtual addr for mapped buf on "
  11996			"ring %d Data x%llx x%px x%px x%x\n",
  11997			pring->ringno, (unsigned long long)phys,
  11998			slp->next, slp->prev, pring->postbufq_cnt);
  11999	return NULL;
  12000}
  12001
  12002/**
  12003 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
  12004 * @phba: Pointer to HBA context object.
  12005 * @cmdiocb: Pointer to driver command iocb object.
  12006 * @rspiocb: Pointer to driver response iocb object.
  12007 *
  12008 * This function is the completion handler for the abort iocbs for
  12009 * ELS commands. This function is called from the ELS ring event
  12010 * handler with no lock held. This function frees memory resources
  12011 * associated with the abort iocb.
  12012 **/
  12013static void
  12014lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  12015			struct lpfc_iocbq *rspiocb)
  12016{
  12017	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
  12018	u32 ulp_word4 = get_job_word4(phba, rspiocb);
  12019	u8 cmnd = get_job_cmnd(phba, cmdiocb);
  12020
  12021	if (ulp_status) {
  12022		/*
  12023		 * Assume that the port already completed and returned, or
  12024		 * will return the iocb. Just Log the message.
   12025		 * will return the iocb. Just log the message.
  12026		if (phba->sli_rev < LPFC_SLI_REV4) {
  12027			if (cmnd == CMD_ABORT_XRI_CX &&
  12028			    ulp_status == IOSTAT_LOCAL_REJECT &&
  12029			    ulp_word4 == IOERR_ABORT_REQUESTED) {
  12030				goto release_iocb;
  12031			}
  12032		}
  12033
  12034		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
  12035				"0327 Cannot abort els iocb x%px "
  12036				"with io cmd xri %x abort tag : x%x, "
  12037				"abort status %x abort code %x\n",
  12038				cmdiocb, get_job_abtsiotag(phba, cmdiocb),
  12039				(phba->sli_rev == LPFC_SLI_REV4) ?
  12040				get_wqe_reqtag(cmdiocb) :
  12041				cmdiocb->iocb.un.acxri.abortContextTag,
  12042				ulp_status, ulp_word4);
  12043
  12044	}
  12045release_iocb:
  12046	lpfc_sli_release_iocbq(phba, cmdiocb);
  12047	return;
  12048}
  12049
  12050/**
  12051 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
  12052 * @phba: Pointer to HBA context object.
  12053 * @cmdiocb: Pointer to driver command iocb object.
  12054 * @rspiocb: Pointer to driver response iocb object.
  12055 *
   12056 * The function is called from the SLI ring event handler with no
  12057 * lock held. This function is the completion handler for ELS commands
  12058 * which are aborted. The function frees memory resources used for
  12059 * the aborted ELS commands.
  12060 **/
  12061void
  12062lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  12063		     struct lpfc_iocbq *rspiocb)
  12064{
  12065	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
  12066	IOCB_t *irsp;
  12067	LPFC_MBOXQ_t *mbox;
  12068	u32 ulp_command, ulp_status, ulp_word4, iotag;
  12069
  12070	ulp_command = get_job_cmnd(phba, cmdiocb);
  12071	ulp_status = get_job_ulpstatus(phba, rspiocb);
  12072	ulp_word4 = get_job_word4(phba, rspiocb);
  12073
  12074	if (phba->sli_rev == LPFC_SLI_REV4) {
  12075		iotag = get_wqe_reqtag(cmdiocb);
  12076	} else {
  12077		irsp = &rspiocb->iocb;
  12078		iotag = irsp->ulpIoTag;
  12079
  12080		/* It is possible a PLOGI_RJT for NPIV ports to get aborted.
  12081		 * The MBX_REG_LOGIN64 mbox command is freed back to the
  12082		 * mbox_mem_pool here.
  12083		 */
  12084		if (cmdiocb->context_un.mbox) {
  12085			mbox = cmdiocb->context_un.mbox;
  12086			lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
  12087			cmdiocb->context_un.mbox = NULL;
  12088		}
  12089	}
  12090
  12091	/* ELS cmd tag <ulpIoTag> completes */
  12092	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
  12093			"0139 Ignoring ELS cmd code x%x completion Data: "
  12094			"x%x x%x x%x x%px\n",
  12095			ulp_command, ulp_status, ulp_word4, iotag,
  12096			cmdiocb->ndlp);
  12097	/*
  12098	 * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
  12099	 * if exchange is busy.
  12100	 */
  12101	if (ulp_command == CMD_GEN_REQUEST64_CR)
  12102		lpfc_ct_free_iocb(phba, cmdiocb);
  12103	else
  12104		lpfc_els_free_iocb(phba, cmdiocb);
  12105
  12106	lpfc_nlp_put(ndlp);
  12107}
  12108
  12109/**
  12110 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
  12111 * @phba: Pointer to HBA context object.
  12112 * @pring: Pointer to driver SLI ring object.
  12113 * @cmdiocb: Pointer to driver command iocb object.
  12114 * @cmpl: completion function.
  12115 *
  12116 * This function issues an abort iocb for the provided command iocb. In case
  12117 * of unloading, the abort iocb will not be issued to commands on the ELS
  12118 * ring. Instead, the callback function shall be changed to those commands
  12119 * so that nothing happens when them finishes. This function is called with
  12120 * hbalock held andno ring_lock held (SLI4). The function returns IOCB_SUCCESS
  12121 * when the command iocb is an abort request.
  12122 *
  12123 **/
  12124int
  12125lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  12126			   struct lpfc_iocbq *cmdiocb, void *cmpl)
  12127{
  12128	struct lpfc_vport *vport = cmdiocb->vport;
  12129	struct lpfc_iocbq *abtsiocbp;
  12130	int retval = IOCB_ERROR;
  12131	unsigned long iflags;
  12132	struct lpfc_nodelist *ndlp = NULL;
  12133	u32 ulp_command = get_job_cmnd(phba, cmdiocb);
  12134	u16 ulp_context, iotag;
  12135	bool ia;
  12136
  12137	/*
  12138	 * There are certain command types we don't want to abort.  And we
  12139	 * don't want to abort commands that are already in the process of
  12140	 * being aborted.
  12141	 */
  12142	if (ulp_command == CMD_ABORT_XRI_WQE ||
  12143	    ulp_command == CMD_ABORT_XRI_CN ||
  12144	    ulp_command == CMD_CLOSE_XRI_CN ||
  12145	    cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
  12146		return IOCB_ABORTING;
  12147
  12148	if (!pring) {
  12149		if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
  12150			cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
  12151		else
  12152			cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
  12153		return retval;
  12154	}
  12155
  12156	/*
  12157	 * If we're unloading, don't abort iocb on the ELS ring, but change
  12158	 * the callback so that nothing happens when it finishes.
  12159	 */
  12160	if ((vport->load_flag & FC_UNLOADING) &&
  12161	    pring->ringno == LPFC_ELS_RING) {
  12162		if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
  12163			cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
  12164		else
  12165			cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
  12166		return retval;
  12167	}
  12168
  12169	/* issue ABTS for this IOCB based on iotag */
  12170	abtsiocbp = __lpfc_sli_get_iocbq(phba);
  12171	if (abtsiocbp == NULL)
  12172		return IOCB_NORESOURCE;
  12173
  12174	/* This signals the response to set the correct status
  12175	 * before calling the completion handler
  12176	 */
  12177	cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
  12178
  12179	if (phba->sli_rev == LPFC_SLI_REV4) {
  12180		ulp_context = cmdiocb->sli4_xritag;
  12181		iotag = abtsiocbp->iotag;
  12182	} else {
  12183		iotag = cmdiocb->iocb.ulpIoTag;
  12184		if (pring->ringno == LPFC_ELS_RING) {
  12185			ndlp = cmdiocb->ndlp;
  12186			ulp_context = ndlp->nlp_rpi;
  12187		} else {
  12188			ulp_context = cmdiocb->iocb.ulpContext;
  12189		}
  12190	}
  12191
  12192	if (phba->link_state < LPFC_LINK_UP ||
  12193	    (phba->sli_rev == LPFC_SLI_REV4 &&
  12194	     phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
  12195	    (phba->link_flag & LS_EXTERNAL_LOOPBACK))
  12196		ia = true;
  12197	else
  12198		ia = false;
  12199
  12200	lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
  12201				cmdiocb->iocb.ulpClass,
  12202				LPFC_WQE_CQ_ID_DEFAULT, ia);
  12203
  12204	abtsiocbp->vport = vport;
  12205
  12206	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
  12207	abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
  12208	if (cmdiocb->cmd_flag & LPFC_IO_FCP)
  12209		abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
  12210
  12211	if (cmdiocb->cmd_flag & LPFC_IO_FOF)
  12212		abtsiocbp->cmd_flag |= LPFC_IO_FOF;
  12213
  12214	if (cmpl)
  12215		abtsiocbp->cmd_cmpl = cmpl;
  12216	else
  12217		abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;
  12218	abtsiocbp->vport = vport;
  12219
  12220	if (phba->sli_rev == LPFC_SLI_REV4) {
  12221		pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
  12222		if (unlikely(pring == NULL))
  12223			goto abort_iotag_exit;
  12224		/* Note: both hbalock and ring_lock need to be set here */
  12225		spin_lock_irqsave(&pring->ring_lock, iflags);
  12226		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
  12227			abtsiocbp, 0);
  12228		spin_unlock_irqrestore(&pring->ring_lock, iflags);
  12229	} else {
  12230		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
  12231			abtsiocbp, 0);
  12232	}
  12233
  12234abort_iotag_exit:
  12235
  12236	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
  12237			 "0339 Abort IO XRI x%x, Original iotag x%x, "
  12238			 "abort tag x%x Cmdjob : x%px Abortjob : x%px "
  12239			 "retval x%x\n",
  12240			 ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
  12241			 cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
  12242			 retval);
  12243	if (retval) {
  12244		cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
  12245		__lpfc_sli_release_iocbq(phba, abtsiocbp);
  12246	}
  12247
  12248	/*
  12249	 * Caller to this routine should check for IOCB_ERROR
  12250	 * and handle it properly.  This routine no longer removes
  12251	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
  12252	 */
  12253	return retval;
  12254}
  12255
  12256/**
  12257 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
  12258 * @phba: pointer to lpfc HBA data structure.
  12259 *
  12260 * This routine will abort all pending and outstanding iocbs to an HBA.
  12261 **/
  12262void
  12263lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
  12264{
  12265	struct lpfc_sli *psli = &phba->sli;
  12266	struct lpfc_sli_ring *pring;
  12267	struct lpfc_queue *qp = NULL;
  12268	int i;
  12269
  12270	if (phba->sli_rev != LPFC_SLI_REV4) {
  12271		for (i = 0; i < psli->num_rings; i++) {
  12272			pring = &psli->sli3_ring[i];
  12273			lpfc_sli_abort_iocb_ring(phba, pring);
  12274		}
  12275		return;
  12276	}
  12277	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
  12278		pring = qp->pring;
  12279		if (!pring)
  12280			continue;
  12281		lpfc_sli_abort_iocb_ring(phba, pring);
  12282	}
  12283}
  12284
  12285/**
  12286 * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
  12287 * @iocbq: Pointer to iocb object.
  12288 * @vport: Pointer to driver virtual port object.
  12289 *
  12290 * This function acts as an iocb filter for functions which abort FCP iocbs.
  12291 *
  12292 * Return values
  12293 * -ENODEV, if the iocb is NULL or its vport does not match the given vport
  12294 * -EINVAL, if the iocb is not an FCP I/O, is not on the TX cmpl queue, is
  12295 *          already marked as driver-aborted, or is itself an abort iocb
  12296 * 0, if the iocb passes the criteria for aborting the FCP I/O
  12297 **/
  12298static int
  12299lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
  12300				     struct lpfc_vport *vport)
  12301{
  12302	u8 ulp_command;
  12303
  12304	/* No null ptr vports */
  12305	if (!iocbq || iocbq->vport != vport)
  12306		return -ENODEV;
  12307
  12308	/* iocb must be for FCP IO, already exists on the TX cmpl queue,
  12309	 * can't be premarked as driver aborted, nor be an ABORT iocb itself
  12310	 */
  12311	ulp_command = get_job_cmnd(vport->phba, iocbq);
  12312	if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
  12313	    !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
  12314	    (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
  12315	    (ulp_command == CMD_ABORT_XRI_CN ||
  12316	     ulp_command == CMD_CLOSE_XRI_CN ||
  12317	     ulp_command == CMD_ABORT_XRI_WQE))
  12318		return -EINVAL;
  12319
  12320	return 0;
  12321}
  12322
  12323/**
  12324 * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
  12325 * @iocbq: Pointer to driver iocb object.
  12326 * @vport: Pointer to driver virtual port object.
  12327 * @tgt_id: SCSI ID of the target.
  12328 * @lun_id: LUN ID of the scsi device.
  12329 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
  12330 *
  12331 * This function acts as an iocb filter for validating a lun/SCSI target/SCSI
  12332 * host.
  12333 *
  12334 * It will return
  12335 * 0 if the filtering criteria are met for the given iocb and will return
  12336 * 1 if the filtering criteria are not met.
  12337 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
  12338 * given iocb is for the SCSI device specified by vport, tgt_id and
  12339 * lun_id parameter.
  12340 * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
  12341 * given iocb is for the SCSI target specified by vport and tgt_id
  12342 * parameters.
  12343 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
  12344 * given iocb is for the SCSI host associated with the given vport.
  12345 * This function is called with no locks held.
  12346 **/
  12347static int
  12348lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
  12349			   uint16_t tgt_id, uint64_t lun_id,
  12350			   lpfc_ctx_cmd ctx_cmd)
  12351{
  12352	struct lpfc_io_buf *lpfc_cmd;
  12353	int rc = 1;
  12354
  12355	lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
  12356
  12357	if (lpfc_cmd->pCmd == NULL)
  12358		return rc;
  12359
  12360	switch (ctx_cmd) {
  12361	case LPFC_CTX_LUN:
  12362		if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
  12363		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
  12364		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
  12365			rc = 0;
  12366		break;
  12367	case LPFC_CTX_TGT:
  12368		if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
  12369		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
  12370			rc = 0;
  12371		break;
  12372	case LPFC_CTX_HOST:
  12373		rc = 0;
  12374		break;
  12375	default:
  12376		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
  12377			__func__, ctx_cmd);
  12378		break;
  12379	}
  12380
  12381	return rc;
  12382}
  12383
  12384/**
  12385 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
  12386 * @vport: Pointer to virtual port.
  12387 * @tgt_id: SCSI ID of the target.
  12388 * @lun_id: LUN ID of the scsi device.
  12389 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
  12390 *
  12391 * This function returns number of FCP commands pending for the vport.
  12392 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
  12393 * commands pending on the vport associated with SCSI device specified
  12394 * by tgt_id and lun_id parameters.
  12395 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
  12396 * commands pending on the vport associated with SCSI target specified
  12397 * by tgt_id parameter.
  12398 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
  12399 * commands pending on the vport.
  12400 * This function returns the number of iocbs which satisfy the filter.
  12401 * This function is called without any lock held.
  12402 **/
  12403int
  12404lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
  12405		  lpfc_ctx_cmd ctx_cmd)
  12406{
  12407	struct lpfc_hba *phba = vport->phba;
  12408	struct lpfc_iocbq *iocbq;
  12409	int sum, i;
  12410	unsigned long iflags;
  12411	u8 ulp_command;
  12412
  12413	spin_lock_irqsave(&phba->hbalock, iflags);
  12414	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
  12415		iocbq = phba->sli.iocbq_lookup[i];
  12416
  12417		if (!iocbq || iocbq->vport != vport)
  12418			continue;
  12419		if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
  12420		    !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
  12421			continue;
  12422
  12423		/* Include counting outstanding aborts */
  12424		ulp_command = get_job_cmnd(phba, iocbq);
  12425		if (ulp_command == CMD_ABORT_XRI_CN ||
  12426		    ulp_command == CMD_CLOSE_XRI_CN ||
  12427		    ulp_command == CMD_ABORT_XRI_WQE) {
  12428			sum++;
  12429			continue;
  12430		}
  12431
  12432		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
  12433					       ctx_cmd) == 0)
  12434			sum++;
  12435	}
  12436	spin_unlock_irqrestore(&phba->hbalock, iflags);
  12437
  12438	return sum;
  12439}
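
       /* Illustrative sketch only (not part of the driver): a hypothetical
        * error-handling caller could use lpfc_sli_sum_iocb() to wait for the
        * outstanding FCP I/Os on a target to drain.  The vport, tgt_id and
        * retry/poll values below are assumptions, not code from this file:
        *
        *	while (lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT) &&
        *	       wait_cnt--)
        *		msleep(1000);
        */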
  12440
  12441/**
  12442 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
  12443 * @phba: Pointer to HBA context object
  12444 * @cmdiocb: Pointer to command iocb object.
  12445 * @rspiocb: Pointer to response iocb object.
  12446 *
  12447 * This function is called when an aborted FCP iocb completes. This
  12448 * function is called by the ring event handler with no lock held.
  12449 * This function frees the iocb.
  12450 **/
  12451void
  12452lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  12453			struct lpfc_iocbq *rspiocb)
  12454{
  12455	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  12456			"3096 ABORT_XRI_CX completing on rpi x%x "
  12457			"original iotag x%x, abort cmd iotag x%x "
  12458			"status 0x%x, reason 0x%x\n",
  12459			(phba->sli_rev == LPFC_SLI_REV4) ?
  12460			cmdiocb->sli4_xritag :
  12461			cmdiocb->iocb.un.acxri.abortContextTag,
  12462			get_job_abtsiotag(phba, cmdiocb),
  12463			cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb),
  12464			get_job_word4(phba, rspiocb));
  12465	lpfc_sli_release_iocbq(phba, cmdiocb);
  12466	return;
  12467}
  12468
  12469/**
  12470 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
  12471 * @vport: Pointer to virtual port.
  12472 * @tgt_id: SCSI ID of the target.
  12473 * @lun_id: LUN ID of the scsi device.
  12474 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
  12475 *
  12476 * This function sends an abort command for every SCSI command
  12477 * associated with the given virtual port pending on the ring
  12478 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
  12479 * lpfc_sli_validate_fcp_iocb function.  The ordering for validation before
  12480 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
  12481 * followed by lpfc_sli_validate_fcp_iocb.
  12482 *
  12483 * When abort_cmd == LPFC_CTX_LUN, the function sends an abort only to the
  12484 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
  12485 * parameters.
  12486 * When abort_cmd == LPFC_CTX_TGT, the function sends an abort only to the
  12487 * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
  12488 * When abort_cmd == LPFC_CTX_HOST, the function sends an abort to all
  12489 * FCP iocbs associated with the virtual port.
  12490 * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
  12491 * lpfc_sli4_calc_ring is used.
  12492 * This function returns number of iocbs it failed to abort.
  12493 * This function is called with no locks held.
  12494 **/
  12495int
  12496lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
  12497		    lpfc_ctx_cmd abort_cmd)
  12498{
  12499	struct lpfc_hba *phba = vport->phba;
  12500	struct lpfc_sli_ring *pring = NULL;
  12501	struct lpfc_iocbq *iocbq;
  12502	int errcnt = 0, ret_val = 0;
  12503	unsigned long iflags;
  12504	int i;
  12505
  12506	/* all I/Os are in the process of being flushed */
  12507	if (phba->hba_flag & HBA_IOQ_FLUSH)
  12508		return errcnt;
  12509
  12510	for (i = 1; i <= phba->sli.last_iotag; i++) {
  12511		iocbq = phba->sli.iocbq_lookup[i];
  12512
  12513		if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
  12514			continue;
  12515
  12516		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
  12517					       abort_cmd) != 0)
  12518			continue;
  12519
  12520		spin_lock_irqsave(&phba->hbalock, iflags);
  12521		if (phba->sli_rev == LPFC_SLI_REV3) {
  12522			pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
  12523		} else if (phba->sli_rev == LPFC_SLI_REV4) {
  12524			pring = lpfc_sli4_calc_ring(phba, iocbq);
  12525		}
  12526		ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
  12527						     lpfc_sli_abort_fcp_cmpl);
  12528		spin_unlock_irqrestore(&phba->hbalock, iflags);
  12529		if (ret_val != IOCB_SUCCESS)
  12530			errcnt++;
  12531	}
  12532
  12533	return errcnt;
  12534}
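
       /* Illustrative sketch only: a hypothetical LUN reset path could
        * request aborts for every FCP I/O queued to one LUN with
        *
        *	failed = lpfc_sli_abort_iocb(vport, tgt_id, lun_id,
        *				     LPFC_CTX_LUN);
        *
        * where a non-zero return is the count of abort requests that could
        * not be issued.  The vport/tgt_id/lun_id context is assumed, not
        * taken from this file.
        */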
  12535
  12536/**
  12537 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
  12538 * @vport: Pointer to virtual port.
  12539 * @pring: Pointer to driver SLI ring object.
  12540 * @tgt_id: SCSI ID of the target.
  12541 * @lun_id: LUN ID of the scsi device.
  12542 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
  12543 *
  12544 * This function sends an abort command for every SCSI command
  12545 * associated with the given virtual port pending on the ring
  12546 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
  12547 * lpfc_sli_validate_fcp_iocb function.  The ordering for validation before
  12548 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
  12549 * followed by lpfc_sli_validate_fcp_iocb.
  12550 *
  12551 * When cmd == LPFC_CTX_LUN, the function sends an abort only to the
  12552 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
  12553 * parameters.
  12554 * When cmd == LPFC_CTX_TGT, the function sends an abort only to the
  12555 * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
  12556 * When cmd == LPFC_CTX_HOST, the function sends an abort to all
  12557 * FCP iocbs associated with the virtual port.
  12558 * This function returns the number of iocbs it aborted.
  12559 * This function is called with no locks held right after a taskmgmt
  12560 * command is sent.
  12561 **/
  12562int
  12563lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
  12564			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
  12565{
  12566	struct lpfc_hba *phba = vport->phba;
  12567	struct lpfc_io_buf *lpfc_cmd;
  12568	struct lpfc_iocbq *abtsiocbq;
  12569	struct lpfc_nodelist *ndlp = NULL;
  12570	struct lpfc_iocbq *iocbq;
  12571	int sum, i, ret_val;
  12572	unsigned long iflags;
  12573	struct lpfc_sli_ring *pring_s4 = NULL;
  12574	u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
  12575	bool ia;
  12576
  12577	spin_lock_irqsave(&phba->hbalock, iflags);
  12578
  12579	/* all I/Os are in the process of being flushed */
  12580	if (phba->hba_flag & HBA_IOQ_FLUSH) {
  12581		spin_unlock_irqrestore(&phba->hbalock, iflags);
  12582		return 0;
  12583	}
  12584	sum = 0;
  12585
  12586	for (i = 1; i <= phba->sli.last_iotag; i++) {
  12587		iocbq = phba->sli.iocbq_lookup[i];
  12588
  12589		if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
  12590			continue;
  12591
  12592		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
  12593					       cmd) != 0)
  12594			continue;
  12595
  12596		/* Guard against IO completion being called at the same time */
  12597		lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
  12598		spin_lock(&lpfc_cmd->buf_lock);
  12599
  12600		if (!lpfc_cmd->pCmd) {
  12601			spin_unlock(&lpfc_cmd->buf_lock);
  12602			continue;
  12603		}
  12604
  12605		if (phba->sli_rev == LPFC_SLI_REV4) {
  12606			pring_s4 =
  12607			    phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
  12608			if (!pring_s4) {
  12609				spin_unlock(&lpfc_cmd->buf_lock);
  12610				continue;
  12611			}
  12612			/* Note: both hbalock and ring_lock must be set here */
  12613			spin_lock(&pring_s4->ring_lock);
  12614		}
  12615
  12616		/*
  12617		 * If the iocbq is already being aborted or is no longer on the
  12618		 * txcmplq, don't take a second action; just skip it.
  12619		 */
  12620		if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
  12621		    !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
  12622			if (phba->sli_rev == LPFC_SLI_REV4)
  12623				spin_unlock(&pring_s4->ring_lock);
  12624			spin_unlock(&lpfc_cmd->buf_lock);
  12625			continue;
  12626		}
  12627
  12628		/* issue ABTS for this IOCB based on iotag */
  12629		abtsiocbq = __lpfc_sli_get_iocbq(phba);
  12630		if (!abtsiocbq) {
  12631			if (phba->sli_rev == LPFC_SLI_REV4)
  12632				spin_unlock(&pring_s4->ring_lock);
  12633			spin_unlock(&lpfc_cmd->buf_lock);
  12634			continue;
  12635		}
  12636
  12637		if (phba->sli_rev == LPFC_SLI_REV4) {
  12638			iotag = abtsiocbq->iotag;
  12639			ulp_context = iocbq->sli4_xritag;
  12640			cqid = lpfc_cmd->hdwq->io_cq_map;
  12641		} else {
  12642			iotag = iocbq->iocb.ulpIoTag;
  12643			if (pring->ringno == LPFC_ELS_RING) {
  12644				ndlp = iocbq->ndlp;
  12645				ulp_context = ndlp->nlp_rpi;
  12646			} else {
  12647				ulp_context = iocbq->iocb.ulpContext;
  12648			}
  12649		}
  12650
  12651		ndlp = lpfc_cmd->rdata->pnode;
  12652
  12653		if (lpfc_is_link_up(phba) &&
  12654		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) &&
  12655		    !(phba->link_flag & LS_EXTERNAL_LOOPBACK))
  12656			ia = false;
  12657		else
  12658			ia = true;
  12659
  12660		lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
  12661					iocbq->iocb.ulpClass, cqid,
  12662					ia);
  12663
  12664		abtsiocbq->vport = vport;
  12665
  12666		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
  12667		abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
  12668		if (iocbq->cmd_flag & LPFC_IO_FCP)
  12669			abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
  12670		if (iocbq->cmd_flag & LPFC_IO_FOF)
  12671			abtsiocbq->cmd_flag |= LPFC_IO_FOF;
  12672
  12673		/* Setup callback routine and issue the command. */
  12674		abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
  12675
  12676		/*
  12677		 * Indicate the IO is being aborted by the driver and set
  12678		 * the caller's flag into the aborted IO.
  12679		 */
  12680		iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
  12681
  12682		if (phba->sli_rev == LPFC_SLI_REV4) {
  12683			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
  12684							abtsiocbq, 0);
  12685			spin_unlock(&pring_s4->ring_lock);
  12686		} else {
  12687			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
  12688							abtsiocbq, 0);
  12689		}
  12690
  12691		spin_unlock(&lpfc_cmd->buf_lock);
  12692
  12693		if (ret_val == IOCB_ERROR)
  12694			__lpfc_sli_release_iocbq(phba, abtsiocbq);
  12695		else
  12696			sum++;
  12697	}
  12698	spin_unlock_irqrestore(&phba->hbalock, iflags);
  12699	return sum;
  12700}
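
       /* Illustrative sketch only: right after a task management command is
        * sent, a hypothetical caller could close out the affected I/Os with
        *
        *	cnt = lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, lun_id,
        *				      LPFC_CTX_LUN);
        *
        * where pring is the ring the I/Os were issued on and the return
        * value is the number of abort iocbs successfully issued.  The caller
        * context shown here is assumed, not taken from this file.
        */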
  12701
  12702/**
  12703 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
  12704 * @phba: Pointer to HBA context object.
  12705 * @cmdiocbq: Pointer to command iocb.
  12706 * @rspiocbq: Pointer to response iocb.
  12707 *
  12708 * This function is the completion handler for iocbs issued using
  12709 * lpfc_sli_issue_iocb_wait function. This function is called by the
  12710 * ring event handler function without any lock held. This function
  12711 * can be called from both worker thread context and interrupt
  12712 * context. This function also can be called from other thread which
  12713 * cleans up the SLI layer objects.
  12714 * This function copies the contents of the response iocb to the
  12715 * response iocb memory object provided by the caller of
  12716 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
  12717 * sleeps for the iocb completion.
  12718 **/
  12719static void
  12720lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
  12721			struct lpfc_iocbq *cmdiocbq,
  12722			struct lpfc_iocbq *rspiocbq)
  12723{
  12724	wait_queue_head_t *pdone_q;
  12725	unsigned long iflags;
  12726	struct lpfc_io_buf *lpfc_cmd;
  12727	size_t offset = offsetof(struct lpfc_iocbq, wqe);
  12728
  12729	spin_lock_irqsave(&phba->hbalock, iflags);
  12730	if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
  12731
  12732		/*
  12733		 * A time out has occurred for the iocb.  If a time out
  12734		 * completion handler has been supplied, call it.  Otherwise,
  12735		 * just free the iocbq.
  12736		 */
  12737
  12738		spin_unlock_irqrestore(&phba->hbalock, iflags);
  12739		cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
  12740		cmdiocbq->wait_cmd_cmpl = NULL;
  12741		if (cmdiocbq->cmd_cmpl)
  12742			cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
  12743		else
  12744			lpfc_sli_release_iocbq(phba, cmdiocbq);
  12745		return;
  12746	}
  12747
  12748	/* Copy the contents of the local rspiocb into the caller's buffer. */
  12749	cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
  12750	if (cmdiocbq->rsp_iocb && rspiocbq)
  12751		memcpy((char *)cmdiocbq->rsp_iocb + offset,
  12752		       (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
  12753
  12754	/* Set the exchange busy flag for task management commands */
  12755	if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
  12756	    !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
  12757		lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
  12758					cur_iocbq);
  12759		if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
  12760			lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
  12761		else
  12762			lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
  12763	}
  12764
  12765	pdone_q = cmdiocbq->context_un.wait_queue;
  12766	if (pdone_q)
  12767		wake_up(pdone_q);
  12768	spin_unlock_irqrestore(&phba->hbalock, iflags);
  12769	return;
  12770}
  12771
  12772/**
  12773 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
  12774 * @phba: Pointer to HBA context object.
  12775 * @piocbq: Pointer to command iocb.
  12776 * @flag: Flag to test.
  12777 *
  12778 * This routine grabs the hbalock and then tests the cmd_flag to
  12779 * see if the passed in flag is set.
  12780 * Returns:
  12781 * 1 if flag is set.
  12782 * 0 if flag is not set.
  12783 **/
  12784static int
  12785lpfc_chk_iocb_flg(struct lpfc_hba *phba,
  12786		 struct lpfc_iocbq *piocbq, uint32_t flag)
  12787{
  12788	unsigned long iflags;
  12789	int ret;
  12790
  12791	spin_lock_irqsave(&phba->hbalock, iflags);
  12792	ret = piocbq->cmd_flag & flag;
  12793	spin_unlock_irqrestore(&phba->hbalock, iflags);
  12794	return ret;
  12795
  12796}
  12797
  12798/**
  12799 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
  12800 * @phba: Pointer to HBA context object.
  12801 * @ring_number: Ring number
  12802 * @piocb: Pointer to command iocb.
  12803 * @prspiocbq: Pointer to response iocb.
  12804 * @timeout: Timeout in number of seconds.
  12805 *
  12806 * This function issues the iocb to firmware and waits for the
  12807 * iocb to complete. The cmd_cmpl field of the iocb shall be used
  12808 * to handle iocbs which time out. If the field is NULL, the
  12809 * function shall free the iocbq structure.  If more clean up is
  12810 * needed, the caller is expected to provide a completion function
  12811 * that will provide the needed clean up.  If the iocb command is
  12812 * not completed within timeout seconds, the function will either
  12813 * free the iocbq structure (if cmd_cmpl == NULL) or execute the
  12814 * completion function set in the cmd_cmpl field and then return
  12815 * a status of IOCB_TIMEDOUT.  The caller should not free the iocb
  12816 * resources if this function returns IOCB_TIMEDOUT.
  12817 * The function waits for the iocb completion using a
  12818 * non-interruptible wait.
  12819 * This function will sleep while waiting for iocb completion.
  12820 * So, this function should not be called from any context which
  12821 * does not allow sleeping. Due to the same reason, this function
  12822 * cannot be called with interrupt disabled.
  12823 * This function assumes that the iocb completions occur while
  12824 * this function sleeps. So, this function cannot be called from
  12825 * the thread which processes iocb completion for this ring.
  12826 * This function clears the cmd_flag of the iocb object before
  12827 * issuing the iocb and the iocb completion handler sets this
  12828 * flag and wakes this thread when the iocb completes.
  12829 * The contents of the response iocb will be copied to prspiocbq
  12830 * by the completion handler when the command completes.
  12831 * This function returns IOCB_SUCCESS when success.
  12832 * This function is called with no lock held.
  12833 **/
  12834int
  12835lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
  12836			 uint32_t ring_number,
  12837			 struct lpfc_iocbq *piocb,
  12838			 struct lpfc_iocbq *prspiocbq,
  12839			 uint32_t timeout)
  12840{
  12841	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
  12842	long timeleft, timeout_req = 0;
  12843	int retval = IOCB_SUCCESS;
  12844	uint32_t creg_val;
  12845	struct lpfc_iocbq *iocb;
  12846	int txq_cnt = 0;
  12847	int txcmplq_cnt = 0;
  12848	struct lpfc_sli_ring *pring;
  12849	unsigned long iflags;
  12850	bool iocb_completed = true;
  12851
  12852	if (phba->sli_rev >= LPFC_SLI_REV4) {
  12853		lpfc_sli_prep_wqe(phba, piocb);
  12854
  12855		pring = lpfc_sli4_calc_ring(phba, piocb);
  12856	} else
  12857		pring = &phba->sli.sli3_ring[ring_number];
  12858	/*
  12859	 * If the caller has provided a response iocbq buffer, the iocb's
  12860	 * rsp_iocb must still be NULL; otherwise it is an error.
  12861	 */
  12862	if (prspiocbq) {
  12863		if (piocb->rsp_iocb)
  12864			return IOCB_ERROR;
  12865		piocb->rsp_iocb = prspiocbq;
  12866	}
  12867
  12868	piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
  12869	piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
  12870	piocb->context_un.wait_queue = &done_q;
  12871	piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
  12872
  12873	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
  12874		if (lpfc_readl(phba->HCregaddr, &creg_val))
  12875			return IOCB_ERROR;
  12876		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
  12877		writel(creg_val, phba->HCregaddr);
  12878		readl(phba->HCregaddr); /* flush */
  12879	}
  12880
  12881	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
  12882				     SLI_IOCB_RET_IOCB);
  12883	if (retval == IOCB_SUCCESS) {
  12884		timeout_req = msecs_to_jiffies(timeout * 1000);
  12885		timeleft = wait_event_timeout(done_q,
  12886				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
  12887				timeout_req);
  12888		spin_lock_irqsave(&phba->hbalock, iflags);
  12889		if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
  12890
  12891			/*
  12892			 * IOCB timed out.  Inform the wake iocb wait
  12893			 * completion function and set local status
  12894			 */
  12895
  12896			iocb_completed = false;
  12897			piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
  12898		}
  12899		spin_unlock_irqrestore(&phba->hbalock, iflags);
  12900		if (iocb_completed) {
  12901			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  12902					"0331 IOCB wake signaled\n");
  12903			/* Note: we are not indicating if the IOCB has a success
  12904			 * status or not - that's for the caller to check.
  12905			 * IOCB_SUCCESS means just that the command was sent and
  12906			 * completed. Not that it completed successfully.
  12907			 * */
  12908		} else if (timeleft == 0) {
  12909			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  12910					"0338 IOCB wait timeout error - no "
  12911					"wake response Data x%x\n", timeout);
  12912			retval = IOCB_TIMEDOUT;
  12913		} else {
  12914			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  12915					"0330 IOCB wake NOT set, "
  12916					"Data x%x x%lx\n",
  12917					timeout, (timeleft / jiffies));
  12918			retval = IOCB_TIMEDOUT;
  12919		}
  12920	} else if (retval == IOCB_BUSY) {
  12921		if (phba->cfg_log_verbose & LOG_SLI) {
  12922			list_for_each_entry(iocb, &pring->txq, list) {
  12923				txq_cnt++;
  12924			}
  12925			list_for_each_entry(iocb, &pring->txcmplq, list) {
  12926				txcmplq_cnt++;
  12927			}
  12928			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  12929				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
  12930				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
  12931		}
  12932		return retval;
  12933	} else {
  12934		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  12935				"0332 IOCB wait issue failed, Data x%x\n",
  12936				retval);
  12937		retval = IOCB_ERROR;
  12938	}
  12939
  12940	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
  12941		if (lpfc_readl(phba->HCregaddr, &creg_val))
  12942			return IOCB_ERROR;
  12943		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
  12944		writel(creg_val, phba->HCregaddr);
  12945		readl(phba->HCregaddr); /* flush */
  12946	}
  12947
  12948	if (prspiocbq)
  12949		piocb->rsp_iocb = NULL;
  12950
  12951	piocb->context_un.wait_queue = NULL;
  12952	piocb->cmd_cmpl = NULL;
  12953	return retval;
  12954}
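
       /* Illustrative sketch only, assuming a prepared command iocb and a
        * caller-owned response iocb (all variable names are placeholders):
        *
        *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
        *				      rspiocbq, timeout);
        *	if (rc == IOCB_SUCCESS)
        *		status = get_job_ulpstatus(phba, rspiocbq);
        *
        * Per the rules documented above, on IOCB_TIMEDOUT the caller must
        * not free cmdiocbq; cleanup is left to the completion handler.
        */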
  12955
  12956/**
  12957 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
  12958 * @phba: Pointer to HBA context object.
  12959 * @pmboxq: Pointer to driver mailbox object.
  12960 * @timeout: Timeout in number of seconds.
  12961 *
  12962 * This function issues the mailbox to firmware and waits for the
  12963 * mailbox command to complete. If the mailbox command is not
  12964 * completed within timeout seconds, it returns MBX_TIMEOUT.
  12965 * The function waits for the mailbox completion using an
  12966 * interruptible wait. If the thread is woken up due to a
  12967 * signal, MBX_TIMEOUT error is returned to the caller. Caller
  12968 * should not free the mailbox resources, if this function returns
  12969 * MBX_TIMEOUT.
  12970 * This function will sleep while waiting for mailbox completion.
  12971 * So, this function should not be called from any context which
  12972 * does not allow sleeping. Due to the same reason, this function
  12973 * cannot be called with interrupt disabled.
  12974 * This function assumes that the mailbox completion occurs while
  12975 * this function sleeps. So, this function cannot be called from
  12976 * the worker thread which processes mailbox completion.
  12977 * This function is called in the context of HBA management
  12978 * applications.
  12979 * This function returns MBX_SUCCESS when successful.
  12980 * This function is called with no lock held.
  12981 **/
  12982int
  12983lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
  12984			 uint32_t timeout)
  12985{
  12986	struct completion mbox_done;
  12987	int retval;
  12988	unsigned long flag;
  12989
  12990	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
  12991	/* setup wake call as the mailbox completion callback */
  12992	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
  12993
  12994	/* setup context3 field to pass the completion pointer to the wake function */
  12995	init_completion(&mbox_done);
  12996	pmboxq->context3 = &mbox_done;
  12997	/* now issue the command */
  12998	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
  12999	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
  13000		wait_for_completion_timeout(&mbox_done,
  13001					    msecs_to_jiffies(timeout * 1000));
  13002
  13003		spin_lock_irqsave(&phba->hbalock, flag);
  13004		pmboxq->context3 = NULL;
  13005		/*
  13006		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
  13007		 * else do not free the resources.
  13008		 */
  13009		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
  13010			retval = MBX_SUCCESS;
  13011		} else {
  13012			retval = MBX_TIMEOUT;
  13013			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  13014		}
  13015		spin_unlock_irqrestore(&phba->hbalock, flag);
  13016	}
  13017	return retval;
  13018}
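
       /* Illustrative sketch only: a typical synchronous mailbox sequence,
        * with the command setup elided and all names treated as
        * placeholders for caller context:
        *
        *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        *	... build the mailbox command in mboxq->u.mb ...
        *	rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
        *				      lpfc_mbox_tmo_val(phba, mboxq));
        *	if (rc == MBX_SUCCESS)
        *		mempool_free(mboxq, phba->mbox_mem_pool);
        *
        * Per the rules documented above, on MBX_TIMEOUT the mailbox must not
        * be freed by the caller; the default completion handler reclaims it.
        */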
  13019
  13020/**
  13021 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
  13022 * @phba: Pointer to HBA context.
  13023 * @mbx_action: Mailbox shutdown options.
  13024 *
  13025 * This function is called to shutdown the driver's mailbox sub-system.
  13026 * It first marks the mailbox sub-system is in a block state to prevent
  13027 * It first marks the mailbox sub-system as blocked to prevent
  13028 * asynchronous mailbox commands from being issued off the pending mailbox
  13029 * HBA error conditions such as EEH or ERATT, this routine shall invoke
  13030 * the mailbox sub-system flush routine to forcefully bring down the
  13031 * mailbox sub-system. Otherwise, if it is due to normal condition (such
  13032 * as with offline or HBA function reset), this routine will wait for the
  13033 * outstanding mailbox command to complete before invoking the mailbox
  13034 * sub-system flush routine to gracefully bring down mailbox sub-system.
  13035 **/
  13036void
  13037lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
  13038{
  13039	struct lpfc_sli *psli = &phba->sli;
  13040	unsigned long timeout;
  13041
  13042	if (mbx_action == LPFC_MBX_NO_WAIT) {
  13043		/* delay 100ms for port state */
  13044		msleep(100);
  13045		lpfc_sli_mbox_sys_flush(phba);
  13046		return;
  13047	}
  13048	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
  13049
  13050	/* Disable softirqs, including timers from obtaining phba->hbalock */
  13051	local_bh_disable();
  13052
  13053	spin_lock_irq(&phba->hbalock);
  13054	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
  13055
  13056	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
  13057		/* Determine how long we might wait for the active mailbox
  13058		 * command to be gracefully completed by firmware.
  13059		 */
  13060		if (phba->sli.mbox_active)
  13061			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
  13062						phba->sli.mbox_active) *
  13063						1000) + jiffies;
  13064		spin_unlock_irq(&phba->hbalock);
  13065
  13066		/* Enable softirqs again, done with phba->hbalock */
  13067		local_bh_enable();
  13068
  13069		while (phba->sli.mbox_active) {
  13070			/* Check active mailbox complete status every 2ms */
  13071			msleep(2);
  13072			if (time_after(jiffies, timeout))
  13073				/* Timeout, let the mailbox flush routine
  13074				 * forcefully release the active mailbox command
  13075				 */
  13076				break;
  13077		}
  13078	} else {
  13079		spin_unlock_irq(&phba->hbalock);
  13080
  13081		/* Enable softirqs again, done with phba->hbalock */
  13082		local_bh_enable();
  13083	}
  13084
  13085	lpfc_sli_mbox_sys_flush(phba);
  13086}
  13087
  13088/**
  13089 * lpfc_sli_eratt_read - read sli-3 error attention events
  13090 * @phba: Pointer to HBA context.
  13091 *
  13092 * This function is called to read the SLI3 device error attention registers
  13093 * for possible error attention events. The caller must hold the hbalock
  13094 * with spin_lock_irq().
  13095 *
  13096 * This function returns 1 when there is Error Attention in the Host Attention
  13097 * Register and returns 0 otherwise.
  13098 **/
  13099static int
  13100lpfc_sli_eratt_read(struct lpfc_hba *phba)
  13101{
  13102	uint32_t ha_copy;
  13103
  13104	/* Read chip Host Attention (HA) register */
  13105	if (lpfc_readl(phba->HAregaddr, &ha_copy))
  13106		goto unplug_err;
  13107
  13108	if (ha_copy & HA_ERATT) {
  13109		/* Read host status register to retrieve error event */
  13110		if (lpfc_sli_read_hs(phba))
  13111			goto unplug_err;
  13112
  13113		/* Check if a deferred error condition is active */
  13114		if ((HS_FFER1 & phba->work_hs) &&
  13115		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
  13116		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
  13117			phba->hba_flag |= DEFER_ERATT;
  13118			/* Clear all interrupt enable conditions */
  13119			writel(0, phba->HCregaddr);
  13120			readl(phba->HCregaddr);
  13121		}
  13122
  13123		/* Set the driver HA work bitmap */
  13124		phba->work_ha |= HA_ERATT;
  13125		/* Indicate polling handles this ERATT */
  13126		phba->hba_flag |= HBA_ERATT_HANDLED;
  13127		return 1;
  13128	}
  13129	return 0;
  13130
  13131unplug_err:
  13132	/* Set the driver HS work bitmap */
  13133	phba->work_hs |= UNPLUG_ERR;
  13134	/* Set the driver HA work bitmap */
  13135	phba->work_ha |= HA_ERATT;
  13136	/* Indicate polling handles this ERATT */
  13137	phba->hba_flag |= HBA_ERATT_HANDLED;
  13138	return 1;
  13139}
  13140
  13141/**
  13142 * lpfc_sli4_eratt_read - read sli-4 error attention events
  13143 * @phba: Pointer to HBA context.
  13144 *
  13145 * This function is called to read the SLI4 device error attention registers
  13146 * for possible error attention events. The caller must hold the hbalock
  13147 * with spin_lock_irq().
  13148 *
  13149 * This function returns 1 when there is Error Attention in the Host Attention
  13150 * Register and returns 0 otherwise.
  13151 **/
  13152static int
  13153lpfc_sli4_eratt_read(struct lpfc_hba *phba)
  13154{
  13155	uint32_t uerr_sta_hi, uerr_sta_lo;
  13156	uint32_t if_type, portsmphr;
  13157	struct lpfc_register portstat_reg;
  13158	u32 logmask;
  13159
  13160	/*
  13161	 * For now, use the SLI4 device internal unrecoverable error
  13162	 * registers for error attention. This can be changed later.
  13163	 */
  13164	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
  13165	switch (if_type) {
  13166	case LPFC_SLI_INTF_IF_TYPE_0:
  13167		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
  13168			&uerr_sta_lo) ||
  13169			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
  13170			&uerr_sta_hi)) {
  13171			phba->work_hs |= UNPLUG_ERR;
  13172			phba->work_ha |= HA_ERATT;
  13173			phba->hba_flag |= HBA_ERATT_HANDLED;
  13174			return 1;
  13175		}
  13176		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
  13177		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
  13178			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13179					"1423 HBA Unrecoverable error: "
  13180					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
  13181					"ue_mask_lo_reg=0x%x, "
  13182					"ue_mask_hi_reg=0x%x\n",
  13183					uerr_sta_lo, uerr_sta_hi,
  13184					phba->sli4_hba.ue_mask_lo,
  13185					phba->sli4_hba.ue_mask_hi);
  13186			phba->work_status[0] = uerr_sta_lo;
  13187			phba->work_status[1] = uerr_sta_hi;
  13188			phba->work_ha |= HA_ERATT;
  13189			phba->hba_flag |= HBA_ERATT_HANDLED;
  13190			return 1;
  13191		}
  13192		break;
  13193	case LPFC_SLI_INTF_IF_TYPE_2:
  13194	case LPFC_SLI_INTF_IF_TYPE_6:
  13195		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
  13196			&portstat_reg.word0) ||
  13197			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
  13198			&portsmphr)){
  13199			phba->work_hs |= UNPLUG_ERR;
  13200			phba->work_ha |= HA_ERATT;
  13201			phba->hba_flag |= HBA_ERATT_HANDLED;
  13202			return 1;
  13203		}
  13204		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
  13205			phba->work_status[0] =
  13206				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
  13207			phba->work_status[1] =
  13208				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
  13209			logmask = LOG_TRACE_EVENT;
  13210			if (phba->work_status[0] ==
  13211				SLIPORT_ERR1_REG_ERR_CODE_2 &&
  13212			    phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
  13213				logmask = LOG_SLI;
  13214			lpfc_printf_log(phba, KERN_ERR, logmask,
  13215					"2885 Port Status Event: "
  13216					"port status reg 0x%x, "
  13217					"port smphr reg 0x%x, "
  13218					"error 1=0x%x, error 2=0x%x\n",
  13219					portstat_reg.word0,
  13220					portsmphr,
  13221					phba->work_status[0],
  13222					phba->work_status[1]);
  13223			phba->work_ha |= HA_ERATT;
  13224			phba->hba_flag |= HBA_ERATT_HANDLED;
  13225			return 1;
  13226		}
  13227		break;
  13228	case LPFC_SLI_INTF_IF_TYPE_1:
  13229	default:
  13230		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13231				"2886 HBA Error Attention on unsupported "
  13232				"if type %d.", if_type);
  13233		return 1;
  13234	}
  13235
  13236	return 0;
  13237}
  13238
  13239/**
  13240 * lpfc_sli_check_eratt - check error attention events
  13241 * @phba: Pointer to HBA context.
  13242 *
  13243 * This function is called from timer soft interrupt context to check HBA's
  13244 * error attention register bit for error attention events.
  13245 *
  13246 * This function returns 1 when there is Error Attention in the Host Attention
  13247 * Register and returns 0 otherwise.
  13248 **/
  13249int
  13250lpfc_sli_check_eratt(struct lpfc_hba *phba)
  13251{
  13252	uint32_t ha_copy;
  13253
  13254	/* If somebody is waiting to handle an eratt, don't process it
  13255	 * here. The brdkill function will do this.
  13256	 */
  13257	if (phba->link_flag & LS_IGNORE_ERATT)
  13258		return 0;
  13259
  13260	/* Check if interrupt handler handles this ERATT */
  13261	spin_lock_irq(&phba->hbalock);
  13262	if (phba->hba_flag & HBA_ERATT_HANDLED) {
  13263		/* Interrupt handler has handled ERATT */
  13264		spin_unlock_irq(&phba->hbalock);
  13265		return 0;
  13266	}
  13267
  13268	/*
  13269	 * If there is deferred error attention, do not check for error
  13270	 * attention
  13271	 */
  13272	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
  13273		spin_unlock_irq(&phba->hbalock);
  13274		return 0;
  13275	}
  13276
  13277	/* If PCI channel is offline, don't process it */
  13278	if (unlikely(pci_channel_offline(phba->pcidev))) {
  13279		spin_unlock_irq(&phba->hbalock);
  13280		return 0;
  13281	}
  13282
  13283	switch (phba->sli_rev) {
  13284	case LPFC_SLI_REV2:
  13285	case LPFC_SLI_REV3:
  13286		/* Read chip Host Attention (HA) register */
  13287		ha_copy = lpfc_sli_eratt_read(phba);
  13288		break;
  13289	case LPFC_SLI_REV4:
  13290		/* Read device Unrecoverable Error (UERR) registers */
  13291		ha_copy = lpfc_sli4_eratt_read(phba);
  13292		break;
  13293	default:
  13294		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13295				"0299 Invalid SLI revision (%d)\n",
  13296				phba->sli_rev);
  13297		ha_copy = 0;
  13298		break;
  13299	}
  13300	spin_unlock_irq(&phba->hbalock);
  13301
  13302	return ha_copy;
  13303}
  13304
  13305/**
  13306 * lpfc_intr_state_check - Check device state for interrupt handling
  13307 * @phba: Pointer to HBA context.
  13308 *
  13309 * This inline routine checks whether a device or its PCI slot is in a state
  13310 * that the interrupt should be handled.
  13311 *
  13312 * This function returns 0 if the device or the PCI slot is in a state that
  13313 * interrupt should be handled, otherwise -EIO.
  13314 */
  13315static inline int
  13316lpfc_intr_state_check(struct lpfc_hba *phba)
  13317{
  13318	/* If the pci channel is offline, ignore all the interrupts */
  13319	if (unlikely(pci_channel_offline(phba->pcidev)))
  13320		return -EIO;
  13321
  13322	/* Update device level interrupt statistics */
  13323	phba->sli.slistat.sli_intr++;
  13324
  13325	/* Ignore all interrupts during initialization. */
  13326	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
  13327		return -EIO;
  13328
  13329	return 0;
  13330}
  13331
  13332/**
  13333 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
  13334 * @irq: Interrupt number.
  13335 * @dev_id: The device context pointer.
  13336 *
  13337 * This function is directly called from the PCI layer as an interrupt
  13338 * service routine when device with SLI-3 interface spec is enabled with
  13339 * MSI-X multi-message interrupt mode and there are slow-path events in
  13340 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
  13341 * interrupt mode, this function is called as part of the device-level
  13342 * interrupt handler. When the PCI slot is in error recovery or the HBA
  13343 * is undergoing initialization, the interrupt handler will not process
  13344 * the interrupt. The link attention and ELS ring attention events are
  13345 * handled by the worker thread. The interrupt handler signals the worker
  13346 * thread and returns for these events. This function is called without
  13347 * any lock held. It gets the hbalock to access and update SLI data
  13348 * structures.
  13349 *
  13350 * This function returns IRQ_HANDLED when interrupt is handled else it
  13351 * returns IRQ_NONE.
  13352 **/
  13353irqreturn_t
  13354lpfc_sli_sp_intr_handler(int irq, void *dev_id)
  13355{
  13356	struct lpfc_hba  *phba;
  13357	uint32_t ha_copy, hc_copy;
  13358	uint32_t work_ha_copy;
  13359	unsigned long status;
  13360	unsigned long iflag;
  13361	uint32_t control;
  13362
  13363	MAILBOX_t *mbox, *pmbox;
  13364	struct lpfc_vport *vport;
  13365	struct lpfc_nodelist *ndlp;
  13366	struct lpfc_dmabuf *mp;
  13367	LPFC_MBOXQ_t *pmb;
  13368	int rc;
  13369
  13370	/*
  13371	 * Get the driver's phba structure from the dev_id and
  13372	 * assume the HBA is not interrupting.
  13373	 */
  13374	phba = (struct lpfc_hba *)dev_id;
  13375
  13376	if (unlikely(!phba))
  13377		return IRQ_NONE;
  13378
  13379	/*
  13380	 * Stuff needs to be attended to when this function is invoked as an
  13381	 * individual interrupt handler in MSI-X multi-message interrupt mode
  13382	 */
  13383	if (phba->intr_type == MSIX) {
  13384		/* Check device state for handling interrupt */
  13385		if (lpfc_intr_state_check(phba))
  13386			return IRQ_NONE;
  13387		/* Need to read HA REG for slow-path events */
  13388		spin_lock_irqsave(&phba->hbalock, iflag);
  13389		if (lpfc_readl(phba->HAregaddr, &ha_copy))
  13390			goto unplug_error;
  13391		/* If somebody is waiting to handle an eratt don't process it
  13392		 * here. The brdkill function will do this.
  13393		 */
  13394		if (phba->link_flag & LS_IGNORE_ERATT)
  13395			ha_copy &= ~HA_ERATT;
  13396		/* Check the need for handling ERATT in interrupt handler */
  13397		if (ha_copy & HA_ERATT) {
  13398			if (phba->hba_flag & HBA_ERATT_HANDLED)
  13399				/* ERATT polling has handled ERATT */
  13400				ha_copy &= ~HA_ERATT;
  13401			else
  13402				/* Indicate interrupt handler handles ERATT */
  13403				phba->hba_flag |= HBA_ERATT_HANDLED;
  13404		}
  13405
  13406		/*
  13407		 * If there is deferred error attention, do not check for any
  13408		 * interrupt.
  13409		 */
  13410		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
  13411			spin_unlock_irqrestore(&phba->hbalock, iflag);
  13412			return IRQ_NONE;
  13413		}
  13414
  13415		/* Clear up only attention source related to slow-path */
  13416		if (lpfc_readl(phba->HCregaddr, &hc_copy))
  13417			goto unplug_error;
  13418
  13419		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
  13420			HC_LAINT_ENA | HC_ERINT_ENA),
  13421			phba->HCregaddr);
  13422		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
  13423			phba->HAregaddr);
  13424		writel(hc_copy, phba->HCregaddr);
  13425		readl(phba->HAregaddr); /* flush */
  13426		spin_unlock_irqrestore(&phba->hbalock, iflag);
  13427	} else
  13428		ha_copy = phba->ha_copy;
  13429
  13430	work_ha_copy = ha_copy & phba->work_ha_mask;
  13431
  13432	if (work_ha_copy) {
  13433		if (work_ha_copy & HA_LATT) {
  13434			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
  13435				/*
  13436				 * Turn off Link Attention interrupts
  13437				 * until CLEAR_LA done
  13438				 */
  13439				spin_lock_irqsave(&phba->hbalock, iflag);
  13440				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
  13441				if (lpfc_readl(phba->HCregaddr, &control))
  13442					goto unplug_error;
  13443				control &= ~HC_LAINT_ENA;
  13444				writel(control, phba->HCregaddr);
  13445				readl(phba->HCregaddr); /* flush */
  13446				spin_unlock_irqrestore(&phba->hbalock, iflag);
  13447			}
  13448			else
  13449				work_ha_copy &= ~HA_LATT;
  13450		}
  13451
  13452		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
  13453			/*
  13454			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
  13455			 * the only slow ring.
  13456			 */
  13457			status = (work_ha_copy &
  13458				(HA_RXMASK  << (4*LPFC_ELS_RING)));
  13459			status >>= (4*LPFC_ELS_RING);
  13460			if (status & HA_RXMASK) {
  13461				spin_lock_irqsave(&phba->hbalock, iflag);
  13462				if (lpfc_readl(phba->HCregaddr, &control))
  13463					goto unplug_error;
  13464
  13465				lpfc_debugfs_slow_ring_trc(phba,
  13466				"ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
  13467				control, status,
  13468				(uint32_t)phba->sli.slistat.sli_intr);
  13469
  13470				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
  13471					lpfc_debugfs_slow_ring_trc(phba,
  13472						"ISR Disable ring:"
  13473						"pwork:x%x hawork:x%x wait:x%x",
  13474						phba->work_ha, work_ha_copy,
  13475						(uint32_t)((unsigned long)
  13476						&phba->work_waitq));
  13477
  13478					control &=
  13479					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
  13480					writel(control, phba->HCregaddr);
  13481					readl(phba->HCregaddr); /* flush */
  13482				}
  13483				else {
  13484					lpfc_debugfs_slow_ring_trc(phba,
  13485						"ISR slow ring:   pwork:"
  13486						"x%x hawork:x%x wait:x%x",
  13487						phba->work_ha, work_ha_copy,
  13488						(uint32_t)((unsigned long)
  13489						&phba->work_waitq));
  13490				}
  13491				spin_unlock_irqrestore(&phba->hbalock, iflag);
  13492			}
  13493		}
  13494		spin_lock_irqsave(&phba->hbalock, iflag);
  13495		if (work_ha_copy & HA_ERATT) {
  13496			if (lpfc_sli_read_hs(phba))
  13497				goto unplug_error;
  13498			/*
  13499			 * Check if a deferred error condition
  13500			 * is active
  13501			 */
  13502			if ((HS_FFER1 & phba->work_hs) &&
  13503				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
  13504				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
  13505				  phba->work_hs)) {
  13506				phba->hba_flag |= DEFER_ERATT;
  13507				/* Clear all interrupt enable conditions */
  13508				writel(0, phba->HCregaddr);
  13509				readl(phba->HCregaddr);
  13510			}
  13511		}
  13512
  13513		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
  13514			pmb = phba->sli.mbox_active;
  13515			pmbox = &pmb->u.mb;
  13516			mbox = phba->mbox;
  13517			vport = pmb->vport;
  13518
  13519			/* First check out the status word */
  13520			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
  13521			if (pmbox->mbxOwner != OWN_HOST) {
  13522				spin_unlock_irqrestore(&phba->hbalock, iflag);
  13523				/*
  13524				 * Stray Mailbox Interrupt, mbxCommand <cmd>
  13525				 * mbxStatus <status>
  13526				 */
  13527				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13528						"(%d):0304 Stray Mailbox "
  13529						"Interrupt mbxCommand x%x "
  13530						"mbxStatus x%x\n",
  13531						(vport ? vport->vpi : 0),
  13532						pmbox->mbxCommand,
  13533						pmbox->mbxStatus);
  13534				/* clear mailbox attention bit */
  13535				work_ha_copy &= ~HA_MBATT;
  13536			} else {
  13537				phba->sli.mbox_active = NULL;
  13538				spin_unlock_irqrestore(&phba->hbalock, iflag);
  13539				phba->last_completion_time = jiffies;
  13540				del_timer(&phba->sli.mbox_tmo);
  13541				if (pmb->mbox_cmpl) {
  13542					lpfc_sli_pcimem_bcopy(mbox, pmbox,
  13543							MAILBOX_CMD_SIZE);
  13544					if (pmb->out_ext_byte_len &&
  13545						pmb->ctx_buf)
  13546						lpfc_sli_pcimem_bcopy(
  13547						phba->mbox_ext,
  13548						pmb->ctx_buf,
  13549						pmb->out_ext_byte_len);
  13550				}
  13551				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
  13552					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
  13553
  13554					lpfc_debugfs_disc_trc(vport,
  13555						LPFC_DISC_TRC_MBOX_VPORT,
  13556						"MBOX dflt rpi: : "
  13557						"status:x%x rpi:x%x",
  13558						(uint32_t)pmbox->mbxStatus,
  13559						pmbox->un.varWords[0], 0);
  13560
  13561					if (!pmbox->mbxStatus) {
  13562						mp = (struct lpfc_dmabuf *)
  13563							(pmb->ctx_buf);
  13564						ndlp = (struct lpfc_nodelist *)
  13565							pmb->ctx_ndlp;
  13566
  13567						/* Reg_LOGIN of dflt RPI was
  13568						 * successful. Now let's get
  13569						 * rid of the RPI using the
  13570						 * same mbox buffer.
  13571						 */
  13572						lpfc_unreg_login(phba,
  13573							vport->vpi,
  13574							pmbox->un.varWords[0],
  13575							pmb);
  13576						pmb->mbox_cmpl =
  13577							lpfc_mbx_cmpl_dflt_rpi;
  13578						pmb->ctx_buf = mp;
  13579						pmb->ctx_ndlp = ndlp;
  13580						pmb->vport = vport;
  13581						rc = lpfc_sli_issue_mbox(phba,
  13582								pmb,
  13583								MBX_NOWAIT);
  13584						if (rc != MBX_BUSY)
  13585							lpfc_printf_log(phba,
  13586							KERN_ERR,
  13587							LOG_TRACE_EVENT,
  13588							"0350 rc should have"
  13589							" been MBX_BUSY\n");
  13590						if (rc != MBX_NOT_FINISHED)
  13591							goto send_current_mbox;
  13592					}
  13593				}
  13594				spin_lock_irqsave(
  13595						&phba->pport->work_port_lock,
  13596						iflag);
  13597				phba->pport->work_port_events &=
  13598					~WORKER_MBOX_TMO;
  13599				spin_unlock_irqrestore(
  13600						&phba->pport->work_port_lock,
  13601						iflag);
  13602
  13603				/* Do NOT queue MBX_HEARTBEAT to the worker
  13604				 * thread for processing.
  13605				 */
  13606				if (pmbox->mbxCommand == MBX_HEARTBEAT) {
  13607					/* Process mbox now */
  13608					phba->sli.mbox_active = NULL;
  13609					phba->sli.sli_flag &=
  13610						~LPFC_SLI_MBOX_ACTIVE;
  13611					if (pmb->mbox_cmpl)
  13612						pmb->mbox_cmpl(phba, pmb);
  13613				} else {
  13614					/* Queue to worker thread to process */
  13615					lpfc_mbox_cmpl_put(phba, pmb);
  13616				}
  13617			}
  13618		} else
  13619			spin_unlock_irqrestore(&phba->hbalock, iflag);
  13620
  13621		if ((work_ha_copy & HA_MBATT) &&
  13622		    (phba->sli.mbox_active == NULL)) {
  13623send_current_mbox:
  13624			/* Process next mailbox command if there is one */
  13625			do {
  13626				rc = lpfc_sli_issue_mbox(phba, NULL,
  13627							 MBX_NOWAIT);
  13628			} while (rc == MBX_NOT_FINISHED);
  13629			if (rc != MBX_SUCCESS)
  13630				lpfc_printf_log(phba, KERN_ERR,
  13631						LOG_TRACE_EVENT,
  13632						"0349 rc should be "
  13633						"MBX_SUCCESS\n");
  13634		}
  13635
  13636		spin_lock_irqsave(&phba->hbalock, iflag);
  13637		phba->work_ha |= work_ha_copy;
  13638		spin_unlock_irqrestore(&phba->hbalock, iflag);
  13639		lpfc_worker_wake_up(phba);
  13640	}
  13641	return IRQ_HANDLED;
  13642unplug_error:
  13643	spin_unlock_irqrestore(&phba->hbalock, iflag);
  13644	return IRQ_HANDLED;
  13645
  13646} /* lpfc_sli_sp_intr_handler */
  13647
  13648/**
  13649 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
  13650 * @irq: Interrupt number.
  13651 * @dev_id: The device context pointer.
  13652 *
  13653 * This function is directly called from the PCI layer as an interrupt
  13654 * service routine when device with SLI-3 interface spec is enabled with
  13655 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
  13656 * ring event in the HBA. However, when the device is enabled with either
  13657 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
  13658 * device-level interrupt handler. When the PCI slot is in error recovery
  13659 * or the HBA is undergoing initialization, the interrupt handler will not
  13660 * process the interrupt. The SCSI FCP fast-path ring events are handled in
  13661 * the interrupt context. This function is called without any lock held.
  13662 * It gets the hbalock to access and update SLI data structures.
  13663 *
  13664 * This function returns IRQ_HANDLED when interrupt is handled else it
  13665 * returns IRQ_NONE.
  13666 **/
  13667irqreturn_t
  13668lpfc_sli_fp_intr_handler(int irq, void *dev_id)
  13669{
  13670	struct lpfc_hba  *phba;
  13671	uint32_t ha_copy;
  13672	unsigned long status;
  13673	unsigned long iflag;
  13674	struct lpfc_sli_ring *pring;
  13675
  13676	/* Get the driver's phba structure from the dev_id and
  13677	 * assume the HBA is not interrupting.
  13678	 */
  13679	phba = (struct lpfc_hba *) dev_id;
  13680
  13681	if (unlikely(!phba))
  13682		return IRQ_NONE;
  13683
  13684	/*
  13685	 * Stuff needs to be attended to when this function is invoked as an
  13686	 * individual interrupt handler in MSI-X multi-message interrupt mode
  13687	 */
  13688	if (phba->intr_type == MSIX) {
  13689		/* Check device state for handling interrupt */
  13690		if (lpfc_intr_state_check(phba))
  13691			return IRQ_NONE;
  13692		/* Need to read HA REG for FCP ring and other ring events */
  13693		if (lpfc_readl(phba->HAregaddr, &ha_copy))
  13694			return IRQ_HANDLED;
  13695		/* Clear up only attention source related to fast-path */
  13696		spin_lock_irqsave(&phba->hbalock, iflag);
  13697		/*
  13698		 * If there is deferred error attention, do not check for
  13699		 * any interrupt.
  13700		 */
  13701		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
  13702			spin_unlock_irqrestore(&phba->hbalock, iflag);
  13703			return IRQ_NONE;
  13704		}
  13705		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
  13706			phba->HAregaddr);
  13707		readl(phba->HAregaddr); /* flush */
  13708		spin_unlock_irqrestore(&phba->hbalock, iflag);
  13709	} else
  13710		ha_copy = phba->ha_copy;
  13711
  13712	/*
  13713	 * Process all events on FCP ring. Take the optimized path for FCP IO.
  13714	 */
  13715	ha_copy &= ~(phba->work_ha_mask);
  13716
  13717	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
  13718	status >>= (4*LPFC_FCP_RING);
  13719	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
  13720	if (status & HA_RXMASK)
  13721		lpfc_sli_handle_fast_ring_event(phba, pring, status);
  13722
  13723	if (phba->cfg_multi_ring_support == 2) {
  13724		/*
  13725		 * Process all events on extra ring. Take the optimized path
  13726		 * for extra ring IO.
  13727		 */
  13728		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
  13729		status >>= (4*LPFC_EXTRA_RING);
  13730		if (status & HA_RXMASK) {
  13731			lpfc_sli_handle_fast_ring_event(phba,
  13732					&phba->sli.sli3_ring[LPFC_EXTRA_RING],
  13733					status);
  13734		}
  13735	}
  13736	return IRQ_HANDLED;
  13737}  /* lpfc_sli_fp_intr_handler */
  13738
  13739/**
  13740 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
  13741 * @irq: Interrupt number.
  13742 * @dev_id: The device context pointer.
  13743 *
  13744 * This function is the HBA device-level interrupt handler to device with
  13745 * SLI-3 interface spec, called from the PCI layer when either MSI or
  13746 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
  13747 * requires driver attention. This function invokes the slow-path interrupt
  13748 * attention handling function and fast-path interrupt attention handling
  13749 * function in turn to process the relevant HBA attention events. This
  13750 * function is called without any lock held. It gets the hbalock to access
  13751 * and update SLI data structures.
  13752 *
  13753 * This function returns IRQ_HANDLED when interrupt is handled, else it
  13754 * returns IRQ_NONE.
  13755 **/
  13756irqreturn_t
  13757lpfc_sli_intr_handler(int irq, void *dev_id)
  13758{
  13759	struct lpfc_hba  *phba;
  13760	irqreturn_t sp_irq_rc, fp_irq_rc;
  13761	unsigned long status1, status2;
  13762	uint32_t hc_copy;
  13763
  13764	/*
  13765	 * Get the driver's phba structure from the dev_id and
  13766	 * assume the HBA is not interrupting.
  13767	 */
  13768	phba = (struct lpfc_hba *) dev_id;
  13769
  13770	if (unlikely(!phba))
  13771		return IRQ_NONE;
  13772
  13773	/* Check device state for handling interrupt */
  13774	if (lpfc_intr_state_check(phba))
  13775		return IRQ_NONE;
  13776
  13777	spin_lock(&phba->hbalock);
  13778	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
  13779		spin_unlock(&phba->hbalock);
  13780		return IRQ_HANDLED;
  13781	}
  13782
  13783	if (unlikely(!phba->ha_copy)) {
  13784		spin_unlock(&phba->hbalock);
  13785		return IRQ_NONE;
  13786	} else if (phba->ha_copy & HA_ERATT) {
  13787		if (phba->hba_flag & HBA_ERATT_HANDLED)
  13788			/* ERATT polling has handled ERATT */
  13789			phba->ha_copy &= ~HA_ERATT;
  13790		else
  13791			/* Indicate interrupt handler handles ERATT */
  13792			phba->hba_flag |= HBA_ERATT_HANDLED;
  13793	}
  13794
  13795	/*
  13796	 * If there is deferred error attention, do not check for any interrupt.
  13797	 */
  13798	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
  13799		spin_unlock(&phba->hbalock);
  13800		return IRQ_NONE;
  13801	}
  13802
  13803	/* Clear attention sources except link and error attentions */
  13804	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
  13805		spin_unlock(&phba->hbalock);
  13806		return IRQ_HANDLED;
  13807	}
  13808	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
  13809		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
  13810		phba->HCregaddr);
  13811	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
  13812	writel(hc_copy, phba->HCregaddr);
  13813	readl(phba->HAregaddr); /* flush */
  13814	spin_unlock(&phba->hbalock);
  13815
  13816	/*
   13817	 * Invoke slow-path host attention interrupt handling as appropriate.
  13818	 */
  13819
  13820	/* status of events with mailbox and link attention */
  13821	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
  13822
  13823	/* status of events with ELS ring */
  13824	status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
  13825	status2 >>= (4*LPFC_ELS_RING);
  13826
  13827	if (status1 || (status2 & HA_RXMASK))
  13828		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
  13829	else
  13830		sp_irq_rc = IRQ_NONE;
  13831
  13832	/*
  13833	 * Invoke fast-path host attention interrupt handling as appropriate.
  13834	 */
  13835
  13836	/* status of events with FCP ring */
  13837	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
  13838	status1 >>= (4*LPFC_FCP_RING);
  13839
  13840	/* status of events with extra ring */
  13841	if (phba->cfg_multi_ring_support == 2) {
  13842		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
  13843		status2 >>= (4*LPFC_EXTRA_RING);
  13844	} else
  13845		status2 = 0;
  13846
  13847	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
  13848		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
  13849	else
  13850		fp_irq_rc = IRQ_NONE;
  13851
  13852	/* Return device-level interrupt handling status */
  13853	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
  13854}  /* lpfc_sli_intr_handler */
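
/*
 * Illustrative sketch (not part of the driver): per the kernel-doc above,
 * this combined handler is wired up by the PCI layer only in MSI or pin-IRQ
 * (INTx) mode. Under that assumption, registration would look roughly like
 * the disabled fragment below; the real setup lives elsewhere in the driver
 * (lpfc_init.c) and the "lpfc" name string is illustrative only.
 */
#if 0
static int example_sli3_request_intx(struct lpfc_hba *phba)
{
	/* dev_id is the phba itself, matching the cast in the handler above */
	return request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			   IRQF_SHARED, "lpfc", phba);
}
#endif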
  13855
  13856/**
  13857 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
  13858 * @phba: pointer to lpfc hba data structure.
  13859 *
  13860 * This routine is invoked by the worker thread to process all the pending
  13861 * SLI4 els abort xri events.
  13862 **/
  13863void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
  13864{
  13865	struct lpfc_cq_event *cq_event;
  13866	unsigned long iflags;
  13867
  13868	/* First, declare the els xri abort event has been handled */
  13869	spin_lock_irqsave(&phba->hbalock, iflags);
  13870	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
  13871	spin_unlock_irqrestore(&phba->hbalock, iflags);
  13872
  13873	/* Now, handle all the els xri abort events */
  13874	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
  13875	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
  13876		/* Get the first event from the head of the event queue */
  13877		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
  13878				 cq_event, struct lpfc_cq_event, list);
  13879		spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
  13880				       iflags);
  13881		/* Notify aborted XRI for ELS work queue */
  13882		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
  13883
  13884		/* Free the event processed back to the free pool */
  13885		lpfc_sli4_cq_event_release(phba, cq_event);
  13886		spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
  13887				  iflags);
  13888	}
  13889	spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
  13890}
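
/*
 * Illustrative sketch (assumption, not driver code): the worker thread is
 * expected to notice ELS_XRI_ABORT_EVENT and call the routine above. The
 * helper name below is hypothetical and locking is elided; the real dispatch
 * is done by the driver's worker thread in lpfc_hbadisc.c.
 */
#if 0
static void example_worker_dispatch(struct lpfc_hba *phba)
{
	if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
		lpfc_sli4_els_xri_abort_event_proc(phba);
}
#endif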
  13891
  13892/**
  13893 * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
  13894 * @phba: Pointer to HBA context object.
  13895 * @irspiocbq: Pointer to work-queue completion queue entry.
  13896 *
   13897 * This routine handles an ELS work-queue completion event and constructs
  13898 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
  13899 * discovery engine to handle.
  13900 *
  13901 * Return: Pointer to the receive IOCBQ, NULL otherwise.
  13902 **/
  13903static struct lpfc_iocbq *
  13904lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
  13905				  struct lpfc_iocbq *irspiocbq)
  13906{
  13907	struct lpfc_sli_ring *pring;
  13908	struct lpfc_iocbq *cmdiocbq;
  13909	struct lpfc_wcqe_complete *wcqe;
  13910	unsigned long iflags;
  13911
  13912	pring = lpfc_phba_elsring(phba);
  13913	if (unlikely(!pring))
  13914		return NULL;
  13915
  13916	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
  13917	spin_lock_irqsave(&pring->ring_lock, iflags);
  13918	pring->stats.iocb_event++;
  13919	/* Look up the ELS command IOCB and create pseudo response IOCB */
  13920	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
  13921				bf_get(lpfc_wcqe_c_request_tag, wcqe));
  13922	if (unlikely(!cmdiocbq)) {
  13923		spin_unlock_irqrestore(&pring->ring_lock, iflags);
  13924		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  13925				"0386 ELS complete with no corresponding "
  13926				"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
  13927				wcqe->word0, wcqe->total_data_placed,
  13928				wcqe->parameter, wcqe->word3);
  13929		lpfc_sli_release_iocbq(phba, irspiocbq);
  13930		return NULL;
  13931	}
  13932
  13933	memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
  13934	memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));
  13935
  13936	/* Put the iocb back on the txcmplq */
  13937	lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
  13938	spin_unlock_irqrestore(&pring->ring_lock, iflags);
  13939
  13940	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
  13941		spin_lock_irqsave(&phba->hbalock, iflags);
  13942		irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
  13943		spin_unlock_irqrestore(&phba->hbalock, iflags);
  13944	}
  13945
  13946	return irspiocbq;
  13947}
  13948
  13949inline struct lpfc_cq_event *
  13950lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
  13951{
  13952	struct lpfc_cq_event *cq_event;
  13953
  13954	/* Allocate a new internal CQ_EVENT entry */
  13955	cq_event = lpfc_sli4_cq_event_alloc(phba);
  13956	if (!cq_event) {
  13957		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13958				"0602 Failed to alloc CQ_EVENT entry\n");
  13959		return NULL;
  13960	}
  13961
  13962	/* Move the CQE into the event */
  13963	memcpy(&cq_event->cqe, entry, size);
  13964	return cq_event;
  13965}
  13966
  13967/**
  13968 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
  13969 * @phba: Pointer to HBA context object.
  13970 * @mcqe: Pointer to mailbox completion queue entry.
  13971 *
   13972 * This routine processes a mailbox completion queue entry carrying an
   13973 * asynchronous event.
  13974 *
  13975 * Return: true if work posted to worker thread, otherwise false.
  13976 **/
  13977static bool
  13978lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
  13979{
  13980	struct lpfc_cq_event *cq_event;
  13981	unsigned long iflags;
  13982
  13983	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  13984			"0392 Async Event: word0:x%x, word1:x%x, "
  13985			"word2:x%x, word3:x%x\n", mcqe->word0,
  13986			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
  13987
  13988	cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
  13989	if (!cq_event)
  13990		return false;
  13991
  13992	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
  13993	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
  13994	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
  13995
  13996	/* Set the async event flag */
  13997	spin_lock_irqsave(&phba->hbalock, iflags);
  13998	phba->hba_flag |= ASYNC_EVENT;
  13999	spin_unlock_irqrestore(&phba->hbalock, iflags);
  14000
  14001	return true;
  14002}
  14003
  14004/**
  14005 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
  14006 * @phba: Pointer to HBA context object.
  14007 * @mcqe: Pointer to mailbox completion queue entry.
  14008 *
   14009 * This routine processes a mailbox completion queue entry with a mailbox
   14010 * completion event.
  14011 *
  14012 * Return: true if work posted to worker thread, otherwise false.
  14013 **/
  14014static bool
  14015lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
  14016{
  14017	uint32_t mcqe_status;
  14018	MAILBOX_t *mbox, *pmbox;
  14019	struct lpfc_mqe *mqe;
  14020	struct lpfc_vport *vport;
  14021	struct lpfc_nodelist *ndlp;
  14022	struct lpfc_dmabuf *mp;
  14023	unsigned long iflags;
  14024	LPFC_MBOXQ_t *pmb;
  14025	bool workposted = false;
  14026	int rc;
  14027
   14028	/* If not a mailbox-complete MCQE, bail out via the consumed-bit check */
  14029	if (!bf_get(lpfc_trailer_completed, mcqe))
  14030		goto out_no_mqe_complete;
  14031
  14032	/* Get the reference to the active mbox command */
  14033	spin_lock_irqsave(&phba->hbalock, iflags);
  14034	pmb = phba->sli.mbox_active;
  14035	if (unlikely(!pmb)) {
  14036		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14037				"1832 No pending MBOX command to handle\n");
  14038		spin_unlock_irqrestore(&phba->hbalock, iflags);
  14039		goto out_no_mqe_complete;
  14040	}
  14041	spin_unlock_irqrestore(&phba->hbalock, iflags);
  14042	mqe = &pmb->u.mqe;
  14043	pmbox = (MAILBOX_t *)&pmb->u.mqe;
  14044	mbox = phba->mbox;
  14045	vport = pmb->vport;
  14046
  14047	/* Reset heartbeat timer */
  14048	phba->last_completion_time = jiffies;
  14049	del_timer(&phba->sli.mbox_tmo);
  14050
  14051	/* Move mbox data to caller's mailbox region, do endian swapping */
  14052	if (pmb->mbox_cmpl && mbox)
  14053		lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
  14054
  14055	/*
  14056	 * For mcqe errors, conditionally move a modified error code to
  14057	 * the mbox so that the error will not be missed.
  14058	 */
  14059	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
  14060	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
  14061		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
  14062			bf_set(lpfc_mqe_status, mqe,
  14063			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
  14064	}
  14065	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
  14066		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
  14067		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
  14068				      "MBOX dflt rpi: status:x%x rpi:x%x",
  14069				      mcqe_status,
  14070				      pmbox->un.varWords[0], 0);
  14071		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
  14072			mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
  14073			ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
  14074
  14075			/* Reg_LOGIN of dflt RPI was successful. Mark the
  14076			 * node as having an UNREG_LOGIN in progress to stop
  14077			 * an unsolicited PLOGI from the same NPortId from
  14078			 * starting another mailbox transaction.
  14079			 */
  14080			spin_lock_irqsave(&ndlp->lock, iflags);
  14081			ndlp->nlp_flag |= NLP_UNREG_INP;
  14082			spin_unlock_irqrestore(&ndlp->lock, iflags);
  14083			lpfc_unreg_login(phba, vport->vpi,
  14084					 pmbox->un.varWords[0], pmb);
  14085			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
  14086			pmb->ctx_buf = mp;
  14087
  14088			/* No reference taken here.  This is a default
  14089			 * RPI reg/immediate unreg cycle. The reference was
  14090			 * taken in the reg rpi path and is released when
  14091			 * this mailbox completes.
  14092			 */
  14093			pmb->ctx_ndlp = ndlp;
  14094			pmb->vport = vport;
  14095			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
  14096			if (rc != MBX_BUSY)
  14097				lpfc_printf_log(phba, KERN_ERR,
  14098						LOG_TRACE_EVENT,
  14099						"0385 rc should "
  14100						"have been MBX_BUSY\n");
  14101			if (rc != MBX_NOT_FINISHED)
  14102				goto send_current_mbox;
  14103		}
  14104	}
  14105	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
  14106	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
  14107	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
  14108
  14109	/* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
  14110	if (pmbox->mbxCommand == MBX_HEARTBEAT) {
  14111		spin_lock_irqsave(&phba->hbalock, iflags);
  14112		/* Release the mailbox command posting token */
  14113		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
  14114		phba->sli.mbox_active = NULL;
  14115		if (bf_get(lpfc_trailer_consumed, mcqe))
  14116			lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
  14117		spin_unlock_irqrestore(&phba->hbalock, iflags);
  14118
  14119		/* Post the next mbox command, if there is one */
  14120		lpfc_sli4_post_async_mbox(phba);
  14121
  14122		/* Process cmpl now */
  14123		if (pmb->mbox_cmpl)
  14124			pmb->mbox_cmpl(phba, pmb);
  14125		return false;
  14126	}
  14127
  14128	/* There is mailbox completion work to queue to the worker thread */
  14129	spin_lock_irqsave(&phba->hbalock, iflags);
  14130	__lpfc_mbox_cmpl_put(phba, pmb);
  14131	phba->work_ha |= HA_MBATT;
  14132	spin_unlock_irqrestore(&phba->hbalock, iflags);
  14133	workposted = true;
  14134
  14135send_current_mbox:
  14136	spin_lock_irqsave(&phba->hbalock, iflags);
  14137	/* Release the mailbox command posting token */
  14138	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
   14139	/* Clearing the active mailbox pointer must be in sync with the flag clear */
  14140	phba->sli.mbox_active = NULL;
  14141	if (bf_get(lpfc_trailer_consumed, mcqe))
  14142		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
  14143	spin_unlock_irqrestore(&phba->hbalock, iflags);
  14144	/* Wake up worker thread to post the next pending mailbox command */
  14145	lpfc_worker_wake_up(phba);
  14146	return workposted;
  14147
  14148out_no_mqe_complete:
  14149	spin_lock_irqsave(&phba->hbalock, iflags);
  14150	if (bf_get(lpfc_trailer_consumed, mcqe))
  14151		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
  14152	spin_unlock_irqrestore(&phba->hbalock, iflags);
  14153	return false;
  14154}
  14155
  14156/**
  14157 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
  14158 * @phba: Pointer to HBA context object.
  14159 * @cq: Pointer to associated CQ
  14160 * @cqe: Pointer to mailbox completion queue entry.
  14161 *
   14162 * This routine processes a mailbox completion queue entry; it invokes the
   14163 * proper mailbox completion handling or asynchronous event handling routine
  14164 * according to the MCQE's async bit.
  14165 *
  14166 * Return: true if work posted to worker thread, otherwise false.
  14167 **/
  14168static bool
  14169lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  14170			 struct lpfc_cqe *cqe)
  14171{
  14172	struct lpfc_mcqe mcqe;
  14173	bool workposted;
  14174
  14175	cq->CQ_mbox++;
  14176
  14177	/* Copy the mailbox MCQE and convert endian order as needed */
  14178	lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
  14179
  14180	/* Invoke the proper event handling routine */
  14181	if (!bf_get(lpfc_trailer_async, &mcqe))
  14182		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
  14183	else
  14184		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
  14185	return workposted;
  14186}
  14187
  14188/**
  14189 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
  14190 * @phba: Pointer to HBA context object.
  14191 * @cq: Pointer to associated CQ
  14192 * @wcqe: Pointer to work-queue completion queue entry.
  14193 *
  14194 * This routine handles an ELS work-queue completion event.
  14195 *
  14196 * Return: true if work posted to worker thread, otherwise false.
  14197 **/
  14198static bool
  14199lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  14200			     struct lpfc_wcqe_complete *wcqe)
  14201{
  14202	struct lpfc_iocbq *irspiocbq;
  14203	unsigned long iflags;
  14204	struct lpfc_sli_ring *pring = cq->pring;
  14205	int txq_cnt = 0;
  14206	int txcmplq_cnt = 0;
  14207
  14208	/* Check for response status */
  14209	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
  14210		/* Log the error status */
  14211		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  14212				"0357 ELS CQE error: status=x%x: "
  14213				"CQE: %08x %08x %08x %08x\n",
  14214				bf_get(lpfc_wcqe_c_status, wcqe),
  14215				wcqe->word0, wcqe->total_data_placed,
  14216				wcqe->parameter, wcqe->word3);
  14217	}
  14218
  14219	/* Get an irspiocbq for later ELS response processing use */
  14220	irspiocbq = lpfc_sli_get_iocbq(phba);
  14221	if (!irspiocbq) {
  14222		if (!list_empty(&pring->txq))
  14223			txq_cnt++;
  14224		if (!list_empty(&pring->txcmplq))
  14225			txcmplq_cnt++;
  14226		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14227			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
  14228			"els_txcmplq_cnt=%d\n",
  14229			txq_cnt, phba->iocb_cnt,
  14230			txcmplq_cnt);
  14231		return false;
  14232	}
  14233
  14234	/* Save off the slow-path queue event for work thread to process */
  14235	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
  14236	spin_lock_irqsave(&phba->hbalock, iflags);
  14237	list_add_tail(&irspiocbq->cq_event.list,
  14238		      &phba->sli4_hba.sp_queue_event);
  14239	phba->hba_flag |= HBA_SP_QUEUE_EVT;
  14240	spin_unlock_irqrestore(&phba->hbalock, iflags);
  14241
  14242	return true;
  14243}
  14244
  14245/**
  14246 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
  14247 * @phba: Pointer to HBA context object.
  14248 * @wcqe: Pointer to work-queue completion queue entry.
  14249 *
   14250 * This routine handles a slow-path WQ entry consumed event by invoking the
  14251 * proper WQ release routine to the slow-path WQ.
  14252 **/
  14253static void
  14254lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
  14255			     struct lpfc_wcqe_release *wcqe)
  14256{
  14257	/* sanity check on queue memory */
  14258	if (unlikely(!phba->sli4_hba.els_wq))
  14259		return;
  14260	/* Check for the slow-path ELS work queue */
  14261	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
  14262		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
  14263				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
  14264	else
  14265		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  14266				"2579 Slow-path wqe consume event carries "
  14267				"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
  14268				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
  14269				phba->sli4_hba.els_wq->queue_id);
  14270}
  14271
  14272/**
  14273 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
  14274 * @phba: Pointer to HBA context object.
  14275 * @cq: Pointer to a WQ completion queue.
  14276 * @wcqe: Pointer to work-queue completion queue entry.
  14277 *
  14278 * This routine handles an XRI abort event.
  14279 *
  14280 * Return: true if work posted to worker thread, otherwise false.
  14281 **/
  14282static bool
  14283lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
  14284				   struct lpfc_queue *cq,
  14285				   struct sli4_wcqe_xri_aborted *wcqe)
  14286{
  14287	bool workposted = false;
  14288	struct lpfc_cq_event *cq_event;
  14289	unsigned long iflags;
  14290
  14291	switch (cq->subtype) {
  14292	case LPFC_IO:
  14293		lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
  14294		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
  14295			/* Notify aborted XRI for NVME work queue */
  14296			if (phba->nvmet_support)
  14297				lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
  14298		}
  14299		workposted = false;
  14300		break;
  14301	case LPFC_NVME_LS: /* NVME LS uses ELS resources */
  14302	case LPFC_ELS:
  14303		cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
  14304		if (!cq_event) {
  14305			workposted = false;
  14306			break;
  14307		}
  14308		cq_event->hdwq = cq->hdwq;
  14309		spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
  14310				  iflags);
  14311		list_add_tail(&cq_event->list,
  14312			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
  14313		/* Set the els xri abort event flag */
  14314		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
  14315		spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
  14316				       iflags);
  14317		workposted = true;
  14318		break;
  14319	default:
  14320		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14321				"0603 Invalid CQ subtype %d: "
  14322				"%08x %08x %08x %08x\n",
  14323				cq->subtype, wcqe->word0, wcqe->parameter,
  14324				wcqe->word2, wcqe->word3);
  14325		workposted = false;
  14326		break;
  14327	}
  14328	return workposted;
  14329}
  14330
  14331#define FC_RCTL_MDS_DIAGS	0xF4
  14332
  14333/**
  14334 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
  14335 * @phba: Pointer to HBA context object.
  14336 * @rcqe: Pointer to receive-queue completion queue entry.
  14337 *
   14338 * This routine processes a receive-queue completion queue entry.
  14339 *
  14340 * Return: true if work posted to worker thread, otherwise false.
  14341 **/
  14342static bool
  14343lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
  14344{
  14345	bool workposted = false;
  14346	struct fc_frame_header *fc_hdr;
  14347	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
  14348	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
  14349	struct lpfc_nvmet_tgtport *tgtp;
  14350	struct hbq_dmabuf *dma_buf;
  14351	uint32_t status, rq_id;
  14352	unsigned long iflags;
  14353
  14354	/* sanity check on queue memory */
  14355	if (unlikely(!hrq) || unlikely(!drq))
  14356		return workposted;
  14357
  14358	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
  14359		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
  14360	else
  14361		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
  14362	if (rq_id != hrq->queue_id)
  14363		goto out;
  14364
  14365	status = bf_get(lpfc_rcqe_status, rcqe);
  14366	switch (status) {
  14367	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
  14368		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14369				"2537 Receive Frame Truncated!!\n");
  14370		fallthrough;
  14371	case FC_STATUS_RQ_SUCCESS:
  14372		spin_lock_irqsave(&phba->hbalock, iflags);
  14373		lpfc_sli4_rq_release(hrq, drq);
  14374		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
  14375		if (!dma_buf) {
  14376			hrq->RQ_no_buf_found++;
  14377			spin_unlock_irqrestore(&phba->hbalock, iflags);
  14378			goto out;
  14379		}
  14380		hrq->RQ_rcv_buf++;
  14381		hrq->RQ_buf_posted--;
  14382		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
  14383
  14384		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
  14385
  14386		if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
  14387		    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
  14388			spin_unlock_irqrestore(&phba->hbalock, iflags);
  14389			/* Handle MDS Loopback frames */
  14390			if  (!(phba->pport->load_flag & FC_UNLOADING))
  14391				lpfc_sli4_handle_mds_loopback(phba->pport,
  14392							      dma_buf);
  14393			else
  14394				lpfc_in_buf_free(phba, &dma_buf->dbuf);
  14395			break;
  14396		}
  14397
  14398		/* save off the frame for the work thread to process */
  14399		list_add_tail(&dma_buf->cq_event.list,
  14400			      &phba->sli4_hba.sp_queue_event);
  14401		/* Frame received */
  14402		phba->hba_flag |= HBA_SP_QUEUE_EVT;
  14403		spin_unlock_irqrestore(&phba->hbalock, iflags);
  14404		workposted = true;
  14405		break;
  14406	case FC_STATUS_INSUFF_BUF_FRM_DISC:
  14407		if (phba->nvmet_support) {
  14408			tgtp = phba->targetport->private;
  14409			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14410					"6402 RQE Error x%x, posted %d err_cnt "
  14411					"%d: %x %x %x\n",
  14412					status, hrq->RQ_buf_posted,
  14413					hrq->RQ_no_posted_buf,
  14414					atomic_read(&tgtp->rcv_fcp_cmd_in),
  14415					atomic_read(&tgtp->rcv_fcp_cmd_out),
  14416					atomic_read(&tgtp->xmt_fcp_release));
  14417		}
  14418		fallthrough;
  14419
  14420	case FC_STATUS_INSUFF_BUF_NEED_BUF:
  14421		hrq->RQ_no_posted_buf++;
  14422		/* Post more buffers if possible */
  14423		spin_lock_irqsave(&phba->hbalock, iflags);
  14424		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
  14425		spin_unlock_irqrestore(&phba->hbalock, iflags);
  14426		workposted = true;
  14427		break;
  14428	}
  14429out:
  14430	return workposted;
  14431}
  14432
  14433/**
  14434 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
  14435 * @phba: Pointer to HBA context object.
  14436 * @cq: Pointer to the completion queue.
  14437 * @cqe: Pointer to a completion queue entry.
  14438 *
   14439 * This routine processes a slow-path work-queue or receive-queue completion
   14440 * queue entry.
  14441 *
  14442 * Return: true if work posted to worker thread, otherwise false.
  14443 **/
  14444static bool
  14445lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  14446			 struct lpfc_cqe *cqe)
  14447{
  14448	struct lpfc_cqe cqevt;
  14449	bool workposted = false;
  14450
  14451	/* Copy the work queue CQE and convert endian order if needed */
  14452	lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
  14453
  14454	/* Check and process for different type of WCQE and dispatch */
  14455	switch (bf_get(lpfc_cqe_code, &cqevt)) {
  14456	case CQE_CODE_COMPL_WQE:
  14457		/* Process the WQ/RQ complete event */
  14458		phba->last_completion_time = jiffies;
  14459		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
  14460				(struct lpfc_wcqe_complete *)&cqevt);
  14461		break;
  14462	case CQE_CODE_RELEASE_WQE:
  14463		/* Process the WQ release event */
  14464		lpfc_sli4_sp_handle_rel_wcqe(phba,
  14465				(struct lpfc_wcqe_release *)&cqevt);
  14466		break;
  14467	case CQE_CODE_XRI_ABORTED:
  14468		/* Process the WQ XRI abort event */
  14469		phba->last_completion_time = jiffies;
  14470		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
  14471				(struct sli4_wcqe_xri_aborted *)&cqevt);
  14472		break;
  14473	case CQE_CODE_RECEIVE:
  14474	case CQE_CODE_RECEIVE_V1:
  14475		/* Process the RQ event */
  14476		phba->last_completion_time = jiffies;
  14477		workposted = lpfc_sli4_sp_handle_rcqe(phba,
  14478				(struct lpfc_rcqe *)&cqevt);
  14479		break;
  14480	default:
  14481		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14482				"0388 Not a valid WCQE code: x%x\n",
  14483				bf_get(lpfc_cqe_code, &cqevt));
  14484		break;
  14485	}
  14486	return workposted;
  14487}
  14488
  14489/**
  14490 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
  14491 * @phba: Pointer to HBA context object.
  14492 * @eqe: Pointer to fast-path event queue entry.
  14493 * @speq: Pointer to slow-path event queue.
  14494 *
   14495 * This routine processes an event queue entry from the slow-path event queue.
   14496 * It checks the MajorCode and MinorCode to determine whether this is a
   14497 * completion event on a completion queue; if not, an error is logged and the
   14498 * routine just returns. Otherwise, it finds the corresponding completion
   14499 * queue, processes all the entries on that completion queue, rearms the
   14500 * completion queue, and then returns.
  14501 *
  14502 **/
  14503static void
  14504lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
  14505	struct lpfc_queue *speq)
  14506{
  14507	struct lpfc_queue *cq = NULL, *childq;
  14508	uint16_t cqid;
  14509	int ret = 0;
  14510
  14511	/* Get the reference to the corresponding CQ */
  14512	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
  14513
  14514	list_for_each_entry(childq, &speq->child_list, list) {
  14515		if (childq->queue_id == cqid) {
  14516			cq = childq;
  14517			break;
  14518		}
  14519	}
  14520	if (unlikely(!cq)) {
  14521		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
  14522			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14523					"0365 Slow-path CQ identifier "
  14524					"(%d) does not exist\n", cqid);
  14525		return;
  14526	}
  14527
  14528	/* Save EQ associated with this CQ */
  14529	cq->assoc_qp = speq;
  14530
  14531	if (is_kdump_kernel())
  14532		ret = queue_work(phba->wq, &cq->spwork);
  14533	else
  14534		ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
  14535
  14536	if (!ret)
  14537		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14538				"0390 Cannot schedule queue work "
  14539				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
  14540				cqid, cq->queue_id, raw_smp_processor_id());
  14541}
  14542
  14543/**
  14544 * __lpfc_sli4_process_cq - Process elements of a CQ
  14545 * @phba: Pointer to HBA context object.
  14546 * @cq: Pointer to CQ to be processed
  14547 * @handler: Routine to process each cqe
  14548 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
  14549 * @poll_mode: Polling mode we were called from
  14550 *
  14551 * This routine processes completion queue entries in a CQ. While a valid
  14552 * queue element is found, the handler is called. During processing checks
  14553 * are made for periodic doorbell writes to let the hardware know of
  14554 * element consumption.
  14555 *
  14556 * If the max limit on cqes to process is hit, or there are no more valid
  14557 * entries, the loop stops. If we processed a sufficient number of elements,
  14558 * meaning there is sufficient load, rather than rearming and generating
  14559 * another interrupt, a cq rescheduling delay will be set. A delay of 0
  14560 * indicates no rescheduling.
  14561 *
   14562 * Returns true if work was posted to the worker thread, false otherwise.
  14563 **/
  14564static bool
  14565__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
  14566	bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
  14567			struct lpfc_cqe *), unsigned long *delay,
  14568			enum lpfc_poll_mode poll_mode)
  14569{
  14570	struct lpfc_cqe *cqe;
  14571	bool workposted = false;
  14572	int count = 0, consumed = 0;
  14573	bool arm = true;
  14574
  14575	/* default - no reschedule */
  14576	*delay = 0;
  14577
  14578	if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
  14579		goto rearm_and_exit;
  14580
  14581	/* Process all the entries to the CQ */
  14582	cq->q_flag = 0;
  14583	cqe = lpfc_sli4_cq_get(cq);
  14584	while (cqe) {
  14585		workposted |= handler(phba, cq, cqe);
  14586		__lpfc_sli4_consume_cqe(phba, cq, cqe);
  14587
  14588		consumed++;
  14589		if (!(++count % cq->max_proc_limit))
  14590			break;
  14591
  14592		if (!(count % cq->notify_interval)) {
  14593			phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
  14594						LPFC_QUEUE_NOARM);
  14595			consumed = 0;
  14596			cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
  14597		}
  14598
  14599		if (count == LPFC_NVMET_CQ_NOTIFY)
  14600			cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
  14601
  14602		cqe = lpfc_sli4_cq_get(cq);
  14603	}
  14604	if (count >= phba->cfg_cq_poll_threshold) {
  14605		*delay = 1;
  14606		arm = false;
  14607	}
  14608
  14609	/* Note: complete the irq_poll softirq before rearming CQ */
  14610	if (poll_mode == LPFC_IRQ_POLL)
  14611		irq_poll_complete(&cq->iop);
  14612
  14613	/* Track the max number of CQEs processed in 1 EQ */
  14614	if (count > cq->CQ_max_cqe)
  14615		cq->CQ_max_cqe = count;
  14616
  14617	cq->assoc_qp->EQ_cqe_cnt += count;
  14618
  14619	/* Catch the no cq entry condition */
  14620	if (unlikely(count == 0))
  14621		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  14622				"0369 No entry from completion queue "
  14623				"qid=%d\n", cq->queue_id);
  14624
  14625	xchg(&cq->queue_claimed, 0);
  14626
  14627rearm_and_exit:
  14628	phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
  14629			arm ?  LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
  14630
  14631	return workposted;
  14632}
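
/*
 * Illustrative sketch (assumption, not driver code): the handler argument
 * passed to __lpfc_sli4_process_cq() has the shape below and returns true
 * only when it queued work for the worker thread. The names
 * example_handle_cqe and example_drive_cq are hypothetical.
 */
#if 0
static bool example_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			       struct lpfc_cqe *cqe)
{
	/* Decode the CQE, complete the I/O or defer it to the worker
	 * thread, then report whether the worker needs a wakeup.
	 */
	return false;
}

static void example_drive_cq(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	unsigned long delay;

	/* A caller consumes the two outputs: wake the worker if true was
	 * returned, and reschedule the CQ handler if delay is non-zero.
	 */
	if (__lpfc_sli4_process_cq(phba, cq, example_handle_cqe, &delay,
				   LPFC_QUEUE_WORK))
		lpfc_worker_wake_up(phba);
}
#endif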
  14633
  14634/**
  14635 * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
  14636 * @cq: pointer to CQ to process
  14637 *
  14638 * This routine calls the cq processing routine with a handler specific
  14639 * to the type of queue bound to it.
  14640 *
  14641 * The CQ routine returns two values: the first is the calling status,
   14642 * which indicates whether work was queued to the background discovery
   14643 * thread. If true, the routine should wake up the discovery thread;
  14644 * the second is the delay parameter. If non-zero, rather than rearming
  14645 * the CQ and yet another interrupt, the CQ handler should be queued so
  14646 * that it is processed in a subsequent polling action. The value of
  14647 * the delay indicates when to reschedule it.
  14648 **/
  14649static void
  14650__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
  14651{
  14652	struct lpfc_hba *phba = cq->phba;
  14653	unsigned long delay;
  14654	bool workposted = false;
  14655	int ret = 0;
  14656
  14657	/* Process and rearm the CQ */
  14658	switch (cq->type) {
  14659	case LPFC_MCQ:
  14660		workposted |= __lpfc_sli4_process_cq(phba, cq,
  14661						lpfc_sli4_sp_handle_mcqe,
  14662						&delay, LPFC_QUEUE_WORK);
  14663		break;
  14664	case LPFC_WCQ:
  14665		if (cq->subtype == LPFC_IO)
  14666			workposted |= __lpfc_sli4_process_cq(phba, cq,
  14667						lpfc_sli4_fp_handle_cqe,
  14668						&delay, LPFC_QUEUE_WORK);
  14669		else
  14670			workposted |= __lpfc_sli4_process_cq(phba, cq,
  14671						lpfc_sli4_sp_handle_cqe,
  14672						&delay, LPFC_QUEUE_WORK);
  14673		break;
  14674	default:
  14675		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14676				"0370 Invalid completion queue type (%d)\n",
  14677				cq->type);
  14678		return;
  14679	}
  14680
  14681	if (delay) {
  14682		if (is_kdump_kernel())
  14683			ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
  14684						delay);
  14685		else
  14686			ret = queue_delayed_work_on(cq->chann, phba->wq,
  14687						&cq->sched_spwork, delay);
  14688		if (!ret)
  14689			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14690				"0394 Cannot schedule queue work "
  14691				"for cqid=%d on CPU %d\n",
  14692				cq->queue_id, cq->chann);
  14693	}
  14694
   14695	/* wake up worker thread if there is work to be done */
  14696	if (workposted)
  14697		lpfc_worker_wake_up(phba);
  14698}
  14699
  14700/**
  14701 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
  14702 *   interrupt
  14703 * @work: pointer to work element
  14704 *
  14705 * translates from the work handler and calls the slow-path handler.
  14706 **/
  14707static void
  14708lpfc_sli4_sp_process_cq(struct work_struct *work)
  14709{
  14710	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
  14711
  14712	__lpfc_sli4_sp_process_cq(cq);
  14713}
  14714
  14715/**
  14716 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
  14717 * @work: pointer to work element
  14718 *
  14719 * translates from the work handler and calls the slow-path handler.
  14720 **/
  14721static void
  14722lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
  14723{
  14724	struct lpfc_queue *cq = container_of(to_delayed_work(work),
  14725					struct lpfc_queue, sched_spwork);
  14726
  14727	__lpfc_sli4_sp_process_cq(cq);
  14728}
  14729
  14730/**
  14731 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
  14732 * @phba: Pointer to HBA context object.
  14733 * @cq: Pointer to associated CQ
  14734 * @wcqe: Pointer to work-queue completion queue entry.
  14735 *
   14736 * This routine processes a fast-path work queue completion entry from the
   14737 * fast-path event queue for FCP command response completion.
  14738 **/
  14739static void
  14740lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  14741			     struct lpfc_wcqe_complete *wcqe)
  14742{
  14743	struct lpfc_sli_ring *pring = cq->pring;
  14744	struct lpfc_iocbq *cmdiocbq;
  14745	unsigned long iflags;
  14746
  14747	/* Check for response status */
  14748	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
  14749		/* If resource errors reported from HBA, reduce queue
  14750		 * depth of the SCSI device.
  14751		 */
  14752		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
  14753		     IOSTAT_LOCAL_REJECT)) &&
  14754		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
  14755		     IOERR_NO_RESOURCES))
  14756			phba->lpfc_rampdown_queue_depth(phba);
  14757
  14758		/* Log the cmpl status */
  14759		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  14760				"0373 FCP CQE cmpl: status=x%x: "
  14761				"CQE: %08x %08x %08x %08x\n",
  14762				bf_get(lpfc_wcqe_c_status, wcqe),
  14763				wcqe->word0, wcqe->total_data_placed,
  14764				wcqe->parameter, wcqe->word3);
  14765	}
  14766
  14767	/* Look up the FCP command IOCB and create pseudo response IOCB */
  14768	spin_lock_irqsave(&pring->ring_lock, iflags);
  14769	pring->stats.iocb_event++;
  14770	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
  14771				bf_get(lpfc_wcqe_c_request_tag, wcqe));
  14772	spin_unlock_irqrestore(&pring->ring_lock, iflags);
  14773	if (unlikely(!cmdiocbq)) {
  14774		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  14775				"0374 FCP complete with no corresponding "
  14776				"cmdiocb: iotag (%d)\n",
  14777				bf_get(lpfc_wcqe_c_request_tag, wcqe));
  14778		return;
  14779	}
  14780#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  14781	cmdiocbq->isr_timestamp = cq->isr_timestamp;
  14782#endif
  14783	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
  14784		spin_lock_irqsave(&phba->hbalock, iflags);
  14785		cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
  14786		spin_unlock_irqrestore(&phba->hbalock, iflags);
  14787	}
  14788
  14789	if (cmdiocbq->cmd_cmpl) {
  14790		/* For FCP the flag is cleared in cmd_cmpl */
  14791		if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
  14792		    cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
  14793			spin_lock_irqsave(&phba->hbalock, iflags);
  14794			cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
  14795			spin_unlock_irqrestore(&phba->hbalock, iflags);
  14796		}
  14797
  14798		/* Pass the cmd_iocb and the wcqe to the upper layer */
  14799		memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
  14800		       sizeof(struct lpfc_wcqe_complete));
  14801		cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
  14802	} else {
  14803		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  14804				"0375 FCP cmdiocb not callback function "
  14805				"iotag: (%d)\n",
  14806				bf_get(lpfc_wcqe_c_request_tag, wcqe));
  14807	}
  14808}
  14809
  14810/**
  14811 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
  14812 * @phba: Pointer to HBA context object.
  14813 * @cq: Pointer to completion queue.
  14814 * @wcqe: Pointer to work-queue completion queue entry.
  14815 *
   14816 * This routine handles a fast-path WQ entry consumed event by invoking the
   14817 * proper WQ release routine for the matching fast-path WQ.
  14818 **/
  14819static void
  14820lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  14821			     struct lpfc_wcqe_release *wcqe)
  14822{
  14823	struct lpfc_queue *childwq;
  14824	bool wqid_matched = false;
  14825	uint16_t hba_wqid;
  14826
  14827	/* Check for fast-path FCP work queue release */
  14828	hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
  14829	list_for_each_entry(childwq, &cq->child_list, list) {
  14830		if (childwq->queue_id == hba_wqid) {
  14831			lpfc_sli4_wq_release(childwq,
  14832					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
  14833			if (childwq->q_flag & HBA_NVMET_WQFULL)
  14834				lpfc_nvmet_wqfull_process(phba, childwq);
  14835			wqid_matched = true;
  14836			break;
  14837		}
  14838	}
  14839	/* Report warning log message if no match found */
  14840	if (wqid_matched != true)
  14841		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  14842				"2580 Fast-path wqe consume event carries "
  14843				"miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
  14844}
  14845
  14846/**
  14847 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
  14848 * @phba: Pointer to HBA context object.
  14849 * @cq: Pointer to completion queue.
  14850 * @rcqe: Pointer to receive-queue completion queue entry.
  14851 *
   14852 * This routine processes a receive-queue completion queue entry.
  14853 *
  14854 * Return: true if work posted to worker thread, otherwise false.
  14855 **/
  14856static bool
  14857lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  14858			    struct lpfc_rcqe *rcqe)
  14859{
  14860	bool workposted = false;
  14861	struct lpfc_queue *hrq;
  14862	struct lpfc_queue *drq;
  14863	struct rqb_dmabuf *dma_buf;
  14864	struct fc_frame_header *fc_hdr;
  14865	struct lpfc_nvmet_tgtport *tgtp;
  14866	uint32_t status, rq_id;
  14867	unsigned long iflags;
  14868	uint32_t fctl, idx;
  14869
  14870	if ((phba->nvmet_support == 0) ||
  14871	    (phba->sli4_hba.nvmet_cqset == NULL))
  14872		return workposted;
  14873
  14874	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
  14875	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
  14876	drq = phba->sli4_hba.nvmet_mrq_data[idx];
  14877
  14878	/* sanity check on queue memory */
  14879	if (unlikely(!hrq) || unlikely(!drq))
  14880		return workposted;
  14881
  14882	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
  14883		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
  14884	else
  14885		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
  14886
  14887	if ((phba->nvmet_support == 0) ||
  14888	    (rq_id != hrq->queue_id))
  14889		return workposted;
  14890
  14891	status = bf_get(lpfc_rcqe_status, rcqe);
  14892	switch (status) {
  14893	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
  14894		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14895				"6126 Receive Frame Truncated!!\n");
  14896		fallthrough;
  14897	case FC_STATUS_RQ_SUCCESS:
  14898		spin_lock_irqsave(&phba->hbalock, iflags);
  14899		lpfc_sli4_rq_release(hrq, drq);
  14900		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
  14901		if (!dma_buf) {
  14902			hrq->RQ_no_buf_found++;
  14903			spin_unlock_irqrestore(&phba->hbalock, iflags);
  14904			goto out;
  14905		}
  14906		spin_unlock_irqrestore(&phba->hbalock, iflags);
  14907		hrq->RQ_rcv_buf++;
  14908		hrq->RQ_buf_posted--;
  14909		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
  14910
  14911		/* Just some basic sanity checks on FCP Command frame */
  14912		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
  14913			fc_hdr->fh_f_ctl[1] << 8 |
  14914			fc_hdr->fh_f_ctl[2]);
  14915		if (((fctl &
  14916		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
  14917		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
  14918		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
  14919			goto drop;
  14920
  14921		if (fc_hdr->fh_type == FC_TYPE_FCP) {
  14922			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
  14923			lpfc_nvmet_unsol_fcp_event(
  14924				phba, idx, dma_buf, cq->isr_timestamp,
  14925				cq->q_flag & HBA_NVMET_CQ_NOTIFY);
  14926			return false;
  14927		}
  14928drop:
  14929		lpfc_rq_buf_free(phba, &dma_buf->hbuf);
  14930		break;
  14931	case FC_STATUS_INSUFF_BUF_FRM_DISC:
  14932		if (phba->nvmet_support) {
  14933			tgtp = phba->targetport->private;
  14934			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14935					"6401 RQE Error x%x, posted %d err_cnt "
  14936					"%d: %x %x %x\n",
  14937					status, hrq->RQ_buf_posted,
  14938					hrq->RQ_no_posted_buf,
  14939					atomic_read(&tgtp->rcv_fcp_cmd_in),
  14940					atomic_read(&tgtp->rcv_fcp_cmd_out),
  14941					atomic_read(&tgtp->xmt_fcp_release));
  14942		}
  14943		fallthrough;
  14944
  14945	case FC_STATUS_INSUFF_BUF_NEED_BUF:
  14946		hrq->RQ_no_posted_buf++;
  14947		/* Post more buffers if possible */
  14948		break;
  14949	}
  14950out:
  14951	return workposted;
  14952}
  14953
  14954/**
  14955 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
  14956 * @phba: adapter with cq
  14957 * @cq: Pointer to the completion queue.
  14958 * @cqe: Pointer to fast-path completion queue entry.
  14959 *
   14960 * This routine processes a fast-path work queue completion entry from the
   14961 * fast-path event queue for FCP command response completion.
  14962 *
  14963 * Return: true if work posted to worker thread, otherwise false.
  14964 **/
  14965static bool
  14966lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  14967			 struct lpfc_cqe *cqe)
  14968{
  14969	struct lpfc_wcqe_release wcqe;
  14970	bool workposted = false;
  14971
  14972	/* Copy the work queue CQE and convert endian order if needed */
  14973	lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
  14974
  14975	/* Check and process for different type of WCQE and dispatch */
  14976	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
  14977	case CQE_CODE_COMPL_WQE:
  14978	case CQE_CODE_NVME_ERSP:
  14979		cq->CQ_wq++;
  14980		/* Process the WQ complete event */
  14981		phba->last_completion_time = jiffies;
  14982		if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
  14983			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
  14984				(struct lpfc_wcqe_complete *)&wcqe);
  14985		break;
  14986	case CQE_CODE_RELEASE_WQE:
  14987		cq->CQ_release_wqe++;
  14988		/* Process the WQ release event */
  14989		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
  14990				(struct lpfc_wcqe_release *)&wcqe);
  14991		break;
  14992	case CQE_CODE_XRI_ABORTED:
  14993		cq->CQ_xri_aborted++;
  14994		/* Process the WQ XRI abort event */
  14995		phba->last_completion_time = jiffies;
  14996		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
  14997				(struct sli4_wcqe_xri_aborted *)&wcqe);
  14998		break;
  14999	case CQE_CODE_RECEIVE_V1:
  15000	case CQE_CODE_RECEIVE:
  15001		phba->last_completion_time = jiffies;
  15002		if (cq->subtype == LPFC_NVMET) {
  15003			workposted = lpfc_sli4_nvmet_handle_rcqe(
  15004				phba, cq, (struct lpfc_rcqe *)&wcqe);
  15005		}
  15006		break;
  15007	default:
  15008		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15009				"0144 Not a valid CQE code: x%x\n",
  15010				bf_get(lpfc_wcqe_c_code, &wcqe));
  15011		break;
  15012	}
  15013	return workposted;
  15014}
  15015
  15016/**
  15017 * lpfc_sli4_sched_cq_work - Schedules cq work
  15018 * @phba: Pointer to HBA context object.
  15019 * @cq: Pointer to CQ
  15020 * @cqid: CQ ID
  15021 *
  15022 * This routine checks the poll mode of the CQ corresponding to
   15023 * cq->chann, then either schedules softirq (irq_poll) processing or queues
   15024 * work to complete the cq work.
  15025 *
  15026 * queue_work path is taken if in NVMET mode, or if poll_mode is in
  15027 * LPFC_QUEUE_WORK mode.  Otherwise, softirq path is taken.
  15028 *
  15029 **/
  15030static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
  15031				    struct lpfc_queue *cq, uint16_t cqid)
  15032{
  15033	int ret = 0;
  15034
  15035	switch (cq->poll_mode) {
  15036	case LPFC_IRQ_POLL:
   15037		/* CGN mgmt is mutually exclusive with softirq processing */
  15038		if (phba->cmf_active_mode == LPFC_CFG_OFF) {
  15039			irq_poll_sched(&cq->iop);
  15040			break;
  15041		}
  15042		fallthrough;
  15043	case LPFC_QUEUE_WORK:
  15044	default:
  15045		if (is_kdump_kernel())
  15046			ret = queue_work(phba->wq, &cq->irqwork);
  15047		else
  15048			ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
  15049		if (!ret)
  15050			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15051					"0383 Cannot schedule queue work "
  15052					"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
  15053					cqid, cq->queue_id,
  15054					raw_smp_processor_id());
  15055	}
  15056}
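
/*
 * Illustrative sketch (assumption, not driver code): the LPFC_IRQ_POLL
 * branch above relies on cq->iop having been primed elsewhere with
 * irq_poll_init() and a poll callback that drives
 * __lpfc_sli4_hba_process_cq() in LPFC_IRQ_POLL mode. The callback name,
 * the weight of 64, and the budget handling are illustrative only.
 */
#if 0
static int example_cq_poll(struct irq_poll *iop, int budget)
{
	struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);

	/* Budget accounting is simplified in this sketch. */
	__lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
	return 1;
}

static void example_cq_poll_setup(struct lpfc_queue *cq)
{
	irq_poll_init(&cq->iop, 64, example_cq_poll);
}
#endif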
  15057
  15058/**
  15059 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
  15060 * @phba: Pointer to HBA context object.
  15061 * @eq: Pointer to the queue structure.
  15062 * @eqe: Pointer to fast-path event queue entry.
  15063 *
   15064 * This routine processes an event queue entry from the fast-path event queue.
   15065 * It checks the MajorCode and MinorCode to determine whether this is a
   15066 * completion event on a completion queue; if not, an error is logged and the
   15067 * routine just returns. Otherwise, it finds the corresponding completion
   15068 * queue, processes all the entries on the completion queue, rearms the
   15069 * completion queue, and then returns.
  15070 **/
  15071static void
  15072lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
  15073			 struct lpfc_eqe *eqe)
  15074{
  15075	struct lpfc_queue *cq = NULL;
  15076	uint32_t qidx = eq->hdwq;
  15077	uint16_t cqid, id;
  15078
  15079	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
  15080		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15081				"0366 Not a valid completion "
  15082				"event: majorcode=x%x, minorcode=x%x\n",
  15083				bf_get_le32(lpfc_eqe_major_code, eqe),
  15084				bf_get_le32(lpfc_eqe_minor_code, eqe));
  15085		return;
  15086	}
  15087
  15088	/* Get the reference to the corresponding CQ */
  15089	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
  15090
  15091	/* Use the fast lookup method first */
  15092	if (cqid <= phba->sli4_hba.cq_max) {
  15093		cq = phba->sli4_hba.cq_lookup[cqid];
  15094		if (cq)
  15095			goto  work_cq;
  15096	}
  15097
  15098	/* Next check for NVMET completion */
  15099	if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
  15100		id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
  15101		if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
  15102			/* Process NVMET unsol rcv */
  15103			cq = phba->sli4_hba.nvmet_cqset[cqid - id];
  15104			goto  process_cq;
  15105		}
  15106	}
  15107
  15108	if (phba->sli4_hba.nvmels_cq &&
  15109	    (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
  15110		/* Process NVME unsol rcv */
  15111		cq = phba->sli4_hba.nvmels_cq;
  15112	}
  15113
  15114	/* Otherwise this is a Slow path event */
  15115	if (cq == NULL) {
  15116		lpfc_sli4_sp_handle_eqe(phba, eqe,
  15117					phba->sli4_hba.hdwq[qidx].hba_eq);
  15118		return;
  15119	}
  15120
  15121process_cq:
  15122	if (unlikely(cqid != cq->queue_id)) {
  15123		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15124				"0368 Miss-matched fast-path completion "
  15125				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
  15126				cqid, cq->queue_id);
  15127		return;
  15128	}
  15129
  15130work_cq:
  15131#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
  15132	if (phba->ktime_on)
  15133		cq->isr_timestamp = ktime_get_ns();
  15134	else
  15135		cq->isr_timestamp = 0;
  15136#endif
  15137	lpfc_sli4_sched_cq_work(phba, cq, cqid);
  15138}
  15139
  15140/**
  15141 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
  15142 * @cq: Pointer to CQ to be processed
  15143 * @poll_mode: Enum lpfc_poll_state to determine poll mode
  15144 *
  15145 * This routine calls the cq processing routine with the handler for
  15146 * fast path CQEs.
  15147 *
  15148 * The CQ routine returns two values: the first is the calling status,
   15149 * which indicates whether work was queued to the background discovery
   15150 * thread. If true, the routine should wake up the discovery thread;
  15151 * the second is the delay parameter. If non-zero, rather than rearming
  15152 * the CQ and yet another interrupt, the CQ handler should be queued so
  15153 * that it is processed in a subsequent polling action. The value of
  15154 * the delay indicates when to reschedule it.
  15155 **/
  15156static void
  15157__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
  15158			   enum lpfc_poll_mode poll_mode)
  15159{
  15160	struct lpfc_hba *phba = cq->phba;
  15161	unsigned long delay;
  15162	bool workposted = false;
  15163	int ret = 0;
  15164
  15165	/* process and rearm the CQ */
  15166	workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
  15167					     &delay, poll_mode);
  15168
  15169	if (delay) {
  15170		if (is_kdump_kernel())
  15171			ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
  15172						delay);
  15173		else
  15174			ret = queue_delayed_work_on(cq->chann, phba->wq,
  15175						&cq->sched_irqwork, delay);
  15176		if (!ret)
  15177			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15178					"0367 Cannot schedule queue work "
  15179					"for cqid=%d on CPU %d\n",
  15180					cq->queue_id, cq->chann);
  15181	}
  15182
   15183	/* wake up worker thread if there is work to be done */
  15184	if (workposted)
  15185		lpfc_worker_wake_up(phba);
  15186}
  15187
  15188/**
  15189 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
  15190 *   interrupt
  15191 * @work: pointer to work element
  15192 *
  15193 * translates from the work handler and calls the fast-path handler.
  15194 **/
  15195static void
  15196lpfc_sli4_hba_process_cq(struct work_struct *work)
  15197{
  15198	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
  15199
  15200	__lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
  15201}
  15202
  15203/**
  15204 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
  15205 * @work: pointer to work element
  15206 *
  15207 * translates from the work handler and calls the fast-path handler.
  15208 **/
  15209static void
  15210lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
  15211{
  15212	struct lpfc_queue *cq = container_of(to_delayed_work(work),
  15213					struct lpfc_queue, sched_irqwork);
  15214
  15215	__lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
  15216}
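
/*
 * Illustrative sketch (assumption): the container_of() lookups in the work
 * handlers above imply that each CQ's work items were initialized at
 * queue-creation time roughly as below; the helper name is hypothetical and
 * the real initialization is not shown in this section of the file.
 */
#if 0
static void example_cq_work_setup(struct lpfc_queue *cq)
{
	INIT_WORK(&cq->irqwork, lpfc_sli4_hba_process_cq);
	INIT_WORK(&cq->spwork, lpfc_sli4_sp_process_cq);
	INIT_DELAYED_WORK(&cq->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
	INIT_DELAYED_WORK(&cq->sched_spwork, lpfc_sli4_dly_sp_process_cq);
}
#endif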
  15217
  15218/**
  15219 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
  15220 * @irq: Interrupt number.
  15221 * @dev_id: The device context pointer.
  15222 *
  15223 * This function is directly called from the PCI layer as an interrupt
   15224 * service routine when a device with the SLI-4 interface spec is enabled with
   15225 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
  15226 * ring event in the HBA. However, when the device is enabled with either
  15227 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
  15228 * device-level interrupt handler. When the PCI slot is in error recovery
  15229 * or the HBA is undergoing initialization, the interrupt handler will not
   15230 * process the interrupt. The SCSI FCP fast-path ring events are handled in
   15231 * the interrupt context. This function is called without any lock held.
   15232 * It gets the hbalock to access and update SLI data structures. Note that
   15233 * the FCP EQs map one-to-one to the FCP CQs, so the FCP EQ index is
   15234 * equal to the FCP CQ index.
  15235 *
  15236 * The link attention and ELS ring attention events are handled
  15237 * by the worker thread. The interrupt handler signals the worker thread
  15238 * and returns for these events. This function is called without any lock
  15239 * held. It gets the hbalock to access and update SLI data structures.
  15240 *
   15241 * This function returns IRQ_HANDLED when the interrupt is handled, else it
  15242 * returns IRQ_NONE.
  15243 **/
  15244irqreturn_t
  15245lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
  15246{
  15247	struct lpfc_hba *phba;
  15248	struct lpfc_hba_eq_hdl *hba_eq_hdl;
  15249	struct lpfc_queue *fpeq;
  15250	unsigned long iflag;
  15251	int ecount = 0;
  15252	int hba_eqidx;
  15253	struct lpfc_eq_intr_info *eqi;
  15254
  15255	/* Get the driver's phba structure from the dev_id */
  15256	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
  15257	phba = hba_eq_hdl->phba;
  15258	hba_eqidx = hba_eq_hdl->idx;
  15259
  15260	if (unlikely(!phba))
  15261		return IRQ_NONE;
  15262	if (unlikely(!phba->sli4_hba.hdwq))
  15263		return IRQ_NONE;
  15264
  15265	/* Get to the EQ struct associated with this vector */
  15266	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
  15267	if (unlikely(!fpeq))
  15268		return IRQ_NONE;
  15269
  15270	/* Check device state for handling interrupt */
  15271	if (unlikely(lpfc_intr_state_check(phba))) {
  15272		/* Check again for link_state with lock held */
  15273		spin_lock_irqsave(&phba->hbalock, iflag);
  15274		if (phba->link_state < LPFC_LINK_DOWN)
  15275			/* Flush, clear interrupt, and rearm the EQ */
  15276			lpfc_sli4_eqcq_flush(phba, fpeq);
  15277		spin_unlock_irqrestore(&phba->hbalock, iflag);
  15278		return IRQ_NONE;
  15279	}
  15280
  15281	eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
  15282	eqi->icnt++;
  15283
  15284	fpeq->last_cpu = raw_smp_processor_id();
  15285
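        	/* Interrupt coalescing heuristic: if this CPU has counted more
        	 * than LPFC_EQD_ISR_TRIGGER EQ interrupts, this EQ is flagged
        	 * for delay checking, auto_imax is enabled, the port exposes
        	 * the EQ delay register and the EQ is not already at the
        	 * maximum delay, raise its delay to the maximum to throttle
        	 * the interrupt rate.
        	 */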
  15286	if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
  15287	    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
  15288	    phba->cfg_auto_imax &&
  15289	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
  15290	    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
  15291		lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
  15292
  15293	/* process and rearm the EQ */
  15294	ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
  15295
  15296	if (unlikely(ecount == 0)) {
  15297		fpeq->EQ_no_entry++;
  15298		if (phba->intr_type == MSIX)
   15299			/* MSI-X: an interrupt with no EQ entries is unexpected */
  15300			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  15301					"0358 MSI-X interrupt with no EQE\n");
  15302		else
   15303			/* MSI/INTx: treat as a shared interrupt not meant for this EQ */
  15304			return IRQ_NONE;
  15305	}
  15306
  15307	return IRQ_HANDLED;
  15308} /* lpfc_sli4_hba_intr_handler */
  15309
  15310/**
  15311 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
  15312 * @irq: Interrupt number.
  15313 * @dev_id: The device context pointer.
  15314 *
   15315 * This function is the device-level interrupt handler for a device with SLI-4
  15316 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
  15317 * interrupt mode is enabled and there is an event in the HBA which requires
  15318 * driver attention. This function invokes the slow-path interrupt attention
  15319 * handling function and fast-path interrupt attention handling function in
  15320 * turn to process the relevant HBA attention events. This function is called
  15321 * without any lock held. It gets the hbalock to access and update SLI data
  15322 * structures.
  15323 *
  15324 * This function returns IRQ_HANDLED when interrupt is handled, else it
  15325 * returns IRQ_NONE.
  15326 **/
  15327irqreturn_t
  15328lpfc_sli4_intr_handler(int irq, void *dev_id)
  15329{
  15330	struct lpfc_hba  *phba;
  15331	irqreturn_t hba_irq_rc;
  15332	bool hba_handled = false;
  15333	int qidx;
  15334
  15335	/* Get the driver's phba structure from the dev_id */
  15336	phba = (struct lpfc_hba *)dev_id;
  15337
  15338	if (unlikely(!phba))
  15339		return IRQ_NONE;
  15340
  15341	/*
  15342	 * Invoke fast-path host attention interrupt handling as appropriate.
  15343	 */
  15344	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
  15345		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
  15346					&phba->sli4_hba.hba_eq_hdl[qidx]);
  15347		if (hba_irq_rc == IRQ_HANDLED)
  15348			hba_handled |= true;
  15349	}
  15350
  15351	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
  15352} /* lpfc_sli4_intr_handler */
  15353
  15354void lpfc_sli4_poll_hbtimer(struct timer_list *t)
  15355{
  15356	struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
  15357	struct lpfc_queue *eq;
  15358	int i = 0;
  15359
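        	/* Walk every EQ currently in software polling mode and process
        	 * its entries without re-arming; keep this heartbeat timer
        	 * running for as long as the poll list is non-empty.
        	 */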
  15360	rcu_read_lock();
  15361
  15362	list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
  15363		i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
  15364	if (!list_empty(&phba->poll_list))
  15365		mod_timer(&phba->cpuhp_poll_timer,
  15366			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
  15367
  15368	rcu_read_unlock();
  15369}
  15370
  15371inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
  15372{
  15373	struct lpfc_hba *phba = eq->phba;
  15374	int i = 0;
  15375
  15376	/*
   15377	 * Unlocking an irq is one of the entry points to check
   15378	 * for re-schedule, but we are fine on the io submission
   15379	 * path as the midlayer does a get_cpu to pin us. Flush
   15380	 * out the invalidation queue so we can see the updated
   15381	 * value of the flag.
  15382	 */
  15383	smp_rmb();
  15384
  15385	if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
  15386		/* We will not likely get the completion for the caller
   15387		 * during this iteration, but that is fine.
   15388		 * Future io's coming on this eq should be able to
   15389		 * pick it up.  As for single io's, they will be
   15390		 * handled through a sched from the polling timer
   15391		 * function, which is currently triggered every 1 msec.
  15392		 */
  15393		i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
  15394
  15395	return i;
  15396}
  15397
  15398static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
  15399{
  15400	struct lpfc_hba *phba = eq->phba;
  15401
  15402	/* kickstart slowpath processing if needed */
  15403	if (list_empty(&phba->poll_list))
  15404		mod_timer(&phba->cpuhp_poll_timer,
  15405			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
  15406
  15407	list_add_rcu(&eq->_poll_list, &phba->poll_list);
  15408	synchronize_rcu();
  15409}
  15410
  15411static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
  15412{
  15413	struct lpfc_hba *phba = eq->phba;
  15414
  15415	/* Disable slowpath processing for this eq.  Kick start the eq
   15416	 * by RE-ARMING it ASAP
  15417	 */
  15418	list_del_rcu(&eq->_poll_list);
  15419	synchronize_rcu();
  15420
  15421	if (list_empty(&phba->poll_list))
  15422		del_timer_sync(&phba->cpuhp_poll_timer);
  15423}
  15424
  15425void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
  15426{
  15427	struct lpfc_queue *eq, *next;
  15428
  15429	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
  15430		list_del(&eq->_poll_list);
  15431
  15432	INIT_LIST_HEAD(&phba->poll_list);
  15433	synchronize_rcu();
  15434}
  15435
  15436static inline void
  15437__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
  15438{
  15439	if (mode == eq->mode)
  15440		return;
  15441	/*
   15442	 * Currently this function is only called during a hotplug
   15443	 * event, when the cpu on which this function is executing
   15444	 * is going offline.  By now the hotplug code has instructed
   15445	 * the scheduler to remove this cpu from the cpu active mask,
   15446	 * so we don't need to worry about being put aside by the
   15447	 * scheduler for a high priority process.  Yes, interrupts
   15448	 * could still come, but they are known to retire ASAP.
  15449	 */
  15450
  15451	/* Disable polling in the fastpath */
  15452	WRITE_ONCE(eq->mode, mode);
  15453	/* flush out the store buffer */
  15454	smp_wmb();
  15455
  15456	/*
  15457	 * Add this eq to the polling list and start polling. For
   15458	 * a grace period both the interrupt handler and the poller
   15459	 * will try to process the eq _but_ that's fine.  We have a
   15460	 * synchronization mechanism in place (queue_claimed) to
   15461	 * deal with it.  This is just a draining phase for the
   15462	 * interrupt handler (not eq's) as we have guaranteed through
   15463	 * the barrier that all the CPUs have seen the new CQ_POLLED
   15464	 * state, which will effectively disable the REARMING of
   15465	 * the EQ.  The whole idea is that eq's die off eventually as
   15466	 * we are not rearming EQ's anymore.
  15467	 */
  15468	mode ? lpfc_sli4_add_to_poll_list(eq) :
  15469	       lpfc_sli4_remove_from_poll_list(eq);
  15470}
  15471
  15472void lpfc_sli4_start_polling(struct lpfc_queue *eq)
  15473{
  15474	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
  15475}
  15476
  15477void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
  15478{
  15479	struct lpfc_hba *phba = eq->phba;
  15480
  15481	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
  15482
   15483	/* Kick start the pending io's in h/w.
   15484	 * Once we switch back to interrupt processing on an eq,
   15485	 * the io completion path will only arm the eq when it
   15486	 * receives a completion.  But since the eq is in a
   15487	 * disarmed state it never receives one.  This
   15488	 * creates a deadlock scenario.
  15489	 */
  15490	phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
  15491}
  15492
  15493/**
  15494 * lpfc_sli4_queue_free - free a queue structure and associated memory
  15495 * @queue: The queue structure to free.
  15496 *
  15497 * This function frees a queue structure and the DMAable memory used for
  15498 * the host resident queue. This function must be called after destroying the
  15499 * queue on the HBA.
  15500 **/
  15501void
  15502lpfc_sli4_queue_free(struct lpfc_queue *queue)
  15503{
  15504	struct lpfc_dmabuf *dmabuf;
  15505
  15506	if (!queue)
  15507		return;
  15508
  15509	if (!list_empty(&queue->wq_list))
  15510		list_del(&queue->wq_list);
  15511
  15512	while (!list_empty(&queue->page_list)) {
  15513		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
  15514				 list);
  15515		dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
  15516				  dmabuf->virt, dmabuf->phys);
  15517		kfree(dmabuf);
  15518	}
  15519	if (queue->rqbp) {
  15520		lpfc_free_rq_buffer(queue->phba, queue);
  15521		kfree(queue->rqbp);
  15522	}
  15523
  15524	if (!list_empty(&queue->cpu_list))
  15525		list_del(&queue->cpu_list);
  15526
  15527	kfree(queue);
  15528	return;
  15529}
  15530
  15531/**
  15532 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
  15533 * @phba: The HBA that this queue is being created on.
  15534 * @page_size: The size of a queue page
  15535 * @entry_size: The size of each queue entry for this queue.
  15536 * @entry_count: The number of entries that this queue will handle.
  15537 * @cpu: The cpu that will primarily utilize this queue.
  15538 *
  15539 * This function allocates a queue structure and the DMAable memory used for
  15540 * the host resident queue. This function must be called before creating the
  15541 * queue on the HBA.
  15542 **/
  15543struct lpfc_queue *
  15544lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
  15545		      uint32_t entry_size, uint32_t entry_count, int cpu)
  15546{
  15547	struct lpfc_queue *queue;
  15548	struct lpfc_dmabuf *dmabuf;
  15549	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
  15550	uint16_t x, pgcnt;
  15551
  15552	if (!phba->sli4_hba.pc_sli4_params.supported)
  15553		hw_page_size = page_size;
  15554
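        	/* Number of hardware pages needed to hold entry_count entries
        	 * of entry_size bytes, rounding the total up to a whole page.
        	 */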
  15555	pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
  15556
   15557	/* If needed, adjust page count to match the max the adapter supports */
  15558	if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
  15559		pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
  15560
  15561	queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
  15562			     GFP_KERNEL, cpu_to_node(cpu));
  15563	if (!queue)
  15564		return NULL;
  15565
  15566	INIT_LIST_HEAD(&queue->list);
  15567	INIT_LIST_HEAD(&queue->_poll_list);
  15568	INIT_LIST_HEAD(&queue->wq_list);
  15569	INIT_LIST_HEAD(&queue->wqfull_list);
  15570	INIT_LIST_HEAD(&queue->page_list);
  15571	INIT_LIST_HEAD(&queue->child_list);
  15572	INIT_LIST_HEAD(&queue->cpu_list);
  15573
  15574	/* Set queue parameters now.  If the system cannot provide memory
  15575	 * resources, the free routine needs to know what was allocated.
  15576	 */
  15577	queue->page_count = pgcnt;
  15578	queue->q_pgs = (void **)&queue[1];
  15579	queue->entry_cnt_per_pg = hw_page_size / entry_size;
  15580	queue->entry_size = entry_size;
  15581	queue->entry_count = entry_count;
  15582	queue->page_size = hw_page_size;
  15583	queue->phba = phba;
  15584
  15585	for (x = 0; x < queue->page_count; x++) {
  15586		dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
  15587				      dev_to_node(&phba->pcidev->dev));
  15588		if (!dmabuf)
  15589			goto out_fail;
  15590		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
  15591						  hw_page_size, &dmabuf->phys,
  15592						  GFP_KERNEL);
  15593		if (!dmabuf->virt) {
  15594			kfree(dmabuf);
  15595			goto out_fail;
  15596		}
  15597		dmabuf->buffer_tag = x;
  15598		list_add_tail(&dmabuf->list, &queue->page_list);
   15599		/* use lpfc_sli4_qe to index a particular entry in this page */
  15600		queue->q_pgs[x] = dmabuf->virt;
  15601	}
  15602	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
  15603	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
  15604	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
  15605	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
  15606
  15607	/* notify_interval will be set during q creation */
  15608
  15609	return queue;
  15610out_fail:
  15611	lpfc_sli4_queue_free(queue);
  15612	return NULL;
  15613}
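
        /*
         * Typical pairing (illustrative; the call sites live in lpfc_init.c):
         * lpfc_sli4_queue_create() allocates with lpfc_sli4_queue_alloc(),
         * the queue is then created on the port with the matching *_create()
         * routine (lpfc_eq_create(), lpfc_cq_create(), ...), and released
         * with lpfc_sli4_queue_free() on failure or teardown.
         */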
  15614
  15615/**
  15616 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
  15617 * @phba: HBA structure that indicates port to create a queue on.
  15618 * @pci_barset: PCI BAR set flag.
  15619 *
  15620 * This function shall perform iomap of the specified PCI BAR address to host
  15621 * memory address if not already done so and return it. The returned host
  15622 * memory address can be NULL.
  15623 */
  15624static void __iomem *
  15625lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
  15626{
  15627	if (!phba->pcidev)
  15628		return NULL;
  15629
  15630	switch (pci_barset) {
  15631	case WQ_PCI_BAR_0_AND_1:
  15632		return phba->pci_bar0_memmap_p;
  15633	case WQ_PCI_BAR_2_AND_3:
  15634		return phba->pci_bar2_memmap_p;
  15635	case WQ_PCI_BAR_4_AND_5:
  15636		return phba->pci_bar4_memmap_p;
  15637	default:
  15638		break;
  15639	}
  15640	return NULL;
  15641}
  15642
  15643/**
  15644 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
  15645 * @phba: HBA structure that EQs are on.
  15646 * @startq: The starting EQ index to modify
  15647 * @numq: The number of EQs (consecutive indexes) to modify
  15648 * @usdelay: amount of delay
  15649 *
  15650 * This function revises the EQ delay on 1 or more EQs. The EQ delay
  15651 * is set either by writing to a register (if supported by the SLI Port)
  15652 * or by mailbox command. The mailbox command allows several EQs to be
  15653 * updated at once.
  15654 *
  15655 * The @phba struct is used to send a mailbox command to HBA. The @startq
  15656 * is used to get the starting EQ index to change. The @numq value is
  15657 * used to specify how many consecutive EQ indexes, starting at EQ index,
   15658 * are to be changed. This function is synchronous and will wait for any
   15659 * mailbox command to finish before returning.
   15660 *
   15661 * This function does not return a value. A failure to allocate the
   15662 * mailbox buffer or to issue the mailbox command is logged, in which
   15663 * case the delay multiplier may not have been updated on all of the
   15664 * EQs.
  15665 **/
  15666void
  15667lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
  15668			 uint32_t numq, uint32_t usdelay)
  15669{
  15670	struct lpfc_mbx_modify_eq_delay *eq_delay;
  15671	LPFC_MBOXQ_t *mbox;
  15672	struct lpfc_queue *eq;
  15673	int cnt = 0, rc, length;
  15674	uint32_t shdr_status, shdr_add_status;
  15675	uint32_t dmult;
  15676	int qidx;
  15677	union lpfc_sli4_cfg_shdr *shdr;
  15678
  15679	if (startq >= phba->cfg_irq_chann)
  15680		return;
  15681
  15682	if (usdelay > 0xFFFF) {
  15683		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
  15684				"6429 usdelay %d too large. Scaled down to "
  15685				"0xFFFF.\n", usdelay);
  15686		usdelay = 0xFFFF;
  15687	}
  15688
  15689	/* set values by EQ_DELAY register if supported */
  15690	if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
  15691		for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
  15692			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
  15693			if (!eq)
  15694				continue;
  15695
  15696			lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
  15697
  15698			if (++cnt >= numq)
  15699				break;
  15700		}
  15701		return;
  15702	}
  15703
  15704	/* Otherwise, set values by mailbox cmd */
  15705
  15706	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  15707	if (!mbox) {
  15708		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15709				"6428 Failed allocating mailbox cmd buffer."
  15710				" EQ delay was not set.\n");
  15711		return;
  15712	}
  15713	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
  15714		  sizeof(struct lpfc_sli4_cfg_mhdr));
  15715	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  15716			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
  15717			 length, LPFC_SLI4_MBX_EMBED);
  15718	eq_delay = &mbox->u.mqe.un.eq_delay;
  15719
   15720	/* Calculate the delay multiplier from the maximum interrupts per second */
  15721	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
  15722	if (dmult)
  15723		dmult--;
  15724	if (dmult > LPFC_DMULT_MAX)
  15725		dmult = LPFC_DMULT_MAX;
  15726
  15727	for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
  15728		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
  15729		if (!eq)
  15730			continue;
  15731		eq->q_mode = usdelay;
  15732		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
  15733		eq_delay->u.request.eq[cnt].phase = 0;
  15734		eq_delay->u.request.eq[cnt].delay_multi = dmult;
  15735
  15736		if (++cnt >= numq)
  15737			break;
  15738	}
  15739	eq_delay->u.request.num_eq = cnt;
  15740
  15741	mbox->vport = phba->pport;
  15742	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  15743	mbox->ctx_ndlp = NULL;
  15744	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  15745	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
  15746	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  15747	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  15748	if (shdr_status || shdr_add_status || rc) {
  15749		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15750				"2512 MODIFY_EQ_DELAY mailbox failed with "
  15751				"status x%x add_status x%x, mbx status x%x\n",
  15752				shdr_status, shdr_add_status, rc);
  15753	}
  15754	mempool_free(mbox, phba->mbox_mem_pool);
  15755	return;
  15756}
  15757
  15758/**
  15759 * lpfc_eq_create - Create an Event Queue on the HBA
  15760 * @phba: HBA structure that indicates port to create a queue on.
  15761 * @eq: The queue structure to use to create the event queue.
  15762 * @imax: The maximum interrupt per second limit.
  15763 *
  15764 * This function creates an event queue, as detailed in @eq, on a port,
  15765 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
  15766 *
  15767 * The @phba struct is used to send mailbox command to HBA. The @eq struct
  15768 * is used to get the entry count and entry size that are necessary to
  15769 * determine the number of pages to allocate and use for this queue. This
  15770 * function will send the EQ_CREATE mailbox command to the HBA to setup the
   15771 * event queue. This function is synchronous and will wait for the mailbox
  15772 * command to finish before continuing.
  15773 *
  15774 * On success this function will return a zero. If unable to allocate enough
  15775 * memory this function will return -ENOMEM. If the queue create mailbox command
  15776 * fails this function will return -ENXIO.
  15777 **/
  15778int
  15779lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
  15780{
  15781	struct lpfc_mbx_eq_create *eq_create;
  15782	LPFC_MBOXQ_t *mbox;
  15783	int rc, length, status = 0;
  15784	struct lpfc_dmabuf *dmabuf;
  15785	uint32_t shdr_status, shdr_add_status;
  15786	union lpfc_sli4_cfg_shdr *shdr;
  15787	uint16_t dmult;
  15788	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
  15789
  15790	/* sanity check on queue memory */
  15791	if (!eq)
  15792		return -ENODEV;
  15793	if (!phba->sli4_hba.pc_sli4_params.supported)
  15794		hw_page_size = SLI4_PAGE_SIZE;
  15795
  15796	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  15797	if (!mbox)
  15798		return -ENOMEM;
  15799	length = (sizeof(struct lpfc_mbx_eq_create) -
  15800		  sizeof(struct lpfc_sli4_cfg_mhdr));
  15801	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  15802			 LPFC_MBOX_OPCODE_EQ_CREATE,
  15803			 length, LPFC_SLI4_MBX_EMBED);
  15804	eq_create = &mbox->u.mqe.un.eq_create;
  15805	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
  15806	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
  15807	       eq->page_count);
  15808	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
  15809	       LPFC_EQE_SIZE);
  15810	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
  15811
  15812	/* Use version 2 of CREATE_EQ if eqav is set */
  15813	if (phba->sli4_hba.pc_sli4_params.eqav) {
  15814		bf_set(lpfc_mbox_hdr_version, &shdr->request,
  15815		       LPFC_Q_CREATE_VERSION_2);
  15816		bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
  15817		       phba->sli4_hba.pc_sli4_params.eqav);
  15818	}
  15819
  15820	/* don't setup delay multiplier using EQ_CREATE */
  15821	dmult = 0;
  15822	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
  15823	       dmult);
  15824	switch (eq->entry_count) {
  15825	default:
  15826		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15827				"0360 Unsupported EQ count. (%d)\n",
  15828				eq->entry_count);
  15829		if (eq->entry_count < 256) {
  15830			status = -EINVAL;
  15831			goto out;
  15832		}
  15833		fallthrough;	/* otherwise default to smallest count */
  15834	case 256:
  15835		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
  15836		       LPFC_EQ_CNT_256);
  15837		break;
  15838	case 512:
  15839		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
  15840		       LPFC_EQ_CNT_512);
  15841		break;
  15842	case 1024:
  15843		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
  15844		       LPFC_EQ_CNT_1024);
  15845		break;
  15846	case 2048:
  15847		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
  15848		       LPFC_EQ_CNT_2048);
  15849		break;
  15850	case 4096:
  15851		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
  15852		       LPFC_EQ_CNT_4096);
  15853		break;
  15854	}
  15855	list_for_each_entry(dmabuf, &eq->page_list, list) {
  15856		memset(dmabuf->virt, 0, hw_page_size);
  15857		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
  15858					putPaddrLow(dmabuf->phys);
  15859		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
  15860					putPaddrHigh(dmabuf->phys);
  15861	}
  15862	mbox->vport = phba->pport;
  15863	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  15864	mbox->ctx_buf = NULL;
  15865	mbox->ctx_ndlp = NULL;
  15866	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  15867	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  15868	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  15869	if (shdr_status || shdr_add_status || rc) {
  15870		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15871				"2500 EQ_CREATE mailbox failed with "
  15872				"status x%x add_status x%x, mbx status x%x\n",
  15873				shdr_status, shdr_add_status, rc);
  15874		status = -ENXIO;
  15875	}
  15876	eq->type = LPFC_EQ;
  15877	eq->subtype = LPFC_NONE;
  15878	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
  15879	if (eq->queue_id == 0xFFFF)
  15880		status = -ENXIO;
  15881	eq->host_index = 0;
  15882	eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
  15883	eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
  15884out:
  15885	mempool_free(mbox, phba->mbox_mem_pool);
  15886	return status;
  15887}
  15888
  15889static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
  15890{
  15891	struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
  15892
  15893	__lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
  15894
  15895	return 1;
  15896}
  15897
  15898/**
  15899 * lpfc_cq_create - Create a Completion Queue on the HBA
  15900 * @phba: HBA structure that indicates port to create a queue on.
  15901 * @cq: The queue structure to use to create the completion queue.
  15902 * @eq: The event queue to bind this completion queue to.
  15903 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
  15904 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
  15905 *
   15906 * This function creates a completion queue, as detailed in @cq, on a port,
  15907 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
  15908 *
  15909 * The @phba struct is used to send mailbox command to HBA. The @cq struct
  15910 * is used to get the entry count and entry size that are necessary to
  15911 * determine the number of pages to allocate and use for this queue. The @eq
  15912 * is used to indicate which event queue to bind this completion queue to. This
  15913 * function will send the CQ_CREATE mailbox command to the HBA to setup the
   15914 * completion queue. This function is synchronous and will wait for the mailbox
  15915 * command to finish before continuing.
  15916 *
  15917 * On success this function will return a zero. If unable to allocate enough
  15918 * memory this function will return -ENOMEM. If the queue create mailbox command
  15919 * fails this function will return -ENXIO.
  15920 **/
  15921int
  15922lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
  15923	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
  15924{
  15925	struct lpfc_mbx_cq_create *cq_create;
  15926	struct lpfc_dmabuf *dmabuf;
  15927	LPFC_MBOXQ_t *mbox;
  15928	int rc, length, status = 0;
  15929	uint32_t shdr_status, shdr_add_status;
  15930	union lpfc_sli4_cfg_shdr *shdr;
  15931
  15932	/* sanity check on queue memory */
  15933	if (!cq || !eq)
  15934		return -ENODEV;
  15935
  15936	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  15937	if (!mbox)
  15938		return -ENOMEM;
  15939	length = (sizeof(struct lpfc_mbx_cq_create) -
  15940		  sizeof(struct lpfc_sli4_cfg_mhdr));
  15941	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  15942			 LPFC_MBOX_OPCODE_CQ_CREATE,
  15943			 length, LPFC_SLI4_MBX_EMBED);
  15944	cq_create = &mbox->u.mqe.un.cq_create;
  15945	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
  15946	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
  15947		    cq->page_count);
  15948	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
  15949	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
  15950	bf_set(lpfc_mbox_hdr_version, &shdr->request,
  15951	       phba->sli4_hba.pc_sli4_params.cqv);
  15952	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
  15953		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
  15954		       (cq->page_size / SLI4_PAGE_SIZE));
  15955		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
  15956		       eq->queue_id);
  15957		bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
  15958		       phba->sli4_hba.pc_sli4_params.cqav);
  15959	} else {
  15960		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
  15961		       eq->queue_id);
  15962	}
  15963	switch (cq->entry_count) {
  15964	case 2048:
  15965	case 4096:
  15966		if (phba->sli4_hba.pc_sli4_params.cqv ==
  15967		    LPFC_Q_CREATE_VERSION_2) {
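        			/* For CQ create v2, the raw entry count is carried in
        			 * context word 7; LPFC_CQ_CNT_WORD7 in the encoded count
        			 * field tells the port to take the count from there.
        			 */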
  15968			cq_create->u.request.context.lpfc_cq_context_count =
  15969				cq->entry_count;
  15970			bf_set(lpfc_cq_context_count,
  15971			       &cq_create->u.request.context,
  15972			       LPFC_CQ_CNT_WORD7);
  15973			break;
  15974		}
  15975		fallthrough;
  15976	default:
  15977		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15978				"0361 Unsupported CQ count: "
  15979				"entry cnt %d sz %d pg cnt %d\n",
  15980				cq->entry_count, cq->entry_size,
  15981				cq->page_count);
  15982		if (cq->entry_count < 256) {
  15983			status = -EINVAL;
  15984			goto out;
  15985		}
  15986		fallthrough;	/* otherwise default to smallest count */
  15987	case 256:
  15988		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
  15989		       LPFC_CQ_CNT_256);
  15990		break;
  15991	case 512:
  15992		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
  15993		       LPFC_CQ_CNT_512);
  15994		break;
  15995	case 1024:
  15996		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
  15997		       LPFC_CQ_CNT_1024);
  15998		break;
  15999	}
  16000	list_for_each_entry(dmabuf, &cq->page_list, list) {
  16001		memset(dmabuf->virt, 0, cq->page_size);
  16002		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
  16003					putPaddrLow(dmabuf->phys);
  16004		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
  16005					putPaddrHigh(dmabuf->phys);
  16006	}
  16007	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  16008
  16009	/* The IOCTL status is embedded in the mailbox subheader. */
  16010	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  16011	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  16012	if (shdr_status || shdr_add_status || rc) {
  16013		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16014				"2501 CQ_CREATE mailbox failed with "
  16015				"status x%x add_status x%x, mbx status x%x\n",
  16016				shdr_status, shdr_add_status, rc);
  16017		status = -ENXIO;
  16018		goto out;
  16019	}
  16020	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
  16021	if (cq->queue_id == 0xFFFF) {
  16022		status = -ENXIO;
  16023		goto out;
  16024	}
  16025	/* link the cq onto the parent eq child list */
  16026	list_add_tail(&cq->list, &eq->child_list);
  16027	/* Set up completion queue's type and subtype */
  16028	cq->type = type;
  16029	cq->subtype = subtype;
  16030	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
  16031	cq->assoc_qid = eq->queue_id;
  16032	cq->assoc_qp = eq;
  16033	cq->host_index = 0;
  16034	cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
  16035	cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
  16036
  16037	if (cq->queue_id > phba->sli4_hba.cq_max)
  16038		phba->sli4_hba.cq_max = cq->queue_id;
  16039
  16040	irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
  16041out:
  16042	mempool_free(mbox, phba->mbox_mem_pool);
  16043	return status;
  16044}
  16045
  16046/**
  16047 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
  16048 * @phba: HBA structure that indicates port to create a queue on.
  16049 * @cqp: The queue structure array to use to create the completion queues.
  16050 * @hdwq: The hardware queue array  with the EQ to bind completion queues to.
  16051 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
  16052 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
  16053 *
   16054 * This function creates a set of completion queues to support MRQ,
   16055 * as detailed in @cqp, on a port,
  16056 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
  16057 *
   16058 * The @phba struct is used to send mailbox command to HBA. The @cqp array
   16059 * is used to get the entry count and entry size that are necessary to
   16060 * determine the number of pages to allocate and use for each queue. The EQs
   16061 * in @hdwq indicate which event queue to bind each completion queue to. This
   16062 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
   16063 * completion queues. This function is synchronous and will wait for the mailbox
  16064 * command to finish before continuing.
  16065 *
  16066 * On success this function will return a zero. If unable to allocate enough
  16067 * memory this function will return -ENOMEM. If the queue create mailbox command
  16068 * fails this function will return -ENXIO.
  16069 **/
  16070int
  16071lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
  16072		   struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
  16073		   uint32_t subtype)
  16074{
  16075	struct lpfc_queue *cq;
  16076	struct lpfc_queue *eq;
  16077	struct lpfc_mbx_cq_create_set *cq_set;
  16078	struct lpfc_dmabuf *dmabuf;
  16079	LPFC_MBOXQ_t *mbox;
  16080	int rc, length, alloclen, status = 0;
  16081	int cnt, idx, numcq, page_idx = 0;
  16082	uint32_t shdr_status, shdr_add_status;
  16083	union lpfc_sli4_cfg_shdr *shdr;
  16084	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
  16085
  16086	/* sanity check on queue memory */
  16087	numcq = phba->cfg_nvmet_mrq;
  16088	if (!cqp || !hdwq || !numcq)
  16089		return -ENODEV;
  16090
  16091	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  16092	if (!mbox)
  16093		return -ENOMEM;
  16094
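        	/* Non-embedded mailbox: size the request to carry one
        	 * dma_address per page for every CQ in the set.
        	 */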
  16095	length = sizeof(struct lpfc_mbx_cq_create_set);
  16096	length += ((numcq * cqp[0]->page_count) *
  16097		   sizeof(struct dma_address));
  16098	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  16099			LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
  16100			LPFC_SLI4_MBX_NEMBED);
  16101	if (alloclen < length) {
  16102		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16103				"3098 Allocated DMA memory size (%d) is "
  16104				"less than the requested DMA memory size "
  16105				"(%d)\n", alloclen, length);
  16106		status = -ENOMEM;
  16107		goto out;
  16108	}
  16109	cq_set = mbox->sge_array->addr[0];
  16110	shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
  16111	bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
  16112
  16113	for (idx = 0; idx < numcq; idx++) {
  16114		cq = cqp[idx];
  16115		eq = hdwq[idx].hba_eq;
  16116		if (!cq || !eq) {
  16117			status = -ENOMEM;
  16118			goto out;
  16119		}
  16120		if (!phba->sli4_hba.pc_sli4_params.supported)
  16121			hw_page_size = cq->page_size;
  16122
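        		/* The first CQ in the set carries the shared parameters
        		 * (page size, counts, CQE size); every CQ, including the
        		 * first, names its bound EQ in its own eq_idN field.
        		 */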
  16123		switch (idx) {
  16124		case 0:
  16125			bf_set(lpfc_mbx_cq_create_set_page_size,
  16126			       &cq_set->u.request,
  16127			       (hw_page_size / SLI4_PAGE_SIZE));
  16128			bf_set(lpfc_mbx_cq_create_set_num_pages,
  16129			       &cq_set->u.request, cq->page_count);
  16130			bf_set(lpfc_mbx_cq_create_set_evt,
  16131			       &cq_set->u.request, 1);
  16132			bf_set(lpfc_mbx_cq_create_set_valid,
  16133			       &cq_set->u.request, 1);
  16134			bf_set(lpfc_mbx_cq_create_set_cqe_size,
  16135			       &cq_set->u.request, 0);
  16136			bf_set(lpfc_mbx_cq_create_set_num_cq,
  16137			       &cq_set->u.request, numcq);
  16138			bf_set(lpfc_mbx_cq_create_set_autovalid,
  16139			       &cq_set->u.request,
  16140			       phba->sli4_hba.pc_sli4_params.cqav);
  16141			switch (cq->entry_count) {
  16142			case 2048:
  16143			case 4096:
  16144				if (phba->sli4_hba.pc_sli4_params.cqv ==
  16145				    LPFC_Q_CREATE_VERSION_2) {
  16146					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
  16147					       &cq_set->u.request,
  16148						cq->entry_count);
  16149					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
  16150					       &cq_set->u.request,
  16151					       LPFC_CQ_CNT_WORD7);
  16152					break;
  16153				}
  16154				fallthrough;
  16155			default:
  16156				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16157						"3118 Bad CQ count. (%d)\n",
  16158						cq->entry_count);
  16159				if (cq->entry_count < 256) {
  16160					status = -EINVAL;
  16161					goto out;
  16162				}
  16163				fallthrough;	/* otherwise default to smallest */
  16164			case 256:
  16165				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
  16166				       &cq_set->u.request, LPFC_CQ_CNT_256);
  16167				break;
  16168			case 512:
  16169				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
  16170				       &cq_set->u.request, LPFC_CQ_CNT_512);
  16171				break;
  16172			case 1024:
  16173				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
  16174				       &cq_set->u.request, LPFC_CQ_CNT_1024);
  16175				break;
  16176			}
  16177			bf_set(lpfc_mbx_cq_create_set_eq_id0,
  16178			       &cq_set->u.request, eq->queue_id);
  16179			break;
  16180		case 1:
  16181			bf_set(lpfc_mbx_cq_create_set_eq_id1,
  16182			       &cq_set->u.request, eq->queue_id);
  16183			break;
  16184		case 2:
  16185			bf_set(lpfc_mbx_cq_create_set_eq_id2,
  16186			       &cq_set->u.request, eq->queue_id);
  16187			break;
  16188		case 3:
  16189			bf_set(lpfc_mbx_cq_create_set_eq_id3,
  16190			       &cq_set->u.request, eq->queue_id);
  16191			break;
  16192		case 4:
  16193			bf_set(lpfc_mbx_cq_create_set_eq_id4,
  16194			       &cq_set->u.request, eq->queue_id);
  16195			break;
  16196		case 5:
  16197			bf_set(lpfc_mbx_cq_create_set_eq_id5,
  16198			       &cq_set->u.request, eq->queue_id);
  16199			break;
  16200		case 6:
  16201			bf_set(lpfc_mbx_cq_create_set_eq_id6,
  16202			       &cq_set->u.request, eq->queue_id);
  16203			break;
  16204		case 7:
  16205			bf_set(lpfc_mbx_cq_create_set_eq_id7,
  16206			       &cq_set->u.request, eq->queue_id);
  16207			break;
  16208		case 8:
  16209			bf_set(lpfc_mbx_cq_create_set_eq_id8,
  16210			       &cq_set->u.request, eq->queue_id);
  16211			break;
  16212		case 9:
  16213			bf_set(lpfc_mbx_cq_create_set_eq_id9,
  16214			       &cq_set->u.request, eq->queue_id);
  16215			break;
  16216		case 10:
  16217			bf_set(lpfc_mbx_cq_create_set_eq_id10,
  16218			       &cq_set->u.request, eq->queue_id);
  16219			break;
  16220		case 11:
  16221			bf_set(lpfc_mbx_cq_create_set_eq_id11,
  16222			       &cq_set->u.request, eq->queue_id);
  16223			break;
  16224		case 12:
  16225			bf_set(lpfc_mbx_cq_create_set_eq_id12,
  16226			       &cq_set->u.request, eq->queue_id);
  16227			break;
  16228		case 13:
  16229			bf_set(lpfc_mbx_cq_create_set_eq_id13,
  16230			       &cq_set->u.request, eq->queue_id);
  16231			break;
  16232		case 14:
  16233			bf_set(lpfc_mbx_cq_create_set_eq_id14,
  16234			       &cq_set->u.request, eq->queue_id);
  16235			break;
  16236		case 15:
  16237			bf_set(lpfc_mbx_cq_create_set_eq_id15,
  16238			       &cq_set->u.request, eq->queue_id);
  16239			break;
  16240		}
  16241
  16242		/* link the cq onto the parent eq child list */
  16243		list_add_tail(&cq->list, &eq->child_list);
  16244		/* Set up completion queue's type and subtype */
  16245		cq->type = type;
  16246		cq->subtype = subtype;
  16247		cq->assoc_qid = eq->queue_id;
  16248		cq->assoc_qp = eq;
  16249		cq->host_index = 0;
  16250		cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
  16251		cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
  16252					 cq->entry_count);
  16253		cq->chann = idx;
  16254
  16255		rc = 0;
  16256		list_for_each_entry(dmabuf, &cq->page_list, list) {
  16257			memset(dmabuf->virt, 0, hw_page_size);
  16258			cnt = page_idx + dmabuf->buffer_tag;
  16259			cq_set->u.request.page[cnt].addr_lo =
  16260					putPaddrLow(dmabuf->phys);
  16261			cq_set->u.request.page[cnt].addr_hi =
  16262					putPaddrHigh(dmabuf->phys);
  16263			rc++;
  16264		}
  16265		page_idx += rc;
  16266	}
  16267
  16268	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  16269
  16270	/* The IOCTL status is embedded in the mailbox subheader. */
  16271	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  16272	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  16273	if (shdr_status || shdr_add_status || rc) {
  16274		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16275				"3119 CQ_CREATE_SET mailbox failed with "
  16276				"status x%x add_status x%x, mbx status x%x\n",
  16277				shdr_status, shdr_add_status, rc);
  16278		status = -ENXIO;
  16279		goto out;
  16280	}
  16281	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
  16282	if (rc == 0xFFFF) {
  16283		status = -ENXIO;
  16284		goto out;
  16285	}
  16286
  16287	for (idx = 0; idx < numcq; idx++) {
  16288		cq = cqp[idx];
  16289		cq->queue_id = rc + idx;
  16290		if (cq->queue_id > phba->sli4_hba.cq_max)
  16291			phba->sli4_hba.cq_max = cq->queue_id;
  16292	}
  16293
  16294out:
  16295	lpfc_sli4_mbox_cmd_free(phba, mbox);
  16296	return status;
  16297}
  16298
  16299/**
  16300 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
  16301 * @phba: HBA structure that indicates port to create a queue on.
  16302 * @mq: The queue structure to use to create the mailbox queue.
  16303 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
   16304 * @cq: The completion queue to associate with this mq.
  16305 *
  16306 * This function provides failback (fb) functionality when the
   16307 * mq_create_ext fails on older FW generations.  Its purpose is identical
  16308 * to mq_create_ext otherwise.
  16309 *
  16310 * This routine cannot fail as all attributes were previously accessed and
  16311 * initialized in mq_create_ext.
  16312 **/
  16313static void
  16314lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
  16315		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
  16316{
  16317	struct lpfc_mbx_mq_create *mq_create;
  16318	struct lpfc_dmabuf *dmabuf;
  16319	int length;
  16320
  16321	length = (sizeof(struct lpfc_mbx_mq_create) -
  16322		  sizeof(struct lpfc_sli4_cfg_mhdr));
  16323	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  16324			 LPFC_MBOX_OPCODE_MQ_CREATE,
  16325			 length, LPFC_SLI4_MBX_EMBED);
  16326	mq_create = &mbox->u.mqe.un.mq_create;
  16327	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
  16328	       mq->page_count);
  16329	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
  16330	       cq->queue_id);
  16331	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
  16332	switch (mq->entry_count) {
  16333	case 16:
  16334		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
  16335		       LPFC_MQ_RING_SIZE_16);
  16336		break;
  16337	case 32:
  16338		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
  16339		       LPFC_MQ_RING_SIZE_32);
  16340		break;
  16341	case 64:
  16342		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
  16343		       LPFC_MQ_RING_SIZE_64);
  16344		break;
  16345	case 128:
  16346		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
  16347		       LPFC_MQ_RING_SIZE_128);
  16348		break;
  16349	}
  16350	list_for_each_entry(dmabuf, &mq->page_list, list) {
  16351		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
  16352			putPaddrLow(dmabuf->phys);
  16353		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
  16354			putPaddrHigh(dmabuf->phys);
  16355	}
  16356}
  16357
  16358/**
  16359 * lpfc_mq_create - Create a mailbox Queue on the HBA
  16360 * @phba: HBA structure that indicates port to create a queue on.
  16361 * @mq: The queue structure to use to create the mailbox queue.
   16362 * @cq: The completion queue to associate with this mq.
  16363 * @subtype: The queue's subtype.
  16364 *
  16365 * This function creates a mailbox queue, as detailed in @mq, on a port,
  16366 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
  16367 *
   16368 * The @phba struct is used to send mailbox command to HBA. The @mq struct
  16369 * is used to get the entry count and entry size that are necessary to
  16370 * determine the number of pages to allocate and use for this queue. This
  16371 * function will send the MQ_CREATE mailbox command to the HBA to setup the
   16372 * mailbox queue. This function is synchronous and will wait for the mailbox
  16373 * command to finish before continuing.
  16374 *
  16375 * On success this function will return a zero. If unable to allocate enough
  16376 * memory this function will return -ENOMEM. If the queue create mailbox command
  16377 * fails this function will return -ENXIO.
  16378 **/
  16379int32_t
  16380lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
  16381	       struct lpfc_queue *cq, uint32_t subtype)
  16382{
  16383	struct lpfc_mbx_mq_create *mq_create;
  16384	struct lpfc_mbx_mq_create_ext *mq_create_ext;
  16385	struct lpfc_dmabuf *dmabuf;
  16386	LPFC_MBOXQ_t *mbox;
  16387	int rc, length, status = 0;
  16388	uint32_t shdr_status, shdr_add_status;
  16389	union lpfc_sli4_cfg_shdr *shdr;
  16390	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
  16391
  16392	/* sanity check on queue memory */
  16393	if (!mq || !cq)
  16394		return -ENODEV;
  16395	if (!phba->sli4_hba.pc_sli4_params.supported)
  16396		hw_page_size = SLI4_PAGE_SIZE;
  16397
  16398	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  16399	if (!mbox)
  16400		return -ENOMEM;
  16401	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
  16402		  sizeof(struct lpfc_sli4_cfg_mhdr));
  16403	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  16404			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
  16405			 length, LPFC_SLI4_MBX_EMBED);
  16406
  16407	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
  16408	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
  16409	bf_set(lpfc_mbx_mq_create_ext_num_pages,
  16410	       &mq_create_ext->u.request, mq->page_count);
  16411	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
  16412	       &mq_create_ext->u.request, 1);
  16413	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
  16414	       &mq_create_ext->u.request, 1);
  16415	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
  16416	       &mq_create_ext->u.request, 1);
  16417	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
  16418	       &mq_create_ext->u.request, 1);
  16419	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
  16420	       &mq_create_ext->u.request, 1);
  16421	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
  16422	bf_set(lpfc_mbox_hdr_version, &shdr->request,
  16423	       phba->sli4_hba.pc_sli4_params.mqv);
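        	/* MQ create v1 carries the CQ ID in the request body; older
        	 * versions carry it in the MQ context.
        	 */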
  16424	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
  16425		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
  16426		       cq->queue_id);
  16427	else
  16428		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
  16429		       cq->queue_id);
  16430	switch (mq->entry_count) {
  16431	default:
  16432		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16433				"0362 Unsupported MQ count. (%d)\n",
  16434				mq->entry_count);
  16435		if (mq->entry_count < 16) {
  16436			status = -EINVAL;
  16437			goto out;
  16438		}
  16439		fallthrough;	/* otherwise default to smallest count */
  16440	case 16:
  16441		bf_set(lpfc_mq_context_ring_size,
  16442		       &mq_create_ext->u.request.context,
  16443		       LPFC_MQ_RING_SIZE_16);
  16444		break;
  16445	case 32:
  16446		bf_set(lpfc_mq_context_ring_size,
  16447		       &mq_create_ext->u.request.context,
  16448		       LPFC_MQ_RING_SIZE_32);
  16449		break;
  16450	case 64:
  16451		bf_set(lpfc_mq_context_ring_size,
  16452		       &mq_create_ext->u.request.context,
  16453		       LPFC_MQ_RING_SIZE_64);
  16454		break;
  16455	case 128:
  16456		bf_set(lpfc_mq_context_ring_size,
  16457		       &mq_create_ext->u.request.context,
  16458		       LPFC_MQ_RING_SIZE_128);
  16459		break;
  16460	}
  16461	list_for_each_entry(dmabuf, &mq->page_list, list) {
  16462		memset(dmabuf->virt, 0, hw_page_size);
  16463		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
  16464					putPaddrLow(dmabuf->phys);
  16465		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
  16466					putPaddrHigh(dmabuf->phys);
  16467	}
  16468	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  16469	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
  16470			      &mq_create_ext->u.response);
  16471	if (rc != MBX_SUCCESS) {
  16472		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  16473				"2795 MQ_CREATE_EXT failed with "
  16474				"status x%x. Failback to MQ_CREATE.\n",
  16475				rc);
  16476		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
  16477		mq_create = &mbox->u.mqe.un.mq_create;
  16478		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  16479		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
  16480		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
  16481				      &mq_create->u.response);
  16482	}
  16483
  16484	/* The IOCTL status is embedded in the mailbox subheader. */
  16485	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  16486	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  16487	if (shdr_status || shdr_add_status || rc) {
  16488		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16489				"2502 MQ_CREATE mailbox failed with "
  16490				"status x%x add_status x%x, mbx status x%x\n",
  16491				shdr_status, shdr_add_status, rc);
  16492		status = -ENXIO;
  16493		goto out;
  16494	}
  16495	if (mq->queue_id == 0xFFFF) {
  16496		status = -ENXIO;
  16497		goto out;
  16498	}
  16499	mq->type = LPFC_MQ;
  16500	mq->assoc_qid = cq->queue_id;
  16501	mq->subtype = subtype;
  16502	mq->host_index = 0;
  16503	mq->hba_index = 0;
  16504
  16505	/* link the mq onto the parent cq child list */
  16506	list_add_tail(&mq->list, &cq->child_list);
  16507out:
  16508	mempool_free(mbox, phba->mbox_mem_pool);
  16509	return status;
  16510}
  16511
  16512/**
  16513 * lpfc_wq_create - Create a Work Queue on the HBA
  16514 * @phba: HBA structure that indicates port to create a queue on.
  16515 * @wq: The queue structure to use to create the work queue.
  16516 * @cq: The completion queue to bind this work queue to.
  16517 * @subtype: The subtype of the work queue indicating its functionality.
  16518 *
  16519 * This function creates a work queue, as detailed in @wq, on a port, described
  16520 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
  16521 *
  16522 * The @phba struct is used to send mailbox command to HBA. The @wq struct
  16523 * is used to get the entry count and entry size that are necessary to
  16524 * determine the number of pages to allocate and use for this queue. The @cq
  16525 * is used to indicate which completion queue to bind this work queue to. This
  16526 * function will send the WQ_CREATE mailbox command to the HBA to setup the
   16527 * work queue. This function is synchronous and will wait for the mailbox
  16528 * command to finish before continuing.
  16529 *
  16530 * On success this function will return a zero. If unable to allocate enough
  16531 * memory this function will return -ENOMEM. If the queue create mailbox command
  16532 * fails this function will return -ENXIO.
  16533 **/
  16534int
  16535lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
  16536	       struct lpfc_queue *cq, uint32_t subtype)
  16537{
  16538	struct lpfc_mbx_wq_create *wq_create;
  16539	struct lpfc_dmabuf *dmabuf;
  16540	LPFC_MBOXQ_t *mbox;
  16541	int rc, length, status = 0;
  16542	uint32_t shdr_status, shdr_add_status;
  16543	union lpfc_sli4_cfg_shdr *shdr;
  16544	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
  16545	struct dma_address *page;
  16546	void __iomem *bar_memmap_p;
  16547	uint32_t db_offset;
  16548	uint16_t pci_barset;
  16549	uint8_t dpp_barset;
  16550	uint32_t dpp_offset;
  16551	uint8_t wq_create_version;
  16552#ifdef CONFIG_X86
  16553	unsigned long pg_addr;
  16554#endif
  16555
  16556	/* sanity check on queue memory */
  16557	if (!wq || !cq)
  16558		return -ENODEV;
  16559	if (!phba->sli4_hba.pc_sli4_params.supported)
  16560		hw_page_size = wq->page_size;
  16561
  16562	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  16563	if (!mbox)
  16564		return -ENOMEM;
  16565	length = (sizeof(struct lpfc_mbx_wq_create) -
  16566		  sizeof(struct lpfc_sli4_cfg_mhdr));
  16567	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  16568			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
  16569			 length, LPFC_SLI4_MBX_EMBED);
  16570	wq_create = &mbox->u.mqe.un.wq_create;
  16571	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
  16572	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
  16573		    wq->page_count);
  16574	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
  16575		    cq->queue_id);
  16576
  16577	/* wqv is the earliest version supported, NOT the latest */
  16578	bf_set(lpfc_mbox_hdr_version, &shdr->request,
  16579	       phba->sli4_hba.pc_sli4_params.wqv);
  16580
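        	/* Use the v1 WQ_CREATE request when the port supports 128-byte
        	 * WQEs or the WQ uses pages larger than SLI4_PAGE_SIZE;
        	 * otherwise fall back to the original v0 layout.
        	 */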
  16581	if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
  16582	    (wq->page_size > SLI4_PAGE_SIZE))
  16583		wq_create_version = LPFC_Q_CREATE_VERSION_1;
  16584	else
  16585		wq_create_version = LPFC_Q_CREATE_VERSION_0;
  16586
  16587	switch (wq_create_version) {
  16588	case LPFC_Q_CREATE_VERSION_1:
  16589		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
  16590		       wq->entry_count);
  16591		bf_set(lpfc_mbox_hdr_version, &shdr->request,
  16592		       LPFC_Q_CREATE_VERSION_1);
  16593
  16594		switch (wq->entry_size) {
  16595		default:
  16596		case 64:
  16597			bf_set(lpfc_mbx_wq_create_wqe_size,
  16598			       &wq_create->u.request_1,
  16599			       LPFC_WQ_WQE_SIZE_64);
  16600			break;
  16601		case 128:
  16602			bf_set(lpfc_mbx_wq_create_wqe_size,
  16603			       &wq_create->u.request_1,
  16604			       LPFC_WQ_WQE_SIZE_128);
  16605			break;
  16606		}
  16607		/* Request DPP by default */
  16608		bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
  16609		bf_set(lpfc_mbx_wq_create_page_size,
  16610		       &wq_create->u.request_1,
  16611		       (wq->page_size / SLI4_PAGE_SIZE));
  16612		page = wq_create->u.request_1.page;
  16613		break;
  16614	default:
  16615		page = wq_create->u.request.page;
  16616		break;
  16617	}
  16618
  16619	list_for_each_entry(dmabuf, &wq->page_list, list) {
  16620		memset(dmabuf->virt, 0, hw_page_size);
  16621		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
  16622		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
  16623	}
  16624
  16625	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
  16626		bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
  16627
  16628	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  16629	/* The IOCTL status is embedded in the mailbox subheader. */
  16630	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  16631	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  16632	if (shdr_status || shdr_add_status || rc) {
  16633		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16634				"2503 WQ_CREATE mailbox failed with "
  16635				"status x%x add_status x%x, mbx status x%x\n",
  16636				shdr_status, shdr_add_status, rc);
  16637		status = -ENXIO;
  16638		goto out;
  16639	}
  16640
  16641	if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
  16642		wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
  16643					&wq_create->u.response);
  16644	else
  16645		wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
  16646					&wq_create->u.response_1);
  16647
  16648	if (wq->queue_id == 0xFFFF) {
  16649		status = -ENXIO;
  16650		goto out;
  16651	}
  16652
  16653	wq->db_format = LPFC_DB_LIST_FORMAT;
  16654	if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
  16655		if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
  16656			wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
  16657					       &wq_create->u.response);
  16658			if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
  16659			    (wq->db_format != LPFC_DB_RING_FORMAT)) {
  16660				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16661						"3265 WQ[%d] doorbell format "
  16662						"not supported: x%x\n",
  16663						wq->queue_id, wq->db_format);
  16664				status = -EINVAL;
  16665				goto out;
  16666			}
  16667			pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
  16668					    &wq_create->u.response);
  16669			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
  16670								   pci_barset);
  16671			if (!bar_memmap_p) {
  16672				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16673						"3263 WQ[%d] failed to memmap "
  16674						"pci barset:x%x\n",
  16675						wq->queue_id, pci_barset);
  16676				status = -ENOMEM;
  16677				goto out;
  16678			}
  16679			db_offset = wq_create->u.response.doorbell_offset;
  16680			if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
  16681			    (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
  16682				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16683						"3252 WQ[%d] doorbell offset "
  16684						"not supported: x%x\n",
  16685						wq->queue_id, db_offset);
  16686				status = -EINVAL;
  16687				goto out;
  16688			}
  16689			wq->db_regaddr = bar_memmap_p + db_offset;
  16690			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  16691					"3264 WQ[%d]: barset:x%x, offset:x%x, "
  16692					"format:x%x\n", wq->queue_id,
  16693					pci_barset, db_offset, wq->db_format);
  16694		} else
  16695			wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
  16696	} else {
  16697		/* Check if DPP was honored by the firmware */
  16698		wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
  16699				    &wq_create->u.response_1);
  16700		if (wq->dpp_enable) {
  16701			pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
  16702					    &wq_create->u.response_1);
  16703			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
  16704								   pci_barset);
  16705			if (!bar_memmap_p) {
  16706				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16707						"3267 WQ[%d] failed to memmap "
  16708						"pci barset:x%x\n",
  16709						wq->queue_id, pci_barset);
  16710				status = -ENOMEM;
  16711				goto out;
  16712			}
  16713			db_offset = wq_create->u.response_1.doorbell_offset;
  16714			wq->db_regaddr = bar_memmap_p + db_offset;
  16715			wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
  16716					    &wq_create->u.response_1);
  16717			dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
  16718					    &wq_create->u.response_1);
  16719			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
  16720								   dpp_barset);
  16721			if (!bar_memmap_p) {
  16722				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16723						"3268 WQ[%d] failed to memmap "
  16724						"pci barset:x%x\n",
  16725						wq->queue_id, dpp_barset);
  16726				status = -ENOMEM;
  16727				goto out;
  16728			}
  16729			dpp_offset = wq_create->u.response_1.dpp_offset;
  16730			wq->dpp_regaddr = bar_memmap_p + dpp_offset;
  16731			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  16732					"3271 WQ[%d]: barset:x%x, offset:x%x, "
  16733					"dpp_id:x%x dpp_barset:x%x "
  16734					"dpp_offset:x%x\n",
  16735					wq->queue_id, pci_barset, db_offset,
  16736					wq->dpp_id, dpp_barset, dpp_offset);
  16737
  16738#ifdef CONFIG_X86
  16739			/* Enable combined writes for DPP aperture */
  16740			pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
  16741			rc = set_memory_wc(pg_addr, 1);
  16742			if (rc) {
  16743				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  16744					"3272 Cannot setup Combined "
  16745					"Write on WQ[%d] - disable DPP\n",
  16746					wq->queue_id);
  16747				phba->cfg_enable_dpp = 0;
  16748			}
  16749#else
  16750			phba->cfg_enable_dpp = 0;
  16751#endif
  16752		} else
  16753			wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
  16754	}
  16755	wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
  16756	if (wq->pring == NULL) {
  16757		status = -ENOMEM;
  16758		goto out;
  16759	}
  16760	wq->type = LPFC_WQ;
  16761	wq->assoc_qid = cq->queue_id;
  16762	wq->subtype = subtype;
  16763	wq->host_index = 0;
  16764	wq->hba_index = 0;
  16765	wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
  16766
  16767	/* link the wq onto the parent cq child list */
  16768	list_add_tail(&wq->list, &cq->child_list);
  16769out:
  16770	mempool_free(mbox, phba->mbox_mem_pool);
  16771	return status;
  16772}
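
/*
 * Illustrative sketch (not wired into the driver): once lpfc_wq_create()
 * above has chosen wq->db_format and wq->db_regaddr, posting a WQE ends with
 * a doorbell write whose layout depends on that format.  The bf_set() field
 * names below are assumed to match the ones lpfc_sli4_wq_put() uses earlier
 * in this file; treat this as a sketch, not a replacement for that routine.
 */
static void __maybe_unused
lpfc_sketch_ring_wq_doorbell(struct lpfc_queue *q, uint32_t num_posted)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, num_posted);
		bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, num_posted);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return;	/* unknown doorbell format: nothing to ring */
	}
	writel(doorbell.word0, q->db_regaddr);
}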
  16773
  16774/**
  16775 * lpfc_rq_create - Create a Receive Queue on the HBA
  16776 * @phba: HBA structure that indicates port to create a queue on.
  16777 * @hrq: The queue structure to use to create the header receive queue.
  16778 * @drq: The queue structure to use to create the data receive queue.
  16779 * @cq: The completion queue to bind the receive queues to.
  16780 * @subtype: The subtype of the receive queues indicating their functionality.
  16781 *
  16782 * This function creates a receive buffer queue pair, as detailed in @hrq and
  16783 * @drq, on a port described by @phba, by sending a RQ_CREATE mailbox command
  16784 * to the HBA.
  16785 *
  16786 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
  16787 * struct is used to get the entry count that is necessary to determine the
  16788 * number of pages to use for this queue. The @cq is used to indicate which
  16789 * completion queue to bind received buffers that are posted to these queues to.
  16790 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
  16791 * receive queue pair. This function is synchronous and will wait for the
  16792 * mailbox command to complete before continuing.
  16793 *
  16794 * On success this function will return a zero. If unable to allocate enough
  16795 * memory this function will return -ENOMEM. If the queue create mailbox command
  16796 * fails this function will return -ENXIO.
  16797 **/
  16798int
  16799lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
  16800	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
  16801{
  16802	struct lpfc_mbx_rq_create *rq_create;
  16803	struct lpfc_dmabuf *dmabuf;
  16804	LPFC_MBOXQ_t *mbox;
  16805	int rc, length, status = 0;
  16806	uint32_t shdr_status, shdr_add_status;
  16807	union lpfc_sli4_cfg_shdr *shdr;
  16808	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
  16809	void __iomem *bar_memmap_p;
  16810	uint32_t db_offset;
  16811	uint16_t pci_barset;
  16812
  16813	/* sanity check on queue memory */
  16814	if (!hrq || !drq || !cq)
  16815		return -ENODEV;
  16816	if (!phba->sli4_hba.pc_sli4_params.supported)
  16817		hw_page_size = SLI4_PAGE_SIZE;
  16818
  16819	if (hrq->entry_count != drq->entry_count)
  16820		return -EINVAL;
  16821	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  16822	if (!mbox)
  16823		return -ENOMEM;
  16824	length = (sizeof(struct lpfc_mbx_rq_create) -
  16825		  sizeof(struct lpfc_sli4_cfg_mhdr));
  16826	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  16827			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
  16828			 length, LPFC_SLI4_MBX_EMBED);
  16829	rq_create = &mbox->u.mqe.un.rq_create;
  16830	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
  16831	bf_set(lpfc_mbox_hdr_version, &shdr->request,
  16832	       phba->sli4_hba.pc_sli4_params.rqv);
  16833	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
  16834		bf_set(lpfc_rq_context_rqe_count_1,
  16835		       &rq_create->u.request.context,
  16836		       hrq->entry_count);
  16837		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
  16838		bf_set(lpfc_rq_context_rqe_size,
  16839		       &rq_create->u.request.context,
  16840		       LPFC_RQE_SIZE_8);
  16841		bf_set(lpfc_rq_context_page_size,
  16842		       &rq_create->u.request.context,
  16843		       LPFC_RQ_PAGE_SIZE_4096);
  16844	} else {
  16845		switch (hrq->entry_count) {
  16846		default:
  16847			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16848					"2535 Unsupported RQ count. (%d)\n",
  16849					hrq->entry_count);
  16850			if (hrq->entry_count < 512) {
  16851				status = -EINVAL;
  16852				goto out;
  16853			}
  16854			fallthrough;	/* otherwise default to smallest count */
  16855		case 512:
  16856			bf_set(lpfc_rq_context_rqe_count,
  16857			       &rq_create->u.request.context,
  16858			       LPFC_RQ_RING_SIZE_512);
  16859			break;
  16860		case 1024:
  16861			bf_set(lpfc_rq_context_rqe_count,
  16862			       &rq_create->u.request.context,
  16863			       LPFC_RQ_RING_SIZE_1024);
  16864			break;
  16865		case 2048:
  16866			bf_set(lpfc_rq_context_rqe_count,
  16867			       &rq_create->u.request.context,
  16868			       LPFC_RQ_RING_SIZE_2048);
  16869			break;
  16870		case 4096:
  16871			bf_set(lpfc_rq_context_rqe_count,
  16872			       &rq_create->u.request.context,
  16873			       LPFC_RQ_RING_SIZE_4096);
  16874			break;
  16875		}
  16876		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
  16877		       LPFC_HDR_BUF_SIZE);
  16878	}
  16879	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
  16880	       cq->queue_id);
  16881	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
  16882	       hrq->page_count);
  16883	list_for_each_entry(dmabuf, &hrq->page_list, list) {
  16884		memset(dmabuf->virt, 0, hw_page_size);
  16885		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
  16886					putPaddrLow(dmabuf->phys);
  16887		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
  16888					putPaddrHigh(dmabuf->phys);
  16889	}
  16890	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
  16891		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
  16892
  16893	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  16894	/* The IOCTL status is embedded in the mailbox subheader. */
  16895	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  16896	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  16897	if (shdr_status || shdr_add_status || rc) {
  16898		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16899				"2504 RQ_CREATE mailbox failed with "
  16900				"status x%x add_status x%x, mbx status x%x\n",
  16901				shdr_status, shdr_add_status, rc);
  16902		status = -ENXIO;
  16903		goto out;
  16904	}
  16905	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
  16906	if (hrq->queue_id == 0xFFFF) {
  16907		status = -ENXIO;
  16908		goto out;
  16909	}
  16910
  16911	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
  16912		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
  16913					&rq_create->u.response);
  16914		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
  16915		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
  16916			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16917					"3262 RQ [%d] doorbell format not "
  16918					"supported: x%x\n", hrq->queue_id,
  16919					hrq->db_format);
  16920			status = -EINVAL;
  16921			goto out;
  16922		}
  16923
  16924		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
  16925				    &rq_create->u.response);
  16926		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
  16927		if (!bar_memmap_p) {
  16928			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16929					"3269 RQ[%d] failed to memmap pci "
  16930					"barset:x%x\n", hrq->queue_id,
  16931					pci_barset);
  16932			status = -ENOMEM;
  16933			goto out;
  16934		}
  16935
  16936		db_offset = rq_create->u.response.doorbell_offset;
  16937		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
  16938		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
  16939			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16940					"3270 RQ[%d] doorbell offset not "
  16941					"supported: x%x\n", hrq->queue_id,
  16942					db_offset);
  16943			status = -EINVAL;
  16944			goto out;
  16945		}
  16946		hrq->db_regaddr = bar_memmap_p + db_offset;
  16947		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  16948				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
  16949				"format:x%x\n", hrq->queue_id, pci_barset,
  16950				db_offset, hrq->db_format);
  16951	} else {
  16952		hrq->db_format = LPFC_DB_RING_FORMAT;
  16953		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
  16954	}
  16955	hrq->type = LPFC_HRQ;
  16956	hrq->assoc_qid = cq->queue_id;
  16957	hrq->subtype = subtype;
  16958	hrq->host_index = 0;
  16959	hrq->hba_index = 0;
  16960	hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
  16961
  16962	/* now create the data queue */
  16963	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  16964			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
  16965			 length, LPFC_SLI4_MBX_EMBED);
  16966	bf_set(lpfc_mbox_hdr_version, &shdr->request,
  16967	       phba->sli4_hba.pc_sli4_params.rqv);
  16968	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
  16969		bf_set(lpfc_rq_context_rqe_count_1,
  16970		       &rq_create->u.request.context, hrq->entry_count);
  16971		if (subtype == LPFC_NVMET)
  16972			rq_create->u.request.context.buffer_size =
  16973				LPFC_NVMET_DATA_BUF_SIZE;
  16974		else
  16975			rq_create->u.request.context.buffer_size =
  16976				LPFC_DATA_BUF_SIZE;
  16977		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
  16978		       LPFC_RQE_SIZE_8);
  16979		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
  16980		       (PAGE_SIZE/SLI4_PAGE_SIZE));
  16981	} else {
  16982		switch (drq->entry_count) {
  16983		default:
  16984			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16985					"2536 Unsupported RQ count. (%d)\n",
  16986					drq->entry_count);
  16987			if (drq->entry_count < 512) {
  16988				status = -EINVAL;
  16989				goto out;
  16990			}
  16991			fallthrough;	/* otherwise default to smallest count */
  16992		case 512:
  16993			bf_set(lpfc_rq_context_rqe_count,
  16994			       &rq_create->u.request.context,
  16995			       LPFC_RQ_RING_SIZE_512);
  16996			break;
  16997		case 1024:
  16998			bf_set(lpfc_rq_context_rqe_count,
  16999			       &rq_create->u.request.context,
  17000			       LPFC_RQ_RING_SIZE_1024);
  17001			break;
  17002		case 2048:
  17003			bf_set(lpfc_rq_context_rqe_count,
  17004			       &rq_create->u.request.context,
  17005			       LPFC_RQ_RING_SIZE_2048);
  17006			break;
  17007		case 4096:
  17008			bf_set(lpfc_rq_context_rqe_count,
  17009			       &rq_create->u.request.context,
  17010			       LPFC_RQ_RING_SIZE_4096);
  17011			break;
  17012		}
  17013		if (subtype == LPFC_NVMET)
  17014			bf_set(lpfc_rq_context_buf_size,
  17015			       &rq_create->u.request.context,
  17016			       LPFC_NVMET_DATA_BUF_SIZE);
  17017		else
  17018			bf_set(lpfc_rq_context_buf_size,
  17019			       &rq_create->u.request.context,
  17020			       LPFC_DATA_BUF_SIZE);
  17021	}
  17022	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
  17023	       cq->queue_id);
  17024	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
  17025	       drq->page_count);
  17026	list_for_each_entry(dmabuf, &drq->page_list, list) {
  17027		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
  17028					putPaddrLow(dmabuf->phys);
  17029		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
  17030					putPaddrHigh(dmabuf->phys);
  17031	}
  17032	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
  17033		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
  17034	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  17035	/* The IOCTL status is embedded in the mailbox subheader. */
  17036	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
  17037	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  17038	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  17039	if (shdr_status || shdr_add_status || rc) {
  17040		status = -ENXIO;
  17041		goto out;
  17042	}
  17043	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
  17044	if (drq->queue_id == 0xFFFF) {
  17045		status = -ENXIO;
  17046		goto out;
  17047	}
  17048	drq->type = LPFC_DRQ;
  17049	drq->assoc_qid = cq->queue_id;
  17050	drq->subtype = subtype;
  17051	drq->host_index = 0;
  17052	drq->hba_index = 0;
  17053	drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
  17054
  17055	/* link the header and data RQs onto the parent cq child list */
  17056	list_add_tail(&hrq->list, &cq->child_list);
  17057	list_add_tail(&drq->list, &cq->child_list);
  17058
  17059out:
  17060	mempool_free(mbox, phba->mbox_mem_pool);
  17061	return status;
  17062}
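
/*
 * Illustrative sketch (not wired into the driver): a minimal caller of
 * lpfc_rq_create().  The queues are assumed to be already allocated with
 * their pages on page_list; the helper name is hypothetical and LPFC_USOL is
 * the subtype the driver uses elsewhere for its unsolicited receive pair.
 */
static int __maybe_unused
lpfc_sketch_setup_rq_pair(struct lpfc_hba *phba, struct lpfc_queue *hrq,
			  struct lpfc_queue *drq, struct lpfc_queue *cq)
{
	/* lpfc_rq_create() rejects pairs with mismatched entry counts */
	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;

	/* Header and data RQs are created together against one CQ */
	return lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
}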
  17063
  17064/**
  17065 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
  17066 * @phba: HBA structure that indicates port to create a queue on.
  17067 * @hrqp: The queue structure array to use to create the header receive queues.
  17068 * @drqp: The queue structure array to use to create the data receive queues.
  17069 * @cqp: The completion queue array to bind these receive queues to.
  17070 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
  17071 *
  17072 * This function creates a set of receive buffer queue pairs, as detailed in
  17073 * @hrqp and @drqp, on a port described by @phba, by sending a single RQ_CREATE
  17074 * mailbox command to the HBA.
  17075 *
  17076 * The @phba struct is used to send the mailbox command to the HBA. The @drqp
  17077 * and @hrqp arrays are used to get the entry counts that are necessary to
  17078 * determine the number of pages to use for each queue. The @cqp array is used
  17079 * to indicate which completion queue the buffers posted to each queue pair are
  17080 * bound to. This function will send the RQ_CREATE mailbox command to the HBA
  17081 * to set up the receive queue pairs. This function is synchronous and will
  17082 * wait for the mailbox command to complete before continuing.
  17083 *
  17084 * On success this function will return a zero. If unable to allocate enough
  17085 * memory this function will return -ENOMEM. If the queue create mailbox command
  17086 * fails this function will return -ENXIO.
  17087 **/
  17088int
  17089lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
  17090		struct lpfc_queue **drqp, struct lpfc_queue **cqp,
  17091		uint32_t subtype)
  17092{
  17093	struct lpfc_queue *hrq, *drq, *cq;
  17094	struct lpfc_mbx_rq_create_v2 *rq_create;
  17095	struct lpfc_dmabuf *dmabuf;
  17096	LPFC_MBOXQ_t *mbox;
  17097	int rc, length, alloclen, status = 0;
  17098	int cnt, idx, numrq, page_idx = 0;
  17099	uint32_t shdr_status, shdr_add_status;
  17100	union lpfc_sli4_cfg_shdr *shdr;
  17101	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
  17102
  17103	numrq = phba->cfg_nvmet_mrq;
  17104	/* sanity check on array memory */
  17105	if (!hrqp || !drqp || !cqp || !numrq)
  17106		return -ENODEV;
  17107	if (!phba->sli4_hba.pc_sli4_params.supported)
  17108		hw_page_size = SLI4_PAGE_SIZE;
  17109
  17110	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  17111	if (!mbox)
  17112		return -ENOMEM;
  17113
  17114	length = sizeof(struct lpfc_mbx_rq_create_v2);
  17115	length += ((2 * numrq * hrqp[0]->page_count) *
  17116		   sizeof(struct dma_address));
  17117
  17118	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  17119				    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
  17120				    LPFC_SLI4_MBX_NEMBED);
  17121	if (alloclen < length) {
  17122		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17123				"3099 Allocated DMA memory size (%d) is "
  17124				"less than the requested DMA memory size "
  17125				"(%d)\n", alloclen, length);
  17126		status = -ENOMEM;
  17127		goto out;
  17128	}
  17129
  17132	rq_create = mbox->sge_array->addr[0];
  17133	shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
  17134
  17135	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
  17136	cnt = 0;
  17137
  17138	for (idx = 0; idx < numrq; idx++) {
  17139		hrq = hrqp[idx];
  17140		drq = drqp[idx];
  17141		cq  = cqp[idx];
  17142
  17143		/* sanity check on queue memory */
  17144		if (!hrq || !drq || !cq) {
  17145			status = -ENODEV;
  17146			goto out;
  17147		}
  17148
  17149		if (hrq->entry_count != drq->entry_count) {
  17150			status = -EINVAL;
  17151			goto out;
  17152		}
  17153
  17154		if (idx == 0) {
  17155			bf_set(lpfc_mbx_rq_create_num_pages,
  17156			       &rq_create->u.request,
  17157			       hrq->page_count);
  17158			bf_set(lpfc_mbx_rq_create_rq_cnt,
  17159			       &rq_create->u.request, (numrq * 2));
  17160			bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
  17161			       1);
  17162			bf_set(lpfc_rq_context_base_cq,
  17163			       &rq_create->u.request.context,
  17164			       cq->queue_id);
  17165			bf_set(lpfc_rq_context_data_size,
  17166			       &rq_create->u.request.context,
  17167			       LPFC_NVMET_DATA_BUF_SIZE);
  17168			bf_set(lpfc_rq_context_hdr_size,
  17169			       &rq_create->u.request.context,
  17170			       LPFC_HDR_BUF_SIZE);
  17171			bf_set(lpfc_rq_context_rqe_count_1,
  17172			       &rq_create->u.request.context,
  17173			       hrq->entry_count);
  17174			bf_set(lpfc_rq_context_rqe_size,
  17175			       &rq_create->u.request.context,
  17176			       LPFC_RQE_SIZE_8);
  17177			bf_set(lpfc_rq_context_page_size,
  17178			       &rq_create->u.request.context,
  17179			       (PAGE_SIZE/SLI4_PAGE_SIZE));
  17180		}
  17181		rc = 0;
  17182		list_for_each_entry(dmabuf, &hrq->page_list, list) {
  17183			memset(dmabuf->virt, 0, hw_page_size);
  17184			cnt = page_idx + dmabuf->buffer_tag;
  17185			rq_create->u.request.page[cnt].addr_lo =
  17186					putPaddrLow(dmabuf->phys);
  17187			rq_create->u.request.page[cnt].addr_hi =
  17188					putPaddrHigh(dmabuf->phys);
  17189			rc++;
  17190		}
  17191		page_idx += rc;
  17192
  17193		rc = 0;
  17194		list_for_each_entry(dmabuf, &drq->page_list, list) {
  17195			memset(dmabuf->virt, 0, hw_page_size);
  17196			cnt = page_idx + dmabuf->buffer_tag;
  17197			rq_create->u.request.page[cnt].addr_lo =
  17198					putPaddrLow(dmabuf->phys);
  17199			rq_create->u.request.page[cnt].addr_hi =
  17200					putPaddrHigh(dmabuf->phys);
  17201			rc++;
  17202		}
  17203		page_idx += rc;
  17204
  17205		hrq->db_format = LPFC_DB_RING_FORMAT;
  17206		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
  17207		hrq->type = LPFC_HRQ;
  17208		hrq->assoc_qid = cq->queue_id;
  17209		hrq->subtype = subtype;
  17210		hrq->host_index = 0;
  17211		hrq->hba_index = 0;
  17212		hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
  17213
  17214		drq->db_format = LPFC_DB_RING_FORMAT;
  17215		drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
  17216		drq->type = LPFC_DRQ;
  17217		drq->assoc_qid = cq->queue_id;
  17218		drq->subtype = subtype;
  17219		drq->host_index = 0;
  17220		drq->hba_index = 0;
  17221		drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
  17222
  17223		list_add_tail(&hrq->list, &cq->child_list);
  17224		list_add_tail(&drq->list, &cq->child_list);
  17225	}
  17226
  17227	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  17228	/* The IOCTL status is embedded in the mailbox subheader. */
  17229	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  17230	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  17231	if (shdr_status || shdr_add_status || rc) {
  17232		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17233				"3120 RQ_CREATE mailbox failed with "
  17234				"status x%x add_status x%x, mbx status x%x\n",
  17235				shdr_status, shdr_add_status, rc);
  17236		status = -ENXIO;
  17237		goto out;
  17238	}
  17239	rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
  17240	if (rc == 0xFFFF) {
  17241		status = -ENXIO;
  17242		goto out;
  17243	}
  17244
  17245	/* Initialize all RQs with associated queue id */
  17246	for (idx = 0; idx < numrq; idx++) {
  17247		hrq = hrqp[idx];
  17248		hrq->queue_id = rc + (2 * idx);
  17249		drq = drqp[idx];
  17250		drq->queue_id = rc + (2 * idx) + 1;
  17251	}
  17252
  17253out:
  17254	lpfc_sli4_mbox_cmd_free(phba, mbox);
  17255	return status;
  17256}
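
/*
 * Illustrative sketch (not wired into the driver): lpfc_mrq_create() above
 * assigns firmware queue ids in interleaved pairs starting from the id
 * returned in the mailbox response, i.e. hrq[idx] gets base + 2*idx and
 * drq[idx] gets base + 2*idx + 1.  This hypothetical helper only restates
 * that mapping for one pair index.
 */
static void __maybe_unused
lpfc_sketch_mrq_pair_ids(uint32_t base_qid, int idx,
			 uint32_t *hrq_qid, uint32_t *drq_qid)
{
	*hrq_qid = base_qid + (2 * idx);	/* header RQ of pair idx */
	*drq_qid = base_qid + (2 * idx) + 1;	/* data RQ of pair idx */
}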
  17257
  17258/**
  17259 * lpfc_eq_destroy - Destroy an event Queue on the HBA
  17260 * @phba: HBA structure that indicates port to destroy a queue on.
  17261 * @eq: The queue structure associated with the queue to destroy.
  17262 *
  17263 * This function destroys a queue, as detailed in @eq, by sending a mailbox
  17264 * command, specific to the type of queue, to the HBA.
  17265 *
  17266 * The @eq struct is used to get the queue ID of the queue to destroy.
  17267 *
  17268 * On success this function will return a zero. If the queue destroy mailbox
  17269 * command fails this function will return -ENXIO.
  17270 **/
  17271int
  17272lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
  17273{
  17274	LPFC_MBOXQ_t *mbox;
  17275	int rc, length, status = 0;
  17276	uint32_t shdr_status, shdr_add_status;
  17277	union lpfc_sli4_cfg_shdr *shdr;
  17278
  17279	/* sanity check on queue memory */
  17280	if (!eq)
  17281		return -ENODEV;
  17282
  17283	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
  17284	if (!mbox)
  17285		return -ENOMEM;
  17286	length = (sizeof(struct lpfc_mbx_eq_destroy) -
  17287		  sizeof(struct lpfc_sli4_cfg_mhdr));
  17288	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  17289			 LPFC_MBOX_OPCODE_EQ_DESTROY,
  17290			 length, LPFC_SLI4_MBX_EMBED);
  17291	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
  17292	       eq->queue_id);
  17293	mbox->vport = eq->phba->pport;
  17294	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  17295
  17296	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
  17297	/* The IOCTL status is embedded in the mailbox subheader. */
  17298	shdr = (union lpfc_sli4_cfg_shdr *)
  17299		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
  17300	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  17301	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  17302	if (shdr_status || shdr_add_status || rc) {
  17303		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17304				"2505 EQ_DESTROY mailbox failed with "
  17305				"status x%x add_status x%x, mbx status x%x\n",
  17306				shdr_status, shdr_add_status, rc);
  17307		status = -ENXIO;
  17308	}
  17309
  17310	/* Remove eq from any list */
  17311	list_del_init(&eq->list);
  17312	mempool_free(mbox, eq->phba->mbox_mem_pool);
  17313	return status;
  17314}
  17315
  17316/**
  17317 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
  17318 * @phba: HBA structure that indicates port to destroy a queue on.
  17319 * @cq: The queue structure associated with the queue to destroy.
  17320 *
  17321 * This function destroys a queue, as detailed in @cq, by sending a mailbox
  17322 * command, specific to the type of queue, to the HBA.
  17323 *
  17324 * The @cq struct is used to get the queue ID of the queue to destroy.
  17325 *
  17326 * On success this function will return a zero. If the queue destroy mailbox
  17327 * command fails this function will return -ENXIO.
  17328 **/
  17329int
  17330lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
  17331{
  17332	LPFC_MBOXQ_t *mbox;
  17333	int rc, length, status = 0;
  17334	uint32_t shdr_status, shdr_add_status;
  17335	union lpfc_sli4_cfg_shdr *shdr;
  17336
  17337	/* sanity check on queue memory */
  17338	if (!cq)
  17339		return -ENODEV;
  17340	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
  17341	if (!mbox)
  17342		return -ENOMEM;
  17343	length = (sizeof(struct lpfc_mbx_cq_destroy) -
  17344		  sizeof(struct lpfc_sli4_cfg_mhdr));
  17345	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  17346			 LPFC_MBOX_OPCODE_CQ_DESTROY,
  17347			 length, LPFC_SLI4_MBX_EMBED);
  17348	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
  17349	       cq->queue_id);
  17350	mbox->vport = cq->phba->pport;
  17351	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  17352	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
  17353	/* The IOCTL status is embedded in the mailbox subheader. */
  17354	shdr = (union lpfc_sli4_cfg_shdr *)
  17355		&mbox->u.mqe.un.wq_create.header.cfg_shdr;
  17356	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  17357	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  17358	if (shdr_status || shdr_add_status || rc) {
  17359		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17360				"2506 CQ_DESTROY mailbox failed with "
  17361				"status x%x add_status x%x, mbx status x%x\n",
  17362				shdr_status, shdr_add_status, rc);
  17363		status = -ENXIO;
  17364	}
  17365	/* Remove cq from any list */
  17366	list_del_init(&cq->list);
  17367	mempool_free(mbox, cq->phba->mbox_mem_pool);
  17368	return status;
  17369}
  17370
  17371/**
  17372 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
  17373 * @phba: HBA structure that indicates port to destroy a queue on.
  17374 * @mq: The queue structure associated with the queue to destroy.
  17375 *
  17376 * This function destroys a queue, as detailed in @mq, by sending a mailbox
  17377 * command, specific to the type of queue, to the HBA.
  17378 *
  17379 * The @mq struct is used to get the queue ID of the queue to destroy.
  17380 *
  17381 * On success this function will return a zero. If the queue destroy mailbox
  17382 * command fails this function will return -ENXIO.
  17383 **/
  17384int
  17385lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
  17386{
  17387	LPFC_MBOXQ_t *mbox;
  17388	int rc, length, status = 0;
  17389	uint32_t shdr_status, shdr_add_status;
  17390	union lpfc_sli4_cfg_shdr *shdr;
  17391
  17392	/* sanity check on queue memory */
  17393	if (!mq)
  17394		return -ENODEV;
  17395	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
  17396	if (!mbox)
  17397		return -ENOMEM;
  17398	length = (sizeof(struct lpfc_mbx_mq_destroy) -
  17399		  sizeof(struct lpfc_sli4_cfg_mhdr));
  17400	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  17401			 LPFC_MBOX_OPCODE_MQ_DESTROY,
  17402			 length, LPFC_SLI4_MBX_EMBED);
  17403	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
  17404	       mq->queue_id);
  17405	mbox->vport = mq->phba->pport;
  17406	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  17407	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
  17408	/* The IOCTL status is embedded in the mailbox subheader. */
  17409	shdr = (union lpfc_sli4_cfg_shdr *)
  17410		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
  17411	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  17412	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  17413	if (shdr_status || shdr_add_status || rc) {
  17414		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17415				"2507 MQ_DESTROY mailbox failed with "
  17416				"status x%x add_status x%x, mbx status x%x\n",
  17417				shdr_status, shdr_add_status, rc);
  17418		status = -ENXIO;
  17419	}
  17420	/* Remove mq from any list */
  17421	list_del_init(&mq->list);
  17422	mempool_free(mbox, mq->phba->mbox_mem_pool);
  17423	return status;
  17424}
  17425
  17426/**
  17427 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
  17428 * @phba: HBA structure that indicates port to destroy a queue on.
  17429 * @wq: The queue structure associated with the queue to destroy.
  17430 *
  17431 * This function destroys a queue, as detailed in @wq, by sending a mailbox
  17432 * command, specific to the type of queue, to the HBA.
  17433 *
  17434 * The @wq struct is used to get the queue ID of the queue to destroy.
  17435 *
  17436 * On success this function will return a zero. If the queue destroy mailbox
  17437 * command fails this function will return -ENXIO.
  17438 **/
  17439int
  17440lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
  17441{
  17442	LPFC_MBOXQ_t *mbox;
  17443	int rc, length, status = 0;
  17444	uint32_t shdr_status, shdr_add_status;
  17445	union lpfc_sli4_cfg_shdr *shdr;
  17446
  17447	/* sanity check on queue memory */
  17448	if (!wq)
  17449		return -ENODEV;
  17450	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
  17451	if (!mbox)
  17452		return -ENOMEM;
  17453	length = (sizeof(struct lpfc_mbx_wq_destroy) -
  17454		  sizeof(struct lpfc_sli4_cfg_mhdr));
  17455	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  17456			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
  17457			 length, LPFC_SLI4_MBX_EMBED);
  17458	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
  17459	       wq->queue_id);
  17460	mbox->vport = wq->phba->pport;
  17461	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  17462	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
  17463	shdr = (union lpfc_sli4_cfg_shdr *)
  17464		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
  17465	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  17466	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  17467	if (shdr_status || shdr_add_status || rc) {
  17468		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17469				"2508 WQ_DESTROY mailbox failed with "
  17470				"status x%x add_status x%x, mbx status x%x\n",
  17471				shdr_status, shdr_add_status, rc);
  17472		status = -ENXIO;
  17473	}
  17474	/* Remove wq from any list */
  17475	list_del_init(&wq->list);
  17476	kfree(wq->pring);
  17477	wq->pring = NULL;
  17478	mempool_free(mbox, wq->phba->mbox_mem_pool);
  17479	return status;
  17480}
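
/*
 * Illustrative sketch (not wired into the driver): the create routines above
 * link each WQ onto its parent CQ's child_list, so teardown normally runs
 * leaf-first - work queue, then the completion queue it was bound to, then
 * that CQ's event queue.  The helper and its arguments are hypothetical.
 */
static void __maybe_unused
lpfc_sketch_teardown_wq_path(struct lpfc_hba *phba, struct lpfc_queue *wq,
			     struct lpfc_queue *cq, struct lpfc_queue *eq)
{
	/* Each destroy routine tolerates a NULL queue and returns -ENODEV */
	lpfc_wq_destroy(phba, wq);
	lpfc_cq_destroy(phba, cq);
	lpfc_eq_destroy(phba, eq);
}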
  17481
  17482/**
  17483 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
  17484 * @phba: HBA structure that indicates port to destroy a queue on.
  17485 * @hrq: The queue structure associated with the queue to destroy.
  17486 * @drq: The queue structure associated with the queue to destroy.
  17487 *
  17488 * This function destroys a receive queue pair, as detailed in @hrq and @drq,
  17489 * by sending a mailbox command, specific to the type of queue, to the HBA.
  17490 *
  17491 * The @hrq and @drq structs are used to get the queue IDs to destroy.
  17492 *
  17493 * On success this function will return a zero. If the queue destroy mailbox
  17494 * command fails this function will return -ENXIO.
  17495 **/
  17496int
  17497lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
  17498		struct lpfc_queue *drq)
  17499{
  17500	LPFC_MBOXQ_t *mbox;
  17501	int rc, length, status = 0;
  17502	uint32_t shdr_status, shdr_add_status;
  17503	union lpfc_sli4_cfg_shdr *shdr;
  17504
  17505	/* sanity check on queue memory */
  17506	if (!hrq || !drq)
  17507		return -ENODEV;
  17508	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
  17509	if (!mbox)
  17510		return -ENOMEM;
  17511	length = (sizeof(struct lpfc_mbx_rq_destroy) -
  17512		  sizeof(struct lpfc_sli4_cfg_mhdr));
  17513	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  17514			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
  17515			 length, LPFC_SLI4_MBX_EMBED);
  17516	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
  17517	       hrq->queue_id);
  17518	mbox->vport = hrq->phba->pport;
  17519	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  17520	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
  17521	/* The IOCTL status is embedded in the mailbox subheader. */
  17522	shdr = (union lpfc_sli4_cfg_shdr *)
  17523		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
  17524	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  17525	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  17526	if (shdr_status || shdr_add_status || rc) {
  17527		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17528				"2509 RQ_DESTROY mailbox failed with "
  17529				"status x%x add_status x%x, mbx status x%x\n",
  17530				shdr_status, shdr_add_status, rc);
  17531		mempool_free(mbox, hrq->phba->mbox_mem_pool);
  17532		return -ENXIO;
  17533	}
  17534	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
  17535	       drq->queue_id);
  17536	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
  17537	shdr = (union lpfc_sli4_cfg_shdr *)
  17538		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
  17539	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  17540	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  17541	if (shdr_status || shdr_add_status || rc) {
  17542		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17543				"2510 RQ_DESTROY mailbox failed with "
  17544				"status x%x add_status x%x, mbx status x%x\n",
  17545				shdr_status, shdr_add_status, rc);
  17546		status = -ENXIO;
  17547	}
  17548	list_del_init(&hrq->list);
  17549	list_del_init(&drq->list);
  17550	mempool_free(mbox, hrq->phba->mbox_mem_pool);
  17551	return status;
  17552}
  17553
  17554/**
  17555 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
  17556 * @phba: pointer to lpfc hba data structure.
  17557 * @pdma_phys_addr0: Physical address of the 1st SGL page.
  17558 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
  17559 * @xritag: the xritag that ties this io to the SGL pages.
  17560 *
  17561 * This routine will post the sgl pages for the IO that has the xritag
  17562 * that is in the iocbq structure. The xritag is assigned during iocbq
  17563 * creation and persists for as long as the driver is loaded.
  17564 * If the caller has fewer than 256 scatter gather segments to map then
  17565 * pdma_phys_addr1 should be 0.
  17566 * If the caller needs to map more than 256 scatter gather segments then
  17567 * pdma_phys_addr1 should be a valid physical address.
  17568 * Physical addresses for SGLs must be 64 byte aligned.
  17569 * If two SGL pages are mapped then the first one must have 256 entries;
  17570 * the second SGL page can have between 1 and 256 entries.
  17571 *
  17572 * Return codes:
  17573 * 	0 - Success
  17574 * 	-ENXIO, -ENOMEM - Failure
  17575 **/
  17576int
  17577lpfc_sli4_post_sgl(struct lpfc_hba *phba,
  17578		dma_addr_t pdma_phys_addr0,
  17579		dma_addr_t pdma_phys_addr1,
  17580		uint16_t xritag)
  17581{
  17582	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
  17583	LPFC_MBOXQ_t *mbox;
  17584	int rc;
  17585	uint32_t shdr_status, shdr_add_status;
  17586	uint32_t mbox_tmo;
  17587	union lpfc_sli4_cfg_shdr *shdr;
  17588
  17589	if (xritag == NO_XRI) {
  17590		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17591				"0364 Invalid param:\n");
  17592		return -EINVAL;
  17593	}
  17594
  17595	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  17596	if (!mbox)
  17597		return -ENOMEM;
  17598
  17599	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  17600			LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
  17601			sizeof(struct lpfc_mbx_post_sgl_pages) -
  17602			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
  17603
  17604	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
  17605				&mbox->u.mqe.un.post_sgl_pages;
  17606	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
  17607	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
  17608
  17609	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo	=
  17610				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
  17611	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
  17612				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
  17613
  17614	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo	=
  17615				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
  17616	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
  17617				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
  17618	if (!phba->sli4_hba.intr_enable)
  17619		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  17620	else {
  17621		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
  17622		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
  17623	}
  17624	/* The IOCTL status is embedded in the mailbox subheader. */
  17625	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
  17626	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  17627	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  17628	if (!phba->sli4_hba.intr_enable)
  17629		mempool_free(mbox, phba->mbox_mem_pool);
  17630	else if (rc != MBX_TIMEOUT)
  17631		mempool_free(mbox, phba->mbox_mem_pool);
  17632	if (shdr_status || shdr_add_status || rc) {
  17633		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17634				"2511 POST_SGL mailbox failed with "
  17635				"status x%x add_status x%x, mbx status x%x\n",
  17636				shdr_status, shdr_add_status, rc);
		return -ENXIO;
  17637	}
  17638	return 0;
  17639}
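
/*
 * Illustrative sketch (not wired into the driver): per the comment on
 * lpfc_sli4_post_sgl() above, a buffer that fits in a single SGL page (fewer
 * than 256 scatter gather segments) passes 0 as the second page address.  The
 * wrapper name is hypothetical.
 */
static int __maybe_unused
lpfc_sketch_post_single_page_sgl(struct lpfc_hba *phba, dma_addr_t sgl_phys,
				 uint16_t xritag)
{
	/* One 64-byte-aligned SGL page, no second page */
	return lpfc_sli4_post_sgl(phba, sgl_phys, 0, xritag);
}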
  17640
  17641/**
  17642 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
  17643 * @phba: pointer to lpfc hba data structure.
  17644 *
  17645 * This routine is invoked to allocate the next available xri from the
  17646 * driver's xri bitmask, consistent with the SLI-4 interface spec. The
  17647 * xri index is logical, so the search starts at 0 each time, and the
  17648 * bit for the returned xri is marked in use.
  17649 *
  17650 * Returns
  17651 *	An available xri defined as 0 <= xri < max_xri if successful
  17652 *	NO_XRI if no xris are available.
  17653 **/
  17654static uint16_t
  17655lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
  17656{
  17657	unsigned long xri;
  17658
  17659	/*
  17660	 * Fetch the next logical xri.  Because this index is logical,
  17661	 * the driver starts at 0 each time.
  17662	 */
  17663	spin_lock_irq(&phba->hbalock);
  17664	xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
  17665				 phba->sli4_hba.max_cfg_param.max_xri);
  17666	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
  17667		spin_unlock_irq(&phba->hbalock);
  17668		return NO_XRI;
  17669	} else {
  17670		set_bit(xri, phba->sli4_hba.xri_bmask);
  17671		phba->sli4_hba.max_cfg_param.xri_used++;
  17672	}
  17673	spin_unlock_irq(&phba->hbalock);
  17674	return xri;
  17675}
  17676
  17677/**
  17678 * __lpfc_sli4_free_xri - Release an xri for reuse.
  17679 * @phba: pointer to lpfc hba data structure.
  17680 * @xri: xri to release.
  17681 *
  17682 * This routine is invoked to release an xri to the pool of
  17683 * available xris maintained by the driver.
  17684 **/
  17685static void
  17686__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
  17687{
  17688	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
  17689		phba->sli4_hba.max_cfg_param.xri_used--;
  17690	}
  17691}
  17692
  17693/**
  17694 * lpfc_sli4_free_xri - Release an xri for reuse.
  17695 * @phba: pointer to lpfc hba data structure.
  17696 * @xri: xri to release.
  17697 *
  17698 * This routine is invoked to release an xri to the pool of
  17699 * available xris maintained by the driver.
  17700 **/
  17701void
  17702lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
  17703{
  17704	spin_lock_irq(&phba->hbalock);
  17705	__lpfc_sli4_free_xri(phba, xri);
  17706	spin_unlock_irq(&phba->hbalock);
  17707}
  17708
  17709/**
  17710 * lpfc_sli4_next_xritag - Get an xritag for the io
  17711 * @phba: Pointer to HBA context object.
  17712 *
  17713 * This function gets an xritag for the iocb. If there is no unused xritag
  17714 * it will return NO_XRI (0xffff).
  17715 * The function returns the allocated xritag if successful, else returns
  17716 * NO_XRI. NO_XRI is not a valid xritag.
  17717 * The caller is not required to hold any lock.
  17718 **/
  17719uint16_t
  17720lpfc_sli4_next_xritag(struct lpfc_hba *phba)
  17721{
  17722	uint16_t xri_index;
  17723
  17724	xri_index = lpfc_sli4_alloc_xri(phba);
  17725	if (xri_index == NO_XRI)
  17726		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  17727				"2004 Failed to allocate XRI. last XRITAG is %d"
  17728				" Max XRI is %d, Used XRI is %d\n",
  17729				xri_index,
  17730				phba->sli4_hba.max_cfg_param.max_xri,
  17731				phba->sli4_hba.max_cfg_param.xri_used);
  17732	return xri_index;
  17733}
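
/*
 * Illustrative sketch (not wired into the driver): xritags handed out by
 * lpfc_sli4_next_xritag() come from the xri_bmask and must eventually be
 * returned with lpfc_sli4_free_xri().  The helper is hypothetical and only
 * demonstrates the pairing.
 */
static int __maybe_unused
lpfc_sketch_try_xri(struct lpfc_hba *phba)
{
	uint16_t xri;

	xri = lpfc_sli4_next_xritag(phba);
	if (xri == NO_XRI)
		return -ENOSPC;	/* xri pool exhausted */

	/* ...the xri would normally be tied to an iocbq/sgl here... */

	lpfc_sli4_free_xri(phba, xri);	/* give it back to the bitmask */
	return 0;
}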
  17734
  17735/**
  17736 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
  17737 * @phba: pointer to lpfc hba data structure.
  17738 * @post_sgl_list: pointer to els sgl entry list.
  17739 * @post_cnt: number of els sgl entries on the list.
  17740 *
  17741 * This routine is invoked to post a block of driver's sgl pages to the
  17742 * HBA using non-embedded mailbox command. No Lock is held. This routine
  17743 * is only called when the driver is loading and after all IO has been
  17744 * stopped.
  17745 **/
  17746static int
  17747lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
  17748			    struct list_head *post_sgl_list,
  17749			    int post_cnt)
  17750{
  17751	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
  17752	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
  17753	struct sgl_page_pairs *sgl_pg_pairs;
  17754	void *viraddr;
  17755	LPFC_MBOXQ_t *mbox;
  17756	uint32_t reqlen, alloclen, pg_pairs;
  17757	uint32_t mbox_tmo;
  17758	uint16_t xritag_start = 0;
  17759	int rc = 0;
  17760	uint32_t shdr_status, shdr_add_status;
  17761	union lpfc_sli4_cfg_shdr *shdr;
  17762
  17763	reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
  17764		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
  17765	if (reqlen > SLI4_PAGE_SIZE) {
  17766		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17767				"2559 Block sgl registration required DMA "
  17768				"size (%d) greater than a page\n", reqlen);
  17769		return -ENOMEM;
  17770	}
  17771
  17772	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  17773	if (!mbox)
  17774		return -ENOMEM;
  17775
  17776	/* Allocate DMA memory and set up the non-embedded mailbox command */
  17777	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  17778			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
  17779			 LPFC_SLI4_MBX_NEMBED);
  17780
  17781	if (alloclen < reqlen) {
  17782		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17783				"0285 Allocated DMA memory size (%d) is "
  17784				"less than the requested DMA memory "
  17785				"size (%d)\n", alloclen, reqlen);
  17786		lpfc_sli4_mbox_cmd_free(phba, mbox);
  17787		return -ENOMEM;
  17788	}
  17789	/* Set up the SGL pages in the non-embedded DMA pages */
  17790	viraddr = mbox->sge_array->addr[0];
  17791	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
  17792	sgl_pg_pairs = &sgl->sgl_pg_pairs;
  17793
  17794	pg_pairs = 0;
  17795	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
  17796		/* Set up the sge entry */
  17797		sgl_pg_pairs->sgl_pg0_addr_lo =
  17798				cpu_to_le32(putPaddrLow(sglq_entry->phys));
  17799		sgl_pg_pairs->sgl_pg0_addr_hi =
  17800				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
  17801		sgl_pg_pairs->sgl_pg1_addr_lo =
  17802				cpu_to_le32(putPaddrLow(0));
  17803		sgl_pg_pairs->sgl_pg1_addr_hi =
  17804				cpu_to_le32(putPaddrHigh(0));
  17805
  17806		/* Keep the first xritag on the list */
  17807		if (pg_pairs == 0)
  17808			xritag_start = sglq_entry->sli4_xritag;
  17809		sgl_pg_pairs++;
  17810		pg_pairs++;
  17811	}
  17812
  17813	/* Complete initialization and perform endian conversion. */
  17814	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
  17815	bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
  17816	sgl->word0 = cpu_to_le32(sgl->word0);
  17817
  17818	if (!phba->sli4_hba.intr_enable)
  17819		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  17820	else {
  17821		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
  17822		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
  17823	}
  17824	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
  17825	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  17826	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  17827	if (!phba->sli4_hba.intr_enable)
  17828		lpfc_sli4_mbox_cmd_free(phba, mbox);
  17829	else if (rc != MBX_TIMEOUT)
  17830		lpfc_sli4_mbox_cmd_free(phba, mbox);
  17831	if (shdr_status || shdr_add_status || rc) {
  17832		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17833				"2513 POST_SGL_BLOCK mailbox command failed "
  17834				"status x%x add_status x%x mbx status x%x\n",
  17835				shdr_status, shdr_add_status, rc);
  17836		rc = -ENXIO;
  17837	}
  17838	return rc;
  17839}
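
/*
 * Illustrative sketch (not wired into the driver): the reqlen check in
 * lpfc_sli4_post_sgl_list() above caps one non-embedded POST_SGL_PAGES
 * command at a single SLI4 page.  This hypothetical helper computes the
 * resulting maximum number of sgl page pairs per command from the same size
 * formula.
 */
static uint32_t __maybe_unused
lpfc_sketch_max_sgl_pairs_per_post(void)
{
	uint32_t overhead = sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);

	return (SLI4_PAGE_SIZE - overhead) / sizeof(struct sgl_page_pairs);
}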
  17840
  17841/**
  17842 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
  17843 * @phba: pointer to lpfc hba data structure.
  17844 * @nblist: pointer to the io buffer list.
  17845 * @count: number of io buffers on the list.
  17846 *
  17847 * This routine is invoked to post a block of @count io buffer sgl pages from
  17848 * the io buffer list @nblist to the HBA using a non-embedded mailbox command.
  17849 * No Lock is held.
  17850 *
  17851 **/
  17852static int
  17853lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
  17854			    int count)
  17855{
  17856	struct lpfc_io_buf *lpfc_ncmd;
  17857	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
  17858	struct sgl_page_pairs *sgl_pg_pairs;
  17859	void *viraddr;
  17860	LPFC_MBOXQ_t *mbox;
  17861	uint32_t reqlen, alloclen, pg_pairs;
  17862	uint32_t mbox_tmo;
  17863	uint16_t xritag_start = 0;
  17864	int rc = 0;
  17865	uint32_t shdr_status, shdr_add_status;
  17866	dma_addr_t pdma_phys_bpl1;
  17867	union lpfc_sli4_cfg_shdr *shdr;
  17868
  17869	/* Calculate the requested length of the dma memory */
  17870	reqlen = count * sizeof(struct sgl_page_pairs) +
  17871		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
  17872	if (reqlen > SLI4_PAGE_SIZE) {
  17873		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  17874				"6118 Block sgl registration required DMA "
  17875				"size (%d) greater than a page\n", reqlen);
  17876		return -ENOMEM;
  17877	}
  17878	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  17879	if (!mbox) {
  17880		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17881				"6119 Failed to allocate mbox cmd memory\n");
  17882		return -ENOMEM;
  17883	}
  17884
  17885	/* Allocate DMA memory and set up the non-embedded mailbox command */
  17886	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  17887				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
  17888				    reqlen, LPFC_SLI4_MBX_NEMBED);
  17889
  17890	if (alloclen < reqlen) {
  17891		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17892				"6120 Allocated DMA memory size (%d) is "
  17893				"less than the requested DMA memory "
  17894				"size (%d)\n", alloclen, reqlen);
  17895		lpfc_sli4_mbox_cmd_free(phba, mbox);
  17896		return -ENOMEM;
  17897	}
  17898
  17899	/* Get the first SGE entry from the non-embedded DMA memory */
  17900	viraddr = mbox->sge_array->addr[0];
  17901
  17902	/* Set up the SGL pages in the non-embedded DMA pages */
  17903	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
  17904	sgl_pg_pairs = &sgl->sgl_pg_pairs;
  17905
  17906	pg_pairs = 0;
  17907	list_for_each_entry(lpfc_ncmd, nblist, list) {
  17908		/* Set up the sge entry */
  17909		sgl_pg_pairs->sgl_pg0_addr_lo =
  17910			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
  17911		sgl_pg_pairs->sgl_pg0_addr_hi =
  17912			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
  17913		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
  17914			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
  17915						SGL_PAGE_SIZE;
  17916		else
  17917			pdma_phys_bpl1 = 0;
  17918		sgl_pg_pairs->sgl_pg1_addr_lo =
  17919			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
  17920		sgl_pg_pairs->sgl_pg1_addr_hi =
  17921			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
  17922		/* Keep the first xritag on the list */
  17923		if (pg_pairs == 0)
  17924			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
  17925		sgl_pg_pairs++;
  17926		pg_pairs++;
  17927	}
  17928	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
  17929	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
  17930	/* Perform endian conversion if necessary */
  17931	sgl->word0 = cpu_to_le32(sgl->word0);
  17932
  17933	if (!phba->sli4_hba.intr_enable) {
  17934		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  17935	} else {
  17936		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
  17937		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
  17938	}
  17939	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
  17940	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  17941	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  17942	if (!phba->sli4_hba.intr_enable)
  17943		lpfc_sli4_mbox_cmd_free(phba, mbox);
  17944	else if (rc != MBX_TIMEOUT)
  17945		lpfc_sli4_mbox_cmd_free(phba, mbox);
  17946	if (shdr_status || shdr_add_status || rc) {
  17947		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17948				"6125 POST_SGL_BLOCK mailbox command failed "
  17949				"status x%x add_status x%x mbx status x%x\n",
  17950				shdr_status, shdr_add_status, rc);
  17951		rc = -ENXIO;
  17952	}
  17953	return rc;
  17954}
  17955
  17956/**
  17957 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
  17958 * @phba: pointer to lpfc hba data structure.
  17959 * @post_nblist: pointer to the nvme buffer list.
  17960 * @sb_count: number of nvme buffers.
  17961 *
  17962 * This routine walks a list of nvme buffers that was passed in. It attempts
  17963 * to construct blocks of nvme buffer sgls which contains contiguous xris and
  17964 * uses the non-embedded SGL block post mailbox commands to post to the port.
  17965 * For a single NVME buffer sgl with a non-contiguous xri, if any, it shall use
  17966 * the embedded SGL post mailbox command for posting. The @post_nblist passed
  17967 * in must be a local list, thus no lock is needed when manipulating the list.
  17968 *
  17969 * Returns: number of buffers posted (0 = failure); -EINVAL if @sb_count <= 0.
  17970 **/
  17971int
  17972lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
  17973			   struct list_head *post_nblist, int sb_count)
  17974{
  17975	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
  17976	int status, sgl_size;
  17977	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
  17978	dma_addr_t pdma_phys_sgl1;
  17979	int last_xritag = NO_XRI;
  17980	int cur_xritag;
  17981	LIST_HEAD(prep_nblist);
  17982	LIST_HEAD(blck_nblist);
  17983	LIST_HEAD(nvme_nblist);
  17984
  17985	/* sanity check */
  17986	if (sb_count <= 0)
  17987		return -EINVAL;
  17988
  17989	sgl_size = phba->cfg_sg_dma_buf_size;
  17990	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
  17991		list_del_init(&lpfc_ncmd->list);
  17992		block_cnt++;
  17993		if ((last_xritag != NO_XRI) &&
  17994		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
  17995			/* a hole in xri block, form a sgl posting block */
  17996			list_splice_init(&prep_nblist, &blck_nblist);
  17997			post_cnt = block_cnt - 1;
  17998			/* prepare list for next posting block */
  17999			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
  18000			block_cnt = 1;
  18001		} else {
  18002			/* prepare list for next posting block */
  18003			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
  18004			/* enough sgls for non-embed sgl mbox command */
  18005			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
  18006				list_splice_init(&prep_nblist, &blck_nblist);
  18007				post_cnt = block_cnt;
  18008				block_cnt = 0;
  18009			}
  18010		}
  18011		num_posting++;
  18012		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
  18013
  18014		/* end of repost sgl list condition for NVME buffers */
  18015		if (num_posting == sb_count) {
  18016			if (post_cnt == 0) {
  18017				/* last sgl posting block */
  18018				list_splice_init(&prep_nblist, &blck_nblist);
  18019				post_cnt = block_cnt;
  18020			} else if (block_cnt == 1) {
  18021				/* last single sgl with non-contiguous xri */
  18022				if (sgl_size > SGL_PAGE_SIZE)
  18023					pdma_phys_sgl1 =
  18024						lpfc_ncmd->dma_phys_sgl +
  18025						SGL_PAGE_SIZE;
  18026				else
  18027					pdma_phys_sgl1 = 0;
  18028				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
  18029				status = lpfc_sli4_post_sgl(
  18030						phba, lpfc_ncmd->dma_phys_sgl,
  18031						pdma_phys_sgl1, cur_xritag);
  18032				if (status) {
  18033					/* Post error.  Buffer unavailable. */
  18034					lpfc_ncmd->flags |=
  18035						LPFC_SBUF_NOT_POSTED;
  18036				} else {
  18037					/* Post success.  Buffer available. */
  18038					lpfc_ncmd->flags &=
  18039						~LPFC_SBUF_NOT_POSTED;
  18040					lpfc_ncmd->status = IOSTAT_SUCCESS;
  18041					num_posted++;
  18042				}
  18043				/* success, put on NVME buffer sgl list */
  18044				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
  18045			}
  18046		}
  18047
  18048		/* continue until a nembed page worth of sgls */
  18049		if (post_cnt == 0)
  18050			continue;
  18051
  18052		/* post block of NVME buffer list sgls */
  18053		status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
  18054						     post_cnt);
  18055
  18056		/* don't reset xritag due to hole in xri block */
  18057		if (block_cnt == 0)
  18058			last_xritag = NO_XRI;
  18059
  18060		/* reset NVME buffer post count for next round of posting */
  18061		post_cnt = 0;
  18062
  18063		/* put NVME buffers with posted sgls on the NVME buffer sgl list */
  18064		while (!list_empty(&blck_nblist)) {
  18065			list_remove_head(&blck_nblist, lpfc_ncmd,
  18066					 struct lpfc_io_buf, list);
  18067			if (status) {
  18068				/* Post error.  Mark buffer unavailable. */
  18069				lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
  18070			} else {
  18071				/* Post success, Mark buffer available. */
  18072				lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
  18073				lpfc_ncmd->status = IOSTAT_SUCCESS;
  18074				num_posted++;
  18075			}
  18076			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
  18077		}
  18078	}
  18079	/* Push NVME buffers with sgl posted to the available list */
  18080	lpfc_io_buf_replenish(phba, &nvme_nblist);
  18081
  18082	return num_posted;
  18083}
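
/*
 * Illustrative sketch (not wired into the driver): the posting loop in
 * lpfc_sli4_post_io_sgl_list() above starts a new SGL block whenever the xri
 * sequence has a hole or a block reaches LPFC_NEMBED_MBOX_SGL_CNT entries.
 * This hypothetical helper counts how many blocks that rule would produce for
 * a sorted array of xritags.
 */
static int __maybe_unused
lpfc_sketch_count_sgl_blocks(const uint16_t *xris, int count)
{
	int i, blocks = 0, in_block = 0;

	for (i = 0; i < count; i++) {
		if (in_block == 0 ||
		    xris[i] != xris[i - 1] + 1 ||
		    in_block == LPFC_NEMBED_MBOX_SGL_CNT) {
			blocks++;	/* hole or full block: start a new one */
			in_block = 0;
		}
		in_block++;
	}
	return blocks;
}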
  18084
  18085/**
  18086 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
  18087 * @phba: pointer to lpfc_hba struct that the frame was received on
  18088 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
  18089 *
  18090 * This function checks the fields in the @fc_hdr to see if the FC frame is a
  18091 * valid type of frame that the LPFC driver will handle. This function will
  18092 * return a zero if the frame is a valid frame or a non zero value when the
  18093 * frame does not pass the check.
  18094 **/
  18095static int
  18096lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
  18097{
  18098	/*  make rctl_names static to save stack space */
  18099	struct fc_vft_header *fc_vft_hdr;
  18100	uint32_t *header = (uint32_t *) fc_hdr;
  18101
  18102#define FC_RCTL_MDS_DIAGS	0xF4
  18103
  18104	switch (fc_hdr->fh_r_ctl) {
  18105	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
  18106	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
  18107	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
  18108	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
  18109	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
  18110	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
  18111	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
  18112	case FC_RCTL_DD_CMD_STATUS:	/* command status */
  18113	case FC_RCTL_ELS_REQ:	/* extended link services request */
  18114	case FC_RCTL_ELS_REP:	/* extended link services reply */
  18115	case FC_RCTL_ELS4_REQ:	/* FC-4 ELS request */
  18116	case FC_RCTL_ELS4_REP:	/* FC-4 ELS reply */
  18117	case FC_RCTL_BA_ABTS: 	/* basic link service abort */
  18118	case FC_RCTL_BA_RMC: 	/* remove connection */
  18119	case FC_RCTL_BA_ACC:	/* basic accept */
  18120	case FC_RCTL_BA_RJT:	/* basic reject */
  18121	case FC_RCTL_BA_PRMT:
  18122	case FC_RCTL_ACK_1:	/* acknowledge_1 */
  18123	case FC_RCTL_ACK_0:	/* acknowledge_0 */
  18124	case FC_RCTL_P_RJT:	/* port reject */
  18125	case FC_RCTL_F_RJT:	/* fabric reject */
  18126	case FC_RCTL_P_BSY:	/* port busy */
  18127	case FC_RCTL_F_BSY:	/* fabric busy to data frame */
  18128	case FC_RCTL_F_BSYL:	/* fabric busy to link control frame */
  18129	case FC_RCTL_LCR:	/* link credit reset */
  18130	case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
  18131	case FC_RCTL_END:	/* end */
  18132		break;
  18133	case FC_RCTL_VFTH:	/* Virtual Fabric tagging Header */
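		/* Strip the VFT header and re-validate the encapsulated frame */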
  18134		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
  18135		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
  18136		return lpfc_fc_frame_check(phba, fc_hdr);
  18137	case FC_RCTL_BA_NOP:	/* basic link service NOP */
  18138	default:
  18139		goto drop;
  18140	}
  18141
  18142	switch (fc_hdr->fh_type) {
  18143	case FC_TYPE_BLS:
  18144	case FC_TYPE_ELS:
  18145	case FC_TYPE_FCP:
  18146	case FC_TYPE_CT:
  18147	case FC_TYPE_NVME:
  18148		break;
  18149	case FC_TYPE_IP:
  18150	case FC_TYPE_ILS:
  18151	default:
  18152		goto drop;
  18153	}
  18154
  18155	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
  18156			"2538 Received frame rctl:x%x, type:x%x, "
  18157			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
  18158			fc_hdr->fh_r_ctl, fc_hdr->fh_type,
  18159			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
  18160			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
  18161			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
  18162			be32_to_cpu(header[6]));
  18163	return 0;
  18164drop:
  18165	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
  18166			"2539 Dropped frame rctl:x%x type:x%x\n",
  18167			fc_hdr->fh_r_ctl, fc_hdr->fh_type);
  18168	return 1;
  18169}
  18170
  18171/**
  18172 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
  18173 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
  18174 *
  18175 * This function processes the FC header to retrieve the VFI from the VF
  18176 * header, if one exists. This function will return the VFI if one exists
  18177 * or 0 if no VSAN Header exists.
  18178 **/
  18179static uint32_t
  18180lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
  18181{
  18182	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
  18183
  18184	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
  18185		return 0;
  18186	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
  18187}
  18188
  18189/**
  18190 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
  18191 * @phba: Pointer to the HBA structure to search for the vport on
  18192 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
  18193 * @fcfi: The FC Fabric ID that the frame came from
  18194 * @did: Destination ID to match against
  18195 *
  18196 * This function searches the @phba for a vport that matches the content of the
  18197 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
  18198 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
  18199 * returns the matching vport pointer or NULL if unable to match frame to a
  18200 * vport.
  18201 **/
  18202static struct lpfc_vport *
  18203lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
  18204		       uint16_t fcfi, uint32_t did)
  18205{
  18206	struct lpfc_vport **vports;
  18207	struct lpfc_vport *vport = NULL;
  18208	int i;
  18209
  18210	if (did == Fabric_DID)
  18211		return phba->pport;
  18212	if ((phba->pport->fc_flag & FC_PT2PT) &&
  18213		!(phba->link_state == LPFC_HBA_READY))
  18214		return phba->pport;
  18215
  18216	vports = lpfc_create_vport_work_array(phba);
  18217	if (vports != NULL) {
  18218		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
  18219			if (phba->fcf.fcfi == fcfi &&
  18220			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
  18221			    vports[i]->fc_myDID == did) {
  18222				vport = vports[i];
  18223				break;
  18224			}
  18225		}
  18226	}
  18227	lpfc_destroy_vport_work_array(phba, vports);
  18228	return vport;
  18229}
  18230
  18231/**
  18232 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
  18233 * @vport: The vport to work on.
  18234 *
  18235 * This function updates the receive sequence time stamp for this vport. The
   18236 * receive sequence time stamp indicates the time that the last frame of the
   18237 * sequence that has been idle for the longest amount of time was received.
   18238 * The driver uses this time stamp to indicate if any received sequences have
  18239 * timed out.
  18240 **/
  18241static void
  18242lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
  18243{
  18244	struct lpfc_dmabuf *h_buf;
  18245	struct hbq_dmabuf *dmabuf = NULL;
  18246
  18247	/* get the oldest sequence on the rcv list */
  18248	h_buf = list_get_first(&vport->rcv_buffer_list,
  18249			       struct lpfc_dmabuf, list);
  18250	if (!h_buf)
  18251		return;
  18252	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
  18253	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
  18254}
  18255
  18256/**
  18257 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
  18258 * @vport: The vport that the received sequences were sent to.
  18259 *
  18260 * This function cleans up all outstanding received sequences. This is called
  18261 * by the driver when a link event or user action invalidates all the received
  18262 * sequences.
  18263 **/
  18264void
  18265lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
  18266{
  18267	struct lpfc_dmabuf *h_buf, *hnext;
  18268	struct lpfc_dmabuf *d_buf, *dnext;
  18269	struct hbq_dmabuf *dmabuf = NULL;
  18270
  18271	/* start with the oldest sequence on the rcv list */
  18272	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
  18273		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
  18274		list_del_init(&dmabuf->hbuf.list);
  18275		list_for_each_entry_safe(d_buf, dnext,
  18276					 &dmabuf->dbuf.list, list) {
  18277			list_del_init(&d_buf->list);
  18278			lpfc_in_buf_free(vport->phba, d_buf);
  18279		}
  18280		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
  18281	}
  18282}
  18283
  18284/**
  18285 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
  18286 * @vport: The vport that the received sequences were sent to.
  18287 *
  18288 * This function determines whether any received sequences have timed out by
  18289 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
  18290 * indicates that there is at least one timed out sequence this routine will
  18291 * go through the received sequences one at a time from most inactive to most
  18292 * active to determine which ones need to be cleaned up. Once it has determined
  18293 * that a sequence needs to be cleaned up it will simply free up the resources
  18294 * without sending an abort.
  18295 **/
  18296void
  18297lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
  18298{
  18299	struct lpfc_dmabuf *h_buf, *hnext;
  18300	struct lpfc_dmabuf *d_buf, *dnext;
  18301	struct hbq_dmabuf *dmabuf = NULL;
  18302	unsigned long timeout;
  18303	int abort_count = 0;
  18304
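	/* A sequence is considered timed out once it has been idle for
	 * longer than E_D_TOV.
	 */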
  18305	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
  18306		   vport->rcv_buffer_time_stamp);
  18307	if (list_empty(&vport->rcv_buffer_list) ||
  18308	    time_before(jiffies, timeout))
  18309		return;
  18310	/* start with the oldest sequence on the rcv list */
  18311	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
  18312		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
  18313		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
  18314			   dmabuf->time_stamp);
  18315		if (time_before(jiffies, timeout))
  18316			break;
  18317		abort_count++;
  18318		list_del_init(&dmabuf->hbuf.list);
  18319		list_for_each_entry_safe(d_buf, dnext,
  18320					 &dmabuf->dbuf.list, list) {
  18321			list_del_init(&d_buf->list);
  18322			lpfc_in_buf_free(vport->phba, d_buf);
  18323		}
  18324		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
  18325	}
  18326	if (abort_count)
  18327		lpfc_update_rcv_time_stamp(vport);
  18328}
  18329
  18330/**
  18331 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
   18332 * @vport: pointer to a virtual port
  18333 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
  18334 *
  18335 * This function searches through the existing incomplete sequences that have
  18336 * been sent to this @vport. If the frame matches one of the incomplete
  18337 * sequences then the dbuf in the @dmabuf is added to the list of frames that
  18338 * make up that sequence. If no sequence is found that matches this frame then
   18339 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
  18340 * This function returns a pointer to the first dmabuf in the sequence list that
  18341 * the frame was linked to.
  18342 **/
  18343static struct hbq_dmabuf *
  18344lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
  18345{
  18346	struct fc_frame_header *new_hdr;
  18347	struct fc_frame_header *temp_hdr;
  18348	struct lpfc_dmabuf *d_buf;
  18349	struct lpfc_dmabuf *h_buf;
  18350	struct hbq_dmabuf *seq_dmabuf = NULL;
  18351	struct hbq_dmabuf *temp_dmabuf = NULL;
  18352	uint8_t	found = 0;
  18353
  18354	INIT_LIST_HEAD(&dmabuf->dbuf.list);
  18355	dmabuf->time_stamp = jiffies;
  18356	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
  18357
  18358	/* Use the hdr_buf to find the sequence that this frame belongs to */
  18359	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
  18360		temp_hdr = (struct fc_frame_header *)h_buf->virt;
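		/* Frames belong to the same sequence only when SEQ_ID, OX_ID
		 * and the 3-byte S_ID all match.
		 */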
  18361		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
  18362		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
  18363		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
  18364			continue;
  18365		/* found a pending sequence that matches this frame */
  18366		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
  18367		break;
  18368	}
  18369	if (!seq_dmabuf) {
  18370		/*
  18371		 * This indicates first frame received for this sequence.
  18372		 * Queue the buffer on the vport's rcv_buffer_list.
  18373		 */
  18374		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
  18375		lpfc_update_rcv_time_stamp(vport);
  18376		return dmabuf;
  18377	}
  18378	temp_hdr = seq_dmabuf->hbuf.virt;
  18379	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
  18380		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
  18381		list_del_init(&seq_dmabuf->hbuf.list);
  18382		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
  18383		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
  18384		lpfc_update_rcv_time_stamp(vport);
  18385		return dmabuf;
  18386	}
  18387	/* move this sequence to the tail to indicate a young sequence */
  18388	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
  18389	seq_dmabuf->time_stamp = jiffies;
  18390	lpfc_update_rcv_time_stamp(vport);
  18391	if (list_empty(&seq_dmabuf->dbuf.list)) {
  18392		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
  18393		return seq_dmabuf;
  18394	}
  18395	/* find the correct place in the sequence to insert this frame */
  18396	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
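	/* Walk backwards from the highest SEQ_CNT frame so the list stays
	 * ordered by ascending sequence count.
	 */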
  18397	while (!found) {
  18398		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
  18399		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
  18400		/*
  18401		 * If the frame's sequence count is greater than the frame on
  18402		 * the list then insert the frame right after this frame
  18403		 */
  18404		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
  18405			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
  18406			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
  18407			found = 1;
  18408			break;
  18409		}
  18410
  18411		if (&d_buf->list == &seq_dmabuf->dbuf.list)
  18412			break;
  18413		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
  18414	}
  18415
  18416	if (found)
  18417		return seq_dmabuf;
  18418	return NULL;
  18419}
  18420
  18421/**
  18422 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
   18423 * @vport: pointer to a virtual port
  18424 * @dmabuf: pointer to a dmabuf that describes the FC sequence
  18425 *
   18426 * This function tries to abort the partially assembled sequence described
   18427 * by the information from the basic abort @dmabuf. It checks to see whether such
   18428 * a partially assembled sequence is held by the driver. If so, it shall free up all
  18429 * the frames from the partially assembled sequence.
  18430 *
  18431 * Return
   18432 * true  -- if there is a matching partially assembled sequence present and all
   18433 *          the frames are freed with the sequence;
  18434 * false -- if there is no matching partially assembled sequence present so
  18435 *          nothing got aborted in the lower layer driver
  18436 **/
  18437static bool
  18438lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
  18439			    struct hbq_dmabuf *dmabuf)
  18440{
  18441	struct fc_frame_header *new_hdr;
  18442	struct fc_frame_header *temp_hdr;
  18443	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
  18444	struct hbq_dmabuf *seq_dmabuf = NULL;
  18445
  18446	/* Use the hdr_buf to find the sequence that matches this frame */
  18447	INIT_LIST_HEAD(&dmabuf->dbuf.list);
  18448	INIT_LIST_HEAD(&dmabuf->hbuf.list);
  18449	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
  18450	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
  18451		temp_hdr = (struct fc_frame_header *)h_buf->virt;
  18452		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
  18453		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
  18454		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
  18455			continue;
  18456		/* found a pending sequence that matches this frame */
  18457		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
  18458		break;
  18459	}
  18460
  18461	/* Free up all the frames from the partially assembled sequence */
  18462	if (seq_dmabuf) {
  18463		list_for_each_entry_safe(d_buf, n_buf,
  18464					 &seq_dmabuf->dbuf.list, list) {
  18465			list_del_init(&d_buf->list);
  18466			lpfc_in_buf_free(vport->phba, d_buf);
  18467		}
  18468		return true;
  18469	}
  18470	return false;
  18471}
  18472
  18473/**
  18474 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
   18475 * @vport: pointer to a virtual port
  18476 * @dmabuf: pointer to a dmabuf that describes the FC sequence
  18477 *
   18478 * This function tries to abort the assembled sequence at the upper level
   18479 * protocol, described by the information from the basic abort @dmabuf. It
  18480 * checks to see whether such pending context exists at upper level protocol.
  18481 * If so, it shall clean up the pending context.
  18482 *
  18483 * Return
  18484 * true  -- if there is matching pending context of the sequence cleaned
  18485 *          at ulp;
  18486 * false -- if there is no matching pending context of the sequence present
  18487 *          at ulp.
  18488 **/
  18489static bool
  18490lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
  18491{
  18492	struct lpfc_hba *phba = vport->phba;
  18493	int handled;
  18494
  18495	/* Accepting abort at ulp with SLI4 only */
  18496	if (phba->sli_rev < LPFC_SLI_REV4)
  18497		return false;
  18498
   18499	/* Let all interested upper level protocols handle the abort */
  18500	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
  18501	if (handled)
  18502		return true;
  18503
  18504	return false;
  18505}
  18506
  18507/**
  18508 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
  18509 * @phba: Pointer to HBA context object.
  18510 * @cmd_iocbq: pointer to the command iocbq structure.
  18511 * @rsp_iocbq: pointer to the response iocbq structure.
  18512 *
  18513 * This function handles the sequence abort response iocb command complete
  18514 * event. It properly releases the memory allocated to the sequence abort
  18515 * accept iocb.
  18516 **/
  18517static void
  18518lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
  18519			     struct lpfc_iocbq *cmd_iocbq,
  18520			     struct lpfc_iocbq *rsp_iocbq)
  18521{
  18522	if (cmd_iocbq) {
  18523		lpfc_nlp_put(cmd_iocbq->ndlp);
  18524		lpfc_sli_release_iocbq(phba, cmd_iocbq);
  18525	}
  18526
   18527	/* Failure means BLS ABORT RSP did not get delivered to remote node */
  18528	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
  18529		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  18530			"3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
  18531			get_job_ulpstatus(phba, rsp_iocbq),
  18532			get_job_word4(phba, rsp_iocbq));
  18533}
  18534
  18535/**
  18536 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
  18537 * @phba: Pointer to HBA context object.
  18538 * @xri: xri id in transaction.
  18539 *
   18540 * This function validates that the xri maps to the known range of XRIs
   18541 * allocated and used by the driver.
  18542 **/
  18543uint16_t
  18544lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
  18545		      uint16_t xri)
  18546{
  18547	uint16_t i;
  18548
  18549	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
  18550		if (xri == phba->sli4_hba.xri_ids[i])
  18551			return i;
  18552	}
  18553	return NO_XRI;
  18554}
  18555
  18556/**
  18557 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
  18558 * @vport: pointer to a virtual port.
  18559 * @fc_hdr: pointer to a FC frame header.
  18560 * @aborted: was the partially assembled receive sequence successfully aborted
  18561 *
  18562 * This function sends a basic response to a previous unsol sequence abort
  18563 * event after aborting the sequence handling.
  18564 **/
  18565void
  18566lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
  18567			struct fc_frame_header *fc_hdr, bool aborted)
  18568{
  18569	struct lpfc_hba *phba = vport->phba;
  18570	struct lpfc_iocbq *ctiocb = NULL;
  18571	struct lpfc_nodelist *ndlp;
  18572	uint16_t oxid, rxid, xri, lxri;
  18573	uint32_t sid, fctl;
  18574	union lpfc_wqe128 *icmd;
  18575	int rc;
  18576
  18577	if (!lpfc_is_link_up(phba))
  18578		return;
  18579
  18580	sid = sli4_sid_from_fc_hdr(fc_hdr);
  18581	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
  18582	rxid = be16_to_cpu(fc_hdr->fh_rx_id);
  18583
  18584	ndlp = lpfc_findnode_did(vport, sid);
  18585	if (!ndlp) {
  18586		ndlp = lpfc_nlp_init(vport, sid);
  18587		if (!ndlp) {
  18588			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
  18589					 "1268 Failed to allocate ndlp for "
  18590					 "oxid:x%x SID:x%x\n", oxid, sid);
  18591			return;
  18592		}
  18593		/* Put ndlp onto pport node list */
  18594		lpfc_enqueue_node(vport, ndlp);
  18595	}
  18596
  18597	/* Allocate buffer for rsp iocb */
  18598	ctiocb = lpfc_sli_get_iocbq(phba);
  18599	if (!ctiocb)
  18600		return;
  18601
  18602	icmd = &ctiocb->wqe;
  18603
  18604	/* Extract the F_CTL field from FC_HDR */
  18605	fctl = sli4_fctl_from_fc_hdr(fc_hdr);
  18606
  18607	ctiocb->ndlp = lpfc_nlp_get(ndlp);
  18608	if (!ctiocb->ndlp) {
  18609		lpfc_sli_release_iocbq(phba, ctiocb);
  18610		return;
  18611	}
  18612
  18613	ctiocb->vport = phba->pport;
  18614	ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
  18615	ctiocb->sli4_lxritag = NO_XRI;
  18616	ctiocb->sli4_xritag = NO_XRI;
  18617	ctiocb->abort_rctl = FC_RCTL_BA_ACC;
  18618
  18619	if (fctl & FC_FC_EX_CTX)
  18620		/* Exchange responder sent the abort so we
  18621		 * own the oxid.
  18622		 */
  18623		xri = oxid;
  18624	else
  18625		xri = rxid;
  18626	lxri = lpfc_sli4_xri_inrange(phba, xri);
  18627	if (lxri != NO_XRI)
  18628		lpfc_set_rrq_active(phba, ndlp, lxri,
  18629			(xri == oxid) ? rxid : oxid, 0);
  18630	/* For BA_ABTS from exchange responder, if the logical xri with
  18631	 * the oxid maps to the FCP XRI range, the port no longer has
  18632	 * that exchange context, send a BLS_RJT. Override the IOCB for
  18633	 * a BA_RJT.
  18634	 */
  18635	if ((fctl & FC_FC_EX_CTX) &&
  18636	    (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
  18637		ctiocb->abort_rctl = FC_RCTL_BA_RJT;
  18638		bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
  18639		bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
  18640		       FC_BA_RJT_INV_XID);
  18641		bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
  18642		       FC_BA_RJT_UNABLE);
  18643	}
  18644
  18645	/* If BA_ABTS failed to abort a partially assembled receive sequence,
  18646	 * the driver no longer has that exchange, send a BLS_RJT. Override
  18647	 * the IOCB for a BA_RJT.
  18648	 */
  18649	if (aborted == false) {
  18650		ctiocb->abort_rctl = FC_RCTL_BA_RJT;
  18651		bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
  18652		bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
  18653		       FC_BA_RJT_INV_XID);
  18654		bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
  18655		       FC_BA_RJT_UNABLE);
  18656	}
  18657
  18658	if (fctl & FC_FC_EX_CTX) {
  18659		/* ABTS sent by responder to CT exchange, construction
  18660		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
  18661		 * field and RX_ID from ABTS for RX_ID field.
  18662		 */
  18663		ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
  18664		bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
  18665	} else {
  18666		/* ABTS sent by initiator to CT exchange, construction
  18667		 * of BA_ACC will need to allocate a new XRI as for the
  18668		 * XRI_TAG field.
  18669		 */
  18670		ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
  18671	}
  18672
   18673	/* OX_ID is the same regardless of who sent the ABTS to the CT exchange */
  18674	bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
  18675	bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, rxid);
  18676
  18677	/* Use CT=VPI */
  18678	bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest,
  18679	       ndlp->nlp_DID);
  18680	bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
  18681	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
  18682	bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);
  18683
  18684	/* Xmit CT abts response on exchange <xid> */
  18685	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
  18686			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
  18687			 ctiocb->abort_rctl, oxid, phba->link_state);
  18688
  18689	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
  18690	if (rc == IOCB_ERROR) {
  18691		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
  18692				 "2925 Failed to issue CT ABTS RSP x%x on "
  18693				 "xri x%x, Data x%x\n",
  18694				 ctiocb->abort_rctl, oxid,
  18695				 phba->link_state);
  18696		lpfc_nlp_put(ndlp);
  18697		ctiocb->ndlp = NULL;
  18698		lpfc_sli_release_iocbq(phba, ctiocb);
  18699	}
  18700}
  18701
  18702/**
  18703 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
  18704 * @vport: Pointer to the vport on which this sequence was received
  18705 * @dmabuf: pointer to a dmabuf that describes the FC sequence
  18706 *
  18707 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
   18708 * receive sequence is only partially assembled by the driver, it shall abort
   18709 * the partially assembled frames for the sequence. Otherwise, if the
   18710 * unsolicited receive sequence has been completely assembled and passed to
   18711 * the Upper Layer Protocol (ULP), it then marks the per-oxid status for the
   18712 * unsolicited sequence as aborted. After that, it will issue a basic
   18713 * accept (BA_ACC) for the abort.
  18714 **/
  18715static void
  18716lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
  18717			     struct hbq_dmabuf *dmabuf)
  18718{
  18719	struct lpfc_hba *phba = vport->phba;
  18720	struct fc_frame_header fc_hdr;
  18721	uint32_t fctl;
  18722	bool aborted;
  18723
  18724	/* Make a copy of fc_hdr before the dmabuf being released */
  18725	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
  18726	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
  18727
  18728	if (fctl & FC_FC_EX_CTX) {
  18729		/* ABTS by responder to exchange, no cleanup needed */
  18730		aborted = true;
  18731	} else {
  18732		/* ABTS by initiator to exchange, need to do cleanup */
  18733		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
  18734		if (aborted == false)
  18735			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
  18736	}
  18737	lpfc_in_buf_free(phba, &dmabuf->dbuf);
  18738
  18739	if (phba->nvmet_support) {
  18740		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
  18741		return;
  18742	}
  18743
  18744	/* Respond with BA_ACC or BA_RJT accordingly */
  18745	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
  18746}
  18747
  18748/**
  18749 * lpfc_seq_complete - Indicates if a sequence is complete
  18750 * @dmabuf: pointer to a dmabuf that describes the FC sequence
  18751 *
  18752 * This function checks the sequence, starting with the frame described by
  18753 * @dmabuf, to see if all the frames associated with this sequence are present.
   18754 * The frames associated with this sequence are linked to the @dmabuf using the
   18755 * dbuf list. This function looks for three major things. 1) That the first frame
   18756 * has a sequence count of zero. 2) That there is a frame with the last frame of
   18757 * sequence bit set. 3) That there are no holes in the sequence count. The function will
  18758 * return 1 when the sequence is complete, otherwise it will return 0.
  18759 **/
  18760static int
  18761lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
  18762{
  18763	struct fc_frame_header *hdr;
  18764	struct lpfc_dmabuf *d_buf;
  18765	struct hbq_dmabuf *seq_dmabuf;
  18766	uint32_t fctl;
  18767	int seq_count = 0;
  18768
  18769	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
   18770	/* make sure first frame of sequence has a sequence count of zero */
  18771	if (hdr->fh_seq_cnt != seq_count)
  18772		return 0;
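	/* F_CTL is a 24-bit field carried as three bytes in the FC header */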
  18773	fctl = (hdr->fh_f_ctl[0] << 16 |
  18774		hdr->fh_f_ctl[1] << 8 |
  18775		hdr->fh_f_ctl[2]);
  18776	/* If last frame of sequence we can return success. */
  18777	if (fctl & FC_FC_END_SEQ)
  18778		return 1;
  18779	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
  18780		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
  18781		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
  18782		/* If there is a hole in the sequence count then fail. */
  18783		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
  18784			return 0;
  18785		fctl = (hdr->fh_f_ctl[0] << 16 |
  18786			hdr->fh_f_ctl[1] << 8 |
  18787			hdr->fh_f_ctl[2]);
  18788		/* If last frame of sequence we can return success. */
  18789		if (fctl & FC_FC_END_SEQ)
  18790			return 1;
  18791	}
  18792	return 0;
  18793}
  18794
  18795/**
  18796 * lpfc_prep_seq - Prep sequence for ULP processing
  18797 * @vport: Pointer to the vport on which this sequence was received
  18798 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
  18799 *
  18800 * This function takes a sequence, described by a list of frames, and creates
  18801 * a list of iocbq structures to describe the sequence. This iocbq list will be
  18802 * used to issue to the generic unsolicited sequence handler. This routine
  18803 * returns a pointer to the first iocbq in the list. If the function is unable
   18804 * to allocate an iocbq then it throws out the received frames that were not
   18805 * able to be described and returns a pointer to the first iocbq. If unable to
  18806 * allocate any iocbqs (including the first) this function will return NULL.
  18807 **/
  18808static struct lpfc_iocbq *
  18809lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
  18810{
  18811	struct hbq_dmabuf *hbq_buf;
  18812	struct lpfc_dmabuf *d_buf, *n_buf;
  18813	struct lpfc_iocbq *first_iocbq, *iocbq;
  18814	struct fc_frame_header *fc_hdr;
  18815	uint32_t sid;
  18816	uint32_t len, tot_len;
  18817
  18818	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
  18819	/* remove from receive buffer list */
  18820	list_del_init(&seq_dmabuf->hbuf.list);
  18821	lpfc_update_rcv_time_stamp(vport);
  18822	/* get the Remote Port's SID */
  18823	sid = sli4_sid_from_fc_hdr(fc_hdr);
  18824	tot_len = 0;
  18825	/* Get an iocbq struct to fill in. */
  18826	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
  18827	if (first_iocbq) {
  18828		/* Initialize the first IOCB. */
  18829		first_iocbq->wcqe_cmpl.total_data_placed = 0;
  18830		bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl,
  18831		       IOSTAT_SUCCESS);
  18832		first_iocbq->vport = vport;
  18833
  18834		/* Check FC Header to see what TYPE of frame we are rcv'ing */
  18835		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
  18836			bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp,
  18837			       sli4_did_from_fc_hdr(fc_hdr));
  18838		}
  18839
  18840		bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
  18841		       NO_XRI);
  18842		bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
  18843		       be16_to_cpu(fc_hdr->fh_ox_id));
  18844
  18845		/* put the first buffer into the first iocb */
  18846		tot_len = bf_get(lpfc_rcqe_length,
  18847				 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
  18848
  18849		first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf;
  18850		first_iocbq->bpl_dmabuf = NULL;
  18851		/* Keep track of the BDE count */
  18852		first_iocbq->wcqe_cmpl.word3 = 1;
  18853
  18854		if (tot_len > LPFC_DATA_BUF_SIZE)
  18855			first_iocbq->wqe.gen_req.bde.tus.f.bdeSize =
  18856				LPFC_DATA_BUF_SIZE;
  18857		else
  18858			first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len;
  18859
  18860		first_iocbq->wcqe_cmpl.total_data_placed = tot_len;
  18861		bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest,
  18862		       sid);
  18863	}
  18864	iocbq = first_iocbq;
  18865	/*
  18866	 * Each IOCBq can have two Buffers assigned, so go through the list
  18867	 * of buffers for this sequence and save two buffers in each IOCBq
  18868	 */
  18869	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
  18870		if (!iocbq) {
  18871			lpfc_in_buf_free(vport->phba, d_buf);
  18872			continue;
  18873		}
  18874		if (!iocbq->bpl_dmabuf) {
  18875			iocbq->bpl_dmabuf = d_buf;
  18876			iocbq->wcqe_cmpl.word3++;
  18877			/* We need to get the size out of the right CQE */
  18878			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
  18879			len = bf_get(lpfc_rcqe_length,
  18880				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
  18881			iocbq->unsol_rcv_len = len;
  18882			iocbq->wcqe_cmpl.total_data_placed += len;
  18883			tot_len += len;
  18884		} else {
  18885			iocbq = lpfc_sli_get_iocbq(vport->phba);
  18886			if (!iocbq) {
  18887				if (first_iocbq) {
  18888					bf_set(lpfc_wcqe_c_status,
  18889					       &first_iocbq->wcqe_cmpl,
  18890					       IOSTAT_SUCCESS);
  18891					first_iocbq->wcqe_cmpl.parameter =
  18892						IOERR_NO_RESOURCES;
  18893				}
  18894				lpfc_in_buf_free(vport->phba, d_buf);
  18895				continue;
  18896			}
  18897			/* We need to get the size out of the right CQE */
  18898			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
  18899			len = bf_get(lpfc_rcqe_length,
  18900				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
  18901			iocbq->cmd_dmabuf = d_buf;
  18902			iocbq->bpl_dmabuf = NULL;
  18903			iocbq->wcqe_cmpl.word3 = 1;
  18904
  18905			if (len > LPFC_DATA_BUF_SIZE)
  18906				iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
  18907					LPFC_DATA_BUF_SIZE;
  18908			else
  18909				iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
  18910					len;
  18911
  18912			tot_len += len;
  18913			iocbq->wcqe_cmpl.total_data_placed = tot_len;
  18914			bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest,
  18915			       sid);
  18916			list_add_tail(&iocbq->list, &first_iocbq->list);
  18917		}
  18918	}
  18919	/* Free the sequence's header buffer */
  18920	if (!first_iocbq)
  18921		lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
  18922
  18923	return first_iocbq;
  18924}
  18925
  18926static void
  18927lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
  18928			  struct hbq_dmabuf *seq_dmabuf)
  18929{
  18930	struct fc_frame_header *fc_hdr;
  18931	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
  18932	struct lpfc_hba *phba = vport->phba;
  18933
  18934	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
  18935	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
  18936	if (!iocbq) {
  18937		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  18938				"2707 Ring %d handler: Failed to allocate "
  18939				"iocb Rctl x%x Type x%x received\n",
  18940				LPFC_ELS_RING,
  18941				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
  18942		return;
  18943	}
  18944	if (!lpfc_complete_unsol_iocb(phba,
  18945				      phba->sli4_hba.els_wq->pring,
  18946				      iocbq, fc_hdr->fh_r_ctl,
  18947				      fc_hdr->fh_type)) {
  18948		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  18949				"2540 Ring %d handler: unexpected Rctl "
  18950				"x%x Type x%x received\n",
  18951				LPFC_ELS_RING,
  18952				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
  18953		lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
  18954	}
  18955
  18956	/* Free iocb created in lpfc_prep_seq */
  18957	list_for_each_entry_safe(curr_iocb, next_iocb,
  18958				 &iocbq->list, list) {
  18959		list_del_init(&curr_iocb->list);
  18960		lpfc_sli_release_iocbq(phba, curr_iocb);
  18961	}
  18962	lpfc_sli_release_iocbq(phba, iocbq);
  18963}
  18964
  18965static void
  18966lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  18967			    struct lpfc_iocbq *rspiocb)
  18968{
  18969	struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
  18970
  18971	if (pcmd && pcmd->virt)
  18972		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
  18973	kfree(pcmd);
  18974	lpfc_sli_release_iocbq(phba, cmdiocb);
  18975	lpfc_drain_txq(phba);
  18976}
  18977
  18978static void
  18979lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
  18980			      struct hbq_dmabuf *dmabuf)
  18981{
  18982	struct fc_frame_header *fc_hdr;
  18983	struct lpfc_hba *phba = vport->phba;
  18984	struct lpfc_iocbq *iocbq = NULL;
  18985	union  lpfc_wqe128 *pwqe;
  18986	struct lpfc_dmabuf *pcmd = NULL;
  18987	uint32_t frame_len;
  18988	int rc;
  18989	unsigned long iflags;
  18990
  18991	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
  18992	frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
  18993
  18994	/* Send the received frame back */
  18995	iocbq = lpfc_sli_get_iocbq(phba);
  18996	if (!iocbq) {
  18997		/* Queue cq event and wakeup worker thread to process it */
  18998		spin_lock_irqsave(&phba->hbalock, iflags);
  18999		list_add_tail(&dmabuf->cq_event.list,
  19000			      &phba->sli4_hba.sp_queue_event);
  19001		phba->hba_flag |= HBA_SP_QUEUE_EVT;
  19002		spin_unlock_irqrestore(&phba->hbalock, iflags);
  19003		lpfc_worker_wake_up(phba);
  19004		return;
  19005	}
  19006
  19007	/* Allocate buffer for command payload */
  19008	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  19009	if (pcmd)
  19010		pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
  19011					    &pcmd->phys);
  19012	if (!pcmd || !pcmd->virt)
  19013		goto exit;
  19014
  19015	INIT_LIST_HEAD(&pcmd->list);
  19016
  19017	/* copyin the payload */
  19018	memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
  19019
  19020	iocbq->cmd_dmabuf = pcmd;
  19021	iocbq->vport = vport;
  19022	iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
  19023	iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
  19024	iocbq->num_bdes = 0;
  19025
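	/* Build a SEND_FRAME WQE that echoes the received header and payload */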
  19026	pwqe = &iocbq->wqe;
  19027	/* fill in BDE's for command */
  19028	pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
  19029	pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
  19030	pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
  19031	pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  19032
  19033	pwqe->send_frame.frame_len = frame_len;
  19034	pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
  19035	pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
  19036	pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
  19037	pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
  19038	pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
  19039	pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));
  19040
  19041	pwqe->generic.wqe_com.word7 = 0;
  19042	pwqe->generic.wqe_com.word10 = 0;
  19043
  19044	bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
  19045	bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */
  19046	bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */
  19047	bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
  19048	bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
  19049	bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
  19050	bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
  19051	bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
  19052	bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
  19053	bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
  19054	bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
  19055	bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
  19056	pwqe->generic.wqe_com.abort_tag = iocbq->iotag;
  19057
  19058	iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
  19059
  19060	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
  19061	if (rc == IOCB_ERROR)
  19062		goto exit;
  19063
  19064	lpfc_in_buf_free(phba, &dmabuf->dbuf);
  19065	return;
  19066
  19067exit:
  19068	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  19069			"2023 Unable to process MDS loopback frame\n");
  19070	if (pcmd && pcmd->virt)
  19071		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
  19072	kfree(pcmd);
  19073	if (iocbq)
  19074		lpfc_sli_release_iocbq(phba, iocbq);
  19075	lpfc_in_buf_free(phba, &dmabuf->dbuf);
  19076}
  19077
  19078/**
  19079 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
  19080 * @phba: Pointer to HBA context object.
  19081 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
  19082 *
  19083 * This function is called with no lock held. This function processes all
   19084 * the received buffers and gives them to the upper layers when a received buffer
   19085 * indicates that it is the final frame in the sequence. The interrupt
   19086 * service routine processes received buffers in interrupt context.
   19087 * The worker thread calls lpfc_sli4_handle_received_buffer, which will call the
  19088 * appropriate receive function when the final frame in a sequence is received.
  19089 **/
  19090void
  19091lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
  19092				 struct hbq_dmabuf *dmabuf)
  19093{
  19094	struct hbq_dmabuf *seq_dmabuf;
  19095	struct fc_frame_header *fc_hdr;
  19096	struct lpfc_vport *vport;
  19097	uint32_t fcfi;
  19098	uint32_t did;
  19099
  19100	/* Process each received buffer */
  19101	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
  19102
  19103	if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
  19104	    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
  19105		vport = phba->pport;
  19106		/* Handle MDS Loopback frames */
  19107		if  (!(phba->pport->load_flag & FC_UNLOADING))
  19108			lpfc_sli4_handle_mds_loopback(vport, dmabuf);
  19109		else
  19110			lpfc_in_buf_free(phba, &dmabuf->dbuf);
  19111		return;
  19112	}
  19113
   19114	/* check to see if this is a valid type of frame */
  19115	if (lpfc_fc_frame_check(phba, fc_hdr)) {
  19116		lpfc_in_buf_free(phba, &dmabuf->dbuf);
  19117		return;
  19118	}
  19119
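	/* V1 receive CQEs report the FCF index via a different field definition */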
  19120	if ((bf_get(lpfc_cqe_code,
  19121		    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
  19122		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
  19123			      &dmabuf->cq_event.cqe.rcqe_cmpl);
  19124	else
  19125		fcfi = bf_get(lpfc_rcqe_fcf_id,
  19126			      &dmabuf->cq_event.cqe.rcqe_cmpl);
  19127
  19128	if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
  19129		vport = phba->pport;
  19130		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  19131				"2023 MDS Loopback %d bytes\n",
  19132				bf_get(lpfc_rcqe_length,
  19133				       &dmabuf->cq_event.cqe.rcqe_cmpl));
  19134		/* Handle MDS Loopback frames */
  19135		lpfc_sli4_handle_mds_loopback(vport, dmabuf);
  19136		return;
  19137	}
  19138
  19139	/* d_id this frame is directed to */
  19140	did = sli4_did_from_fc_hdr(fc_hdr);
  19141
  19142	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
  19143	if (!vport) {
  19144		/* throw out the frame */
  19145		lpfc_in_buf_free(phba, &dmabuf->dbuf);
  19146		return;
  19147	}
  19148
  19149	/* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
  19150	if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
  19151		(did != Fabric_DID)) {
  19152		/*
  19153		 * Throw out the frame if we are not pt2pt.
  19154		 * The pt2pt protocol allows for discovery frames
  19155		 * to be received without a registered VPI.
  19156		 */
  19157		if (!(vport->fc_flag & FC_PT2PT) ||
  19158			(phba->link_state == LPFC_HBA_READY)) {
  19159			lpfc_in_buf_free(phba, &dmabuf->dbuf);
  19160			return;
  19161		}
  19162	}
  19163
  19164	/* Handle the basic abort sequence (BA_ABTS) event */
  19165	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
  19166		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
  19167		return;
  19168	}
  19169
  19170	/* Link this frame */
  19171	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
  19172	if (!seq_dmabuf) {
  19173		/* unable to add frame to vport - throw it out */
  19174		lpfc_in_buf_free(phba, &dmabuf->dbuf);
  19175		return;
  19176	}
  19177	/* If not last frame in sequence continue processing frames. */
  19178	if (!lpfc_seq_complete(seq_dmabuf))
  19179		return;
  19180
  19181	/* Send the complete sequence to the upper layer protocol */
  19182	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
  19183}
  19184
  19185/**
  19186 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
  19187 * @phba: pointer to lpfc hba data structure.
  19188 *
  19189 * This routine is invoked to post rpi header templates to the
  19190 * HBA consistent with the SLI-4 interface spec.  This routine
  19191 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
  19192 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
  19193 *
   19194 * This routine does not require any locks.  Its usage is expected
  19195 * to be driver load or reset recovery when the driver is
  19196 * sequential.
  19197 *
  19198 * Return codes
  19199 * 	0 - successful
  19200 *      -EIO - The mailbox failed to complete successfully.
  19201 * 	When this error occurs, the driver is not guaranteed
  19202 *	to have any rpi regions posted to the device and
  19203 *	must either attempt to repost the regions or take a
  19204 *	fatal error.
  19205 **/
  19206int
  19207lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
  19208{
  19209	struct lpfc_rpi_hdr *rpi_page;
  19210	uint32_t rc = 0;
  19211	uint16_t lrpi = 0;
  19212
  19213	/* SLI4 ports that support extents do not require RPI headers. */
  19214	if (!phba->sli4_hba.rpi_hdrs_in_use)
  19215		goto exit;
  19216	if (phba->sli4_hba.extents_in_use)
  19217		return -EIO;
  19218
  19219	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
  19220		/*
  19221		 * Assign the rpi headers a physical rpi only if the driver
  19222		 * has not initialized those resources.  A port reset only
  19223		 * needs the headers posted.
  19224		 */
  19225		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
  19226		    LPFC_RPI_RSRC_RDY)
  19227			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
  19228
  19229		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
  19230		if (rc != MBX_SUCCESS) {
  19231			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  19232					"2008 Error %d posting all rpi "
  19233					"headers\n", rc);
  19234			rc = -EIO;
  19235			break;
  19236		}
  19237	}
  19238
  19239 exit:
  19240	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
  19241	       LPFC_RPI_RSRC_RDY);
  19242	return rc;
  19243}
  19244
  19245/**
  19246 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
  19247 * @phba: pointer to lpfc hba data structure.
  19248 * @rpi_page:  pointer to the rpi memory region.
  19249 *
  19250 * This routine is invoked to post a single rpi header to the
  19251 * HBA consistent with the SLI-4 interface spec.  This memory region
  19252 * maps up to 64 rpi context regions.
  19253 *
  19254 * Return codes
  19255 * 	0 - successful
  19256 * 	-ENOMEM - No available memory
  19257 *      -EIO - The mailbox failed to complete successfully.
  19258 **/
  19259int
  19260lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
  19261{
  19262	LPFC_MBOXQ_t *mboxq;
  19263	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
  19264	uint32_t rc = 0;
  19265	uint32_t shdr_status, shdr_add_status;
  19266	union lpfc_sli4_cfg_shdr *shdr;
  19267
  19268	/* SLI4 ports that support extents do not require RPI headers. */
  19269	if (!phba->sli4_hba.rpi_hdrs_in_use)
  19270		return rc;
  19271	if (phba->sli4_hba.extents_in_use)
  19272		return -EIO;
  19273
  19274	/* The port is notified of the header region via a mailbox command. */
  19275	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  19276	if (!mboxq) {
  19277		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  19278				"2001 Unable to allocate memory for issuing "
  19279				"SLI_CONFIG_SPECIAL mailbox command\n");
  19280		return -ENOMEM;
  19281	}
  19282
  19283	/* Post all rpi memory regions to the port. */
  19284	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
  19285	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
  19286			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
  19287			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
  19288			 sizeof(struct lpfc_sli4_cfg_mhdr),
  19289			 LPFC_SLI4_MBX_EMBED);
  19290
  19291
  19292	/* Post the physical rpi to the port for this rpi header. */
  19293	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
  19294	       rpi_page->start_rpi);
  19295	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
  19296	       hdr_tmpl, rpi_page->page_count);
  19297
  19298	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
  19299	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
  19300	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  19301	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
  19302	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  19303	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  19304	mempool_free(mboxq, phba->mbox_mem_pool);
  19305	if (shdr_status || shdr_add_status || rc) {
  19306		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  19307				"2514 POST_RPI_HDR mailbox failed with "
  19308				"status x%x add_status x%x, mbx status x%x\n",
  19309				shdr_status, shdr_add_status, rc);
  19310		rc = -ENXIO;
  19311	} else {
  19312		/*
   19313		 * The next_rpi stores the next logical modulo-64 rpi value used
  19314		 * to post physical rpis in subsequent rpi postings.
  19315		 */
  19316		spin_lock_irq(&phba->hbalock);
  19317		phba->sli4_hba.next_rpi = rpi_page->next_rpi;
  19318		spin_unlock_irq(&phba->hbalock);
  19319	}
  19320	return rc;
  19321}
  19322
  19323/**
  19324 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
  19325 * @phba: pointer to lpfc hba data structure.
  19326 *
   19327 * This routine is invoked to allocate an available rpi from the driver's
   19328 * rpi bitmask, consistent with the SLI-4 interface spec.  If rpi resources
   19329 * are running low, it also attempts to allocate and post another rpi
   19330 * header region to the port.
  19331 *
  19332 * Returns
  19333 * 	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
  19334 * 	LPFC_RPI_ALLOC_ERROR if no rpis are available.
  19335 **/
  19336int
  19337lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
  19338{
  19339	unsigned long rpi;
  19340	uint16_t max_rpi, rpi_limit;
  19341	uint16_t rpi_remaining, lrpi = 0;
  19342	struct lpfc_rpi_hdr *rpi_hdr;
  19343	unsigned long iflag;
  19344
  19345	/*
  19346	 * Fetch the next logical rpi.  Because this index is logical,
  19347	 * the  driver starts at 0 each time.
  19348	 */
  19349	spin_lock_irqsave(&phba->hbalock, iflag);
  19350	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
  19351	rpi_limit = phba->sli4_hba.next_rpi;
  19352
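	/* Logical rpis are tracked in a bitmask; take the first free bit */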
  19353	rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
  19354	if (rpi >= rpi_limit)
  19355		rpi = LPFC_RPI_ALLOC_ERROR;
  19356	else {
  19357		set_bit(rpi, phba->sli4_hba.rpi_bmask);
  19358		phba->sli4_hba.max_cfg_param.rpi_used++;
  19359		phba->sli4_hba.rpi_count++;
  19360	}
  19361	lpfc_printf_log(phba, KERN_INFO,
  19362			LOG_NODE | LOG_DISCOVERY,
  19363			"0001 Allocated rpi:x%x max:x%x lim:x%x\n",
  19364			(int) rpi, max_rpi, rpi_limit);
  19365
  19366	/*
  19367	 * Don't try to allocate more rpi header regions if the device limit
  19368	 * has been exhausted.
  19369	 */
  19370	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
  19371	    (phba->sli4_hba.rpi_count >= max_rpi)) {
  19372		spin_unlock_irqrestore(&phba->hbalock, iflag);
  19373		return rpi;
  19374	}
  19375
  19376	/*
  19377	 * RPI header postings are not required for SLI4 ports capable of
  19378	 * extents.
  19379	 */
  19380	if (!phba->sli4_hba.rpi_hdrs_in_use) {
  19381		spin_unlock_irqrestore(&phba->hbalock, iflag);
  19382		return rpi;
  19383	}
  19384
  19385	/*
  19386	 * If the driver is running low on rpi resources, allocate another
  19387	 * page now.  Note that the next_rpi value is used because
  19388	 * it represents how many are actually in use whereas max_rpi notes
  19389	 * how many are supported max by the device.
  19390	 */
  19391	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
  19392	spin_unlock_irqrestore(&phba->hbalock, iflag);
  19393	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
  19394		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
  19395		if (!rpi_hdr) {
  19396			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  19397					"2002 Error Could not grow rpi "
  19398					"count\n");
  19399		} else {
  19400			lrpi = rpi_hdr->start_rpi;
  19401			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
  19402			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
  19403		}
  19404	}
  19405
  19406	return rpi;
  19407}
  19408
  19409/**
  19410 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
  19411 * @phba: pointer to lpfc hba data structure.
  19412 * @rpi: rpi to free
  19413 *
  19414 * This routine is invoked to release an rpi to the pool of
  19415 * available rpis maintained by the driver.
  19416 **/
  19417static void
  19418__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
  19419{
  19420	/*
  19421	 * if the rpi value indicates a prior unreg has already
  19422	 * been done, skip the unreg.
  19423	 */
  19424	if (rpi == LPFC_RPI_ALLOC_ERROR)
  19425		return;
  19426
  19427	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
  19428		phba->sli4_hba.rpi_count--;
  19429		phba->sli4_hba.max_cfg_param.rpi_used--;
  19430	} else {
  19431		lpfc_printf_log(phba, KERN_INFO,
  19432				LOG_NODE | LOG_DISCOVERY,
  19433				"2016 rpi %x not inuse\n",
  19434				rpi);
  19435	}
  19436}
  19437
  19438/**
  19439 * lpfc_sli4_free_rpi - Release an rpi for reuse.
  19440 * @phba: pointer to lpfc hba data structure.
  19441 * @rpi: rpi to free
  19442 *
  19443 * This routine is invoked to release an rpi to the pool of
  19444 * available rpis maintained by the driver.
  19445 **/
  19446void
  19447lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
  19448{
  19449	spin_lock_irq(&phba->hbalock);
  19450	__lpfc_sli4_free_rpi(phba, rpi);
  19451	spin_unlock_irq(&phba->hbalock);
  19452}
  19453
  19454/**
  19455 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
  19456 * @phba: pointer to lpfc hba data structure.
  19457 *
  19458 * This routine is invoked to remove the memory region that
  19459 * provided rpi via a bitmask.
  19460 **/
  19461void
  19462lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
  19463{
  19464	kfree(phba->sli4_hba.rpi_bmask);
  19465	kfree(phba->sli4_hba.rpi_ids);
  19466	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
  19467}
  19468
  19469/**
   19470 * lpfc_sli4_resume_rpi - Resume an rpi with the port
  19471 * @ndlp: pointer to lpfc nodelist data structure.
  19472 * @cmpl: completion call-back.
  19473 * @arg: data to load as MBox 'caller buffer information'
  19474 *
   19475 * This routine is invoked to issue a RESUME_RPI mailbox command to the
   19476 * port for the rpi associated with @ndlp.
  19477 **/
  19478int
  19479lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
  19480	void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
  19481{
  19482	LPFC_MBOXQ_t *mboxq;
  19483	struct lpfc_hba *phba = ndlp->phba;
  19484	int rc;
  19485
   19486	/* The rpi is resumed on the port via a mailbox command. */
  19487	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  19488	if (!mboxq)
  19489		return -ENOMEM;
  19490
  19491	/* If cmpl assigned, then this nlp_get pairs with
  19492	 * lpfc_mbx_cmpl_resume_rpi.
  19493	 *
  19494	 * Else cmpl is NULL, then this nlp_get pairs with
  19495	 * lpfc_sli_def_mbox_cmpl.
  19496	 */
  19497	if (!lpfc_nlp_get(ndlp)) {
  19498		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  19499				"2122 %s: Failed to get nlp ref\n",
  19500				__func__);
  19501		mempool_free(mboxq, phba->mbox_mem_pool);
  19502		return -EIO;
  19503	}
  19504
   19505	/* Build the RESUME_RPI mailbox command for this node's rpi. */
  19506	lpfc_resume_rpi(mboxq, ndlp);
  19507	if (cmpl) {
  19508		mboxq->mbox_cmpl = cmpl;
  19509		mboxq->ctx_buf = arg;
  19510	} else
  19511		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  19512	mboxq->ctx_ndlp = ndlp;
  19513	mboxq->vport = ndlp->vport;
  19514	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
  19515	if (rc == MBX_NOT_FINISHED) {
  19516		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  19517				"2010 Resume RPI Mailbox failed "
  19518				"status %d, mbxStatus x%x\n", rc,
  19519				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
  19520		lpfc_nlp_put(ndlp);
  19521		mempool_free(mboxq, phba->mbox_mem_pool);
  19522		return -EIO;
  19523	}
  19524	return 0;
  19525}
  19526
  19527/**
  19528 * lpfc_sli4_init_vpi - Initialize a vpi with the port
  19529 * @vport: Pointer to the vport for which the vpi is being initialized
  19530 *
  19531 * This routine is invoked to activate a vpi with the port.
  19532 *
  19533 * Returns:
  19534 *    0 success
  19535 *    -Evalue otherwise
  19536 **/
  19537int
  19538lpfc_sli4_init_vpi(struct lpfc_vport *vport)
  19539{
  19540	LPFC_MBOXQ_t *mboxq;
  19541	int rc = 0;
  19542	int retval = MBX_SUCCESS;
  19543	uint32_t mbox_tmo;
  19544	struct lpfc_hba *phba = vport->phba;
  19545	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  19546	if (!mboxq)
  19547		return -ENOMEM;
  19548	lpfc_init_vpi(phba, mboxq, vport->vpi);
  19549	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
  19550	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
  19551	if (rc != MBX_SUCCESS) {
  19552		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
  19553				"2022 INIT VPI Mailbox failed "
  19554				"status %d, mbxStatus x%x\n", rc,
  19555				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
  19556		retval = -EIO;
  19557	}
  19558	if (rc != MBX_TIMEOUT)
  19559		mempool_free(mboxq, vport->phba->mbox_mem_pool);
  19560
  19561	return retval;
  19562}
  19563
  19564/**
  19565 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
  19566 * @phba: pointer to lpfc hba data structure.
  19567 * @mboxq: Pointer to mailbox object.
  19568 *
  19569 * This routine is the completion handler for the ADD_FCF_RECORD
  19570 * nonembedded mailbox command.  It checks the mailbox subheader status
  19571 * and frees the mailbox resources.
  19572 **/
  19573static void
  19574lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
  19575{
  19576	void *virt_addr;
  19577	union lpfc_sli4_cfg_shdr *shdr;
  19578	uint32_t shdr_status, shdr_add_status;
  19579
  19580	virt_addr = mboxq->sge_array->addr[0];
  19581	/* The IOCTL status is embedded in the mailbox subheader. */
  19582	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
  19583	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  19584	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  19585
  19586	if ((shdr_status || shdr_add_status) &&
  19587		(shdr_status != STATUS_FCF_IN_USE))
  19588		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  19589			"2558 ADD_FCF_RECORD mailbox failed with "
  19590			"status x%x add_status x%x\n",
  19591			shdr_status, shdr_add_status);
  19592
  19593	lpfc_sli4_mbox_cmd_free(phba, mboxq);
  19594}
  19595
  19596/**
  19597 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
  19598 * @phba: pointer to lpfc hba data structure.
  19599 * @fcf_record:  pointer to the initialized fcf record to add.
  19600 *
  19601 * This routine is invoked to manually add a single FCF record. The caller
  19602 * must pass a completely initialized FCF_Record.  This routine takes
  19603 * care of the nonembedded mailbox operations.
  19604 **/
  19605int
  19606lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
  19607{
  19608	int rc = 0;
  19609	LPFC_MBOXQ_t *mboxq;
  19610	uint8_t *bytep;
  19611	void *virt_addr;
  19612	struct lpfc_mbx_sge sge;
  19613	uint32_t alloc_len, req_len;
  19614	uint32_t fcfindex;
  19615
  19616	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  19617	if (!mboxq) {
  19618		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  19619			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
  19620		return -ENOMEM;
  19621	}
  19622
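       	/* The non-embedded request holds the config header, the fcf_index
       	 * word and then the FCF record itself.
       	 */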
  19623	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
  19624		  sizeof(uint32_t);
  19625
  19626	/* Allocate DMA memory and set up the non-embedded mailbox command */
  19627	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
  19628				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
  19629				     req_len, LPFC_SLI4_MBX_NEMBED);
  19630	if (alloc_len < req_len) {
  19631		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  19632			"2523 Allocated DMA memory size (x%x) is "
  19633			"less than the requested DMA memory "
  19634			"size (x%x)\n", alloc_len, req_len);
  19635		lpfc_sli4_mbox_cmd_free(phba, mboxq);
  19636		return -ENOMEM;
  19637	}
  19638
  19639	/*
  19640	 * Get the first SGE entry from the non-embedded DMA memory.  This
  19641	 * routine only uses a single SGE.
  19642	 */
  19643	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
  19644	virt_addr = mboxq->sge_array->addr[0];
  19645	/*
  19646	 * Configure the FCF record for FCFI 0.  This is the driver's
  19647	 * hardcoded default and gets used in nonFIP mode.
  19648	 */
  19649	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
  19650	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
  19651	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
  19652
  19653	/*
  19654	 * Copy the fcf_index and the FCF Record Data. The data starts after
  19655	 * the FCoE header plus word10. The data copy needs to be endian
  19656	 * correct.
  19657	 */
  19658	bytep += sizeof(uint32_t);
  19659	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
  19660	mboxq->vport = phba->pport;
  19661	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
  19662	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
  19663	if (rc == MBX_NOT_FINISHED) {
  19664		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  19665			"2515 ADD_FCF_RECORD mailbox failed with "
  19666			"status 0x%x\n", rc);
  19667		lpfc_sli4_mbox_cmd_free(phba, mboxq);
  19668		rc = -EIO;
  19669	} else
  19670		rc = 0;
  19671
  19672	return rc;
  19673}
  19674
  19675/**
  19676 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
  19677 * @phba: pointer to lpfc hba data structure.
  19678 * @fcf_record:  pointer to the fcf record to write the default data.
  19679 * @fcf_index: FCF table entry index.
  19680 *
  19681 * This routine is invoked to build the driver's default FCF record.  The
  19682 * values used are hardcoded.  This routine handles memory initialization.
  19683 *
  19684 **/
  19685void
  19686lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
  19687				struct fcf_record *fcf_record,
  19688				uint16_t fcf_index)
  19689{
  19690	memset(fcf_record, 0, sizeof(struct fcf_record));
  19691	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
  19692	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
  19693	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
  19694	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
  19695	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
  19696	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
  19697	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
  19698	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
  19699	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
  19700	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
  19701	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
  19702	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
  19703	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
  19704	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
  19705	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
  19706	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
  19707		LPFC_FCF_FPMA | LPFC_FCF_SPMA);
  19708	/* Set the VLAN bit map */
  19709	if (phba->valid_vlan) {
  19710		fcf_record->vlan_bitmap[phba->vlan_id / 8]
  19711			= 1 << (phba->vlan_id % 8);
  19712	}
  19713}
  19714
  19715/**
  19716 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
  19717 * @phba: pointer to lpfc hba data structure.
  19718 * @fcf_index: FCF table entry offset.
  19719 *
  19720 * This routine is invoked to scan the entire FCF table by reading FCF
  19721 * records and processing them one at a time, starting from the @fcf_index,
  19722 * for initial FCF discovery or fast FCF failover rediscovery.
  19723 *
  19724 * Return 0 if the mailbox command is submitted successfully, non-zero
  19725 * otherwise.
  19726 **/
  19727int
  19728lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
  19729{
  19730	int rc = 0, error;
  19731	LPFC_MBOXQ_t *mboxq;
  19732
  19733	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
  19734	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
  19735	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  19736	if (!mboxq) {
  19737		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  19738				"2000 Failed to allocate mbox for "
  19739				"READ_FCF cmd\n");
  19740		error = -ENOMEM;
  19741		goto fail_fcf_scan;
  19742	}
  19743	/* Construct the read FCF record mailbox command */
  19744	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
  19745	if (rc) {
  19746		error = -EINVAL;
  19747		goto fail_fcf_scan;
  19748	}
  19749	/* Issue the mailbox command asynchronously */
  19750	mboxq->vport = phba->pport;
  19751	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
  19752
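       	/* Mark an FCF table scan as in progress; the flag is cleared again
       	 * below if the scan cannot be started.
       	 */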
  19753	spin_lock_irq(&phba->hbalock);
  19754	phba->hba_flag |= FCF_TS_INPROG;
  19755	spin_unlock_irq(&phba->hbalock);
  19756
  19757	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
  19758	if (rc == MBX_NOT_FINISHED)
  19759		error = -EIO;
  19760	else {
  19761		/* Reset eligible FCF count for new scan */
  19762		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
  19763			phba->fcf.eligible_fcf_cnt = 0;
  19764		error = 0;
  19765	}
  19766fail_fcf_scan:
  19767	if (error) {
  19768		if (mboxq)
  19769			lpfc_sli4_mbox_cmd_free(phba, mboxq);
  19770		/* FCF scan failed, clear FCF_TS_INPROG flag */
  19771		spin_lock_irq(&phba->hbalock);
  19772		phba->hba_flag &= ~FCF_TS_INPROG;
  19773		spin_unlock_irq(&phba->hbalock);
  19774	}
  19775	return error;
  19776}
  19777
  19778/**
  19779 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
  19780 * @phba: pointer to lpfc hba data structure.
  19781 * @fcf_index: FCF table entry offset.
  19782 *
  19783 * This routine is invoked to read an FCF record indicated by @fcf_index
  19784 * and to use it for FLOGI roundrobin FCF failover.
  19785 *
  19786 * Return 0 if the mailbox command is submitted successfully, non-zero
  19787 * otherwise.
  19788 **/
  19789int
  19790lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
  19791{
  19792	int rc = 0, error;
  19793	LPFC_MBOXQ_t *mboxq;
  19794
  19795	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  19796	if (!mboxq) {
  19797		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
  19798				"2763 Failed to allocate mbox for "
  19799				"READ_FCF cmd\n");
  19800		error = -ENOMEM;
  19801		goto fail_fcf_read;
  19802	}
  19803	/* Construct the read FCF record mailbox command */
  19804	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
  19805	if (rc) {
  19806		error = -EINVAL;
  19807		goto fail_fcf_read;
  19808	}
  19809	/* Issue the mailbox command asynchronously */
  19810	mboxq->vport = phba->pport;
  19811	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
  19812	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
  19813	if (rc == MBX_NOT_FINISHED)
  19814		error = -EIO;
  19815	else
  19816		error = 0;
  19817
  19818fail_fcf_read:
  19819	if (error && mboxq)
  19820		lpfc_sli4_mbox_cmd_free(phba, mboxq);
  19821	return error;
  19822}
  19823
  19824/**
  19825 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
  19826 * @phba: pointer to lpfc hba data structure.
  19827 * @fcf_index: FCF table entry offset.
  19828 *
  19829 * This routine is invoked to read an FCF record indicated by @fcf_index to
  19830 * determine whether it's eligible for the FLOGI roundrobin failover list.
  19831 *
  19832 * Return 0 if the mailbox command is submitted successfully, non-zero
  19833 * otherwise.
  19834 **/
  19835int
  19836lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
  19837{
  19838	int rc = 0, error;
  19839	LPFC_MBOXQ_t *mboxq;
  19840
  19841	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  19842	if (!mboxq) {
  19843		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
  19844				"2758 Failed to allocate mbox for "
  19845				"READ_FCF cmd\n");
  19846		error = -ENOMEM;
  19847		goto fail_fcf_read;
  19848	}
  19849	/* Construct the read FCF record mailbox command */
  19850	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
  19851	if (rc) {
  19852		error = -EINVAL;
  19853		goto fail_fcf_read;
  19854	}
  19855	/* Issue the mailbox command asynchronously */
  19856	mboxq->vport = phba->pport;
  19857	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
  19858	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
  19859	if (rc == MBX_NOT_FINISHED)
  19860		error = -EIO;
  19861	else
  19862		error = 0;
  19863
  19864fail_fcf_read:
  19865	if (error && mboxq)
  19866		lpfc_sli4_mbox_cmd_free(phba, mboxq);
  19867	return error;
  19868}
  19869
  19870/**
  19871 * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask at the next priority
  19872 * @phba: pointer to the lpfc_hba struct for this port.
  19873 *
  19874 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get routine
  19875 * when the rr_bmask is empty.  FCF indices are put into the rr_bmask based
  19876 * on their priority level, starting from the highest priority down to the
  19877 * lowest; the most likely FCF candidate will be in the highest priority
  19878 * group.  When called, this routine searches the fcf_pri list for the next
  19879 * lowest priority group and repopulates the rr_bmask with only those
  19880 * fcf_indexes.
  19881 * Returns: 1 on success, 0 on failure.
  19882 **/
  19883static int
  19884lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
  19885{
  19886	uint16_t next_fcf_pri;
  19887	uint16_t last_index;
  19888	struct lpfc_fcf_pri *fcf_pri;
  19889	int rc;
  19890	int ret = 0;
  19891
  19892	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
  19893			LPFC_SLI4_FCF_TBL_INDX_MAX);
  19894	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
  19895			"3060 Last IDX %d\n", last_index);
  19896
  19897	/* Verify the priority list has 2 or more entries */
  19898	spin_lock_irq(&phba->hbalock);
  19899	if (list_empty(&phba->fcf.fcf_pri_list) ||
  19900	    list_is_singular(&phba->fcf.fcf_pri_list)) {
  19901		spin_unlock_irq(&phba->hbalock);
  19902		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
  19903			"3061 Last IDX %d\n", last_index);
  19904		return 0; /* Empty rr list */
  19905	}
  19906	spin_unlock_irq(&phba->hbalock);
  19907
  19908	next_fcf_pri = 0;
  19909	/*
  19910	 * Clear the rr_bmask and set all of the bits that are at this
  19911	 * priority.
  19912	 */
  19913	memset(phba->fcf.fcf_rr_bmask, 0,
  19914			sizeof(*phba->fcf.fcf_rr_bmask));
  19915	spin_lock_irq(&phba->hbalock);
  19916	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
  19917		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
  19918			continue;
  19919		/*
  19920		 * The first priority whose FLOGI has not failed
  19921		 * will be the highest.
  19922		 */
  19923		if (!next_fcf_pri)
  19924			next_fcf_pri = fcf_pri->fcf_rec.priority;
  19925		spin_unlock_irq(&phba->hbalock);
  19926		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
  19927			rc = lpfc_sli4_fcf_rr_index_set(phba,
  19928						fcf_pri->fcf_rec.fcf_index);
  19929			if (rc)
  19930				return 0;
  19931		}
  19932		spin_lock_irq(&phba->hbalock);
  19933	}
  19934	/*
  19935	 * If next_fcf_pri was not set above and the list is not empty, then
  19936	 * FLOGI has failed on all of them.  Reset the FLOGI-failed flags and
  19937	 * start again at the beginning.
  19938	 */
  19939	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
  19940		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
  19941			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
  19942			/*
  19943			 * The first priority whose FLOGI has not failed
  19944			 * will be the highest.
  19945			 */
  19946			if (!next_fcf_pri)
  19947				next_fcf_pri = fcf_pri->fcf_rec.priority;
  19948			spin_unlock_irq(&phba->hbalock);
  19949			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
  19950				rc = lpfc_sli4_fcf_rr_index_set(phba,
  19951						fcf_pri->fcf_rec.fcf_index);
  19952				if (rc)
  19953					return 0;
  19954			}
  19955			spin_lock_irq(&phba->hbalock);
  19956		}
  19957	} else
  19958		ret = 1;
  19959	spin_unlock_irq(&phba->hbalock);
  19960
  19961	return ret;
  19962}
  19963/**
  19964 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
  19965 * @phba: pointer to lpfc hba data structure.
  19966 *
  19967 * This routine is to get the next eligible FCF record index in a round
  19968 * robin fashion. If the next eligible FCF record index equals to the
  19969 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
  19970 * shall be returned, otherwise, the next eligible FCF record's index
  19971 * shall be returned.
  19972 **/
  19973uint16_t
  19974lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
  19975{
  19976	uint16_t next_fcf_index;
  19977
  19978initial_priority:
  19979	/* Search start from next bit of currently registered FCF index */
  19980	next_fcf_index = phba->fcf.current_rec.fcf_indx;
  19981
  19982next_priority:
  19983	/* Determine the next fcf index to check */
  19984	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
  19985	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
  19986				       LPFC_SLI4_FCF_TBL_INDX_MAX,
  19987				       next_fcf_index);
  19988
  19989	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
  19990	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
  19991		/*
  19992		 * If we have wrapped, restart the search from the first set
  19993		 * bit so that an exhausted bitmask can be detected and the
  19994		 * priority level changed.
  19995		 */
  19996		next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
  19997					       LPFC_SLI4_FCF_TBL_INDX_MAX);
  19998	}
  19999
  20000
  20001	/* Check roundrobin failover list empty condition */
  20002	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
  20003		next_fcf_index == phba->fcf.current_rec.fcf_indx) {
  20004		/*
  20005		 * If the next fcf index is not found, check if there are
  20006		 * lower priority level fcfs in the fcf_priority list.
  20007		 * Set up the rr_bmask with all of the available fcf bits
  20008		 * at that level and continue the selection process.
  20009		 */
  20010		if (lpfc_check_next_fcf_pri_level(phba))
  20011			goto initial_priority;
  20012		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
  20013				"2844 No roundrobin failover FCF available\n");
  20014
  20015		return LPFC_FCOE_FCF_NEXT_NONE;
  20016	}
  20017
  20018	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
  20019		phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
  20020		LPFC_FCF_FLOGI_FAILED) {
  20021		if (list_is_singular(&phba->fcf.fcf_pri_list))
  20022			return LPFC_FCOE_FCF_NEXT_NONE;
  20023
  20024		goto next_priority;
  20025	}
  20026
  20027	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
  20028			"2845 Get next roundrobin failover FCF (x%x)\n",
  20029			next_fcf_index);
  20030
  20031	return next_fcf_index;
  20032}
  20033
  20034/**
  20035 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
  20036 * @phba: pointer to lpfc hba data structure.
  20037 * @fcf_index: index into the FCF table to 'set'
  20038 *
  20039 * This routine sets the FCF record index in to the eligible bmask for
  20040 * roundrobin failover search. It checks to make sure that the index
  20041 * does not go beyond the range of the driver allocated bmask dimension
  20042 * before setting the bit.
  20043 *
  20044 * Returns 0 if the index bit successfully set, otherwise, it returns
  20045 * -EINVAL.
  20046 **/
  20047int
  20048lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
  20049{
  20050	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
  20051		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
  20052				"2610 FCF (x%x) reached driver's book "
  20053				"keeping dimension:x%x\n",
  20054				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
  20055		return -EINVAL;
  20056	}
  20057	/* Set the eligible FCF record index bmask */
  20058	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
  20059
  20060	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
  20061			"2790 Set FCF (x%x) to roundrobin FCF failover "
  20062			"bmask\n", fcf_index);
  20063
  20064	return 0;
  20065}
  20066
  20067/**
  20068 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
  20069 * @phba: pointer to lpfc hba data structure.
  20070 * @fcf_index: index into the FCF table to 'clear'
  20071 *
  20072 * This routine clears the FCF record index from the eligible bmask for
  20073 * roundrobin failover search. It checks to make sure that the index
  20074 * does not go beyond the range of the driver allocated bmask dimension
  20075 * before clearing the bit.
  20076 **/
  20077void
  20078lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
  20079{
  20080	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
  20081	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
  20082		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
  20083				"2762 FCF (x%x) reached driver's book "
  20084				"keeping dimension:x%x\n",
  20085				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
  20086		return;
  20087	}
  20088	/* Clear the eligible FCF record index bmask */
  20089	spin_lock_irq(&phba->hbalock);
  20090	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
  20091				 list) {
  20092		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
  20093			list_del_init(&fcf_pri->list);
  20094			break;
  20095		}
  20096	}
  20097	spin_unlock_irq(&phba->hbalock);
  20098	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
  20099
  20100	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
  20101			"2791 Clear FCF (x%x) from roundrobin failover "
  20102			"bmask\n", fcf_index);
  20103}
  20104
  20105/**
  20106 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
  20107 * @phba: pointer to lpfc hba data structure.
  20108 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
  20109 *
  20110 * This routine is the completion routine for the rediscover FCF table mailbox
  20111 * command. If the mailbox command returned failure, it will try to stop the
  20112 * FCF rediscover wait timer.
  20113 **/
  20114static void
  20115lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
  20116{
  20117	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
  20118	uint32_t shdr_status, shdr_add_status;
  20119
  20120	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
  20121
  20122	shdr_status = bf_get(lpfc_mbox_hdr_status,
  20123			     &redisc_fcf->header.cfg_shdr.response);
  20124	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
  20125			     &redisc_fcf->header.cfg_shdr.response);
  20126	if (shdr_status || shdr_add_status) {
  20127		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
  20128				"2746 Requesting for FCF rediscovery failed "
  20129				"status x%x add_status x%x\n",
  20130				shdr_status, shdr_add_status);
  20131		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
  20132			spin_lock_irq(&phba->hbalock);
  20133			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
  20134			spin_unlock_irq(&phba->hbalock);
  20135			/*
  20136			 * CVL event triggered FCF rediscover request failed,
  20137			 * last resort to re-try current registered FCF entry.
  20138			 */
  20139			lpfc_retry_pport_discovery(phba);
  20140		} else {
  20141			spin_lock_irq(&phba->hbalock);
  20142			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
  20143			spin_unlock_irq(&phba->hbalock);
  20144			/*
  20145			 * DEAD FCF event triggered FCF rediscover request
  20146			 * failed, last resort to fail over as a link down
  20147			 * to FCF registration.
  20148			 */
  20149			lpfc_sli4_fcf_dead_failthrough(phba);
  20150		}
  20151	} else {
  20152		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
  20153				"2775 Start FCF rediscover quiescent timer\n");
  20154		/*
  20155		 * Start the FCF rediscovery wait timer for the pending FCF
  20156		 * before rescanning the FCF record table.
  20157		 */
  20158		lpfc_fcf_redisc_wait_start_timer(phba);
  20159	}
  20160
  20161	mempool_free(mbox, phba->mbox_mem_pool);
  20162}
  20163
  20164/**
  20165 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
  20166 * @phba: pointer to lpfc hba data structure.
  20167 *
  20168 * This routine is invoked to request rediscovery of the entire FCF table
  20169 * by the port.
  20170 **/
  20171int
  20172lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
  20173{
  20174	LPFC_MBOXQ_t *mbox;
  20175	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
  20176	int rc, length;
  20177
  20178	/* Cancel retry delay timers to all vports before FCF rediscover */
  20179	lpfc_cancel_all_vport_retry_delay_timer(phba);
  20180
  20181	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  20182	if (!mbox) {
  20183		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  20184				"2745 Failed to allocate mbox for "
  20185				"requesting FCF rediscover.\n");
  20186		return -ENOMEM;
  20187	}
  20188
  20189	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
  20190		  sizeof(struct lpfc_sli4_cfg_mhdr));
  20191	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  20192			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
  20193			 length, LPFC_SLI4_MBX_EMBED);
  20194
  20195	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
  20196	/* Set count to 0 for invalidating the entire FCF database */
  20197	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
  20198
  20199	/* Issue the mailbox command asynchronously */
  20200	mbox->vport = phba->pport;
  20201	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
  20202	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
  20203
  20204	if (rc == MBX_NOT_FINISHED) {
  20205		mempool_free(mbox, phba->mbox_mem_pool);
  20206		return -EIO;
  20207	}
  20208	return 0;
  20209}
  20210
  20211/**
  20212 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
  20213 * @phba: pointer to lpfc hba data structure.
  20214 *
  20215 * This function is the failover routine as a last resort to the FCF DEAD
  20216 * event when driver failed to perform fast FCF failover.
  20217 **/
  20218void
  20219lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
  20220{
  20221	uint32_t link_state;
  20222
  20223	/*
  20224	 * Last resort as FCF DEAD event failover will treat this as
  20225	 * a link down, but save the link state because we don't want
  20226	 * it to be changed to Link Down unless it is already down.
  20227	 */
  20228	link_state = phba->link_state;
  20229	lpfc_linkdown(phba);
  20230	phba->link_state = link_state;
  20231
  20232	/* Unregister FCF if no devices connected to it */
  20233	lpfc_unregister_unused_fcf(phba);
  20234}
  20235
  20236/**
  20237 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
  20238 * @phba: pointer to lpfc hba data structure.
  20239 * @rgn23_data: pointer to configure region 23 data.
  20240 *
  20241 * This function gets SLI3 port configure region 23 data through memory dump
  20242 * mailbox command. When it successfully retrieves data, the size of the data
  20243 * will be returned, otherwise, 0 will be returned.
  20244 **/
  20245static uint32_t
  20246lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
  20247{
  20248	LPFC_MBOXQ_t *pmb = NULL;
  20249	MAILBOX_t *mb;
  20250	uint32_t offset = 0;
  20251	int rc;
  20252
  20253	if (!rgn23_data)
  20254		return 0;
  20255
  20256	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  20257	if (!pmb) {
  20258		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  20259				"2600 failed to allocate mailbox memory\n");
  20260		return 0;
  20261	}
  20262	mb = &pmb->u.mb;
  20263
  20264	do {
  20265		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
  20266		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
  20267
  20268		if (rc != MBX_SUCCESS) {
  20269			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  20270					"2601 failed to read config "
  20271					"region 23, rc 0x%x Status 0x%x\n",
  20272					rc, mb->mbxStatus);
  20273			mb->un.varDmp.word_cnt = 0;
  20274		}
  20275		/*
  20276		 * The dump may return a zero word count when finished or on a
  20277		 * mailbox error; either way we are done.
  20278		 */
  20279		if (mb->un.varDmp.word_cnt == 0)
  20280			break;
  20281
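       		/* Never copy more than the space left in the region 23 buffer */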
  20282		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
  20283			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
  20284
  20285		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
  20286				       rgn23_data + offset,
  20287				       mb->un.varDmp.word_cnt);
  20288		offset += mb->un.varDmp.word_cnt;
  20289	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
  20290
  20291	mempool_free(pmb, phba->mbox_mem_pool);
  20292	return offset;
  20293}
  20294
  20295/**
  20296 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
  20297 * @phba: pointer to lpfc hba data structure.
  20298 * @rgn23_data: pointer to configure region 23 data.
  20299 *
  20300 * This function gets SLI4 port configure region 23 data through memory dump
  20301 * mailbox command. When it successfully retrieves data, the size of the data
  20302 * will be returned, otherwise, 0 will be returned.
  20303 **/
  20304static uint32_t
  20305lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
  20306{
  20307	LPFC_MBOXQ_t *mboxq = NULL;
  20308	struct lpfc_dmabuf *mp = NULL;
  20309	struct lpfc_mqe *mqe;
  20310	uint32_t data_length = 0;
  20311	int rc;
  20312
  20313	if (!rgn23_data)
  20314		return 0;
  20315
  20316	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  20317	if (!mboxq) {
  20318		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  20319				"3105 failed to allocate mailbox memory\n");
  20320		return 0;
  20321	}
  20322
  20323	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
  20324		goto out;
  20325	mqe = &mboxq->u.mqe;
  20326	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
  20327	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  20328	if (rc)
  20329		goto out;
  20330	data_length = mqe->un.mb_words[5];
  20331	if (data_length == 0)
  20332		goto out;
  20333	if (data_length > DMP_RGN23_SIZE) {
  20334		data_length = 0;
  20335		goto out;
  20336	}
  20337	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
  20338out:
  20339	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
  20340	return data_length;
  20341}
  20342
  20343/**
  20344 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
  20345 * @phba: pointer to lpfc hba data structure.
  20346 *
  20347 * This function reads region 23 and parses the TLV for port status to
  20348 * decide if the user disabled the port. If the TLV indicates the
  20349 * port is disabled, the hba_flag is set accordingly.
  20350 **/
  20351void
  20352lpfc_sli_read_link_ste(struct lpfc_hba *phba)
  20353{
  20354	uint8_t *rgn23_data = NULL;
  20355	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
  20356	uint32_t offset = 0;
  20357
  20358	/* Get adapter Region 23 data */
  20359	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
  20360	if (!rgn23_data)
  20361		goto out;
  20362
  20363	if (phba->sli_rev < LPFC_SLI_REV4)
  20364		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
  20365	else {
  20366		if_type = bf_get(lpfc_sli_intf_if_type,
  20367				 &phba->sli4_hba.sli_intf);
  20368		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
  20369			goto out;
  20370		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
  20371	}
  20372
  20373	if (!data_size)
  20374		goto out;
  20375
  20376	/* Check the region signature first */
  20377	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
  20378		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  20379			"2619 Config region 23 has bad signature\n");
  20380		goto out;
  20381	}
  20382	offset += 4;
  20383
  20384	/* Check the data structure version */
  20385	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
  20386		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  20387			"2620 Config region 23 has bad version\n");
  20388		goto out;
  20389	}
  20390	offset += 4;
  20391
  20392	/* Parse TLV entries in the region */
  20393	while (offset < data_size) {
  20394		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
  20395			break;
  20396		/*
  20397		 * If the TLV is not a driver specific TLV or the driver id is
  20398		 * not the linux driver id, skip the record.
  20399		 */
  20400		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
  20401		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
  20402		    (rgn23_data[offset + 3] != 0)) {
  20403			offset += rgn23_data[offset + 1] * 4 + 4;
  20404			continue;
  20405		}
  20406
  20407		/* Driver found a driver specific TLV in the config region */
  20408		sub_tlv_len = rgn23_data[offset + 1] * 4;
  20409		offset += 4;
  20410		tlv_offset = 0;
  20411
  20412		/*
  20413		 * Search for configured port state sub-TLV.
  20414		 */
  20415		while ((offset < data_size) &&
  20416			(tlv_offset < sub_tlv_len)) {
  20417			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
  20418				offset += 4;
  20419				tlv_offset += 4;
  20420				break;
  20421			}
  20422			if (rgn23_data[offset] != PORT_STE_TYPE) {
  20423				offset += rgn23_data[offset + 1] * 4 + 4;
  20424				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
  20425				continue;
  20426			}
  20427
  20428			/* This HBA contains PORT_STE configured */
  20429			if (!rgn23_data[offset + 2])
  20430				phba->hba_flag |= LINK_DISABLED;
  20431
  20432			goto out;
  20433		}
  20434	}
  20435
  20436out:
  20437	kfree(rgn23_data);
  20438	return;
  20439}
  20440
  20441/**
  20442 * lpfc_log_fw_write_cmpl - logs firmware write completion status
  20443 * @phba: pointer to lpfc hba data structure
  20444 * @shdr_status: wr_object rsp's status field
  20445 * @shdr_add_status: wr_object rsp's add_status field
  20446 * @shdr_add_status_2: wr_object rsp's add_status_2 field
  20447 * @shdr_change_status: wr_object rsp's change_status field
  20448 * @shdr_csf: wr_object rsp's csf bit
  20449 *
  20450 * This routine is intended to be called after a firmware write completes.
  20451 * It will log the next action items to be performed by the user to
  20452 * instantiate the newly downloaded firmware, or the reason for incompatibility.
  20453 **/
  20454static void
  20455lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
  20456		       u32 shdr_add_status, u32 shdr_add_status_2,
  20457		       u32 shdr_change_status, u32 shdr_csf)
  20458{
  20459	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
  20460			"4198 %s: flash_id x%02x, asic_rev x%02x, "
  20461			"status x%02x, add_status x%02x, add_status_2 x%02x, "
  20462			"change_status x%02x, csf %01x\n", __func__,
  20463			phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
  20464			shdr_status, shdr_add_status, shdr_add_status_2,
  20465			shdr_change_status, shdr_csf);
  20466
  20467	if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
  20468		switch (shdr_add_status_2) {
  20469		case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
  20470			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
  20471					"4199 Firmware write failed: "
  20472					"image incompatible with flash x%02x\n",
  20473					phba->sli4_hba.flash_id);
  20474			break;
  20475		case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
  20476			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
  20477					"4200 Firmware write failed: "
  20478					"image incompatible with ASIC "
  20479					"architecture x%02x\n",
  20480					phba->sli4_hba.asic_rev);
  20481			break;
  20482		default:
  20483			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
  20484					"4210 Firmware write failed: "
  20485					"add_status_2 x%02x\n",
  20486					shdr_add_status_2);
  20487			break;
  20488		}
  20489	} else if (!shdr_status && !shdr_add_status) {
  20490		if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
  20491		    shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
  20492			if (shdr_csf)
  20493				shdr_change_status =
  20494						   LPFC_CHANGE_STATUS_PCI_RESET;
  20495		}
  20496
  20497		switch (shdr_change_status) {
  20498		case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
  20499			lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
  20500					"3198 Firmware write complete: System "
  20501					"reboot required to instantiate\n");
  20502			break;
  20503		case (LPFC_CHANGE_STATUS_FW_RESET):
  20504			lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
  20505					"3199 Firmware write complete: "
  20506					"Firmware reset required to "
  20507					"instantiate\n");
  20508			break;
  20509		case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
  20510			lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
  20511					"3200 Firmware write complete: Port "
  20512					"Migration or PCI Reset required to "
  20513					"instantiate\n");
  20514			break;
  20515		case (LPFC_CHANGE_STATUS_PCI_RESET):
  20516			lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
  20517					"3201 Firmware write complete: PCI "
  20518					"Reset required to instantiate\n");
  20519			break;
  20520		default:
  20521			break;
  20522		}
  20523	}
  20524}
  20525
  20526/**
  20527 * lpfc_wr_object - write an object to the firmware
  20528 * @phba: HBA structure that indicates port to create a queue on.
  20529 * @dmabuf_list: list of dmabufs to write to the port.
  20530 * @size: the total byte value of the objects to write to the port.
  20531 * @offset: the current offset to be used to start the transfer.
  20532 *
  20533 * This routine will create a wr_object mailbox command to send to the port.
  20534 * The mailbox command will be constructed using the dma buffers described in
  20535 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
  20536 * BDEs as the embedded mailbox can support. The @offset variable will be
  20537 * used to indicate the starting offset of the transfer and will also return
  20538 * the offset after the write object mailbox has completed. @size is used to
  20539 * determine the end of the object and whether the eof bit should be set.
  20540 *
  20541 * Return 0 if successful; @offset will contain the new offset to use
  20542 * for the next write.
  20543 * Return a negative value for error cases.
  20544 **/
  20545int
  20546lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
  20547	       uint32_t size, uint32_t *offset)
  20548{
  20549	struct lpfc_mbx_wr_object *wr_object;
  20550	LPFC_MBOXQ_t *mbox;
  20551	int rc = 0, i = 0;
  20552	uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
  20553	uint32_t shdr_change_status = 0, shdr_csf = 0;
  20554	uint32_t mbox_tmo;
  20555	struct lpfc_dmabuf *dmabuf;
  20556	uint32_t written = 0;
  20557	bool check_change_status = false;
  20558
  20559	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  20560	if (!mbox)
  20561		return -ENOMEM;
  20562
  20563	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  20564			LPFC_MBOX_OPCODE_WRITE_OBJECT,
  20565			sizeof(struct lpfc_mbx_wr_object) -
  20566			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
  20567
  20568	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
  20569	wr_object->u.request.write_offset = *offset;
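       	/* Firmware download writes to the object named "/" */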
  20570	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
  20571	wr_object->u.request.object_name[0] =
  20572		cpu_to_le32(wr_object->u.request.object_name[0]);
  20573	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
  20574	list_for_each_entry(dmabuf, dmabuf_list, list) {
  20575		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
  20576			break;
  20577		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
  20578		wr_object->u.request.bde[i].addrHigh =
  20579			putPaddrHigh(dmabuf->phys);
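       		/* Last chunk of the object: size is whatever remains and
       		 * the eof bit tells the port the write is complete.
       		 */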
  20580		if (written + SLI4_PAGE_SIZE >= size) {
  20581			wr_object->u.request.bde[i].tus.f.bdeSize =
  20582				(size - written);
  20583			written += (size - written);
  20584			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
  20585			bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
  20586			check_change_status = true;
  20587		} else {
  20588			wr_object->u.request.bde[i].tus.f.bdeSize =
  20589				SLI4_PAGE_SIZE;
  20590			written += SLI4_PAGE_SIZE;
  20591		}
  20592		i++;
  20593	}
  20594	wr_object->u.request.bde_count = i;
  20595	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
  20596	if (!phba->sli4_hba.intr_enable)
  20597		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  20598	else {
  20599		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
  20600		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
  20601	}
  20602	/* The IOCTL status is embedded in the mailbox subheader. */
  20603	shdr_status = bf_get(lpfc_mbox_hdr_status,
  20604			     &wr_object->header.cfg_shdr.response);
  20605	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
  20606				 &wr_object->header.cfg_shdr.response);
  20607	shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
  20608				   &wr_object->header.cfg_shdr.response);
  20609	if (check_change_status) {
  20610		shdr_change_status = bf_get(lpfc_wr_object_change_status,
  20611					    &wr_object->u.response);
  20612		shdr_csf = bf_get(lpfc_wr_object_csf,
  20613				  &wr_object->u.response);
  20614	}
  20615
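       	/* Skip freeing the mailbox on MBX_TIMEOUT; it is released when the
       	 * delayed completion eventually arrives.
       	 */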
  20616	if (!phba->sli4_hba.intr_enable)
  20617		mempool_free(mbox, phba->mbox_mem_pool);
  20618	else if (rc != MBX_TIMEOUT)
  20619		mempool_free(mbox, phba->mbox_mem_pool);
  20620	if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
  20621		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  20622				"3025 Write Object mailbox failed with "
  20623				"status x%x add_status x%x, add_status_2 x%x, "
  20624				"mbx status x%x\n",
  20625				shdr_status, shdr_add_status, shdr_add_status_2,
  20626				rc);
  20627		rc = -ENXIO;
  20628		*offset = shdr_add_status;
  20629	} else {
  20630		*offset += wr_object->u.response.actual_write_length;
  20631	}
  20632
  20633	if (rc || check_change_status)
  20634		lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
  20635				       shdr_add_status_2, shdr_change_status,
  20636				       shdr_csf);
  20637	return rc;
  20638}
  20639
  20640/**
  20641 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
  20642 * @vport: pointer to vport data structure.
  20643 *
  20644 * This function iterates through the mailboxq and cleans up all REG_LOGIN
  20645 * and REG_VPI mailbox commands associated with the vport. This function
  20646 * is called when the driver wants to restart discovery of the vport due to
  20647 * a Clear Virtual Link event.
  20648 **/
  20649void
  20650lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
  20651{
  20652	struct lpfc_hba *phba = vport->phba;
  20653	LPFC_MBOXQ_t *mb, *nextmb;
  20654	struct lpfc_nodelist *ndlp;
  20655	struct lpfc_nodelist *act_mbx_ndlp = NULL;
  20656	LIST_HEAD(mbox_cmd_list);
  20657	uint8_t restart_loop;
  20658
  20659	/* Clean up internally queued mailbox commands with the vport */
  20660	spin_lock_irq(&phba->hbalock);
  20661	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
  20662		if (mb->vport != vport)
  20663			continue;
  20664
  20665		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
  20666			(mb->u.mb.mbxCommand != MBX_REG_VPI))
  20667			continue;
  20668
  20669		list_move_tail(&mb->list, &mbox_cmd_list);
  20670	}
  20671	/* Clean up active mailbox command with the vport */
  20672	mb = phba->sli.mbox_active;
  20673	if (mb && (mb->vport == vport)) {
  20674		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
  20675			(mb->u.mb.mbxCommand == MBX_REG_VPI))
  20676			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  20677		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
  20678			act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
  20679
  20680			/* This reference is local to this routine.  The
  20681			 * reference is removed at routine exit.
  20682			 */
  20683			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
  20684
  20685			/* Unregister the RPI when mailbox complete */
  20686			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
  20687		}
  20688	}
  20689	/* Cleanup any mailbox completions which are not yet processed */
  20690	do {
  20691		restart_loop = 0;
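       		/* hbalock must be dropped to take ndlp->lock below, which
       		 * invalidates this list walk, so restart the scan whenever
       		 * a node is updated.
       		 */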
  20692		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
  20693			/*
  20694			 * If this mailbox is already processed or it is
  20695			 * for another vport, ignore it.
  20696			 */
  20697			if ((mb->vport != vport) ||
  20698				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
  20699				continue;
  20700
  20701			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
  20702				(mb->u.mb.mbxCommand != MBX_REG_VPI))
  20703				continue;
  20704
  20705			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  20706			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
  20707				ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
  20708				/* Unregister the RPI when mailbox complete */
  20709				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
  20710				restart_loop = 1;
  20711				spin_unlock_irq(&phba->hbalock);
  20712				spin_lock(&ndlp->lock);
  20713				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
  20714				spin_unlock(&ndlp->lock);
  20715				spin_lock_irq(&phba->hbalock);
  20716				break;
  20717			}
  20718		}
  20719	} while (restart_loop);
  20720
  20721	spin_unlock_irq(&phba->hbalock);
  20722
  20723	/* Release the cleaned-up mailbox commands */
  20724	while (!list_empty(&mbox_cmd_list)) {
  20725		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
  20726		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
  20727			ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
  20728			mb->ctx_ndlp = NULL;
  20729			if (ndlp) {
  20730				spin_lock(&ndlp->lock);
  20731				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
  20732				spin_unlock(&ndlp->lock);
  20733				lpfc_nlp_put(ndlp);
  20734			}
  20735		}
  20736		lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED);
  20737	}
  20738
  20739	/* Release the ndlp with the cleaned-up active mailbox command */
  20740	if (act_mbx_ndlp) {
  20741		spin_lock(&act_mbx_ndlp->lock);
  20742		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
  20743		spin_unlock(&act_mbx_ndlp->lock);
  20744		lpfc_nlp_put(act_mbx_ndlp);
  20745	}
  20746}
  20747
  20748/**
  20749 * lpfc_drain_txq - Drain the txq
  20750 * @phba: Pointer to HBA context object.
  20751 *
  20752 * This function attempts to submit IOCBs on the txq
  20753 * to the adapter.  For SLI4 adapters, the txq contains
  20754 * ELS IOCBs that have been deferred because there
  20755 * are no SGLs.  This congestion can occur with large
  20756 * vport counts during node discovery.
  20757 **/
  20758
  20759uint32_t
  20760lpfc_drain_txq(struct lpfc_hba *phba)
  20761{
  20762	LIST_HEAD(completions);
  20763	struct lpfc_sli_ring *pring;
  20764	struct lpfc_iocbq *piocbq = NULL;
  20765	unsigned long iflags = 0;
  20766	char *fail_msg = NULL;
  20767	uint32_t txq_cnt = 0;
  20768	struct lpfc_queue *wq;
  20769	int ret = 0;
  20770
  20771	if (phba->link_flag & LS_MDS_LOOPBACK) {
  20772		/* MDS WQEs are posted only to the first WQ */
  20773		wq = phba->sli4_hba.hdwq[0].io_wq;
  20774		if (unlikely(!wq))
  20775			return 0;
  20776		pring = wq->pring;
  20777	} else {
  20778		wq = phba->sli4_hba.els_wq;
  20779		if (unlikely(!wq))
  20780			return 0;
  20781		pring = lpfc_phba_elsring(phba);
  20782	}
  20783
  20784	if (unlikely(!pring) || list_empty(&pring->txq))
  20785		return 0;
  20786
  20787	spin_lock_irqsave(&pring->ring_lock, iflags);
  20788	list_for_each_entry(piocbq, &pring->txq, list) {
  20789		txq_cnt++;
  20790	}
  20791
  20792	if (txq_cnt > pring->txq_max)
  20793		pring->txq_max = txq_cnt;
  20794
  20795	spin_unlock_irqrestore(&pring->ring_lock, iflags);
  20796
  20797	while (!list_empty(&pring->txq)) {
  20798		spin_lock_irqsave(&pring->ring_lock, iflags);
  20799
  20800		piocbq = lpfc_sli_ringtx_get(phba, pring);
  20801		if (!piocbq) {
  20802			spin_unlock_irqrestore(&pring->ring_lock, iflags);
  20803			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  20804				"2823 txq empty and txq_cnt is %d\n ",
  20805				txq_cnt);
  20806			break;
  20807		}
  20808		txq_cnt--;
  20809
  20810		ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0);
  20811
  20812		if (ret && ret != IOCB_BUSY) {
  20813			fail_msg = " - Cannot send IO ";
  20814			piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
  20815		}
  20816		if (fail_msg) {
  20817			piocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
  20818			/* Failed means we can't issue and need to cancel */
  20819			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  20820					"2822 IOCB failed %s iotag 0x%x "
  20821					"xri 0x%x %d flg x%x\n",
  20822					fail_msg, piocbq->iotag,
  20823					piocbq->sli4_xritag, ret,
  20824					piocbq->cmd_flag);
  20825			list_add_tail(&piocbq->list, &completions);
  20826			fail_msg = NULL;
  20827		}
  20828		spin_unlock_irqrestore(&pring->ring_lock, iflags);
  20829		if (txq_cnt == 0 || ret == IOCB_BUSY)
  20830			break;
  20831	}
  20832	/* Cancel all the IOCBs that cannot be issued */
  20833	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
  20834			      IOERR_SLI_ABORTED);
  20835
  20836	return txq_cnt;
  20837}
  20838
  20839/**
  20840 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
  20841 * @phba: Pointer to HBA context object.
  20842 * @pwqeq: Pointer to command WQE.
  20843 * @sglq: Pointer to the scatter gather queue object.
  20844 *
  20845 * This routine converts the bpl or bde that is in the WQE
  20846 * to a sgl list for the sli4 hardware. The physical address
  20847 * of the bpl/bde is converted back to a virtual address.
  20848 * If the WQE contains a BPL then the list of BDEs is
  20849 * converted to sli4_sges. If the WQE contains a single
  20850 * BDE then it is converted to a single sli4_sge.
  20851 * The WQE is still in cpu endianness so the contents of
  20852 * the bpl can be used without byte swapping.
  20853 *
  20854 * Returns valid XRI = Success, NO_XRI = Failure.
  20855 */
  20856static uint16_t
  20857lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
  20858		 struct lpfc_sglq *sglq)
  20859{
  20860	uint16_t xritag = NO_XRI;
  20861	struct ulp_bde64 *bpl = NULL;
  20862	struct ulp_bde64 bde;
  20863	struct sli4_sge *sgl  = NULL;
  20864	struct lpfc_dmabuf *dmabuf;
  20865	union lpfc_wqe128 *wqe;
  20866	int numBdes = 0;
  20867	int i = 0;
  20868	uint32_t offset = 0; /* accumulated offset in the sg request list */
  20869	int inbound = 0; /* number of sg reply entries inbound from firmware */
  20870	uint32_t cmd;
  20871
  20872	if (!pwqeq || !sglq)
  20873		return xritag;
  20874
  20875	sgl  = (struct sli4_sge *)sglq->sgl;
  20876	wqe = &pwqeq->wqe;
  20877	pwqeq->iocb.ulpIoTag = pwqeq->iotag;
  20878
  20879	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
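       	/* XMIT_BLS_RSP does not carry a payload sgl; just return the xri */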
  20880	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
  20881		return sglq->sli4_xritag;
  20882	numBdes = pwqeq->num_bdes;
  20883	if (numBdes) {
  20884		/* The addrHigh and addrLow fields within the WQE
  20885		 * have not been byteswapped yet so there is no
  20886		 * need to swap them back.
  20887		 */
  20888		if (pwqeq->bpl_dmabuf)
  20889			dmabuf = pwqeq->bpl_dmabuf;
  20890		else
  20891			return xritag;
  20892
  20893		bpl  = (struct ulp_bde64 *)dmabuf->virt;
  20894		if (!bpl)
  20895			return xritag;
  20896
  20897		for (i = 0; i < numBdes; i++) {
  20898			/* Should already be byte swapped. */
  20899			sgl->addr_hi = bpl->addrHigh;
  20900			sgl->addr_lo = bpl->addrLow;
  20901
  20902			sgl->word2 = le32_to_cpu(sgl->word2);
  20903			if ((i+1) == numBdes)
  20904				bf_set(lpfc_sli4_sge_last, sgl, 1);
  20905			else
  20906				bf_set(lpfc_sli4_sge_last, sgl, 0);
  20907			/* swap the size field back to the cpu so we
  20908			 * can assign it to the sgl.
  20909			 */
  20910			bde.tus.w = le32_to_cpu(bpl->tus.w);
  20911			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
  20912			/* The offsets in the sgl need to be accumulated
  20913			 * separately for the request and reply lists.
  20914			 * The request is always first, the reply follows.
  20915			 */
  20916			switch (cmd) {
  20917			case CMD_GEN_REQUEST64_WQE:
  20918				/* add up the reply sg entries */
  20919				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
  20920					inbound++;
  20921				/* first inbound? reset the offset */
  20922				if (inbound == 1)
  20923					offset = 0;
  20924				bf_set(lpfc_sli4_sge_offset, sgl, offset);
  20925				bf_set(lpfc_sli4_sge_type, sgl,
  20926					LPFC_SGE_TYPE_DATA);
  20927				offset += bde.tus.f.bdeSize;
  20928				break;
  20929			case CMD_FCP_TRSP64_WQE:
  20930				bf_set(lpfc_sli4_sge_offset, sgl, 0);
  20931				bf_set(lpfc_sli4_sge_type, sgl,
  20932					LPFC_SGE_TYPE_DATA);
  20933				break;
  20934			case CMD_FCP_TSEND64_WQE:
  20935			case CMD_FCP_TRECEIVE64_WQE:
  20936				bf_set(lpfc_sli4_sge_type, sgl,
  20937					bpl->tus.f.bdeFlags);
  20938				if (i < 3)
  20939					offset = 0;
  20940				else
  20941					offset += bde.tus.f.bdeSize;
  20942				bf_set(lpfc_sli4_sge_offset, sgl, offset);
  20943				break;
  20944			}
  20945			sgl->word2 = cpu_to_le32(sgl->word2);
  20946			bpl++;
  20947			sgl++;
  20948		}
  20949	} else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
  20950		/* The addrHigh and addrLow fields of the BDE have not
  20951		 * been byteswapped yet so they need to be swapped
  20952		 * before putting them in the sgl.
  20953		 */
  20954		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
  20955		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
  20956		sgl->word2 = le32_to_cpu(sgl->word2);
  20957		bf_set(lpfc_sli4_sge_last, sgl, 1);
  20958		sgl->word2 = cpu_to_le32(sgl->word2);
  20959		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
  20960	}
  20961	return sglq->sli4_xritag;
  20962}
  20963
  20964/**
  20965 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
  20966 * @phba: Pointer to HBA context object.
  20967 * @qp: Pointer to HDW queue.
  20968 * @pwqe: Pointer to command WQE.
  20969 **/
  20970int
  20971lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
  20972		    struct lpfc_iocbq *pwqe)
  20973{
  20974	union lpfc_wqe128 *wqe = &pwqe->wqe;
  20975	struct lpfc_async_xchg_ctx *ctxp;
  20976	struct lpfc_queue *wq;
  20977	struct lpfc_sglq *sglq;
  20978	struct lpfc_sli_ring *pring;
  20979	unsigned long iflags;
  20980	uint32_t ret = 0;
  20981
  20982	/* NVME_LS and NVME_LS ABTS requests. */
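       	/* These borrow an sglq from the ELS pool and post to the NVME LS WQ */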
  20983	if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
  20984		pring =  phba->sli4_hba.nvmels_wq->pring;
  20985		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
  20986					  qp, wq_access);
  20987		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
  20988		if (!sglq) {
  20989			spin_unlock_irqrestore(&pring->ring_lock, iflags);
  20990			return WQE_BUSY;
  20991		}
  20992		pwqe->sli4_lxritag = sglq->sli4_lxritag;
  20993		pwqe->sli4_xritag = sglq->sli4_xritag;
  20994		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
  20995			spin_unlock_irqrestore(&pring->ring_lock, iflags);
  20996			return WQE_ERROR;
  20997		}
  20998		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
  20999		       pwqe->sli4_xritag);
  21000		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
  21001		if (ret) {
  21002			spin_unlock_irqrestore(&pring->ring_lock, iflags);
  21003			return ret;
  21004		}
  21005
  21006		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
  21007		spin_unlock_irqrestore(&pring->ring_lock, iflags);
  21008
  21009		lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
  21010		return 0;
  21011	}
  21012
  21013	/* NVME_FCREQ and NVME_ABTS requests */
  21014	if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
  21015		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
  21016		wq = qp->io_wq;
  21017		pring = wq->pring;
  21018
  21019		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
  21020
  21021		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
  21022					  qp, wq_access);
  21023		ret = lpfc_sli4_wq_put(wq, wqe);
  21024		if (ret) {
  21025			spin_unlock_irqrestore(&pring->ring_lock, iflags);
  21026			return ret;
  21027		}
  21028		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
  21029		spin_unlock_irqrestore(&pring->ring_lock, iflags);
  21030
  21031		lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
  21032		return 0;
  21033	}
  21034
  21035	/* NVMET requests */
  21036	if (pwqe->cmd_flag & LPFC_IO_NVMET) {
  21037		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
  21038		wq = qp->io_wq;
  21039		pring = wq->pring;
  21040
  21041		ctxp = pwqe->context_un.axchg;
  21042		sglq = ctxp->ctxbuf->sglq;
  21043		if (pwqe->sli4_xritag ==  NO_XRI) {
  21044			pwqe->sli4_lxritag = sglq->sli4_lxritag;
  21045			pwqe->sli4_xritag = sglq->sli4_xritag;
  21046		}
  21047		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
  21048		       pwqe->sli4_xritag);
  21049		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
  21050
  21051		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
  21052					  qp, wq_access);
  21053		ret = lpfc_sli4_wq_put(wq, wqe);
  21054		if (ret) {
  21055			spin_unlock_irqrestore(&pring->ring_lock, iflags);
  21056			return ret;
  21057		}
  21058		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
  21059		spin_unlock_irqrestore(&pring->ring_lock, iflags);
  21060
  21061		lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
  21062		return 0;
  21063	}
  21064	return WQE_ERROR;
  21065}
  21066
  21067/**
  21068 * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
  21069 * @phba: Pointer to HBA context object.
  21070 * @cmdiocb: Pointer to driver command iocb object.
  21071 * @cmpl: completion function.
  21072 *
   21073 * Fill the appropriate fields for the abort WQE and call the
   21074 * internal routine lpfc_sli4_issue_wqe to send the WQE.
   21075 * This function is called with hbalock held and no ring_lock held.
  21076 *
  21077 * RETURNS 0 - SUCCESS
  21078 **/
  21079
  21080int
  21081lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  21082			    void *cmpl)
  21083{
  21084	struct lpfc_vport *vport = cmdiocb->vport;
  21085	struct lpfc_iocbq *abtsiocb = NULL;
  21086	union lpfc_wqe128 *abtswqe;
  21087	struct lpfc_io_buf *lpfc_cmd;
  21088	int retval = IOCB_ERROR;
  21089	u16 xritag = cmdiocb->sli4_xritag;
  21090
  21091	/*
   21092	 * The scsi command cannot be in the txq, and it is in flight because
   21093	 * the pCmd is still pointing at the SCSI command we have to abort. There
   21094	 * is no need to search the txcmplq. Just send an abort to the FW.
  21095	 */
  21096
  21097	abtsiocb = __lpfc_sli_get_iocbq(phba);
  21098	if (!abtsiocb)
  21099		return WQE_NORESOURCE;
  21100
  21101	/* Indicate the IO is being aborted by the driver. */
  21102	cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
  21103
  21104	abtswqe = &abtsiocb->wqe;
  21105	memset(abtswqe, 0, sizeof(*abtswqe));
  21106
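	/* The abort WQE identifies the IO to abort by its XRI: abort_tag
	 * carries the xritag with T_XRI_TAG criteria, while wqe_reqtag holds
	 * the abort request's own iotag and the WQE's xri_tag field stays 0.
	 */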
  21107	if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK))
  21108		bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
  21109	bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
  21110	abtswqe->abort_cmd.rsrvd5 = 0;
  21111	abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
  21112	bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
  21113	bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
  21114	bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
  21115	bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
  21116	bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
  21117	bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
  21118
  21119	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
  21120	abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
  21121	abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
  21122	if (cmdiocb->cmd_flag & LPFC_IO_FCP)
  21123		abtsiocb->cmd_flag |= LPFC_IO_FCP;
  21124	if (cmdiocb->cmd_flag & LPFC_IO_NVME)
  21125		abtsiocb->cmd_flag |= LPFC_IO_NVME;
  21126	if (cmdiocb->cmd_flag & LPFC_IO_FOF)
  21127		abtsiocb->cmd_flag |= LPFC_IO_FOF;
  21128	abtsiocb->vport = vport;
  21129	abtsiocb->cmd_cmpl = cmpl;
  21130
  21131	lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
  21132	retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
  21133
  21134	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
  21135			 "0359 Abort xri x%x, original iotag x%x, "
  21136			 "abort cmd iotag x%x retval x%x\n",
  21137			 xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
  21138
  21139	if (retval) {
  21140		cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
  21141		__lpfc_sli_release_iocbq(phba, abtsiocb);
  21142	}
  21143
  21144	return retval;
  21145}
  21146
  21147#ifdef LPFC_MXP_STAT
  21148/**
  21149 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
  21150 * @phba: pointer to lpfc hba data structure.
   21151 * @hwqid: index of the HWQ this snapshot belongs to.
   21152 *
   21153 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
   21154 * 15 seconds after a test case starts running.
   21155 *
   21156 * The user should call lpfc_debugfs_multixripools_write before running a test
   21157 * case to clear stat_snapshot_taken, then start the test case. While the test
   21158 * case is running, stat_snapshot_taken is incremented by 1 every time this
   21159 * routine is called from the heartbeat timer. When stat_snapshot_taken reaches
   21160 * LPFC_MXP_SNAPSHOT_TAKEN, the snapshot is taken.
  21161 **/
  21162void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
  21163{
  21164	struct lpfc_sli4_hdw_queue *qp;
  21165	struct lpfc_multixri_pool *multixri_pool;
  21166	struct lpfc_pvt_pool *pvt_pool;
  21167	struct lpfc_pbl_pool *pbl_pool;
  21168	u32 txcmplq_cnt;
  21169
  21170	qp = &phba->sli4_hba.hdwq[hwqid];
  21171	multixri_pool = qp->p_multixri_pool;
  21172	if (!multixri_pool)
  21173		return;
  21174
  21175	if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
  21176		pvt_pool = &qp->p_multixri_pool->pvt_pool;
  21177		pbl_pool = &qp->p_multixri_pool->pbl_pool;
  21178		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
  21179
  21180		multixri_pool->stat_pbl_count = pbl_pool->count;
  21181		multixri_pool->stat_pvt_count = pvt_pool->count;
  21182		multixri_pool->stat_busy_count = txcmplq_cnt;
  21183	}
  21184
  21185	multixri_pool->stat_snapshot_taken++;
  21186}
  21187#endif
  21188
  21189/**
  21190 * lpfc_adjust_pvt_pool_count - Adjust private pool count
  21191 * @phba: pointer to lpfc hba data structure.
   21192 * @hwqid: index of the HWQ whose pools are adjusted.
   21193 *
   21194 * This routine moves some XRIs from the private to the public pool when the
   21195 * private pool is not busy.
  21196 **/
  21197void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
  21198{
  21199	struct lpfc_multixri_pool *multixri_pool;
  21200	u32 io_req_count;
  21201	u32 prev_io_req_count;
  21202
  21203	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
  21204	if (!multixri_pool)
  21205		return;
  21206	io_req_count = multixri_pool->io_req_count;
  21207	prev_io_req_count = multixri_pool->prev_io_req_count;
  21208
  21209	if (prev_io_req_count != io_req_count) {
  21210		/* Private pool is busy */
  21211		multixri_pool->prev_io_req_count = io_req_count;
  21212	} else {
  21213		/* Private pool is not busy.
  21214		 * Move XRIs from private to public pool.
  21215		 */
  21216		lpfc_move_xri_pvt_to_pbl(phba, hwqid);
  21217	}
  21218}
  21219
  21220/**
  21221 * lpfc_adjust_high_watermark - Adjust high watermark
  21222 * @phba: pointer to lpfc hba data structure.
   21223 * @hwqid: index of the HWQ whose watermark is adjusted.
   21224 *
   21225 * This routine sets the high watermark to the number of outstanding XRIs,
   21226 * but makes sure the new value stays between xri_limit/2 and xri_limit.
  21227 **/
  21228void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
  21229{
  21230	u32 new_watermark;
  21231	u32 watermark_max;
  21232	u32 watermark_min;
  21233	u32 xri_limit;
  21234	u32 txcmplq_cnt;
  21235	u32 abts_io_bufs;
  21236	struct lpfc_multixri_pool *multixri_pool;
  21237	struct lpfc_sli4_hdw_queue *qp;
  21238
  21239	qp = &phba->sli4_hba.hdwq[hwqid];
  21240	multixri_pool = qp->p_multixri_pool;
  21241	if (!multixri_pool)
  21242		return;
  21243	xri_limit = multixri_pool->xri_limit;
  21244
  21245	watermark_max = xri_limit;
  21246	watermark_min = xri_limit / 2;
  21247
  21248	txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
  21249	abts_io_bufs = qp->abts_scsi_io_bufs;
  21250	abts_io_bufs += qp->abts_nvme_io_bufs;
  21251
  21252	new_watermark = txcmplq_cnt + abts_io_bufs;
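	/* Clamp the watermark to [xri_limit/2, xri_limit]; e.g. with an
	 * xri_limit of 512 the new watermark always lands in [256, 512].
	 */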
  21253	new_watermark = min(watermark_max, new_watermark);
  21254	new_watermark = max(watermark_min, new_watermark);
  21255	multixri_pool->pvt_pool.high_watermark = new_watermark;
  21256
  21257#ifdef LPFC_MXP_STAT
  21258	multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
  21259					  new_watermark);
  21260#endif
  21261}
  21262
  21263/**
  21264 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
  21265 * @phba: pointer to lpfc hba data structure.
   21266 * @hwqid: index of the HWQ whose pools are adjusted.
   21267 *
   21268 * This routine is called from the heartbeat timer when pvt_pool is idle.
   21269 * All free XRIs are moved from the private to the public pool on hwqid in
   21270 * two steps. The first step moves (all - low_watermark) XRIs.
   21271 * The second step moves the rest of the XRIs.
  21272 **/
  21273void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
  21274{
  21275	struct lpfc_pbl_pool *pbl_pool;
  21276	struct lpfc_pvt_pool *pvt_pool;
  21277	struct lpfc_sli4_hdw_queue *qp;
  21278	struct lpfc_io_buf *lpfc_ncmd;
  21279	struct lpfc_io_buf *lpfc_ncmd_next;
  21280	unsigned long iflag;
  21281	struct list_head tmp_list;
  21282	u32 tmp_count;
  21283
  21284	qp = &phba->sli4_hba.hdwq[hwqid];
  21285	pbl_pool = &qp->p_multixri_pool->pbl_pool;
  21286	pvt_pool = &qp->p_multixri_pool->pvt_pool;
  21287	tmp_count = 0;
  21288
  21289	lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
  21290	lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
  21291
  21292	if (pvt_pool->count > pvt_pool->low_watermark) {
  21293		/* Step 1: move (all - low_watermark) from pvt_pool
  21294		 * to pbl_pool
  21295		 */
  21296
  21297		/* Move low watermark of bufs from pvt_pool to tmp_list */
  21298		INIT_LIST_HEAD(&tmp_list);
  21299		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
  21300					 &pvt_pool->list, list) {
  21301			list_move_tail(&lpfc_ncmd->list, &tmp_list);
  21302			tmp_count++;
  21303			if (tmp_count >= pvt_pool->low_watermark)
  21304				break;
  21305		}
  21306
  21307		/* Move all bufs from pvt_pool to pbl_pool */
  21308		list_splice_init(&pvt_pool->list, &pbl_pool->list);
  21309
  21310		/* Move all bufs from tmp_list to pvt_pool */
  21311		list_splice(&tmp_list, &pvt_pool->list);
  21312
  21313		pbl_pool->count += (pvt_pool->count - tmp_count);
  21314		pvt_pool->count = tmp_count;
  21315	} else {
  21316		/* Step 2: move the rest from pvt_pool to pbl_pool */
  21317		list_splice_init(&pvt_pool->list, &pbl_pool->list);
  21318		pbl_pool->count += pvt_pool->count;
  21319		pvt_pool->count = 0;
  21320	}
  21321
  21322	spin_unlock(&pvt_pool->lock);
  21323	spin_unlock_irqrestore(&pbl_pool->lock, iflag);
  21324}
  21325
  21326/**
  21327 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
  21328 * @phba: pointer to lpfc hba data structure
  21329 * @qp: pointer to HDW queue
  21330 * @pbl_pool: specified public free XRI pool
  21331 * @pvt_pool: specified private free XRI pool
  21332 * @count: number of XRIs to move
  21333 *
   21334 * This routine tries to move some free common bufs from the specified pbl_pool
   21335 * to the specified pvt_pool. It might move fewer than count XRIs if there are
   21336 * not enough in the public pool.
  21337 *
  21338 * Return:
  21339 *   true - if XRIs are successfully moved from the specified pbl_pool to the
  21340 *          specified pvt_pool
  21341 *   false - if the specified pbl_pool is empty or locked by someone else
  21342 **/
  21343static bool
  21344_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
  21345			  struct lpfc_pbl_pool *pbl_pool,
  21346			  struct lpfc_pvt_pool *pvt_pool, u32 count)
  21347{
  21348	struct lpfc_io_buf *lpfc_ncmd;
  21349	struct lpfc_io_buf *lpfc_ncmd_next;
  21350	unsigned long iflag;
  21351	int ret;
  21352
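	/* Only trylock the public pool's lock: if another CPU holds it, give
	 * up and let the caller move on to the next hwq's pbl_pool instead of
	 * spinning here.
	 */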
  21353	ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
  21354	if (ret) {
  21355		if (pbl_pool->count) {
  21356			/* Move a batch of XRIs from public to private pool */
  21357			lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
  21358			list_for_each_entry_safe(lpfc_ncmd,
  21359						 lpfc_ncmd_next,
  21360						 &pbl_pool->list,
  21361						 list) {
  21362				list_move_tail(&lpfc_ncmd->list,
  21363					       &pvt_pool->list);
  21364				pvt_pool->count++;
  21365				pbl_pool->count--;
  21366				count--;
  21367				if (count == 0)
  21368					break;
  21369			}
  21370
  21371			spin_unlock(&pvt_pool->lock);
  21372			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
  21373			return true;
  21374		}
  21375		spin_unlock_irqrestore(&pbl_pool->lock, iflag);
  21376	}
  21377
  21378	return false;
  21379}
  21380
  21381/**
  21382 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
  21383 * @phba: pointer to lpfc hba data structure.
   21384 * @hwqid: index of the HWQ whose private pool is refilled.
   21385 * @count: number of XRIs to move
   21386 *
   21387 * This routine tries to find some free common bufs in one of the public pools
   21388 * using a round-robin method. The search always starts with the local hwqid,
   21389 * then continues from the HWQ recorded last time (rrb_next_hwqid). Once a
   21390 * public pool is found, a batch of free common bufs is moved to the private
   21391 * pool on hwqid. It might move fewer than count XRIs if the public pool is low.
  21392 **/
  21393void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
  21394{
  21395	struct lpfc_multixri_pool *multixri_pool;
  21396	struct lpfc_multixri_pool *next_multixri_pool;
  21397	struct lpfc_pvt_pool *pvt_pool;
  21398	struct lpfc_pbl_pool *pbl_pool;
  21399	struct lpfc_sli4_hdw_queue *qp;
  21400	u32 next_hwqid;
  21401	u32 hwq_count;
  21402	int ret;
  21403
  21404	qp = &phba->sli4_hba.hdwq[hwqid];
  21405	multixri_pool = qp->p_multixri_pool;
  21406	pvt_pool = &multixri_pool->pvt_pool;
  21407	pbl_pool = &multixri_pool->pbl_pool;
  21408
  21409	/* Check if local pbl_pool is available */
  21410	ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
  21411	if (ret) {
  21412#ifdef LPFC_MXP_STAT
  21413		multixri_pool->local_pbl_hit_count++;
  21414#endif
  21415		return;
  21416	}
  21417
  21418	hwq_count = phba->cfg_hdw_queue;
  21419
  21420	/* Get the next hwqid which was found last time */
  21421	next_hwqid = multixri_pool->rrb_next_hwqid;
  21422
  21423	do {
  21424		/* Go to next hwq */
  21425		next_hwqid = (next_hwqid + 1) % hwq_count;
  21426
  21427		next_multixri_pool =
  21428			phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
  21429		pbl_pool = &next_multixri_pool->pbl_pool;
  21430
  21431		/* Check if the public free xri pool is available */
  21432		ret = _lpfc_move_xri_pbl_to_pvt(
  21433			phba, qp, pbl_pool, pvt_pool, count);
  21434
   21435		/* Exit the while-loop on success or once all hwqids are checked */
  21436	} while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
  21437
  21438	/* Starting point for the next time */
  21439	multixri_pool->rrb_next_hwqid = next_hwqid;
  21440
  21441	if (!ret) {
   21442		/* stats: all public pools are empty */
  21443		multixri_pool->pbl_empty_count++;
  21444	}
  21445
  21446#ifdef LPFC_MXP_STAT
  21447	if (ret) {
  21448		if (next_hwqid == hwqid)
  21449			multixri_pool->local_pbl_hit_count++;
  21450		else
  21451			multixri_pool->other_pbl_hit_count++;
  21452	}
  21453#endif
  21454}
  21455
  21456/**
  21457 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
  21458 * @phba: pointer to lpfc hba data structure.
   21459 * @hwqid: index of the HWQ whose private pool is checked.
   21460 *
   21461 * This routine gets a batch of XRIs from pbl_pool if the pvt_pool count is
   21462 * below the low watermark.
  21463 **/
  21464void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
  21465{
  21466	struct lpfc_multixri_pool *multixri_pool;
  21467	struct lpfc_pvt_pool *pvt_pool;
  21468
  21469	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
  21470	pvt_pool = &multixri_pool->pvt_pool;
  21471
  21472	if (pvt_pool->count < pvt_pool->low_watermark)
  21473		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
  21474}
  21475
  21476/**
  21477 * lpfc_release_io_buf - Return one IO buf back to free pool
  21478 * @phba: pointer to lpfc hba data structure.
  21479 * @lpfc_ncmd: IO buf to be returned.
   21480 * @qp: the HWQ the buffer belongs to.
   21481 *
   21482 * This routine returns one IO buf back to the free pool. If this is an urgent
   21483 * IO, the IO buf is returned to the expedite pool. If cfg_xri_rebalancing==1,
   21484 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
   21485 * xri_limit.  If cfg_xri_rebalancing==0, the IO buf is returned to
   21486 * lpfc_io_buf_list_put.
  21487 **/
  21488void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
  21489			 struct lpfc_sli4_hdw_queue *qp)
  21490{
  21491	unsigned long iflag;
  21492	struct lpfc_pbl_pool *pbl_pool;
  21493	struct lpfc_pvt_pool *pvt_pool;
  21494	struct lpfc_epd_pool *epd_pool;
  21495	u32 txcmplq_cnt;
  21496	u32 xri_owned;
  21497	u32 xri_limit;
  21498	u32 abts_io_bufs;
  21499
  21500	/* MUST zero fields if buffer is reused by another protocol */
  21501	lpfc_ncmd->nvmeCmd = NULL;
  21502	lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
  21503
  21504	if (phba->cfg_xpsgl && !phba->nvmet_support &&
  21505	    !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
  21506		lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
  21507
  21508	if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
  21509		lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
  21510
  21511	if (phba->cfg_xri_rebalancing) {
  21512		if (lpfc_ncmd->expedite) {
  21513			/* Return to expedite pool */
  21514			epd_pool = &phba->epd_pool;
  21515			spin_lock_irqsave(&epd_pool->lock, iflag);
  21516			list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
  21517			epd_pool->count++;
  21518			spin_unlock_irqrestore(&epd_pool->lock, iflag);
  21519			return;
  21520		}
  21521
  21522		/* Avoid invalid access if an IO sneaks in and is being rejected
  21523		 * just _after_ xri pools are destroyed in lpfc_offline.
  21524		 * Nothing much can be done at this point.
  21525		 */
  21526		if (!qp->p_multixri_pool)
  21527			return;
  21528
  21529		pbl_pool = &qp->p_multixri_pool->pbl_pool;
  21530		pvt_pool = &qp->p_multixri_pool->pvt_pool;
  21531
  21532		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
  21533		abts_io_bufs = qp->abts_scsi_io_bufs;
  21534		abts_io_bufs += qp->abts_nvme_io_bufs;
  21535
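		/* xri_owned approximates how many XRIs this hwq currently
		 * holds: free bufs in its private pool plus in-flight and
		 * aborted IOs.
		 */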
  21536		xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
  21537		xri_limit = qp->p_multixri_pool->xri_limit;
  21538
  21539#ifdef LPFC_MXP_STAT
  21540		if (xri_owned <= xri_limit)
  21541			qp->p_multixri_pool->below_limit_count++;
  21542		else
  21543			qp->p_multixri_pool->above_limit_count++;
  21544#endif
  21545
  21546		/* XRI goes to either public or private free xri pool
  21547		 *     based on watermark and xri_limit
  21548		 */
  21549		if ((pvt_pool->count < pvt_pool->low_watermark) ||
  21550		    (xri_owned < xri_limit &&
  21551		     pvt_pool->count < pvt_pool->high_watermark)) {
  21552			lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
  21553						  qp, free_pvt_pool);
  21554			list_add_tail(&lpfc_ncmd->list,
  21555				      &pvt_pool->list);
  21556			pvt_pool->count++;
  21557			spin_unlock_irqrestore(&pvt_pool->lock, iflag);
  21558		} else {
  21559			lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
  21560						  qp, free_pub_pool);
  21561			list_add_tail(&lpfc_ncmd->list,
  21562				      &pbl_pool->list);
  21563			pbl_pool->count++;
  21564			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
  21565		}
  21566	} else {
  21567		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
  21568					  qp, free_xri);
  21569		list_add_tail(&lpfc_ncmd->list,
  21570			      &qp->lpfc_io_buf_list_put);
  21571		qp->put_io_bufs++;
  21572		spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
  21573				       iflag);
  21574	}
  21575}
  21576
  21577/**
  21578 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
  21579 * @phba: pointer to lpfc hba data structure.
  21580 * @qp: pointer to HDW queue
  21581 * @pvt_pool: pointer to private pool data structure.
  21582 * @ndlp: pointer to lpfc nodelist data structure.
  21583 *
  21584 * This routine tries to get one free IO buf from private pool.
  21585 *
  21586 * Return:
  21587 *   pointer to one free IO buf - if private pool is not empty
  21588 *   NULL - if private pool is empty
  21589 **/
  21590static struct lpfc_io_buf *
  21591lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
  21592				  struct lpfc_sli4_hdw_queue *qp,
  21593				  struct lpfc_pvt_pool *pvt_pool,
  21594				  struct lpfc_nodelist *ndlp)
  21595{
  21596	struct lpfc_io_buf *lpfc_ncmd;
  21597	struct lpfc_io_buf *lpfc_ncmd_next;
  21598	unsigned long iflag;
  21599
  21600	lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
  21601	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
  21602				 &pvt_pool->list, list) {
  21603		if (lpfc_test_rrq_active(
  21604			phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
  21605			continue;
  21606		list_del(&lpfc_ncmd->list);
  21607		pvt_pool->count--;
  21608		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
  21609		return lpfc_ncmd;
  21610	}
  21611	spin_unlock_irqrestore(&pvt_pool->lock, iflag);
  21612
  21613	return NULL;
  21614}
  21615
  21616/**
  21617 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
  21618 * @phba: pointer to lpfc hba data structure.
  21619 *
  21620 * This routine tries to get one free IO buf from expedite pool.
  21621 *
  21622 * Return:
  21623 *   pointer to one free IO buf - if expedite pool is not empty
  21624 *   NULL - if expedite pool is empty
  21625 **/
  21626static struct lpfc_io_buf *
  21627lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
  21628{
  21629	struct lpfc_io_buf *lpfc_ncmd;
  21630	struct lpfc_io_buf *lpfc_ncmd_next;
  21631	unsigned long iflag;
  21632	struct lpfc_epd_pool *epd_pool;
  21633
  21634	epd_pool = &phba->epd_pool;
  21635	lpfc_ncmd = NULL;
  21636
  21637	spin_lock_irqsave(&epd_pool->lock, iflag);
  21638	if (epd_pool->count > 0) {
  21639		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
  21640					 &epd_pool->list, list) {
  21641			list_del(&lpfc_ncmd->list);
  21642			epd_pool->count--;
  21643			break;
  21644		}
  21645	}
  21646	spin_unlock_irqrestore(&epd_pool->lock, iflag);
  21647
  21648	return lpfc_ncmd;
  21649}
  21650
  21651/**
   21652 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
   21653 * @phba: pointer to lpfc hba data structure.
   21654 * @ndlp: pointer to lpfc nodelist data structure.
   21655 * @hwqid: index of the HWQ to allocate from
  21656 * @expedite: 1 means this request is urgent.
  21657 *
  21658 * This routine will do the following actions and then return a pointer to
  21659 * one free IO buf.
  21660 *
  21661 * 1. If private free xri count is empty, move some XRIs from public to
  21662 *    private pool.
  21663 * 2. Get one XRI from private free xri pool.
  21664 * 3. If we fail to get one from pvt_pool and this is an expedite request,
  21665 *    get one free xri from expedite pool.
  21666 *
  21667 * Note: ndlp is only used on SCSI side for RRQ testing.
  21668 *       The caller should pass NULL for ndlp on NVME side.
  21669 *
  21670 * Return:
  21671 *   pointer to one free IO buf - if private pool is not empty
  21672 *   NULL - if private pool is empty
  21673 **/
  21674static struct lpfc_io_buf *
  21675lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
  21676				    struct lpfc_nodelist *ndlp,
  21677				    int hwqid, int expedite)
  21678{
  21679	struct lpfc_sli4_hdw_queue *qp;
  21680	struct lpfc_multixri_pool *multixri_pool;
  21681	struct lpfc_pvt_pool *pvt_pool;
  21682	struct lpfc_io_buf *lpfc_ncmd;
  21683
  21684	qp = &phba->sli4_hba.hdwq[hwqid];
  21685	lpfc_ncmd = NULL;
  21686	if (!qp) {
  21687		lpfc_printf_log(phba, KERN_INFO,
  21688				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
  21689				"5556 NULL qp for hwqid  x%x\n", hwqid);
  21690		return lpfc_ncmd;
  21691	}
  21692	multixri_pool = qp->p_multixri_pool;
  21693	if (!multixri_pool) {
  21694		lpfc_printf_log(phba, KERN_INFO,
  21695				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
  21696				"5557 NULL multixri for hwqid  x%x\n", hwqid);
  21697		return lpfc_ncmd;
  21698	}
  21699	pvt_pool = &multixri_pool->pvt_pool;
  21700	if (!pvt_pool) {
  21701		lpfc_printf_log(phba, KERN_INFO,
  21702				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
  21703				"5558 NULL pvt_pool for hwqid  x%x\n", hwqid);
  21704		return lpfc_ncmd;
  21705	}
  21706	multixri_pool->io_req_count++;
  21707
  21708	/* If pvt_pool is empty, move some XRIs from public to private pool */
  21709	if (pvt_pool->count == 0)
  21710		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
  21711
  21712	/* Get one XRI from private free xri pool */
  21713	lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
  21714
  21715	if (lpfc_ncmd) {
  21716		lpfc_ncmd->hdwq = qp;
  21717		lpfc_ncmd->hdwq_no = hwqid;
  21718	} else if (expedite) {
  21719		/* If we fail to get one from pvt_pool and this is an expedite
  21720		 * request, get one free xri from expedite pool.
  21721		 */
  21722		lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
  21723	}
  21724
  21725	return lpfc_ncmd;
  21726}
  21727
  21728static inline struct lpfc_io_buf *
  21729lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
  21730{
  21731	struct lpfc_sli4_hdw_queue *qp;
  21732	struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
  21733
  21734	qp = &phba->sli4_hba.hdwq[idx];
  21735	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
  21736				 &qp->lpfc_io_buf_list_get, list) {
  21737		if (lpfc_test_rrq_active(phba, ndlp,
  21738					 lpfc_cmd->cur_iocbq.sli4_lxritag))
  21739			continue;
  21740
  21741		if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
  21742			continue;
  21743
  21744		list_del_init(&lpfc_cmd->list);
  21745		qp->get_io_bufs--;
  21746		lpfc_cmd->hdwq = qp;
  21747		lpfc_cmd->hdwq_no = idx;
  21748		return lpfc_cmd;
  21749	}
  21750	return NULL;
  21751}
  21752
  21753/**
  21754 * lpfc_get_io_buf - Get one IO buffer from free pool
  21755 * @phba: The HBA for which this call is being executed.
  21756 * @ndlp: pointer to lpfc nodelist data structure.
   21757 * @hwqid: index of the HWQ to allocate from
   21758 * @expedite: 1 means this request is urgent.
   21759 *
   21760 * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing==1,
   21761 * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0, it
   21762 * removes an IO buffer from the head of the @hdwq io_buf_list and returns it.
  21763 *
  21764 * Note: ndlp is only used on SCSI side for RRQ testing.
  21765 *       The caller should pass NULL for ndlp on NVME side.
  21766 *
  21767 * Return codes:
  21768 *   NULL - Error
  21769 *   Pointer to lpfc_io_buf - Success
  21770 **/
  21771struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
  21772				    struct lpfc_nodelist *ndlp,
  21773				    u32 hwqid, int expedite)
  21774{
  21775	struct lpfc_sli4_hdw_queue *qp;
  21776	unsigned long iflag;
  21777	struct lpfc_io_buf *lpfc_cmd;
  21778
  21779	qp = &phba->sli4_hba.hdwq[hwqid];
  21780	lpfc_cmd = NULL;
  21781	if (!qp) {
  21782		lpfc_printf_log(phba, KERN_WARNING,
  21783				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
  21784				"5555 NULL qp for hwqid  x%x\n", hwqid);
  21785		return lpfc_cmd;
  21786	}
  21787
  21788	if (phba->cfg_xri_rebalancing)
  21789		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
  21790			phba, ndlp, hwqid, expedite);
  21791	else {
  21792		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
  21793					  qp, alloc_xri_get);
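		/* A few bufs (LPFC_NVME_EXPEDITE_XRICNT) are held in reserve;
		 * ordinary requests only take from the get list above that
		 * threshold, expedite requests may dip into the reserve.
		 */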
  21794		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
  21795			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
  21796		if (!lpfc_cmd) {
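			/* Get list exhausted (or only the reserve is left):
			 * refill it by splicing over the put list, then retry.
			 */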
  21797			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
  21798					  qp, alloc_xri_put);
  21799			list_splice(&qp->lpfc_io_buf_list_put,
  21800				    &qp->lpfc_io_buf_list_get);
  21801			qp->get_io_bufs += qp->put_io_bufs;
  21802			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
  21803			qp->put_io_bufs = 0;
  21804			spin_unlock(&qp->io_buf_list_put_lock);
  21805			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
  21806			    expedite)
  21807				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
  21808		}
  21809		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
  21810	}
  21811
  21812	return lpfc_cmd;
  21813}
  21814
  21815/**
  21816 * lpfc_read_object - Retrieve object data from HBA
  21817 * @phba: The HBA for which this call is being executed.
  21818 * @rdobject: Pathname of object data we want to read.
  21819 * @datap: Pointer to where data will be copied to.
  21820 * @datasz: size of data area
  21821 *
  21822 * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
  21823 * The data will be truncated if datasz is not large enough.
  21824 * Version 1 is not supported with Embedded mbox cmd, so we must use version 0.
   21825 * Returns the number of bytes actually read from the object, or a negative errno.
  21826 */
  21827int
  21828lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
  21829		 uint32_t datasz)
  21830{
  21831	struct lpfc_mbx_read_object *read_object;
  21832	LPFC_MBOXQ_t *mbox;
  21833	int rc, length, eof, j, byte_cnt = 0;
  21834	uint32_t shdr_status, shdr_add_status;
  21835	union lpfc_sli4_cfg_shdr *shdr;
  21836	struct lpfc_dmabuf *pcmd;
  21837	u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
  21838
  21839	/* sanity check on queue memory */
  21840	if (!datap)
  21841		return -ENODEV;
  21842
  21843	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  21844	if (!mbox)
  21845		return -ENOMEM;
  21846	length = (sizeof(struct lpfc_mbx_read_object) -
  21847		  sizeof(struct lpfc_sli4_cfg_mhdr));
  21848	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  21849			 LPFC_MBOX_OPCODE_READ_OBJECT,
  21850			 length, LPFC_SLI4_MBX_EMBED);
  21851	read_object = &mbox->u.mqe.un.read_object;
  21852	shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;
  21853
  21854	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
  21855	bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
  21856	read_object->u.request.rd_object_offset = 0;
  21857	read_object->u.request.rd_object_cnt = 1;
  21858
  21859	memset((void *)read_object->u.request.rd_object_name, 0,
  21860	       LPFC_OBJ_NAME_SZ);
  21861	scnprintf((char *)rd_object_name, sizeof(rd_object_name), rdobject);
  21862	for (j = 0; j < strlen(rdobject); j++)
  21863		read_object->u.request.rd_object_name[j] =
  21864			cpu_to_le32(rd_object_name[j]);
  21865
  21866	pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
  21867	if (pcmd)
  21868		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
  21869	if (!pcmd || !pcmd->virt) {
  21870		kfree(pcmd);
  21871		mempool_free(mbox, phba->mbox_mem_pool);
  21872		return -ENOMEM;
  21873	}
  21874	memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
  21875	read_object->u.request.rd_object_hbuf[0].pa_lo =
  21876		putPaddrLow(pcmd->phys);
  21877	read_object->u.request.rd_object_hbuf[0].pa_hi =
  21878		putPaddrHigh(pcmd->phys);
  21879	read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;
  21880
  21881	mbox->vport = phba->pport;
  21882	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  21883	mbox->ctx_ndlp = NULL;
  21884
  21885	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  21886	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  21887	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  21888
  21889	if (shdr_status == STATUS_FAILED &&
  21890	    shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
  21891		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
  21892				"4674 No port cfg file in FW.\n");
  21893		byte_cnt = -ENOENT;
  21894	} else if (shdr_status || shdr_add_status || rc) {
  21895		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
  21896				"2625 READ_OBJECT mailbox failed with "
  21897				"status x%x add_status x%x, mbx status x%x\n",
  21898				shdr_status, shdr_add_status, rc);
  21899		byte_cnt = -ENXIO;
  21900	} else {
  21901		/* Success */
  21902		length = read_object->u.response.rd_object_actual_rlen;
  21903		eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
  21904		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
  21905				"2626 READ_OBJECT Success len %d:%d, EOF %d\n",
  21906				length, datasz, eof);
  21907
   21908		/* Detect that the port config file exists but is empty */
  21909		if (!length && eof) {
  21910			byte_cnt = 0;
  21911			goto exit;
  21912		}
  21913
  21914		byte_cnt = length;
  21915		lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
  21916	}
  21917
  21918 exit:
  21919	/* This is an embedded SLI4 mailbox with an external buffer allocated.
   21920	 * Free the pcmd and then clean up with the correct routine.
  21921	 */
  21922	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
  21923	kfree(pcmd);
  21924	lpfc_sli4_mbox_cmd_free(phba, mbox);
  21925	return byte_cnt;
  21926}
  21927
  21928/**
  21929 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
  21930 * @phba: The HBA for which this call is being executed.
  21931 * @lpfc_buf: IO buf structure to append the SGL chunk
  21932 *
  21933 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
  21934 * and will allocate an SGL chunk if the pool is empty.
  21935 *
  21936 * Return codes:
  21937 *   NULL - Error
  21938 *   Pointer to sli4_hybrid_sgl - Success
  21939 **/
  21940struct sli4_hybrid_sgl *
  21941lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
  21942{
  21943	struct sli4_hybrid_sgl *list_entry = NULL;
  21944	struct sli4_hybrid_sgl *tmp = NULL;
  21945	struct sli4_hybrid_sgl *allocated_sgl = NULL;
  21946	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
  21947	struct list_head *buf_list = &hdwq->sgl_list;
  21948	unsigned long iflags;
  21949
  21950	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
  21951
  21952	if (likely(!list_empty(buf_list))) {
  21953		/* break off 1 chunk from the sgl_list */
  21954		list_for_each_entry_safe(list_entry, tmp,
  21955					 buf_list, list_node) {
  21956			list_move_tail(&list_entry->list_node,
  21957				       &lpfc_buf->dma_sgl_xtra_list);
  21958			break;
  21959		}
  21960	} else {
  21961		/* allocate more */
  21962		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
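		/* Drop the pool lock before allocating; the new chunk is
		 * allocated GFP_ATOMIC on the NUMA node associated with this
		 * hdwq's WQ.
		 */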
  21963		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
  21964				   cpu_to_node(hdwq->io_wq->chann));
  21965		if (!tmp) {
  21966			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  21967					"8353 error kmalloc memory for HDWQ "
  21968					"%d %s\n",
  21969					lpfc_buf->hdwq_no, __func__);
  21970			return NULL;
  21971		}
  21972
  21973		tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
  21974					      GFP_ATOMIC, &tmp->dma_phys_sgl);
  21975		if (!tmp->dma_sgl) {
  21976			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  21977					"8354 error pool_alloc memory for HDWQ "
  21978					"%d %s\n",
  21979					lpfc_buf->hdwq_no, __func__);
  21980			kfree(tmp);
  21981			return NULL;
  21982		}
  21983
  21984		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
  21985		list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
  21986	}
  21987
  21988	allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
  21989					struct sli4_hybrid_sgl,
  21990					list_node);
  21991
  21992	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
  21993
  21994	return allocated_sgl;
  21995}
  21996
  21997/**
  21998 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
  21999 * @phba: The HBA for which this call is being executed.
  22000 * @lpfc_buf: IO buf structure with the SGL chunk
  22001 *
  22002 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
  22003 *
  22004 * Return codes:
  22005 *   0 - Success
  22006 *   -EINVAL - Error
  22007 **/
  22008int
  22009lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
  22010{
  22011	int rc = 0;
  22012	struct sli4_hybrid_sgl *list_entry = NULL;
  22013	struct sli4_hybrid_sgl *tmp = NULL;
  22014	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
  22015	struct list_head *buf_list = &hdwq->sgl_list;
  22016	unsigned long iflags;
  22017
  22018	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
  22019
  22020	if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
  22021		list_for_each_entry_safe(list_entry, tmp,
  22022					 &lpfc_buf->dma_sgl_xtra_list,
  22023					 list_node) {
  22024			list_move_tail(&list_entry->list_node,
  22025				       buf_list);
  22026		}
  22027	} else {
  22028		rc = -EINVAL;
  22029	}
  22030
  22031	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
  22032	return rc;
  22033}
  22034
  22035/**
  22036 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
  22037 * @phba: phba object
   22038 * @hdwq: hdwq to clean up SGL buffer resources on
  22039 *
  22040 * This routine frees all SGL chunks of hdwq SGL chunk pool.
  22041 *
  22042 * Return codes:
  22043 *   None
  22044 **/
  22045void
  22046lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
  22047		       struct lpfc_sli4_hdw_queue *hdwq)
  22048{
  22049	struct list_head *buf_list = &hdwq->sgl_list;
  22050	struct sli4_hybrid_sgl *list_entry = NULL;
  22051	struct sli4_hybrid_sgl *tmp = NULL;
  22052	unsigned long iflags;
  22053
  22054	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
  22055
  22056	/* Free sgl pool */
  22057	list_for_each_entry_safe(list_entry, tmp,
  22058				 buf_list, list_node) {
  22059		dma_pool_free(phba->lpfc_sg_dma_buf_pool,
  22060			      list_entry->dma_sgl,
  22061			      list_entry->dma_phys_sgl);
  22062		list_del(&list_entry->list_node);
  22063		kfree(list_entry);
  22064	}
  22065
  22066	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
  22067}
  22068
  22069/**
  22070 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
  22071 * @phba: The HBA for which this call is being executed.
  22072 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
  22073 *
  22074 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
   22075 * and will allocate a CMD/RSP buffer if the pool is empty.
  22076 *
  22077 * Return codes:
  22078 *   NULL - Error
  22079 *   Pointer to fcp_cmd_rsp_buf - Success
  22080 **/
  22081struct fcp_cmd_rsp_buf *
  22082lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
  22083			      struct lpfc_io_buf *lpfc_buf)
  22084{
  22085	struct fcp_cmd_rsp_buf *list_entry = NULL;
  22086	struct fcp_cmd_rsp_buf *tmp = NULL;
  22087	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
  22088	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
  22089	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
  22090	unsigned long iflags;
  22091
  22092	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
  22093
  22094	if (likely(!list_empty(buf_list))) {
  22095		/* break off 1 chunk from the list */
  22096		list_for_each_entry_safe(list_entry, tmp,
  22097					 buf_list,
  22098					 list_node) {
  22099			list_move_tail(&list_entry->list_node,
  22100				       &lpfc_buf->dma_cmd_rsp_list);
  22101			break;
  22102		}
  22103	} else {
  22104		/* allocate more */
  22105		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
  22106		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
  22107				   cpu_to_node(hdwq->io_wq->chann));
  22108		if (!tmp) {
  22109			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  22110					"8355 error kmalloc memory for HDWQ "
  22111					"%d %s\n",
  22112					lpfc_buf->hdwq_no, __func__);
  22113			return NULL;
  22114		}
  22115
  22116		tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
  22117						GFP_ATOMIC,
  22118						&tmp->fcp_cmd_rsp_dma_handle);
  22119
  22120		if (!tmp->fcp_cmnd) {
  22121			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  22122					"8356 error pool_alloc memory for HDWQ "
  22123					"%d %s\n",
  22124					lpfc_buf->hdwq_no, __func__);
  22125			kfree(tmp);
  22126			return NULL;
  22127		}
  22128
  22129		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
  22130				sizeof(struct fcp_cmnd));
  22131
  22132		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
  22133		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
  22134	}
  22135
  22136	allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
  22137					struct fcp_cmd_rsp_buf,
  22138					list_node);
  22139
  22140	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
  22141
  22142	return allocated_buf;
  22143}
  22144
  22145/**
  22146 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
  22147 * @phba: The HBA for which this call is being executed.
  22148 * @lpfc_buf: IO buf structure with the CMD/RSP buf
  22149 *
   22150 * This routine puts one CMD/RSP buffer back into the hdwq's CMD/RSP pool.
  22151 *
  22152 * Return codes:
  22153 *   0 - Success
  22154 *   -EINVAL - Error
  22155 **/
  22156int
  22157lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
  22158			      struct lpfc_io_buf *lpfc_buf)
  22159{
  22160	int rc = 0;
  22161	struct fcp_cmd_rsp_buf *list_entry = NULL;
  22162	struct fcp_cmd_rsp_buf *tmp = NULL;
  22163	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
  22164	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
  22165	unsigned long iflags;
  22166
  22167	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
  22168
  22169	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
  22170		list_for_each_entry_safe(list_entry, tmp,
  22171					 &lpfc_buf->dma_cmd_rsp_list,
  22172					 list_node) {
  22173			list_move_tail(&list_entry->list_node,
  22174				       buf_list);
  22175		}
  22176	} else {
  22177		rc = -EINVAL;
  22178	}
  22179
  22180	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
  22181	return rc;
  22182}
  22183
  22184/**
  22185 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
  22186 * @phba: phba object
   22187 * @hdwq: hdwq to clean up CMD/RSP buffer resources on
  22188 *
  22189 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
  22190 *
  22191 * Return codes:
  22192 *   None
  22193 **/
  22194void
  22195lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
  22196			       struct lpfc_sli4_hdw_queue *hdwq)
  22197{
  22198	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
  22199	struct fcp_cmd_rsp_buf *list_entry = NULL;
  22200	struct fcp_cmd_rsp_buf *tmp = NULL;
  22201	unsigned long iflags;
  22202
  22203	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
  22204
  22205	/* Free cmd_rsp buf pool */
  22206	list_for_each_entry_safe(list_entry, tmp,
  22207				 buf_list,
  22208				 list_node) {
  22209		dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
  22210			      list_entry->fcp_cmnd,
  22211			      list_entry->fcp_cmd_rsp_dma_handle);
  22212		list_del(&list_entry->list_node);
  22213		kfree(list_entry);
  22214	}
  22215
  22216	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
  22217}
  22218
  22219/**
  22220 * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
  22221 * @phba: phba object
  22222 * @job: job entry of the command to be posted.
  22223 *
   22224 * Fill in the common fields of the WQE for each type of command.
  22225 *
  22226 * Return codes:
  22227 *	None
  22228 **/
  22229void
  22230lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
  22231{
  22232	u8 cmnd;
  22233	u32 *pcmd;
  22234	u32 if_type = 0;
  22235	u32 fip, abort_tag;
  22236	struct lpfc_nodelist *ndlp = NULL;
  22237	union lpfc_wqe128 *wqe = &job->wqe;
  22238	u8 command_type = ELS_COMMAND_NON_FIP;
  22239
  22240	fip = phba->hba_flag & HBA_FIP_SUPPORT;
  22241	/* The fcp commands will set command type */
  22242	if (job->cmd_flag &  LPFC_IO_FCP)
  22243		command_type = FCP_COMMAND;
  22244	else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
  22245		command_type = ELS_COMMAND_FIP;
  22246	else
  22247		command_type = ELS_COMMAND_NON_FIP;
  22248
  22249	abort_tag = job->iotag;
  22250	cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);
  22251
  22252	switch (cmnd) {
  22253	case CMD_ELS_REQUEST64_WQE:
  22254		ndlp = job->ndlp;
  22255
  22256		if_type = bf_get(lpfc_sli_intf_if_type,
  22257				 &phba->sli4_hba.sli_intf);
  22258		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
  22259			pcmd = (u32 *)job->cmd_dmabuf->virt;
  22260			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
  22261				     *pcmd == ELS_CMD_SCR ||
  22262				     *pcmd == ELS_CMD_RDF ||
  22263				     *pcmd == ELS_CMD_EDC ||
  22264				     *pcmd == ELS_CMD_RSCN_XMT ||
  22265				     *pcmd == ELS_CMD_FDISC ||
  22266				     *pcmd == ELS_CMD_LOGO ||
  22267				     *pcmd == ELS_CMD_QFPA ||
  22268				     *pcmd == ELS_CMD_UVEM ||
  22269				     *pcmd == ELS_CMD_PLOGI)) {
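				/* These ELS commands are built with an
				 * explicit source ID and the VPI context tag
				 * (CT=1) instead of the node's RPI context.
				 */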
  22270				bf_set(els_req64_sp, &wqe->els_req, 1);
  22271				bf_set(els_req64_sid, &wqe->els_req,
  22272				       job->vport->fc_myDID);
  22273
  22274				if ((*pcmd == ELS_CMD_FLOGI) &&
  22275				    !(phba->fc_topology ==
  22276				      LPFC_TOPOLOGY_LOOP))
  22277					bf_set(els_req64_sid, &wqe->els_req, 0);
  22278
  22279				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
  22280				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
  22281				       phba->vpi_ids[job->vport->vpi]);
  22282			} else if (pcmd) {
  22283				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
  22284				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
  22285				       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
  22286			}
  22287		}
  22288
  22289		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
  22290		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
  22291
  22292		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
  22293		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
  22294		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
  22295		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
  22296		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
  22297		break;
  22298	case CMD_XMIT_ELS_RSP64_WQE:
  22299		ndlp = job->ndlp;
  22300
  22301		/* word4 */
  22302		wqe->xmit_els_rsp.word4 = 0;
  22303
  22304		if_type = bf_get(lpfc_sli_intf_if_type,
  22305				 &phba->sli4_hba.sli_intf);
  22306		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
  22307			if (job->vport->fc_flag & FC_PT2PT) {
  22308				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
  22309				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
  22310				       job->vport->fc_myDID);
  22311				if (job->vport->fc_myDID == Fabric_DID) {
  22312					bf_set(wqe_els_did,
  22313					       &wqe->xmit_els_rsp.wqe_dest, 0);
  22314				}
  22315			}
  22316		}
  22317
  22318		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
  22319		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
  22320		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
  22321		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
  22322		       LPFC_WQE_LENLOC_WORD3);
  22323		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
  22324
  22325		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
  22326			bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
  22327			bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
  22328			       job->vport->fc_myDID);
  22329			bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
  22330		}
  22331
  22332		if (phba->sli_rev == LPFC_SLI_REV4) {
  22333			bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
  22334			       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
  22335
  22336			if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
  22337				bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
  22338				       phba->vpi_ids[job->vport->vpi]);
  22339		}
  22340		command_type = OTHER_COMMAND;
  22341		break;
  22342	case CMD_GEN_REQUEST64_WQE:
  22343		/* Word 10 */
  22344		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
  22345		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
  22346		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
  22347		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
  22348		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
  22349		command_type = OTHER_COMMAND;
  22350		break;
  22351	case CMD_XMIT_SEQUENCE64_WQE:
  22352		if (phba->link_flag & LS_LOOPBACK_MODE)
  22353			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
  22354
  22355		wqe->xmit_sequence.rsvd3 = 0;
  22356		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
  22357		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
  22358		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
  22359		       LPFC_WQE_IOD_WRITE);
  22360		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
  22361		       LPFC_WQE_LENLOC_WORD12);
  22362		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
  22363		command_type = OTHER_COMMAND;
  22364		break;
  22365	case CMD_XMIT_BLS_RSP64_WQE:
  22366		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
  22367		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
  22368		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
  22369		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
  22370		       phba->vpi_ids[phba->pport->vpi]);
  22371		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
  22372		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
  22373		       LPFC_WQE_LENLOC_NONE);
   22374		/* Overwrite the pre-set command type with OTHER_COMMAND */
  22375		command_type = OTHER_COMMAND;
  22376		break;
  22377	case CMD_FCP_ICMND64_WQE:	/* task mgmt commands */
  22378	case CMD_ABORT_XRI_WQE:		/* abort iotag */
  22379	case CMD_SEND_FRAME:		/* mds loopback */
  22380		/* cases already formatted for sli4 wqe - no chgs necessary */
  22381		return;
  22382	default:
  22383		dump_stack();
  22384		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  22385				"6207 Invalid command 0x%x\n",
  22386				cmnd);
  22387		break;
  22388	}
  22389
  22390	wqe->generic.wqe_com.abort_tag = abort_tag;
  22391	bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
  22392	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
  22393	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
  22394}