cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

lpfc_hbadisc.c (217359B)


/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/lockdep.h>
#include <linux/utsname.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* AlpaArray for assignment of SCSI IDs for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
static void lpfc_check_inactive_vmid(struct lpfc_hba *phba);
static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba);

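/* Report whether an ndlp is worth registering with the upper-layer
 * transport: any discovered FC4 type, or a fabric node, qualifies.
 */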
static int
lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp)
{
	if (ndlp->nlp_fc4_type ||
	    ndlp->nlp_type & NLP_FABRIC)
		return 1;
	return 0;
}
/* The source of a terminate rport I/O is either a dev_loss_tmo
 * event or a call to fc_remove_host.  While the rport should be
 * valid during these downcalls, the transport can call twice
 * in a single event.  This routine provides some protection
 * as the NDLP isn't really free, just released to the pool.
 */
static int
lpfc_rport_invalid(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;

	if (!rport) {
		pr_err("**** %s: NULL rport, exit.\n", __func__);
		return -EINVAL;
	}

	rdata = rport->dd_data;
	if (!rdata) {
		pr_err("**** %s: NULL dd_data on rport x%px SID x%x\n",
		       __func__, rport, rport->scsi_target_id);
		return -EINVAL;
	}

	ndlp = rdata->pnode;
	if (!rdata->pnode) {
		pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
			__func__, rport, rport->scsi_target_id);
		return -EINVAL;
	}

	if (!ndlp->vport) {
		pr_err("**** %s: Null vport on ndlp x%px, DID x%x rport x%px "
		       "SID x%x\n", __func__, ndlp, ndlp->nlp_DID, rport,
		       rport->scsi_target_id);
		return -EINVAL;
	}
	return 0;
}

void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;

	if (lpfc_rport_invalid(rport))
		return;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	vport = ndlp->vport;
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport terminate: sid:x%x did:x%x flg:x%x",
			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID)
		lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}

/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba   *phba;
	struct lpfc_work_evt *evtp;
	unsigned long iflags;

	ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
	if (!ndlp)
		return;

	vport = ndlp->vport;
	phba  = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport x%px flg x%x "
			 "load_flag x%x refcnt %d state %d xpt x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
			 vport->load_flag, kref_read(&ndlp->kref),
			 ndlp->nlp_state, ndlp->fc4_xpt_flags);

	/* Don't schedule a worker thread event if the vport is going down.
	 * The teardown process cleans up the node via lpfc_drop_node.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
		ndlp->rport = NULL;

		ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
		/* clear the NLP_XPT_REGD if the node is not registered
		 * with nvme-fc
		 */
		if (ndlp->fc4_xpt_flags == NLP_XPT_REGD)
			ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;

		/* Remove the node reference from remote_port_add now.
		 * The driver will not call remote_port_delete.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6789 rport name %llx != node port name %llx",
				 rport->port_name,
				 wwn_to_u64(ndlp->nlp_portname.u.wwn));

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6790 rport name %llx dev_loss_evt pending\n",
				 rport->port_name);
		return;
	}

	spin_lock_irqsave(&ndlp->lock, iflags);
	ndlp->nlp_flag |= NLP_IN_DEV_LOSS;

	/* If there is a PLOGI in progress, and we are in a
	 * NLP_NPR_2B_DISC state, don't turn off the flag.
	 */
	if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
		ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;

	/*
	 * The backend does not expect any more calls associated with this
	 * rport. Remove the association between rport and ndlp.
	 */
	ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
	((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
	ndlp->rport = NULL;
	spin_unlock_irqrestore(&ndlp->lock, iflags);

	if (phba->worker_thread) {
		/* We need to hold the node by incrementing the reference
		 * count until this queued work is done
		 */
		evtp->evt_arg1 = lpfc_nlp_get(ndlp);

		spin_lock_irqsave(&phba->hbalock, iflags);
		if (evtp->evt_arg1) {
			evtp->evt = LPFC_EVT_DEV_LOSS;
			list_add_tail(&evtp->evt_listp, &phba->work_list);
			lpfc_worker_wake_up(phba);
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	} else {
		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
				 "3188 worker thread is stopped %s x%06x, "
				 " rport x%px flg x%x load_flag x%x refcnt "
				 "%d\n", __func__, ndlp->nlp_DID,
				 ndlp->rport, ndlp->nlp_flag,
				 vport->load_flag, kref_read(&ndlp->kref));
		if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
			spin_lock_irqsave(&ndlp->lock, iflags);
			/* Node is in dev loss.  No further transaction. */
			ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
			spin_unlock_irqrestore(&ndlp->lock, iflags);
			lpfc_disc_state_machine(vport, ndlp, NULL,
						NLP_EVT_DEVICE_RM);
		}
	}

	return;
}

/**
 * lpfc_check_inactive_vmid_one - VMID inactivity checker for a vport
 * @vport: Pointer to vport context object.
 *
 * This function checks for idle VMID entries related to a particular vport.
 * Any entries found unused/idle are freed accordingly.
 **/
static void lpfc_check_inactive_vmid_one(struct lpfc_vport *vport)
{
	u16 keep;
	u32 difftime = 0, r, bucket;
	u64 *lta;
	int cpu;
	struct lpfc_vmid *vmp;

	write_lock(&vport->vmid_lock);

	if (!vport->cur_vmid_cnt)
		goto out;

	/* iterate through the table */
	hash_for_each(vport->hash_table, bucket, vmp, hnode) {
		keep = 0;
		if (vmp->flag & LPFC_VMID_REGISTERED) {
			/* check if the particular VMID is in use */
			/* for all available per cpu variable */
			for_each_possible_cpu(cpu) {
				/* if last access time is less than timeout */
				lta = per_cpu_ptr(vmp->last_io_time, cpu);
				if (!lta)
					continue;
				difftime = (jiffies) - (*lta);
				if ((vport->vmid_inactivity_timeout *
				     JIFFIES_PER_HR) > difftime) {
					keep = 1;
					break;
				}
			}

			/* if none of the cpus have been used by the vm, */
			/*  remove the entry if already registered */
			if (!keep) {
				/* mark the entry for deregistration */
				vmp->flag = LPFC_VMID_DE_REGISTER;
				write_unlock(&vport->vmid_lock);
				if (vport->vmid_priority_tagging)
					r = lpfc_vmid_uvem(vport, vmp, false);
				else
					r = lpfc_vmid_cmd(vport,
							  SLI_CTAS_DAPP_IDENT,
							  vmp);

				/* decrement number of active vms and mark */
				/* entry in slot as free */
				write_lock(&vport->vmid_lock);
				if (!r) {
					struct lpfc_vmid *ht = vmp;

					vport->cur_vmid_cnt--;
					ht->flag = LPFC_VMID_SLOT_FREE;
					free_percpu(ht->last_io_time);
					ht->last_io_time = NULL;
					hash_del(&ht->hnode);
				}
			}
		}
	}
 out:
	write_unlock(&vport->vmid_lock);
}

/**
 * lpfc_check_inactive_vmid - VMID inactivity checker
 * @phba: Pointer to hba context object.
 *
 * This function is called from the worker thread to determine if an entry in
 * the VMID table can be released since there was no I/O activity seen from
 * that particular VM for the specified time. When this happens, the entry in
 * the table is released and the resources on the switch are cleared as well.
 **/
static void lpfc_check_inactive_vmid(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports)
		return;

	for (i = 0; i <= phba->max_vports; i++) {
		if ((!vports[i]) && (i == 0))
			vport = phba->pport;
		else
			vport = vports[i];
		if (!vport)
			break;

		lpfc_check_inactive_vmid_one(vport);
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_check_nlp_post_devloss - Check to restore ndlp refcnt after devloss
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to remote node object.
 *
 * If NLP_IN_RECOV_POST_DEV_LOSS flag was set due to outstanding recovery of
 * node during dev_loss_tmo processing, then this function restores the nlp_put
 * kref decrement from lpfc_dev_loss_tmo_handler.
 **/
void
lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	spin_lock_irqsave(&ndlp->lock, iflags);
	if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) {
		ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS;
		spin_unlock_irqrestore(&ndlp->lock, iflags);
		lpfc_nlp_get(ndlp);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
				 "8438 Devloss timeout reversed on DID x%x "
				 "refcnt %d ndlp %p flag x%x "
				 "port_state = x%x\n",
				 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
				 ndlp->nlp_flag, vport->port_state);
		spin_lock_irqsave(&ndlp->lock, iflags);
	}
	spin_unlock_irqrestore(&ndlp->lock, iflags);
}

/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when the devloss timeout
 * timer expires. For an SLI4 host, this routine returns 1 when at least one
 * remote node, including this @ndlp, is still in use of the FCF; otherwise,
 * it returns 0 when no remote node is still using the FCF when the devloss
 * timeout fired for this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport;
	struct lpfc_hba   *phba;
	uint8_t *name;
	int warn_on = 0;
	int fcf_inuse = 0;
	bool recovering = false;
	struct fc_vport *fc_vport = NULL;
	unsigned long iflags;

	vport = ndlp->vport;
	name = (uint8_t *)&ndlp->nlp_portname;
	phba = vport->phba;

	spin_lock_irqsave(&ndlp->lock, iflags);
	ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
	spin_unlock_irqrestore(&ndlp->lock, iflags);

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport devlosstmo:did:x%x type:x%x id:x%x",
			      ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n",
			 __func__, ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->fc4_xpt_flags, kref_read(&ndlp->kref));

	/* If the driver is recovering the rport, ignore devloss. */
	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return fcf_inuse;
	}

	/* Fabric nodes are done. */
	if (ndlp->nlp_type & NLP_FABRIC) {
		spin_lock_irqsave(&ndlp->lock, iflags);
		/* In massive vport configuration settings, it's possible
		 * dev_loss_tmo fired during node recovery.  So, check whether
		 * fabric nodes have discovery states outstanding.
		 */
		switch (ndlp->nlp_DID) {
		case Fabric_DID:
			fc_vport = vport->fc_vport;
			if (fc_vport &&
			    fc_vport->vport_state == FC_VPORT_INITIALIZING)
				recovering = true;
			break;
		case Fabric_Cntl_DID:
			if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
				recovering = true;
			break;
		case FDMI_DID:
			fallthrough;
		case NameServer_DID:
			if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
			    ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
				recovering = true;
			break;
		}
		spin_unlock_irqrestore(&ndlp->lock, iflags);

		/* Mark an NLP_IN_RECOV_POST_DEV_LOSS flag to know if reversing
		 * the following lpfc_nlp_put is necessary after fabric node is
		 * recovered.
		 */
		if (recovering) {
			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_DISCOVERY | LOG_NODE,
					 "8436 Devloss timeout marked on "
					 "DID x%x refcnt %d ndlp %p "
					 "flag x%x port_state = x%x\n",
					 ndlp->nlp_DID, kref_read(&ndlp->kref),
					 ndlp, ndlp->nlp_flag,
					 vport->port_state);
			spin_lock_irqsave(&ndlp->lock, iflags);
			ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS;
			spin_unlock_irqrestore(&ndlp->lock, iflags);
		} else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
			/* Fabric node fully recovered before this dev_loss_tmo
			 * queue work is processed.  Thus, ignore the
			 * dev_loss_tmo event.
			 */
			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_DISCOVERY | LOG_NODE,
					 "8437 Devloss timeout ignored on "
					 "DID x%x refcnt %d ndlp %p "
					 "flag x%x port_state = x%x\n",
					 ndlp->nlp_DID, kref_read(&ndlp->kref),
					 ndlp, ndlp->nlp_flag,
					 vport->port_state);
			return fcf_inuse;
		}

		lpfc_nlp_put(ndlp);
		return fcf_inuse;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x refcnt %d\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi,
				 kref_read(&ndlp->kref));
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	/* If we are devloss, but we are in the process of rediscovering the
	 * ndlp, don't issue a NLP_EVT_DEVICE_RM event.
	 */
	if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
	    ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
		return fcf_inuse;
	}

	if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}

static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports)
		return;

	for (i = 0; i <= phba->max_vports; i++) {
		if ((!vports[i]) && (i == 0))
			vport = phba->pport;
		else
			vport = vports[i];
		if (!vport)
			break;

		if (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA) {
			if (!lpfc_issue_els_qfpa(vport))
				vport->vmid_flag &= ~LPFC_VMID_ISSUE_QFPA;
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking the devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for an SLI4 host. For the devloss
 * timeout of the last remote node which had been in use of the FCF, when
 * this routine is invoked, it is guaranteed that none of the remote nodes
 * are still using the FCF. When the devloss timeout fires for the last
 * remote node using the FCF, if the FIP engine is neither in the FCF table
 * scan process nor the roundrobin failover process, the in-use FCF is
 * unregistered. If the FIP engine is in the FCF discovery process, the
 * devloss timeout state is set for either the FCF table scan process or
 * the roundrobin failover process to unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If devloss timeout happened to a remote node when the FCF was
	 * no longer in use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates the data
 * structure required for posting an event. It also keeps track of
 * the number of pending events and prevents an event storm when
 * there are too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are a lot of fast events, do not exhaust memory
	 * due to this
	 */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}

/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt:  Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}

/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread when the interrupt
 * context needs to post an event. This function posts the event
 * to the fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_vendor_event(shost,
			fc_get_event_number(),
			evt_data_size,
			evt_data,
			LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}

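/* Drain phba->work_list and dispatch each queued event to its handler.
 * The hbalock is dropped while an event is processed and retaken to
 * fetch the next entry.
 */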
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt  *evtp = NULL;
	struct lpfc_nodelist  *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;
	bool hba_pci_err;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			if (!hba_pci_err) {
				lpfc_els_retry_delay_handler(ndlp);
				free_evt = 0; /* evt is part of ndlp */
			}
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_RECOVER_PORT:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			if (!hba_pci_err) {
				lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
				free_evt = 0;
			}
			/* decrement the node reference count held for
			 * this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				        ? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);
}

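/* Main worker dispatch: snapshot and clear the host-attention bits,
 * then service mailbox, link-attention, VMID, SLI4 and per-vport timer
 * events before handling slow-path ELS ring work.
 */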
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;
	bool hba_pci_err;

	hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	if (hba_pci_err)
		ha_copy = 0;

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT) {
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

		if (phba->fw_dump_cmpl) {
			complete(phba->fw_dump_cmpl);
			phba->fw_dump_cmpl = NULL;
		}
	}

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Handle VMID Events */
	if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) {
		if (phba->pport->work_port_events &
		    WORKER_CHECK_VMID_ISSUE_QFPA) {
			lpfc_check_vmid_qfpa_issue(phba);
			phba->pport->work_port_events &=
				~WORKER_CHECK_VMID_ISSUE_QFPA;
		}
		if (phba->pport->work_port_events &
		    WORKER_CHECK_INACTIVE_VMID) {
			lpfc_check_inactive_vmid(phba);
			phba->pport->work_port_events &=
			    ~WORKER_CHECK_INACTIVE_VMID;
		}
	}

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & HBA_RRQ_ACTIVE)
			lpfc_handle_rrq_active(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (hba_pci_err)
				continue;
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = lpfc_phba_elsring(phba);
	status = (ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if (pring && (status & HA_RXMASK ||
		      pring->flag & LPFC_DEFERRED_RING_EVENT ||
		      phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Preserve legacy behavior. */
			if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
				set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			/* Driver could have abort request completed in queue
			 * when link goes down.  Allow for this transition.
			 */
			if (phba->link_state >= LPFC_LINK_DOWN ||
			    phba->link_flag & LS_MDS_LOOPBACK) {
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(status &
								HA_RXMASK));
			}
		}
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok:     cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}

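/* Worker kthread entry point: sleep on work_waitq until LPFC_DATA_READY
 * is set (or the thread is asked to stop), then run lpfc_work_done().
 */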
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, MIN_NICE);
	current->flags |= PF_NOFREEZE;
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt  *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1  = arg1;
	evtp->evt_arg2  = arg2;
	evtp->evt       = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}

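/* Walk the vport's node list after a link event, unregistering RPIs
 * where required and driving each node through DEVICE_RM (remove) or
 * DEVICE_RECOVERY, as selected by @remove.
 */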
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
			/* It's possible the FLOGI to the fabric node never
			 * successfully completed and never registered with the
			 * transport.  In this case there is no way to clean up
			 * the node.
			 */
			if (ndlp->nlp_DID == Fabric_DID) {
				if (ndlp->nlp_prev_state ==
				    NLP_STE_UNUSED_NODE &&
				    !ndlp->fc4_xpt_flags)
					lpfc_nlp_put(ndlp);
			}
			continue;
		}

		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     ((ndlp->nlp_DID == NameServer_DID) ||
		      (ndlp->nlp_DID == FDMI_DID) ||
		      (ndlp->nlp_DID == Fabric_Cntl_DID))))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;

		/* Notify transport of connectivity loss to trigger cleanup. */
		if (phba->nvmet_support &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
			lpfc_nvmet_invalidate_host(phba, ndlp);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					remove
					? NLP_EVT_DEVICE_RM
					: NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}

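/* Quiesce a vport on link failure: flush received buffers, RSCN state
 * and outstanding ELS commands, clean up RPIs and stop the discovery
 * timer.
 */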
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(vport);
}

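/* Per-vport link-down handling: post the netlink event, run the link
 * failure cleanup and cancel any delayed Nport discovery.
 */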
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down:       state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_DELAYED;
	spin_unlock_irq(shost->host_lock);
	del_timer_sync(&vport->delayed_disc_tmo);

	if (phba->sli_rev == LPFC_SLI_REV4 &&
	    vport->port_type == LPFC_PHYSICAL_PORT &&
	    phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
		/* Assume success on link up */
		phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
	}
}

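/* HBA-wide link-down handling: block SCSI I/O, reset FCF and trunk
 * state, issue link-down processing to every vport and clean up any
 * SLI3 default RPIs and pt2pt state via mailbox commands.
 */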
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t          *mb;
	int i;
	int offline;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);
	offline = pci_channel_offline(phba->pcidev);

	phba->defer_flogi_acc_flag = false;

	/* Clear external loopback plug detected flag */
	phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		if (phba->sli4_hba.conf_trunk) {
			phba->trunk_link.link0.state = 0;
			phba->trunk_link.link1.state = 0;
			phba->trunk_link.link2.state = 0;
			phba->trunk_link.link3.state = 0;
			phba->sli4_hba.link_state.logical_speed =
						LPFC_LINK_SPEED_UNKNOWN;
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);

			vports[i]->fc_myDID = 0;

			if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
			    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
				if (phba->nvmet_support)
					lpfc_nvmet_update_targetport(phba);
				else
					lpfc_nvme_update_localport(vports[i]);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Clean up any SLI3 firmware default rpi's */
	if (phba->sli_rev > LPFC_SLI_REV3 || offline)
		goto skip_unreg_did;

	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

 skip_unreg_did:
	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		phba->pport->rcv_flogi_cnt = 0;
		spin_unlock_irq(shost->host_lock);
	}
	return 0;
}

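/* On link up, reset per-node FC4 types and unregister stale RPIs so
 * nodes re-run discovery from a clean state.
 */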
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);

		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup it's safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}

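/* Per-vport link-up handling: post the netlink event, reset discovery
 * flags and clean up the vport's nodes.
 */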
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up:         top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
		(vport != phba->pport))
		return;

	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MEMENTO | FC_RSCN_MODE |
			    FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);
	lpfc_setup_fdmi_mask(vport);

	lpfc_linkup_cleanup_nodes(vport);
}

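/* HBA-wide link-up handling: unblock fabric iocbs, run per-vport
 * link-up processing and reinitialize the FLOGI-related HBA flags.
 */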
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(phba->pport);

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Clear the pport flogi counter in case the link down was
	 * absorbed without an ACQE. No lock here - in worker thread
	 * and discovery is synchronized.
	 */
	spin_lock_irq(shost->host_lock);
	phba->pport->rcv_flogi_cnt = 0;
	spin_unlock_irq(shost->host_lock);

	/* reinitialize initial HBA flag */
	phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_RHBA_CMPL);
	phba->defer_flogi_acc_flag = false;

	return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer. SLI3 only.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli   *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}

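/* Completion handler for the CONFIG_LINK mailbox command. Depending on
 * topology and flags this either waits for FAN, issues a READ_SPARAM
 * for bb-credit recovery before the FLOGI, or starts discovery with an
 * initial FLOGI.
 */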
void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	LPFC_MBOXQ_t *sparam_mb;
	u16 status = pmb->u.mb.mbxStatus;
	int rc;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (status)
		goto out;

	/* don't perform discovery for SLI4 loopback diagnostic test */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->link_flag & LS_LOOPBACK_MODE))
		return;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
			/* Need to wait for FAN - use discovery timer
			 * for timeout.  port_state is identically
			 * LPFC_LOCAL_CFG_LINK while waiting for FAN
			 */
			lpfc_set_disctmo(vport);
			return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl.
	 */
	if (vport->port_state != LPFC_FLOGI) {
		/* Issue MBX_READ_SPARAM to update CSPs before FLOGI if
		 * bb-credit recovery is in place.
		 */
		if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
		    !(phba->link_flag & LS_LOOPBACK_MODE)) {
			sparam_mb = mempool_alloc(phba->mbox_mem_pool,
						  GFP_KERNEL);
			if (!sparam_mb)
				goto sparam_out;

			rc = lpfc_read_sparam(phba, sparam_mb, 0);
			if (rc) {
				mempool_free(sparam_mb, phba->mbox_mem_pool);
				goto sparam_out;
			}
			sparam_mb->vport = vport;
			sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
			rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				lpfc_mbox_rsrc_cleanup(phba, sparam_mb,
						       MBOX_THD_UNLOCKED);
				goto sparam_out;
			}

			phba->hba_flag |= HBA_DEFER_FLOGI;
		} else {
			lpfc_initial_flogi(vport);
		}
	} else {
		if (vport->fc_flag & FC_PT2PT)
			lpfc_disc_start(vport);
	}
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n",
			 status, vport->port_state);

sparam_out:
	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}

   1553/**
   1554 * lpfc_sli4_clear_fcf_rr_bmask
   1555 * @phba: pointer to the struct lpfc_hba for this port.
   1556 * This function resets the round robin bit mask and clears the
   1557 * fcf priority list. The list deletions are done while holding the
   1558 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
   1559 * from the lpfc_fcf_pri record.
   1560 **/
   1561void
   1562lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
   1563{
   1564	struct lpfc_fcf_pri *fcf_pri;
   1565	struct lpfc_fcf_pri *next_fcf_pri;
   1566	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
   1567	spin_lock_irq(&phba->hbalock);
   1568	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
   1569				&phba->fcf.fcf_pri_list, list) {
   1570		list_del_init(&fcf_pri->list);
   1571		fcf_pri->fcf_rec.flag = 0;
   1572	}
   1573	spin_unlock_irq(&phba->hbalock);
   1574}
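/*
 * A standalone sketch of the delete-while-iterating pattern the loop above
 * relies on: list_for_each_entry_safe() caches the successor before the
 * current node is unlinked, since list_del_init() re-initializes the node's
 * own pointers. The singly linked demo below (demo_* names are illustrative
 * only, not driver API) saves the next pointer for the same reason.
 */
#include <stdlib.h>

struct demo_node {
	struct demo_node *next;
};

static void demo_clear_list(struct demo_node **head)
{
	struct demo_node *n = *head, *next;

	while (n) {
		next = n->next;		/* cache successor before unlinking */
		free(n);
		n = next;
	}
	*head = NULL;
}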
   1575static void
   1576lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
   1577{
   1578	struct lpfc_vport *vport = mboxq->vport;
   1579
   1580	if (mboxq->u.mb.mbxStatus) {
   1581		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   1582				 "2017 REG_FCFI mbxStatus error x%x "
   1583				 "HBA state x%x\n", mboxq->u.mb.mbxStatus,
   1584				 vport->port_state);
   1585		goto fail_out;
   1586	}
   1587
   1588	/* Start FCoE discovery by sending a FLOGI. */
   1589	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
   1590	/* Set the FCFI registered flag */
   1591	spin_lock_irq(&phba->hbalock);
   1592	phba->fcf.fcf_flag |= FCF_REGISTERED;
   1593	spin_unlock_irq(&phba->hbalock);
   1594
   1595	/* If there is a pending FCoE event, restart FCF table scan. */
   1596	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
   1597		lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
   1598		goto fail_out;
   1599
   1600	/* Mark successful completion of FCF table scan */
   1601	spin_lock_irq(&phba->hbalock);
   1602	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
   1603	phba->hba_flag &= ~FCF_TS_INPROG;
   1604	if (vport->port_state != LPFC_FLOGI) {
   1605		phba->hba_flag |= FCF_RR_INPROG;
   1606		spin_unlock_irq(&phba->hbalock);
   1607		lpfc_issue_init_vfi(vport);
   1608		goto out;
   1609	}
   1610	spin_unlock_irq(&phba->hbalock);
   1611	goto out;
   1612
   1613fail_out:
   1614	spin_lock_irq(&phba->hbalock);
   1615	phba->hba_flag &= ~FCF_RR_INPROG;
   1616	spin_unlock_irq(&phba->hbalock);
   1617out:
   1618	mempool_free(mboxq, phba->mbox_mem_pool);
   1619}
   1620
   1621/**
   1622 * lpfc_fab_name_match - Check if the fcf fabric name match.
   1623 * @fab_name: pointer to fabric name.
   1624 * @new_fcf_record: pointer to fcf record.
   1625 *
   1626 * This routine compares the fcf record's fabric name with the provided
   1627 * fabric name. If the fabric names are identical this function
   1628 * returns 1, else it returns 0.
   1629 **/
   1630static uint32_t
   1631lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
   1632{
   1633	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
   1634		return 0;
   1635	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
   1636		return 0;
   1637	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
   1638		return 0;
   1639	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
   1640		return 0;
   1641	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
   1642		return 0;
   1643	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
   1644		return 0;
   1645	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
   1646		return 0;
   1647	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
   1648		return 0;
   1649	return 1;
   1650}
   1651
   1652/**
   1653 * lpfc_sw_name_match - Check if the fcf switch name match.
   1654 * @sw_name: pointer to switch name.
   1655 * @new_fcf_record: pointer to fcf record.
   1656 *
   1657 * This routine compares the fcf record's switch name with the provided
   1658 * switch name. If the switch names are identical this function
   1659 * returns 1, else it returns 0.
   1660 **/
   1661static uint32_t
   1662lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
   1663{
   1664	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
   1665		return 0;
   1666	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
   1667		return 0;
   1668	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
   1669		return 0;
   1670	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
   1671		return 0;
   1672	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
   1673		return 0;
   1674	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
   1675		return 0;
   1676	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
   1677		return 0;
   1678	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
   1679		return 0;
   1680	return 1;
   1681}
   1682
   1683/**
   1684 * lpfc_mac_addr_match - Check if the fcf mac address match.
   1685 * @mac_addr: pointer to mac address.
   1686 * @new_fcf_record: pointer to fcf record.
   1687 *
   1688 * This routine compares the fcf record's mac address with the HBA's
   1689 * FCF mac address. If the mac addresses are identical this function
   1690 * returns 1, else it returns 0.
   1691 **/
   1692static uint32_t
   1693lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
   1694{
   1695	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
   1696		return 0;
   1697	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
   1698		return 0;
   1699	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
   1700		return 0;
   1701	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
   1702		return 0;
   1703	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
   1704		return 0;
   1705	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
   1706		return 0;
   1707	return 1;
   1708}
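/*
 * The three *_match helpers above compare their fields one byte at a time
 * because each byte of the mailbox record must be extracted through a
 * bf_get() bitfield accessor. When both operands are already contiguous
 * byte arrays, the same test reduces to memcmp(), as in this standalone
 * sketch (demo_name_match() is illustrative only, not driver API):
 */
#include <stdint.h>
#include <string.h>

static uint32_t
demo_name_match(const uint8_t *name, const uint8_t *rec_name, size_t len)
{
	/* 1 when all bytes are identical, 0 otherwise */
	return memcmp(name, rec_name, len) == 0;
}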
   1709
   1710static bool
   1711lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
   1712{
   1713	return (curr_vlan_id == new_vlan_id);
   1714}
   1715
   1716/**
   1717 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
   1718 * @phba: pointer to lpfc hba data structure.
   1719 * @fcf_index: Index for the lpfc_fcf_record.
   1720 * @new_fcf_record: pointer to hba fcf record.
   1721 *
   1722 * This routine updates the driver FCF priority record from the new HBA FCF
   1723 * record. The hbalock is asserted held in the code path calling this
   1724 * routine.
   1725 **/
   1726static void
   1727__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
   1728				 struct fcf_record *new_fcf_record
   1729				 )
   1730{
   1731	struct lpfc_fcf_pri *fcf_pri;
   1732
   1733	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
   1734	fcf_pri->fcf_rec.fcf_index = fcf_index;
   1735	/* FCF record priority */
   1736	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
   1737
   1738}
   1739
   1740/**
   1741 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
   1742 * @fcf_rec: pointer to driver fcf record.
   1743 * @new_fcf_record: pointer to fcf record.
   1744 *
   1745 * This routine copies the FCF information from the FCF
   1746 * record to lpfc_hba data structure.
   1747 **/
   1748static void
   1749lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
   1750		     struct fcf_record *new_fcf_record)
   1751{
   1752	/* Fabric name */
   1753	fcf_rec->fabric_name[0] =
   1754		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
   1755	fcf_rec->fabric_name[1] =
   1756		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
   1757	fcf_rec->fabric_name[2] =
   1758		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
   1759	fcf_rec->fabric_name[3] =
   1760		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
   1761	fcf_rec->fabric_name[4] =
   1762		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
   1763	fcf_rec->fabric_name[5] =
   1764		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
   1765	fcf_rec->fabric_name[6] =
   1766		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
   1767	fcf_rec->fabric_name[7] =
   1768		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
   1769	/* Mac address */
   1770	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
   1771	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
   1772	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
   1773	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
   1774	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
   1775	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
   1776	/* FCF record index */
   1777	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
   1778	/* FCF record priority */
   1779	fcf_rec->priority = new_fcf_record->fip_priority;
   1780	/* Switch name */
   1781	fcf_rec->switch_name[0] =
   1782		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
   1783	fcf_rec->switch_name[1] =
   1784		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
   1785	fcf_rec->switch_name[2] =
   1786		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
   1787	fcf_rec->switch_name[3] =
   1788		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
   1789	fcf_rec->switch_name[4] =
   1790		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
   1791	fcf_rec->switch_name[5] =
   1792		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
   1793	fcf_rec->switch_name[6] =
   1794		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
   1795	fcf_rec->switch_name[7] =
   1796		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
   1797}
   1798
   1799/**
   1800 * __lpfc_update_fcf_record - Update driver fcf record
   1801 * @phba: pointer to lpfc hba data structure.
   1802 * @fcf_rec: pointer to driver fcf record.
   1803 * @new_fcf_record: pointer to hba fcf record.
   1804 * @addr_mode: address mode to be set to the driver fcf record.
   1805 * @vlan_id: vlan tag to be set to the driver fcf record.
   1806 * @flag: flag bits to be set to the driver fcf record.
   1807 *
   1808 * This routine updates the driver FCF record from the new HBA FCF record
   1809 * together with the address mode, vlan_id, and other information. This
   1810 * routine is called with the hbalock held.
   1811 **/
   1812static void
   1813__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
   1814		       struct fcf_record *new_fcf_record, uint32_t addr_mode,
   1815		       uint16_t vlan_id, uint32_t flag)
   1816{
   1817	lockdep_assert_held(&phba->hbalock);
   1818
   1819	/* Copy the fields from the HBA's FCF record */
   1820	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
   1821	/* Update other fields of driver FCF record */
   1822	fcf_rec->addr_mode = addr_mode;
   1823	fcf_rec->vlan_id = vlan_id;
   1824	fcf_rec->flag |= (flag | RECORD_VALID);
   1825	__lpfc_update_fcf_record_pri(phba,
   1826		bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
   1827				 new_fcf_record);
   1828}
   1829
   1830/**
   1831 * lpfc_register_fcf - Register the FCF with hba.
   1832 * @phba: pointer to lpfc hba data structure.
   1833 *
   1834 * This routine issues a register fcfi mailbox command to register
   1835 * the fcf with HBA.
   1836 **/
   1837static void
   1838lpfc_register_fcf(struct lpfc_hba *phba)
   1839{
   1840	LPFC_MBOXQ_t *fcf_mbxq;
   1841	int rc;
   1842
   1843	spin_lock_irq(&phba->hbalock);
   1844	/* If the FCF is not available do nothing. */
   1845	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
   1846		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
   1847		spin_unlock_irq(&phba->hbalock);
   1848		return;
   1849	}
   1850
   1851	/* The FCF is already registered, start discovery */
   1852	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
   1853		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
   1854		phba->hba_flag &= ~FCF_TS_INPROG;
   1855		if (phba->pport->port_state != LPFC_FLOGI &&
   1856		    phba->pport->fc_flag & FC_FABRIC) {
   1857			phba->hba_flag |= FCF_RR_INPROG;
   1858			spin_unlock_irq(&phba->hbalock);
   1859			lpfc_initial_flogi(phba->pport);
   1860			return;
   1861		}
   1862		spin_unlock_irq(&phba->hbalock);
   1863		return;
   1864	}
   1865	spin_unlock_irq(&phba->hbalock);
   1866
   1867	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   1868	if (!fcf_mbxq) {
   1869		spin_lock_irq(&phba->hbalock);
   1870		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
   1871		spin_unlock_irq(&phba->hbalock);
   1872		return;
   1873	}
   1874
   1875	lpfc_reg_fcfi(phba, fcf_mbxq);
   1876	fcf_mbxq->vport = phba->pport;
   1877	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
   1878	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
   1879	if (rc == MBX_NOT_FINISHED) {
   1880		spin_lock_irq(&phba->hbalock);
   1881		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
   1882		spin_unlock_irq(&phba->hbalock);
   1883		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
   1884	}
   1885
   1886	return;
   1887}
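/*
 * lpfc_register_fcf() shows the driver's usual non-blocking mailbox shape:
 * allocate from a pool, attach a completion handler, submit with MBX_NOWAIT,
 * and on MBX_NOT_FINISHED roll the in-progress flags back and free the
 * command. A minimal userspace analog of that ownership rule follows; the
 * demo_* names are illustrative only and demo_submit() merely stands in for
 * lpfc_sli_issue_mbox():
 */
#include <stdlib.h>

struct demo_cmd {
	void (*done)(struct demo_cmd *cmd);	/* completion callback */
};

static void demo_done(struct demo_cmd *cmd)
{
	free(cmd);		/* on success, the completion path owns cmd */
}

static int demo_submit(struct demo_cmd *cmd)
{
	(void)cmd;
	return -1;		/* pretend the command could not be queued */
}

static void demo_issue(void)
{
	struct demo_cmd *cmd = malloc(sizeof(*cmd));

	if (!cmd)
		return;		/* allocation failed: clear state, bail out */
	cmd->done = demo_done;
	if (demo_submit(cmd) != 0)
		free(cmd);	/* submit failed: the caller still owns cmd */
}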
   1888
   1889/**
   1890 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
   1891 * @phba: pointer to lpfc hba data structure.
   1892 * @new_fcf_record: pointer to fcf record.
   1893 * @boot_flag: Indicates if this record used by boot bios.
   1894 * @addr_mode: The address mode to be used by this FCF
   1895 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
   1896 *
   1897 * This routine compares the fcf record with the connect list obtained
   1898 * from the config region to decide if this FCF can be used for SAN
   1899 * discovery. It returns 1 if this record can be used for SAN discovery,
   1900 * else it returns zero. If this FCF record can be used for SAN discovery,
   1901 * boot_flag will indicate if this FCF is used by the boot bios and
   1902 * addr_mode will indicate the addressing mode to be used for this FCF
   1903 * when the function returns. If the FCF record needs to be used with a
   1904 * particular vlan id, the vlan is set in vlan_id on return. If no VLAN
   1905 * tagging is needed, vlan_id is set to LPFC_FCOE_NULL_VID.
   1906 **/
   1907static int
   1908lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
   1909			struct fcf_record *new_fcf_record,
   1910			uint32_t *boot_flag, uint32_t *addr_mode,
   1911			uint16_t *vlan_id)
   1912{
   1913	struct lpfc_fcf_conn_entry *conn_entry;
   1914	int i, j, fcf_vlan_id = 0;
   1915
   1916	/* Find the lowest VLAN id in the FCF record */
   1917	for (i = 0; i < 512; i++) {
   1918		if (new_fcf_record->vlan_bitmap[i]) {
   1919			fcf_vlan_id = i * 8;
   1920			j = 0;
   1921			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
   1922				j++;
   1923				fcf_vlan_id++;
   1924			}
   1925			break;
   1926		}
   1927	}
   1928
   1929	/* FCF not valid/available or solicitation in progress */
   1930	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
   1931	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
   1932	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
   1933		return 0;
   1934
   1935	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
   1936		*boot_flag = 0;
   1937		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
   1938				new_fcf_record);
   1939		if (phba->valid_vlan)
   1940			*vlan_id = phba->vlan_id;
   1941		else
   1942			*vlan_id = LPFC_FCOE_NULL_VID;
   1943		return 1;
   1944	}
   1945
   1946	/*
   1947	 * If there are no FCF connection table entries, the driver connects
   1948	 * to all FCFs.
   1949	 */
   1950	if (list_empty(&phba->fcf_conn_rec_list)) {
   1951		*boot_flag = 0;
   1952		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
   1953			new_fcf_record);
   1954
   1955		/*
   1956		 * When there are no FCF connect entries, use driver's default
   1957		 * addressing mode - FPMA.
   1958		 */
   1959		if (*addr_mode & LPFC_FCF_FPMA)
   1960			*addr_mode = LPFC_FCF_FPMA;
   1961
   1962		/* If the FCF record reports a vlan id, use that vlan id */
   1963		if (fcf_vlan_id)
   1964			*vlan_id = fcf_vlan_id;
   1965		else
   1966			*vlan_id = LPFC_FCOE_NULL_VID;
   1967		return 1;
   1968	}
   1969
   1970	list_for_each_entry(conn_entry,
   1971			    &phba->fcf_conn_rec_list, list) {
   1972		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
   1973			continue;
   1974
   1975		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
   1976			!lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
   1977					     new_fcf_record))
   1978			continue;
   1979		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
   1980			!lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
   1981					    new_fcf_record))
   1982			continue;
   1983		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
   1984			/*
   1985			 * If the vlan bit map does not have the bit set for the
   1986			 * vlan id to be used, then it is not a match.
   1987			 */
   1988			if (!(new_fcf_record->vlan_bitmap
   1989				[conn_entry->conn_rec.vlan_tag / 8] &
   1990				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
   1991				continue;
   1992		}
   1993
   1994		/*
   1995		 * If connection record does not support any addressing mode,
   1996		 * skip the FCF record.
   1997		 */
   1998		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
   1999			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
   2000			continue;
   2001
   2002		/*
   2003		 * Check if the connection record specifies a required
   2004		 * addressing mode.
   2005		 */
   2006		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
   2007			!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
   2008
   2009			/*
   2010			 * If SPMA is required but the FCF does not support it, continue.
   2011			 */
   2012			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
   2013				!(bf_get(lpfc_fcf_record_mac_addr_prov,
   2014					new_fcf_record) & LPFC_FCF_SPMA))
   2015				continue;
   2016
   2017			/*
   2018			 * If FPMA is required but the FCF does not support it, continue.
   2019			 */
   2020			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
   2021				!(bf_get(lpfc_fcf_record_mac_addr_prov,
   2022				new_fcf_record) & LPFC_FCF_FPMA))
   2023				continue;
   2024		}
   2025
   2026		/*
   2027		 * This fcf record matches filtering criteria.
   2028		 */
   2029		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
   2030			*boot_flag = 1;
   2031		else
   2032			*boot_flag = 0;
   2033
   2034		/*
   2035		 * If user did not specify any addressing mode, or if the
   2036		 * preferred addressing mode specified by user is not supported
   2037		 * by FCF, allow fabric to pick the addressing mode.
   2038		 */
   2039		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
   2040				new_fcf_record);
   2041		/*
   2042		 * If the user specified a required address mode, assign that
   2043		 * address mode
   2044		 */
   2045		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
   2046			(!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
   2047			*addr_mode = (conn_entry->conn_rec.flags &
   2048				FCFCNCT_AM_SPMA) ?
   2049				LPFC_FCF_SPMA : LPFC_FCF_FPMA;
   2050		/*
   2051		 * If the user specified a preferred address mode, use the
   2052		 * addr mode only if FCF support the addr_mode.
   2053		 */
   2054		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
   2055			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
   2056			(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
   2057			(*addr_mode & LPFC_FCF_SPMA))
   2058				*addr_mode = LPFC_FCF_SPMA;
   2059		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
   2060			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
   2061			!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
   2062			(*addr_mode & LPFC_FCF_FPMA))
   2063				*addr_mode = LPFC_FCF_FPMA;
   2064
   2065		/* If matching connect list has a vlan id, use it */
   2066		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
   2067			*vlan_id = conn_entry->conn_rec.vlan_tag;
   2068		/*
   2069		 * If no vlan id is specified in connect list, use the vlan id
   2070		 * in the FCF record
   2071		 */
   2072		else if (fcf_vlan_id)
   2073			*vlan_id = fcf_vlan_id;
   2074		else
   2075			*vlan_id = LPFC_FCOE_NULL_VID;
   2076
   2077		return 1;
   2078	}
   2079
   2080	return 0;
   2081}
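/*
 * The loop at the top of this routine extracts the lowest set bit from the
 * record's 512-byte (4096-VLAN) bitmap, scanning bytes in order and bits
 * LSB-first within each byte. A standalone sketch of the same scan
 * (demo_lowest_vlan_id() is illustrative only; it returns -1 where the
 * driver simply leaves fcf_vlan_id at 0 when no bit is set):
 */
#include <stdint.h>

static int demo_lowest_vlan_id(const uint8_t bitmap[512])
{
	int i, j;

	for (i = 0; i < 512; i++) {
		if (!bitmap[i])
			continue;
		for (j = 0; j < 8; j++)
			if (bitmap[i] & (1u << j))
				return i * 8 + j;	/* lowest VLAN id */
	}
	return -1;	/* no VLAN bit set */
}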
   2082
   2083/**
   2084 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
   2085 * @phba: pointer to lpfc hba data structure.
   2086 * @unreg_fcf: Unregister FCF if the FCF table needs to be re-scanned.
   2087 *
   2088 * This function checks if there is any fcoe event pending while the driver
   2089 * scans FCF entries. If there is any pending event, it will restart the
   2090 * FCF scanning and return 1, else return 0.
   2091 */
   2092int
   2093lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
   2094{
   2095	/*
   2096	 * If the Link is up and no FCoE events while in the
   2097	 * FCF discovery, no need to restart FCF discovery.
   2098	 */
   2099	if ((phba->link_state >= LPFC_LINK_UP) &&
   2100	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
   2101		return 0;
   2102
   2103	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
   2104			"2768 Pending link or FCF event during current "
   2105			"handling of the previous event: link_state:x%x, "
   2106			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
   2107			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
   2108			phba->fcoe_eventtag);
   2109
   2110	spin_lock_irq(&phba->hbalock);
   2111	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
   2112	spin_unlock_irq(&phba->hbalock);
   2113
   2114	if (phba->link_state >= LPFC_LINK_UP) {
   2115		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
   2116				"2780 Restart FCF table scan due to "
   2117				"pending FCF event:evt_tag_at_scan:x%x, "
   2118				"evt_tag_current:x%x\n",
   2119				phba->fcoe_eventtag_at_fcf_scan,
   2120				phba->fcoe_eventtag);
   2121		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
   2122	} else {
   2123		/*
   2124		 * Do not continue FCF discovery and clear FCF_TS_INPROG
   2125		 * flag
   2126		 */
   2127		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
   2128				"2833 Stop FCF discovery process due to link "
   2129				"state change (x%x)\n", phba->link_state);
   2130		spin_lock_irq(&phba->hbalock);
   2131		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
   2132		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
   2133		spin_unlock_irq(&phba->hbalock);
   2134	}
   2135
   2136	/* Unregister the currently registered FCF if required */
   2137	if (unreg_fcf) {
   2138		spin_lock_irq(&phba->hbalock);
   2139		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
   2140		spin_unlock_irq(&phba->hbalock);
   2141		lpfc_sli4_unregister_fcf(phba);
   2142	}
   2143	return 1;
   2144}
   2145
   2146/**
   2147 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
   2148 * @phba: pointer to lpfc hba data structure.
   2149 * @fcf_cnt: number of eligible fcf record seen so far.
   2150 *
   2151 * This function makes a running random selection decision on the FCF
   2152 * record to use through a sequence of @fcf_cnt eligible FCF records with
   2153 * equal probability. To perform integer manipulation of random numbers
   2154 * with size uint32_t, the lower 16 bits of the 32-bit random number
   2155 * returned from prandom_u32() are taken as the random number generated.
   2156 *
   2157 * Returns true when the outcome is that the newly read FCF record should
   2158 * be chosen; otherwise, returns false when the outcome is to keep the
   2159 * previously chosen FCF record.
   2160 **/
   2161static bool
   2162lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
   2163{
   2164	uint32_t rand_num;
   2165
   2166	/* Get 16-bit uniform random number */
   2167	rand_num = 0xFFFF & prandom_u32();
   2168
   2169	/* Decision with probability 1/fcf_cnt */
   2170	if ((fcf_cnt * rand_num) < 0xFFFF)
   2171		return true;
   2172	else
   2173		return false;
   2174}
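/*
 * The decision above is one-element reservoir sampling: when the n-th
 * eligible record arrives, it replaces the current pick with probability
 * roughly 1/n, which leaves every record in the stream selected with equal
 * probability. A standalone sketch with a crude Monte Carlo check; the
 * demo_* names are illustrative and rand() stands in for prandom_u32():
 */
#include <stdio.h>
#include <stdlib.h>

static int demo_keep_new(unsigned int n)
{
	unsigned int r = rand() & 0xFFFF;	/* 16-bit uniform sample */

	return (n * r) < 0xFFFF;		/* true with probability ~1/n */
}

int main(void)
{
	int hits[4] = { 0 }, trial, pick;
	unsigned int n;

	for (trial = 0; trial < 100000; trial++) {
		pick = 0;			/* record 1 is always taken */
		for (n = 2; n <= 4; n++)	/* then stream records 2..4 */
			if (demo_keep_new(n))
				pick = n - 1;
		hits[pick]++;
	}
	for (n = 0; n < 4; n++)			/* each bucket lands near 25000 */
		printf("record %u chosen %d times\n", n, hits[n]);
	return 0;
}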
   2175
   2176/**
   2177 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
   2178 * @phba: pointer to lpfc hba data structure.
   2179 * @mboxq: pointer to mailbox object.
   2180 * @next_fcf_index: pointer to holder of next fcf index.
   2181 *
   2182 * This routine parses the non-embedded fcf mailbox command by performing the
   2183 * necessary error checking, non-embedded read FCF record mailbox command
   2184 * SGE parsing, and endianness swapping.
   2185 *
   2186 * Returns the pointer to the new FCF record in the non-embedded mailbox
   2187 * command DMA memory if successful, otherwise NULL.
   2188 */
   2189static struct fcf_record *
   2190lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
   2191			     uint16_t *next_fcf_index)
   2192{
   2193	void *virt_addr;
   2194	struct lpfc_mbx_sge sge;
   2195	struct lpfc_mbx_read_fcf_tbl *read_fcf;
   2196	uint32_t shdr_status, shdr_add_status, if_type;
   2197	union lpfc_sli4_cfg_shdr *shdr;
   2198	struct fcf_record *new_fcf_record;
   2199
   2200	/* Get the first SGE entry from the non-embedded DMA memory. This
   2201	 * routine only uses a single SGE.
   2202	 */
   2203	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
   2204	if (unlikely(!mboxq->sge_array)) {
   2205		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2206				"2524 Failed to get the non-embedded SGE "
   2207				"virtual address\n");
   2208		return NULL;
   2209	}
   2210	virt_addr = mboxq->sge_array->addr[0];
   2211
   2212	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
   2213	lpfc_sli_pcimem_bcopy(shdr, shdr,
   2214			      sizeof(union lpfc_sli4_cfg_shdr));
   2215	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
   2216	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
   2217	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
   2218	if (shdr_status || shdr_add_status) {
   2219		if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
   2220					if_type == LPFC_SLI_INTF_IF_TYPE_2)
   2221			lpfc_printf_log(phba, KERN_ERR,
   2222					LOG_TRACE_EVENT,
   2223					"2726 READ_FCF_RECORD Indicates empty "
   2224					"FCF table.\n");
   2225		else
   2226			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2227					"2521 READ_FCF_RECORD mailbox failed "
   2228					"with status x%x add_status x%x, "
   2229					"mbx\n", shdr_status, shdr_add_status);
   2230		return NULL;
   2231	}
   2232
   2233	/* Interpreting the returned information of the FCF record */
   2234	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
   2235	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
   2236			      sizeof(struct lpfc_mbx_read_fcf_tbl));
   2237	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
   2238	new_fcf_record = (struct fcf_record *)(virt_addr +
   2239			  sizeof(struct lpfc_mbx_read_fcf_tbl));
   2240	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
   2241				offsetof(struct fcf_record, vlan_bitmap));
   2242	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
   2243	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);
   2244
   2245	return new_fcf_record;
   2246}
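/*
 * The word137/word138 fixups above follow the usual SLI4 convention: the
 * non-embedded mailbox buffer is little-endian, so each 32-bit word that is
 * consumed directly gets a le32_to_cpu() conversion. A host-independent way
 * to read a little-endian 32-bit word from a raw byte buffer (demo_le32()
 * is illustrative only, not driver API):
 */
#include <stdint.h>

static uint32_t demo_le32(const uint8_t *p)
{
	/* assemble LSB-first, regardless of host endianness */
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}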
   2247
   2248/**
   2249 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
   2250 * @phba: pointer to lpfc hba data structure.
   2251 * @fcf_record: pointer to the fcf record.
   2252 * @vlan_id: the lowest vlan identifier associated to this fcf record.
   2253 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
   2254 *
   2255 * This routine logs the detailed FCF record if the LOG_FIP logging is
   2256 * enabled.
   2257 **/
   2258static void
   2259lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
   2260			      struct fcf_record *fcf_record,
   2261			      uint16_t vlan_id,
   2262			      uint16_t next_fcf_index)
   2263{
   2264	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
   2265			"2764 READ_FCF_RECORD:\n"
   2266			"\tFCF_Index     : x%x\n"
   2267			"\tFCF_Avail     : x%x\n"
   2268			"\tFCF_Valid     : x%x\n"
   2269			"\tFCF_SOL       : x%x\n"
   2270			"\tFIP_Priority  : x%x\n"
   2271			"\tMAC_Provider  : x%x\n"
   2272			"\tLowest VLANID : x%x\n"
   2273			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
   2274			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
   2275			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
   2276			"\tNext_FCF_Index: x%x\n",
   2277			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
   2278			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
   2279			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
   2280			bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
   2281			fcf_record->fip_priority,
   2282			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
   2283			vlan_id,
   2284			bf_get(lpfc_fcf_record_mac_0, fcf_record),
   2285			bf_get(lpfc_fcf_record_mac_1, fcf_record),
   2286			bf_get(lpfc_fcf_record_mac_2, fcf_record),
   2287			bf_get(lpfc_fcf_record_mac_3, fcf_record),
   2288			bf_get(lpfc_fcf_record_mac_4, fcf_record),
   2289			bf_get(lpfc_fcf_record_mac_5, fcf_record),
   2290			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
   2291			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
   2292			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
   2293			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
   2294			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
   2295			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
   2296			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
   2297			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
   2298			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
   2299			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
   2300			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
   2301			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
   2302			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
   2303			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
   2304			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
   2305			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
   2306			next_fcf_index);
   2307}
   2308
   2309/**
   2310 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
   2311 * @phba: pointer to lpfc hba data structure.
   2312 * @fcf_rec: pointer to an existing FCF record.
   2313 * @new_fcf_record: pointer to a new FCF record.
   2314 * @new_vlan_id: vlan id from the new FCF record.
   2315 *
   2316 * This function performs matching test of a new FCF record against an existing
   2317 * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
   2318 * will not be used as part of the FCF record matching criteria.
   2319 *
   2320 * Returns true if all the fields match, otherwise returns false.
   2321 */
   2322static bool
   2323lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
   2324			   struct lpfc_fcf_rec *fcf_rec,
   2325			   struct fcf_record *new_fcf_record,
   2326			   uint16_t new_vlan_id)
   2327{
   2328	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
   2329		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
   2330			return false;
   2331	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
   2332		return false;
   2333	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
   2334		return false;
   2335	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
   2336		return false;
   2337	if (fcf_rec->priority != new_fcf_record->fip_priority)
   2338		return false;
   2339	return true;
   2340}
   2341
   2342/**
   2343 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
   2344 * @vport: Pointer to vport object.
   2345 * @fcf_index: index to next fcf.
   2346 *
   2347 * This function processes the roundrobin fcf failover to the next fcf index.
   2348 * When this function is invoked, there will be a current fcf registered
   2349 * for flogi.
   2350 * Return: 0 to continue retrying flogi on the currently registered fcf;
   2351 *         1 to stop flogi on the currently registered fcf;
   2352 */
   2353int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
   2354{
   2355	struct lpfc_hba *phba = vport->phba;
   2356	int rc;
   2357
   2358	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
   2359		spin_lock_irq(&phba->hbalock);
   2360		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
   2361			spin_unlock_irq(&phba->hbalock);
   2362			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
   2363					"2872 Devloss tmo with no eligible "
   2364					"FCF, unregister in-use FCF (x%x) "
   2365					"and rescan FCF table\n",
   2366					phba->fcf.current_rec.fcf_indx);
   2367			lpfc_unregister_fcf_rescan(phba);
   2368			goto stop_flogi_current_fcf;
   2369		}
   2370		/* Mark the end to FLOGI roundrobin failover */
   2371		phba->hba_flag &= ~FCF_RR_INPROG;
   2372		/* Allow action to new fcf asynchronous event */
   2373		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
   2374		spin_unlock_irq(&phba->hbalock);
   2375		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
   2376				"2865 No FCF available, stop roundrobin FCF "
   2377				"failover and change port state:x%x/x%x\n",
   2378				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
   2379		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
   2380
   2381		if (!phba->fcf.fcf_redisc_attempted) {
   2382			lpfc_unregister_fcf(phba);
   2383
   2384			rc = lpfc_sli4_redisc_fcf_table(phba);
   2385			if (!rc) {
   2386				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
   2387						"3195 Rediscover FCF table\n");
   2388				phba->fcf.fcf_redisc_attempted = 1;
   2389				lpfc_sli4_clear_fcf_rr_bmask(phba);
   2390			} else {
   2391				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
   2392						"3196 Rediscover FCF table "
   2393						"failed. Status:x%x\n", rc);
   2394			}
   2395		} else {
   2396			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
   2397					"3197 FCF table rediscover already "
   2398					"attempted. No more retries\n");
   2399		}
   2400		goto stop_flogi_current_fcf;
   2401	} else {
   2402		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
   2403				"2794 Try FLOGI roundrobin FCF failover to "
   2404				"(x%x)\n", fcf_index);
   2405		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
   2406		if (rc)
   2407			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
   2408					"2761 FLOGI roundrobin FCF failover "
   2409					"failed (rc:x%x) to read FCF (x%x)\n",
   2410					rc, phba->fcf.current_rec.fcf_indx);
   2411		else
   2412			goto stop_flogi_current_fcf;
   2413	}
   2414	return 0;
   2415
   2416stop_flogi_current_fcf:
   2417	lpfc_can_disctmo(vport);
   2418	return 1;
   2419}
   2420
   2421/**
   2422 * lpfc_sli4_fcf_pri_list_del
   2423 * @phba: pointer to lpfc hba data structure.
   2424 * @fcf_index: the index of the fcf record to delete
   2425 * This routine checks the on list flag of the fcf_index to be deleted.
   2426 * If it is on the list then it is removed from the list, and the flag
   2427 * is cleared. This routine grabs the hbalock before removing the fcf
   2428 * record from the list.
   2429 **/
   2430static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
   2431			uint16_t fcf_index)
   2432{
   2433	struct lpfc_fcf_pri *new_fcf_pri;
   2434
   2435	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
   2436	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
   2437		"3058 deleting idx x%x pri x%x flg x%x\n",
   2438		fcf_index, new_fcf_pri->fcf_rec.priority,
   2439		 new_fcf_pri->fcf_rec.flag);
   2440	spin_lock_irq(&phba->hbalock);
   2441	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
   2442		if (phba->fcf.current_rec.priority ==
   2443				new_fcf_pri->fcf_rec.priority)
   2444			phba->fcf.eligible_fcf_cnt--;
   2445		list_del_init(&new_fcf_pri->list);
   2446		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
   2447	}
   2448	spin_unlock_irq(&phba->hbalock);
   2449}
   2450
   2451/**
   2452 * lpfc_sli4_set_fcf_flogi_fail
   2453 * @phba: pointer to lpfc hba data structure.
   2454 * @fcf_index: the index of the fcf record to update
   2455 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
   2456 * flag so that the round robin selection for the particular priority level
   2457 * will try a different fcf record that does not have this bit set.
   2458 * If the fcf record is re-read for any reason this flag is cleared before
   2459 * adding it to the priority list.
   2460 **/
   2461void
   2462lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
   2463{
   2464	struct lpfc_fcf_pri *new_fcf_pri;
   2465	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
   2466	spin_lock_irq(&phba->hbalock);
   2467	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
   2468	spin_unlock_irq(&phba->hbalock);
   2469}
   2470
   2471/**
   2472 * lpfc_sli4_fcf_pri_list_add
   2473 * @phba: pointer to lpfc hba data structure.
   2474 * @fcf_index: the index of the fcf record to add
   2475 * @new_fcf_record: pointer to a new FCF record.
   2476 * This routine checks the priority of the fcf_index to be added.
   2477 * If it is a lower priority than the current head of the fcf_pri list
   2478 * then it is added to the list in the right order.
   2479 * If it is the same priority as the current head of the list then it
   2480 * is added to the head of the list and its bit in the rr_bmask is set.
   2481 * If the fcf_index to be added is of a higher priority than the current
   2482 * head of the list then the rr_bmask is cleared, its bit is set in the
   2483 * rr_bmask and it is added to the head of the list.
   2484 * returns:
   2485 * 0=success 1=failure
   2486 **/
   2487static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
   2488	uint16_t fcf_index,
   2489	struct fcf_record *new_fcf_record)
   2490{
   2491	uint16_t current_fcf_pri;
   2492	uint16_t last_index;
   2493	struct lpfc_fcf_pri *fcf_pri;
   2494	struct lpfc_fcf_pri *next_fcf_pri;
   2495	struct lpfc_fcf_pri *new_fcf_pri;
   2496	int ret;
   2497
   2498	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
   2499	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
   2500		"3059 adding idx x%x pri x%x flg x%x\n",
   2501		fcf_index, new_fcf_record->fip_priority,
   2502		 new_fcf_pri->fcf_rec.flag);
   2503	spin_lock_irq(&phba->hbalock);
   2504	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
   2505		list_del_init(&new_fcf_pri->list);
   2506	new_fcf_pri->fcf_rec.fcf_index = fcf_index;
   2507	new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
   2508	if (list_empty(&phba->fcf.fcf_pri_list)) {
   2509		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
   2510		ret = lpfc_sli4_fcf_rr_index_set(phba,
   2511				new_fcf_pri->fcf_rec.fcf_index);
   2512		goto out;
   2513	}
   2514
   2515	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
   2516				LPFC_SLI4_FCF_TBL_INDX_MAX);
   2517	if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
   2518		ret = 0; /* Empty rr list */
   2519		goto out;
   2520	}
   2521	current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
   2522	if (new_fcf_pri->fcf_rec.priority <=  current_fcf_pri) {
   2523		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
   2524		if (new_fcf_pri->fcf_rec.priority <  current_fcf_pri) {
   2525			memset(phba->fcf.fcf_rr_bmask, 0,
   2526				sizeof(*phba->fcf.fcf_rr_bmask));
   2527			/* fcfs_at_this_priority_level = 1; */
   2528			phba->fcf.eligible_fcf_cnt = 1;
   2529		} else
   2530			/* fcfs_at_this_priority_level++; */
   2531			phba->fcf.eligible_fcf_cnt++;
   2532		ret = lpfc_sli4_fcf_rr_index_set(phba,
   2533				new_fcf_pri->fcf_rec.fcf_index);
   2534		goto out;
   2535	}
   2536
   2537	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
   2538				&phba->fcf.fcf_pri_list, list) {
   2539		if (new_fcf_pri->fcf_rec.priority <=
   2540				fcf_pri->fcf_rec.priority) {
   2541			if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
   2542				list_add(&new_fcf_pri->list,
   2543						&phba->fcf.fcf_pri_list);
   2544			else
   2545				list_add(&new_fcf_pri->list,
   2546					 &((struct lpfc_fcf_pri *)
   2547					fcf_pri->list.prev)->list);
   2548			ret = 0;
   2549			goto out;
   2550		} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
   2551			|| new_fcf_pri->fcf_rec.priority <
   2552				next_fcf_pri->fcf_rec.priority) {
   2553			list_add(&new_fcf_pri->list, &fcf_pri->list);
   2554			ret = 0;
   2555			goto out;
   2556		}
   2557		if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
   2558			continue;
   2559
   2560	}
   2561	ret = 1;
   2562out:
   2563	/* we use = instead of |= to clear the FLOGI_FAILED flag. */
   2564	new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
   2565	spin_unlock_irq(&phba->hbalock);
   2566	return ret;
   2567}
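/*
 * lpfc_sli4_fcf_pri_list_add() keeps fcf_pri_list ordered by ascending
 * fip_priority so the head always carries the preferred (lowest) priority
 * level, and it counts how many records share that level for the random
 * tie-break. A standalone sketch of the same ordered insert on a singly
 * linked list (demo_* names are illustrative only):
 */
struct demo_pri {
	unsigned int priority;		/* lower value is preferred */
	struct demo_pri *next;
};

static void demo_pri_insert(struct demo_pri **head, struct demo_pri *rec)
{
	struct demo_pri **pp = head;

	/* walk past every node whose priority is not worse than the new one */
	while (*pp && (*pp)->priority <= rec->priority)
		pp = &(*pp)->next;
	rec->next = *pp;
	*pp = rec;
}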
   2568
   2569/**
   2570 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
   2571 * @phba: pointer to lpfc hba data structure.
   2572 * @mboxq: pointer to mailbox object.
   2573 *
   2574 * This function iterates through all the fcf records available in
   2575 * HBA and chooses the optimal FCF record for discovery. After finding
   2576 * the FCF for discovery it registers the FCF record and kick-starts
   2577 * discovery.
   2578 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
   2579 * use an FCF record which matches fabric name and mac address of the
   2580 * currently used FCF record.
   2581 * If the driver supports only one FCF, it will try to use the FCF record
   2582 * used by BOOT_BIOS.
   2583 */
   2584void
   2585lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
   2586{
   2587	struct fcf_record *new_fcf_record;
   2588	uint32_t boot_flag, addr_mode;
   2589	uint16_t fcf_index, next_fcf_index;
   2590	struct lpfc_fcf_rec *fcf_rec = NULL;
   2591	uint16_t vlan_id = LPFC_FCOE_NULL_VID;
   2592	bool select_new_fcf;
   2593	int rc;
   2594
   2595	/* If there is pending FCoE event restart FCF table scan */
   2596	if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
   2597		lpfc_sli4_mbox_cmd_free(phba, mboxq);
   2598		return;
   2599	}
   2600
   2601	/* Parse the FCF record from the non-embedded mailbox command */
   2602	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
   2603						      &next_fcf_index);
   2604	if (!new_fcf_record) {
   2605		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2606				"2765 Mailbox command READ_FCF_RECORD "
   2607				"failed to retrieve a FCF record.\n");
   2608		/* Let next new FCF event trigger fast failover */
   2609		spin_lock_irq(&phba->hbalock);
   2610		phba->hba_flag &= ~FCF_TS_INPROG;
   2611		spin_unlock_irq(&phba->hbalock);
   2612		lpfc_sli4_mbox_cmd_free(phba, mboxq);
   2613		return;
   2614	}
   2615
   2616	/* Check the FCF record against the connection list */
   2617	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
   2618				      &addr_mode, &vlan_id);
   2619
   2620	/* Log the FCF record information if turned on */
   2621	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
   2622				      next_fcf_index);
   2623
   2624	/*
   2625	 * If the fcf record does not match with connect list entries
   2626	 * read the next entry; otherwise, this is an eligible FCF
   2627	 * record for roundrobin FCF failover.
   2628	 */
   2629	if (!rc) {
   2630		lpfc_sli4_fcf_pri_list_del(phba,
   2631					bf_get(lpfc_fcf_record_fcf_index,
   2632					       new_fcf_record));
   2633		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
   2634				"2781 FCF (x%x) failed connection "
   2635				"list check: (x%x/x%x/%x)\n",
   2636				bf_get(lpfc_fcf_record_fcf_index,
   2637				       new_fcf_record),
   2638				bf_get(lpfc_fcf_record_fcf_avail,
   2639				       new_fcf_record),
   2640				bf_get(lpfc_fcf_record_fcf_valid,
   2641				       new_fcf_record),
   2642				bf_get(lpfc_fcf_record_fcf_sol,
   2643				       new_fcf_record));
   2644		if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
   2645		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
   2646		    new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
   2647			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
   2648			    phba->fcf.current_rec.fcf_indx) {
   2649				lpfc_printf_log(phba, KERN_ERR,
   2650						LOG_TRACE_EVENT,
   2651					"2862 FCF (x%x) matches property "
   2652					"of in-use FCF (x%x)\n",
   2653					bf_get(lpfc_fcf_record_fcf_index,
   2654					       new_fcf_record),
   2655					phba->fcf.current_rec.fcf_indx);
   2656				goto read_next_fcf;
   2657			}
   2658			/*
   2659			 * In case the current in-use FCF record becomes
   2660			 * invalid/unavailable during FCF discovery that
   2661			 * was not triggered by fast FCF failover process,
   2662			 * treat it as fast FCF failover.
   2663			 */
   2664			if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
   2665			    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
   2666				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
   2667						"2835 Invalid in-use FCF "
   2668						"(x%x), enter FCF failover "
   2669						"table scan.\n",
   2670						phba->fcf.current_rec.fcf_indx);
   2671				spin_lock_irq(&phba->hbalock);
   2672				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
   2673				spin_unlock_irq(&phba->hbalock);
   2674				lpfc_sli4_mbox_cmd_free(phba, mboxq);
   2675				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
   2676						LPFC_FCOE_FCF_GET_FIRST);
   2677				return;
   2678			}
   2679		}
   2680		goto read_next_fcf;
   2681	} else {
   2682		fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
   2683		rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
   2684							new_fcf_record);
   2685		if (rc)
   2686			goto read_next_fcf;
   2687	}
   2688
   2689	/*
   2690	 * If this is not the first FCF discovery of the HBA, use last
   2691	 * FCF record for the discovery. The conditions for a rescan to
   2692	 * match the in-use FCF record are: fabric name, switch name, mac
   2693	 * address, and vlan_id.
   2694	 */
   2695	spin_lock_irq(&phba->hbalock);
   2696	if (phba->fcf.fcf_flag & FCF_IN_USE) {
   2697		if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
   2698			lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
   2699		    new_fcf_record, vlan_id)) {
   2700			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
   2701			    phba->fcf.current_rec.fcf_indx) {
   2702				phba->fcf.fcf_flag |= FCF_AVAILABLE;
   2703				if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
   2704					/* Stop FCF redisc wait timer */
   2705					__lpfc_sli4_stop_fcf_redisc_wait_timer(
   2706									phba);
   2707				else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
   2708					/* Fast failover, mark completed */
   2709					phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
   2710				spin_unlock_irq(&phba->hbalock);
   2711				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
   2712						"2836 New FCF matches in-use "
   2713						"FCF (x%x), port_state:x%x, "
   2714						"fc_flag:x%x\n",
   2715						phba->fcf.current_rec.fcf_indx,
   2716						phba->pport->port_state,
   2717						phba->pport->fc_flag);
   2718				goto out;
   2719			} else
   2720				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   2721					"2863 New FCF (x%x) matches "
   2722					"property of in-use FCF (x%x)\n",
   2723					bf_get(lpfc_fcf_record_fcf_index,
   2724					       new_fcf_record),
   2725					phba->fcf.current_rec.fcf_indx);
   2726		}
   2727		/*
   2728		 * Read the next FCF record from the HBA, searching for a match
   2729		 * with the in-use record, only if not during the fast failover
   2730		 * period. In case of fast failover period, it shall try to
   2731		 * determine whether the FCF record just read should be the
   2732		 * next candidate.
   2733		 */
   2734		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
   2735			spin_unlock_irq(&phba->hbalock);
   2736			goto read_next_fcf;
   2737		}
   2738	}
   2739	/*
   2740	 * Update on failover FCF record only if it's in FCF fast-failover
   2741	 * period; otherwise, update on current FCF record.
   2742	 */
   2743	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
   2744		fcf_rec = &phba->fcf.failover_rec;
   2745	else
   2746		fcf_rec = &phba->fcf.current_rec;
   2747
   2748	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
   2749		/*
   2750		 * If the driver FCF record does not have boot flag
   2751		 * set and new hba fcf record has boot flag set, use
   2752		 * the new hba fcf record.
   2753		 */
   2754		if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
   2755			/* Choose this FCF record */
   2756			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
   2757					"2837 Update current FCF record "
   2758					"(x%x) with new FCF record (x%x)\n",
   2759					fcf_rec->fcf_indx,
   2760					bf_get(lpfc_fcf_record_fcf_index,
   2761					new_fcf_record));
   2762			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
   2763					addr_mode, vlan_id, BOOT_ENABLE);
   2764			spin_unlock_irq(&phba->hbalock);
   2765			goto read_next_fcf;
   2766		}
   2767		/*
   2768		 * If the driver FCF record has boot flag set and the
   2769		 * new hba FCF record does not have boot flag, read
   2770		 * the next FCF record.
   2771		 */
   2772		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
   2773			spin_unlock_irq(&phba->hbalock);
   2774			goto read_next_fcf;
   2775		}
   2776		/*
   2777		 * If the new hba FCF record has lower priority value
   2778		 * than the driver FCF record, use the new record.
   2779		 */
   2780		if (new_fcf_record->fip_priority < fcf_rec->priority) {
   2781			/* Choose the new FCF record with lower priority */
   2782			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
   2783					"2838 Update current FCF record "
   2784					"(x%x) with new FCF record (x%x)\n",
   2785					fcf_rec->fcf_indx,
   2786					bf_get(lpfc_fcf_record_fcf_index,
   2787					       new_fcf_record));
   2788			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
   2789					addr_mode, vlan_id, 0);
   2790			/* Reset running random FCF selection count */
   2791			phba->fcf.eligible_fcf_cnt = 1;
   2792		} else if (new_fcf_record->fip_priority == fcf_rec->priority) {
   2793			/* Update running random FCF selection count */
   2794			phba->fcf.eligible_fcf_cnt++;
   2795			select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
   2796						phba->fcf.eligible_fcf_cnt);
   2797			if (select_new_fcf) {
   2798				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
   2799					"2839 Update current FCF record "
   2800					"(x%x) with new FCF record (x%x)\n",
   2801					fcf_rec->fcf_indx,
   2802					bf_get(lpfc_fcf_record_fcf_index,
   2803					       new_fcf_record));
   2804				/* Choose the new FCF by random selection */
   2805				__lpfc_update_fcf_record(phba, fcf_rec,
   2806							 new_fcf_record,
   2807							 addr_mode, vlan_id, 0);
   2808			}
   2809		}
   2810		spin_unlock_irq(&phba->hbalock);
   2811		goto read_next_fcf;
   2812	}
   2813	/*
   2814	 * This is the first suitable FCF record, choose this record for
   2815	 * initial best-fit FCF.
   2816	 */
   2817	if (fcf_rec) {
   2818		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
   2819				"2840 Update initial FCF candidate "
   2820				"with FCF (x%x)\n",
   2821				bf_get(lpfc_fcf_record_fcf_index,
   2822				       new_fcf_record));
   2823		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
   2824					 addr_mode, vlan_id, (boot_flag ?
   2825					 BOOT_ENABLE : 0));
   2826		phba->fcf.fcf_flag |= FCF_AVAILABLE;
   2827		/* Setup initial running random FCF selection count */
   2828		phba->fcf.eligible_fcf_cnt = 1;
   2829	}
   2830	spin_unlock_irq(&phba->hbalock);
   2831	goto read_next_fcf;
   2832
   2833read_next_fcf:
   2834	lpfc_sli4_mbox_cmd_free(phba, mboxq);
   2835	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
   2836		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
   2837			/*
   2838			 * Case of FCF fast failover scan
   2839			 */
   2840
   2841			/*
   2842			 * It has not found any suitable FCF record, cancel
   2843			 * FCF scan in progress, and do nothing
   2844			 */
   2845			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
   2846				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
   2847					       "2782 No suitable FCF found: "
   2848					       "(x%x/x%x)\n",
   2849					       phba->fcoe_eventtag_at_fcf_scan,
   2850					       bf_get(lpfc_fcf_record_fcf_index,
   2851						      new_fcf_record));
   2852				spin_lock_irq(&phba->hbalock);
   2853				if (phba->hba_flag & HBA_DEVLOSS_TMO) {
   2854					phba->hba_flag &= ~FCF_TS_INPROG;
   2855					spin_unlock_irq(&phba->hbalock);
   2856					/* Unregister in-use FCF and rescan */
   2857					lpfc_printf_log(phba, KERN_INFO,
   2858							LOG_FIP,
   2859							"2864 On devloss tmo "
   2860							"unreg in-use FCF and "
   2861							"rescan FCF table\n");
   2862					lpfc_unregister_fcf_rescan(phba);
   2863					return;
   2864				}
   2865				/*
   2866				 * Let next new FCF event trigger fast failover
   2867				 */
   2868				phba->hba_flag &= ~FCF_TS_INPROG;
   2869				spin_unlock_irq(&phba->hbalock);
   2870				return;
   2871			}
   2872			/*
   2873			 * It has found a suitable FCF record that is not
   2874			 * the same as in-use FCF record, unregister the
   2875			 * in-use FCF record, replace the in-use FCF record
   2876			 * with the new FCF record, mark FCF fast failover
   2877			 * completed, and then start registering the new FCF
   2878			 * record.
   2879			 */
   2880
   2881			/* Unregister the current in-use FCF record */
   2882			lpfc_unregister_fcf(phba);
   2883
   2884			/* Replace in-use record with the new record */
   2885			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
   2886					"2842 Replace in-use FCF (x%x) "
   2887					"with failover FCF (x%x)\n",
   2888					phba->fcf.current_rec.fcf_indx,
   2889					phba->fcf.failover_rec.fcf_indx);
   2890			memcpy(&phba->fcf.current_rec,
   2891			       &phba->fcf.failover_rec,
   2892			       sizeof(struct lpfc_fcf_rec));
   2893			/*
   2894			 * Mark the fast FCF failover rediscovery completed
   2895			 * and the start of the first round of the roundrobin
   2896			 * FCF failover.
   2897			 */
   2898			spin_lock_irq(&phba->hbalock);
   2899			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
   2900			spin_unlock_irq(&phba->hbalock);
   2901			/* Register to the new FCF record */
   2902			lpfc_register_fcf(phba);
   2903		} else {
   2904			/*
   2905			 * In case of a transition period to fast FCF failover,
   2906			 * do nothing when the search reaches the end of the FCF table.
   2907			 */
   2908			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
   2909			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
   2910				return;
   2911
   2912			if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
   2913				phba->fcf.fcf_flag & FCF_IN_USE) {
   2914				/*
   2915				 * In case the current in-use FCF record no
   2916				 * longer exists during FCF discovery that
   2917				 * was not triggered by fast FCF failover
   2918				 * process, treat it as fast FCF failover.
   2919				 */
   2920				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
   2921						"2841 In-use FCF record (x%x) "
   2922						"not reported, entering fast "
   2923						"FCF failover mode scanning.\n",
   2924						phba->fcf.current_rec.fcf_indx);
   2925				spin_lock_irq(&phba->hbalock);
   2926				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
   2927				spin_unlock_irq(&phba->hbalock);
   2928				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
   2929						LPFC_FCOE_FCF_GET_FIRST);
   2930				return;
   2931			}
   2932			/* Register to the new FCF record */
   2933			lpfc_register_fcf(phba);
   2934		}
   2935	} else
   2936		lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
   2937	return;
   2938
   2939out:
   2940	lpfc_sli4_mbox_cmd_free(phba, mboxq);
   2941	lpfc_register_fcf(phba);
   2942
   2943	return;
   2944}
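/*
 * Stripped of the locking and rescan plumbing, the candidate policy this
 * handler applies reduces to three ordered rules: a boot-BIOS record beats
 * a non-boot record, a lower fip_priority value beats a higher one, and
 * records at equal priority are tie-broken by the streaming random pick.
 * A standalone sketch of that comparison (demo_* names are illustrative;
 * n is the count of equal-priority records seen so far, and rand() stands
 * in for prandom_u32()):
 */
#include <stdlib.h>

struct demo_fcf {
	int boot;			/* record is used by the boot BIOS */
	unsigned int priority;		/* fip_priority: lower is better */
};

static int demo_prefer_new(const struct demo_fcf *cur,
			   const struct demo_fcf *cand, unsigned int n)
{
	if (cand->boot != cur->boot)
		return cand->boot;			/* boot record wins */
	if (cand->priority != cur->priority)
		return cand->priority < cur->priority;	/* lower value wins */
	return (n * (rand() & 0xFFFFu)) < 0xFFFF;	/* ~1/n tie-break */
}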
   2945
   2946/**
   2947 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
   2948 * @phba: pointer to lpfc hba data structure.
   2949 * @mboxq: pointer to mailbox object.
   2950 *
    2951 * This is the callback function for the read FCF record mailbox command
    2952 * issued from the eligible FCF record bmask to perform FLOGI failure
    2953 * roundrobin FCF failover. If the FCF read back is not valid/available,
    2954 * it falls through to retrying FLOGI to the currently registered FCF.
    2955 * Otherwise, if the FCF read back is valid and available, it sets the
    2956 * newly read FCF record as the failover FCF record, unregisters the
    2957 * currently registered FCF record, copies the failover FCF record to
    2958 * the current FCF record, and then registers the current FCF record
    2959 * before proceeding to try FLOGI on the new failover FCF.
   2960 */
   2961void
   2962lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
   2963{
   2964	struct fcf_record *new_fcf_record;
   2965	uint32_t boot_flag, addr_mode;
   2966	uint16_t next_fcf_index, fcf_index;
   2967	uint16_t current_fcf_index;
   2968	uint16_t vlan_id;
   2969	int rc;
   2970
   2971	/* If link state is not up, stop the roundrobin failover process */
   2972	if (phba->link_state < LPFC_LINK_UP) {
   2973		spin_lock_irq(&phba->hbalock);
   2974		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
   2975		phba->hba_flag &= ~FCF_RR_INPROG;
   2976		spin_unlock_irq(&phba->hbalock);
   2977		goto out;
   2978	}
   2979
   2980	/* Parse the FCF record from the non-embedded mailbox command */
   2981	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
   2982						      &next_fcf_index);
   2983	if (!new_fcf_record) {
   2984		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
   2985				"2766 Mailbox command READ_FCF_RECORD "
   2986				"failed to retrieve a FCF record. "
   2987				"hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
   2988				phba->fcf.fcf_flag);
   2989		lpfc_unregister_fcf_rescan(phba);
   2990		goto out;
   2991	}
   2992
   2993	/* Get the needed parameters from FCF record */
   2994	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
   2995				      &addr_mode, &vlan_id);
   2996
   2997	/* Log the FCF record information if turned on */
   2998	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
   2999				      next_fcf_index);
   3000
   3001	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
   3002	if (!rc) {
   3003		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
    3004				"2848 Remove ineligible FCF (x%x) "
    3005				"from roundrobin bmask\n", fcf_index);
   3006		/* Clear roundrobin bmask bit for ineligible FCF */
   3007		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
   3008		/* Perform next round of roundrobin FCF failover */
   3009		fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
   3010		rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
   3011		if (rc)
   3012			goto out;
   3013		goto error_out;
   3014	}
   3015
   3016	if (fcf_index == phba->fcf.current_rec.fcf_indx) {
   3017		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
   3018				"2760 Perform FLOGI roundrobin FCF failover: "
   3019				"FCF (x%x) back to FCF (x%x)\n",
   3020				phba->fcf.current_rec.fcf_indx, fcf_index);
   3021		/* Wait 500 ms before retrying FLOGI to current FCF */
   3022		msleep(500);
   3023		lpfc_issue_init_vfi(phba->pport);
   3024		goto out;
   3025	}
   3026
    3027	/* Update the failover FCF record with the newly read FCF record */
   3028	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
   3029			"2834 Update current FCF (x%x) with new FCF (x%x)\n",
   3030			phba->fcf.failover_rec.fcf_indx, fcf_index);
   3031	spin_lock_irq(&phba->hbalock);
   3032	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
   3033				 new_fcf_record, addr_mode, vlan_id,
   3034				 (boot_flag ? BOOT_ENABLE : 0));
   3035	spin_unlock_irq(&phba->hbalock);
   3036
   3037	current_fcf_index = phba->fcf.current_rec.fcf_indx;
   3038
   3039	/* Unregister the current in-use FCF record */
   3040	lpfc_unregister_fcf(phba);
   3041
   3042	/* Replace in-use record with the new record */
   3043	memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
   3044	       sizeof(struct lpfc_fcf_rec));
   3045
   3046	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
   3047			"2783 Perform FLOGI roundrobin FCF failover: FCF "
   3048			"(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
   3049
   3050error_out:
   3051	lpfc_register_fcf(phba);
   3052out:
   3053	lpfc_sli4_mbox_cmd_free(phba, mboxq);
   3054}
   3055
   3056/**
   3057 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
   3058 * @phba: pointer to lpfc hba data structure.
   3059 * @mboxq: pointer to mailbox object.
   3060 *
    3061 * This is the callback function of the read FCF record mailbox command
    3062 * for updating the eligible FCF bmask for FLOGI failure roundrobin FCF
    3063 * failover when a new FCF event happens. If the FCF read back is
    3064 * valid/available and it passes the connection list check, it updates
    3065 * the bmask for the eligible FCF record for roundrobin failover.
   3066 */
   3067void
   3068lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
   3069{
   3070	struct fcf_record *new_fcf_record;
   3071	uint32_t boot_flag, addr_mode;
   3072	uint16_t fcf_index, next_fcf_index;
   3073	uint16_t vlan_id;
   3074	int rc;
   3075
   3076	/* If link state is not up, no need to proceed */
   3077	if (phba->link_state < LPFC_LINK_UP)
   3078		goto out;
   3079
   3080	/* If FCF discovery period is over, no need to proceed */
   3081	if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
   3082		goto out;
   3083
   3084	/* Parse the FCF record from the non-embedded mailbox command */
   3085	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
   3086						      &next_fcf_index);
   3087	if (!new_fcf_record) {
   3088		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
   3089				"2767 Mailbox command READ_FCF_RECORD "
   3090				"failed to retrieve a FCF record.\n");
   3091		goto out;
   3092	}
   3093
   3094	/* Check the connection list for eligibility */
   3095	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
   3096				      &addr_mode, &vlan_id);
   3097
   3098	/* Log the FCF record information if turned on */
   3099	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
   3100				      next_fcf_index);
   3101
   3102	if (!rc)
   3103		goto out;
   3104
   3105	/* Update the eligible FCF record index bmask */
   3106	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
   3107
   3108	rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
   3109
   3110out:
   3111	lpfc_sli4_mbox_cmd_free(phba, mboxq);
   3112}
   3113
   3114/**
   3115 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
   3116 * @phba: pointer to lpfc hba data structure.
   3117 * @mboxq: pointer to mailbox data structure.
   3118 *
   3119 * This function handles completion of init vfi mailbox command.
   3120 */
   3121static void
   3122lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
   3123{
   3124	struct lpfc_vport *vport = mboxq->vport;
   3125
    3126	/*
    3127	 * VFI is not supported on interface type 0, so just do the FLOGI.
    3128	 * Also continue if the VFI is in use - just use the same one.
    3129	 */
   3130	if (mboxq->u.mb.mbxStatus &&
   3131	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
   3132			LPFC_SLI_INTF_IF_TYPE_0) &&
   3133	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
   3134		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   3135				 "2891 Init VFI mailbox failed 0x%x\n",
   3136				 mboxq->u.mb.mbxStatus);
   3137		mempool_free(mboxq, phba->mbox_mem_pool);
   3138		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
   3139		return;
   3140	}
   3141
   3142	lpfc_initial_flogi(vport);
   3143	mempool_free(mboxq, phba->mbox_mem_pool);
   3144	return;
   3145}
   3146
   3147/**
   3148 * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
   3149 * @vport: pointer to lpfc_vport data structure.
   3150 *
    3151 * This function issues an init_vfi mailbox command to initialize the VFI
    3152 * and VPI for the physical port.
   3153 */
   3154void
   3155lpfc_issue_init_vfi(struct lpfc_vport *vport)
   3156{
   3157	LPFC_MBOXQ_t *mboxq;
   3158	int rc;
   3159	struct lpfc_hba *phba = vport->phba;
   3160
   3161	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   3162	if (!mboxq) {
   3163		lpfc_printf_vlog(vport, KERN_ERR,
   3164			LOG_TRACE_EVENT, "2892 Failed to allocate "
   3165			"init_vfi mailbox\n");
   3166		return;
   3167	}
   3168	lpfc_init_vfi(mboxq, vport);
   3169	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
   3170	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
   3171	if (rc == MBX_NOT_FINISHED) {
   3172		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   3173				 "2893 Failed to issue init_vfi mailbox\n");
   3174		mempool_free(mboxq, vport->phba->mbox_mem_pool);
   3175	}
   3176}
   3177
   3178/**
   3179 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
   3180 * @phba: pointer to lpfc hba data structure.
   3181 * @mboxq: pointer to mailbox data structure.
   3182 *
   3183 * This function handles completion of init vpi mailbox command.
   3184 */
   3185void
   3186lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
   3187{
   3188	struct lpfc_vport *vport = mboxq->vport;
   3189	struct lpfc_nodelist *ndlp;
   3190	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
   3191
   3192	if (mboxq->u.mb.mbxStatus) {
   3193		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   3194				 "2609 Init VPI mailbox failed 0x%x\n",
   3195				 mboxq->u.mb.mbxStatus);
   3196		mempool_free(mboxq, phba->mbox_mem_pool);
   3197		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
   3198		return;
   3199	}
   3200	spin_lock_irq(shost->host_lock);
   3201	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
   3202	spin_unlock_irq(shost->host_lock);
   3203
   3204	/* If this port is physical port or FDISC is done, do reg_vpi */
    3205	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
    3206		ndlp = lpfc_findnode_did(vport, Fabric_DID);
    3207		if (!ndlp)
    3208			lpfc_printf_vlog(vport, KERN_ERR,
    3209					 LOG_TRACE_EVENT,
    3210					 "2731 Cannot find fabric "
    3211					 "controller node\n");
    3212		else
    3213			lpfc_register_new_vport(phba, vport, ndlp);
    3214		mempool_free(mboxq, phba->mbox_mem_pool);
    3215		return;
    3216	}
   3217
   3218	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
   3219		lpfc_initial_fdisc(vport);
   3220	else {
   3221		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
   3222		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   3223				 "2606 No NPIV Fabric support\n");
   3224	}
   3225	mempool_free(mboxq, phba->mbox_mem_pool);
   3226	return;
   3227}
   3228
   3229/**
   3230 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
   3231 * @vport: pointer to lpfc_vport data structure.
   3232 *
    3233 * This function issues an init_vpi mailbox command to initialize the
    3234 * VPI for the vport.
   3235 */
   3236void
   3237lpfc_issue_init_vpi(struct lpfc_vport *vport)
   3238{
   3239	LPFC_MBOXQ_t *mboxq;
   3240	int rc, vpi;
   3241
   3242	if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
   3243		vpi = lpfc_alloc_vpi(vport->phba);
   3244		if (!vpi) {
   3245			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   3246					 "3303 Failed to obtain vport vpi\n");
   3247			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
   3248			return;
   3249		}
   3250		vport->vpi = vpi;
   3251	}
   3252
   3253	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
   3254	if (!mboxq) {
   3255		lpfc_printf_vlog(vport, KERN_ERR,
   3256			LOG_TRACE_EVENT, "2607 Failed to allocate "
   3257			"init_vpi mailbox\n");
   3258		return;
   3259	}
   3260	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
   3261	mboxq->vport = vport;
   3262	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
   3263	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
   3264	if (rc == MBX_NOT_FINISHED) {
   3265		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   3266				 "2608 Failed to issue init_vpi mailbox\n");
   3267		mempool_free(mboxq, vport->phba->mbox_mem_pool);
   3268	}
   3269}
   3270
   3271/**
    3272 * lpfc_start_fdiscs - send fdiscs for each vport on this port.
   3273 * @phba: pointer to lpfc hba data structure.
   3274 *
   3275 * This function loops through the list of vports on the @phba and issues an
   3276 * FDISC if possible.
   3277 */
   3278void
   3279lpfc_start_fdiscs(struct lpfc_hba *phba)
   3280{
   3281	struct lpfc_vport **vports;
   3282	int i;
   3283
   3284	vports = lpfc_create_vport_work_array(phba);
   3285	if (vports != NULL) {
   3286		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
   3287			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
   3288				continue;
    3289			/* There is no vpi for this vport */
   3290			if (vports[i]->vpi > phba->max_vpi) {
   3291				lpfc_vport_set_state(vports[i],
   3292						     FC_VPORT_FAILED);
   3293				continue;
   3294			}
   3295			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
   3296				lpfc_vport_set_state(vports[i],
   3297						     FC_VPORT_LINKDOWN);
   3298				continue;
   3299			}
   3300			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
   3301				lpfc_issue_init_vpi(vports[i]);
   3302				continue;
   3303			}
   3304			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
   3305				lpfc_initial_fdisc(vports[i]);
   3306			else {
   3307				lpfc_vport_set_state(vports[i],
   3308						     FC_VPORT_NO_FABRIC_SUPP);
   3309				lpfc_printf_vlog(vports[i], KERN_ERR,
   3310						 LOG_TRACE_EVENT,
   3311						 "0259 No NPIV "
   3312						 "Fabric support\n");
   3313			}
   3314		}
   3315	}
   3316	lpfc_destroy_vport_work_array(phba, vports);
   3317}
   3318
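        /**
         * lpfc_mbx_cmpl_reg_vfi - Completion handler for the reg_vfi mbox command.
         * @phba: pointer to lpfc hba data structure.
         * @mboxq: pointer to mailbox data structure.
         *
         * This function handles completion of the REG_VFI mailbox command.
         * Mailbox errors are ignored for interface type 0 and for VFI-in-use;
         * other errors fall back to loop-map discovery in loop topology or mark
         * the vport failed. On success it marks the VFI/VPI registered and, when
         * the port is in LPFC_FABRIC_CFG_LINK state, starts discovery for the
         * topology in use.
         */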
   3319void
   3320lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
   3321{
   3322	struct lpfc_vport *vport = mboxq->vport;
   3323	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
   3324
   3325	/*
   3326	 * VFI not supported for interface type 0, so ignore any mailbox
   3327	 * error (except VFI in use) and continue with the discovery.
   3328	 */
   3329	if (mboxq->u.mb.mbxStatus &&
   3330	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
   3331			LPFC_SLI_INTF_IF_TYPE_0) &&
   3332	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
   3333		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   3334				 "2018 REG_VFI mbxStatus error x%x "
   3335				 "HBA state x%x\n",
   3336				 mboxq->u.mb.mbxStatus, vport->port_state);
   3337		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
   3338			/* FLOGI failed, use loop map to make discovery list */
   3339			lpfc_disc_list_loopmap(vport);
   3340			/* Start discovery */
   3341			lpfc_disc_start(vport);
   3342			goto out_free_mem;
   3343		}
   3344		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
   3345		goto out_free_mem;
   3346	}
   3347
    3348	/* If the VFI is already registered, there is nothing else to do
    3349	 * unless this was a VFI update and we are in PT2PT mode; then
    3350	 * we should drop through to set the port state to ready.
    3351	 */
   3352	if (vport->fc_flag & FC_VFI_REGISTERED)
   3353		if (!(phba->sli_rev == LPFC_SLI_REV4 &&
   3354		      vport->fc_flag & FC_PT2PT))
   3355			goto out_free_mem;
   3356
   3357	/* The VPI is implicitly registered when the VFI is registered */
   3358	spin_lock_irq(shost->host_lock);
   3359	vport->vpi_state |= LPFC_VPI_REGISTERED;
   3360	vport->fc_flag |= FC_VFI_REGISTERED;
   3361	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
   3362	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
   3363	spin_unlock_irq(shost->host_lock);
   3364
   3365	/* In case SLI4 FC loopback test, we are ready */
   3366	if ((phba->sli_rev == LPFC_SLI_REV4) &&
   3367	    (phba->link_flag & LS_LOOPBACK_MODE)) {
   3368		phba->link_state = LPFC_HBA_READY;
   3369		goto out_free_mem;
   3370	}
   3371
   3372	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
   3373			 "3313 cmpl reg vfi  port_state:%x fc_flag:%x myDid:%x "
   3374			 "alpacnt:%d LinkState:%x topology:%x\n",
   3375			 vport->port_state, vport->fc_flag, vport->fc_myDID,
   3376			 vport->phba->alpa_map[0],
   3377			 phba->link_state, phba->fc_topology);
   3378
   3379	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
   3380		/*
   3381		 * For private loop or for NPort pt2pt,
   3382		 * just start discovery and we are done.
   3383		 */
   3384		if ((vport->fc_flag & FC_PT2PT) ||
   3385		    ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
   3386		    !(vport->fc_flag & FC_PUBLIC_LOOP))) {
   3387
   3388			/* Use loop map to make discovery list */
   3389			lpfc_disc_list_loopmap(vport);
   3390			/* Start discovery */
   3391			if (vport->fc_flag & FC_PT2PT)
   3392				vport->port_state = LPFC_VPORT_READY;
   3393			else
   3394				lpfc_disc_start(vport);
   3395		} else {
   3396			lpfc_start_fdiscs(phba);
   3397			lpfc_do_scr_ns_plogi(phba, vport);
   3398		}
   3399	}
   3400
   3401out_free_mem:
   3402	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
   3403}
   3404
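        /**
         * lpfc_mbx_cmpl_read_sparam - Completion handler for READ_SPARAM mbox command.
         * @phba: pointer to lpfc hba data structure.
         * @pmb: pointer to mailbox data structure.
         *
         * This function copies the service parameters returned by READ_SPARAM
         * into the vport, derives E_D_TOV/R_A_TOV, updates the vport WWNs, and
         * issues any FLOGI that was deferred until the CSPs were current. On
         * mailbox error it takes the link down and issues CLEAR_LA.
         */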
   3405static void
   3406lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
   3407{
   3408	MAILBOX_t *mb = &pmb->u.mb;
   3409	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
   3410	struct lpfc_vport  *vport = pmb->vport;
   3411	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
   3412	struct serv_parm *sp = &vport->fc_sparam;
   3413	uint32_t ed_tov;
   3414
   3415	/* Check for error */
   3416	if (mb->mbxStatus) {
   3417		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
   3418		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   3419				 "0319 READ_SPARAM mbxStatus error x%x "
   3420				 "hba state x%x>\n",
   3421				 mb->mbxStatus, vport->port_state);
   3422		lpfc_linkdown(phba);
   3423		goto out;
   3424	}
   3425
   3426	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
   3427	       sizeof (struct serv_parm));
   3428
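        	/* Derive the timeouts: E_D_TOV ends up in ms, and R_A_TOV is
        	 * 2 * E_D_TOV converted to seconds, floored at FF_DEF_RATOV
        	 * for the initial FLOGI.
        	 */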
   3429	ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
   3430	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
   3431		ed_tov = (ed_tov + 999999) / 1000000;
   3432
   3433	phba->fc_edtov = ed_tov;
   3434	phba->fc_ratov = (2 * ed_tov) / 1000;
   3435	if (phba->fc_ratov < FF_DEF_RATOV) {
    3436		/* RA_TOV should be at least 10 sec for the initial FLOGI */
   3437		phba->fc_ratov = FF_DEF_RATOV;
   3438	}
   3439
   3440	lpfc_update_vport_wwn(vport);
   3441	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
   3442	if (vport->port_type == LPFC_PHYSICAL_PORT) {
   3443		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
   3444		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
   3445	}
   3446
   3447	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
   3448
   3449	/* Check if sending the FLOGI is being deferred to after we get
   3450	 * up to date CSPs from MBX_READ_SPARAM.
   3451	 */
   3452	if (phba->hba_flag & HBA_DEFER_FLOGI) {
   3453		lpfc_initial_flogi(vport);
   3454		phba->hba_flag &= ~HBA_DEFER_FLOGI;
   3455	}
   3456	return;
   3457
   3458out:
   3459	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
   3460	lpfc_issue_clear_la(phba, vport);
   3461}
   3462
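        /**
         * lpfc_mbx_process_link_up - Process a link-up event from READ_TOPOLOGY.
         * @phba: pointer to lpfc hba data structure.
         * @la: pointer to the READ_TOPOLOGY mailbox data.
         *
         * This function records the link speed and topology, brings the link up,
         * and issues READ_SPARAM followed by either CONFIG_LINK (FC mode) or, in
         * FCoE mode, the FCF table scan (adding the driver's default FCF record
         * first when FIP is not supported).
         */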
   3463static void
   3464lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
   3465{
   3466	struct lpfc_vport *vport = phba->pport;
   3467	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
   3468	struct Scsi_Host *shost;
   3469	int i;
   3470	int rc;
   3471	struct fcf_record *fcf_record;
   3472	uint32_t fc_flags = 0;
   3473	unsigned long iflags;
   3474
   3475	spin_lock_irqsave(&phba->hbalock, iflags);
   3476	phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
   3477
   3478	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
   3479		switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
   3480		case LPFC_LINK_SPEED_1GHZ:
   3481		case LPFC_LINK_SPEED_2GHZ:
   3482		case LPFC_LINK_SPEED_4GHZ:
   3483		case LPFC_LINK_SPEED_8GHZ:
   3484		case LPFC_LINK_SPEED_10GHZ:
   3485		case LPFC_LINK_SPEED_16GHZ:
   3486		case LPFC_LINK_SPEED_32GHZ:
   3487		case LPFC_LINK_SPEED_64GHZ:
   3488		case LPFC_LINK_SPEED_128GHZ:
   3489		case LPFC_LINK_SPEED_256GHZ:
   3490			break;
   3491		default:
   3492			phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
   3493			break;
   3494		}
   3495	}
   3496
   3497	if (phba->fc_topology &&
   3498	    phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
   3499		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
    3500				"3314 Topology changed was 0x%x is 0x%x\n",
   3501				phba->fc_topology,
   3502				bf_get(lpfc_mbx_read_top_topology, la));
   3503		phba->fc_topology_changed = 1;
   3504	}
   3505
   3506	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
   3507	phba->link_flag &= ~(LS_NPIV_FAB_SUPPORTED | LS_CT_VEN_RPA);
   3508
   3509	shost = lpfc_shost_from_vport(vport);
   3510	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
   3511		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
   3512
    3513		/* If npiv is enabled and this adapter supports npiv, log
    3514		 * a message that npiv is not supported in this topology.
    3515		 */
    3516		if (phba->cfg_enable_npiv && phba->max_vpi)
    3517			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
    3518				"1309 Link Up Event npiv not supported in loop "
    3519				"topology\n");
    3520		/* Get Loop Map information */
   3521		if (bf_get(lpfc_mbx_read_top_il, la))
   3522			fc_flags |= FC_LBIT;
   3523
   3524		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
   3525		i = la->lilpBde64.tus.f.bdeSize;
   3526
   3527		if (i == 0) {
   3528			phba->alpa_map[0] = 0;
   3529		} else {
   3530			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
   3531				int numalpa, j, k;
   3532				union {
   3533					uint8_t pamap[16];
   3534					struct {
   3535						uint32_t wd1;
   3536						uint32_t wd2;
   3537						uint32_t wd3;
   3538						uint32_t wd4;
   3539					} pa;
   3540				} un;
   3541				numalpa = phba->alpa_map[0];
   3542				j = 0;
   3543				while (j < numalpa) {
   3544					memset(un.pamap, 0, 16);
   3545					for (k = 1; j < numalpa; k++) {
   3546						un.pamap[k - 1] =
   3547							phba->alpa_map[j + 1];
   3548						j++;
   3549						if (k == 16)
   3550							break;
   3551					}
   3552					/* Link Up Event ALPA map */
   3553					lpfc_printf_log(phba,
   3554							KERN_WARNING,
   3555							LOG_LINK_EVENT,
   3556							"1304 Link Up Event "
   3557							"ALPA map Data: x%x "
   3558							"x%x x%x x%x\n",
   3559							un.pa.wd1, un.pa.wd2,
   3560							un.pa.wd3, un.pa.wd4);
   3561				}
   3562			}
   3563		}
   3564	} else {
   3565		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
   3566			if (phba->max_vpi && phba->cfg_enable_npiv &&
   3567			   (phba->sli_rev >= LPFC_SLI_REV3))
   3568				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
   3569		}
   3570		vport->fc_myDID = phba->fc_pref_DID;
   3571		fc_flags |= FC_LBIT;
   3572	}
   3573	spin_unlock_irqrestore(&phba->hbalock, iflags);
   3574
   3575	if (fc_flags) {
   3576		spin_lock_irqsave(shost->host_lock, iflags);
   3577		vport->fc_flag |= fc_flags;
   3578		spin_unlock_irqrestore(shost->host_lock, iflags);
   3579	}
   3580
   3581	lpfc_linkup(phba);
   3582	sparam_mbox = NULL;
   3583
   3584	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   3585	if (!sparam_mbox)
   3586		goto out;
   3587
   3588	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
   3589	if (rc) {
   3590		mempool_free(sparam_mbox, phba->mbox_mem_pool);
   3591		goto out;
   3592	}
   3593	sparam_mbox->vport = vport;
   3594	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
   3595	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
   3596	if (rc == MBX_NOT_FINISHED) {
   3597		lpfc_mbox_rsrc_cleanup(phba, sparam_mbox, MBOX_THD_UNLOCKED);
   3598		goto out;
   3599	}
   3600
   3601	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
   3602		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   3603		if (!cfglink_mbox)
   3604			goto out;
   3605		vport->port_state = LPFC_LOCAL_CFG_LINK;
   3606		lpfc_config_link(phba, cfglink_mbox);
   3607		cfglink_mbox->vport = vport;
   3608		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
   3609		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
   3610		if (rc == MBX_NOT_FINISHED) {
   3611			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
   3612			goto out;
   3613		}
   3614	} else {
   3615		vport->port_state = LPFC_VPORT_UNKNOWN;
   3616		/*
   3617		 * Add the driver's default FCF record at FCF index 0 now. This
   3618		 * is phase 1 implementation that support FCF index 0 and driver
   3619		 * defaults.
   3620		 */
   3621		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
   3622			fcf_record = kzalloc(sizeof(struct fcf_record),
   3623					GFP_KERNEL);
   3624			if (unlikely(!fcf_record)) {
   3625				lpfc_printf_log(phba, KERN_ERR,
   3626					LOG_TRACE_EVENT,
   3627					"2554 Could not allocate memory for "
   3628					"fcf record\n");
   3629				rc = -ENODEV;
   3630				goto out;
   3631			}
   3632
   3633			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
   3634						LPFC_FCOE_FCF_DEF_INDEX);
   3635			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
   3636			if (unlikely(rc)) {
   3637				lpfc_printf_log(phba, KERN_ERR,
   3638					LOG_TRACE_EVENT,
   3639					"2013 Could not manually add FCF "
   3640					"record 0, status %d\n", rc);
   3641				rc = -ENODEV;
   3642				kfree(fcf_record);
   3643				goto out;
   3644			}
   3645			kfree(fcf_record);
   3646		}
   3647		/*
   3648		 * The driver is expected to do FIP/FCF. Call the port
   3649		 * and get the FCF Table.
   3650		 */
   3651		spin_lock_irqsave(&phba->hbalock, iflags);
   3652		if (phba->hba_flag & FCF_TS_INPROG) {
   3653			spin_unlock_irqrestore(&phba->hbalock, iflags);
   3654			return;
   3655		}
   3656		/* This is the initial FCF discovery scan */
   3657		phba->fcf.fcf_flag |= FCF_INIT_DISC;
   3658		spin_unlock_irqrestore(&phba->hbalock, iflags);
   3659		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
   3660				"2778 Start FCF table scan at linkup\n");
   3661		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
   3662						     LPFC_FCOE_FCF_GET_FIRST);
   3663		if (rc) {
   3664			spin_lock_irqsave(&phba->hbalock, iflags);
   3665			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
   3666			spin_unlock_irqrestore(&phba->hbalock, iflags);
   3667			goto out;
   3668		}
   3669		/* Reset FCF roundrobin bmask for new discovery */
   3670		lpfc_sli4_clear_fcf_rr_bmask(phba);
   3671	}
   3672
   3673	/* Prepare for LINK up registrations */
   3674	memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
   3675	scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
   3676		  init_utsname()->nodename);
   3677	return;
   3678out:
   3679	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
   3680	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   3681			 "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
   3682			 vport->port_state, sparam_mbox, cfglink_mbox);
   3683	lpfc_issue_clear_la(phba, vport);
   3684	return;
   3685}
   3686
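        /**
         * lpfc_enable_la - Re-enable Link Attention processing.
         * @phba: pointer to lpfc hba data structure.
         *
         * This function sets LPFC_PROCESS_LA and, on SLI-3 and earlier, turns
         * the HC_LAINT_ENA bit back on in the Host Control register so further
         * link attention events are delivered.
         */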
   3687static void
   3688lpfc_enable_la(struct lpfc_hba *phba)
   3689{
   3690	uint32_t control;
   3691	struct lpfc_sli *psli = &phba->sli;
   3692	spin_lock_irq(&phba->hbalock);
   3693	psli->sli_flag |= LPFC_PROCESS_LA;
   3694	if (phba->sli_rev <= LPFC_SLI_REV3) {
   3695		control = readl(phba->HCregaddr);
   3696		control |= HC_LAINT_ENA;
   3697		writel(control, phba->HCregaddr);
   3698		readl(phba->HCregaddr); /* flush */
   3699	}
   3700	spin_unlock_irq(&phba->hbalock);
   3701}
   3702
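        /**
         * lpfc_mbx_issue_link_down - Handle a mailbox-reported link-down event.
         * @phba: pointer to lpfc hba data structure.
         *
         * This function takes the link down, re-enables link attention
         * interrupts, and releases any FCF that is no longer in use.
         */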
   3703static void
   3704lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
   3705{
    3706	lpfc_linkdown(phba);
    3707	/* Turn on Link Attention interrupts - no CLEAR_LA needed */
    3708	lpfc_enable_la(phba);
    3709	lpfc_unregister_unused_fcf(phba);
   3710}
   3711
   3712
   3713/*
   3714 * This routine handles processing a READ_TOPOLOGY mailbox
    3715 * command upon completion. It is set up in the LPFC_MBOXQ
   3716 * as the completion routine when the command is
   3717 * handed off to the SLI layer. SLI4 only.
   3718 */
   3719void
   3720lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
   3721{
   3722	struct lpfc_vport *vport = pmb->vport;
   3723	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
   3724	struct lpfc_mbx_read_top *la;
   3725	struct lpfc_sli_ring *pring;
   3726	MAILBOX_t *mb = &pmb->u.mb;
   3727	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
   3728	uint8_t attn_type;
   3729	unsigned long iflags;
   3730
   3731	/* Unblock ELS traffic */
   3732	pring = lpfc_phba_elsring(phba);
   3733	if (pring)
   3734		pring->flag &= ~LPFC_STOP_IOCB_EVENT;
   3735
   3736	/* Check for error */
   3737	if (mb->mbxStatus) {
   3738		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
   3739				"1307 READ_LA mbox error x%x state x%x\n",
   3740				mb->mbxStatus, vport->port_state);
   3741		lpfc_mbx_issue_link_down(phba);
   3742		phba->link_state = LPFC_HBA_ERROR;
   3743		goto lpfc_mbx_cmpl_read_topology_free_mbuf;
   3744	}
   3745
   3746	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
   3747	attn_type = bf_get(lpfc_mbx_read_top_att_type, la);
   3748
   3749	memcpy(&phba->alpa_map[0], mp->virt, 128);
   3750
   3751	spin_lock_irqsave(shost->host_lock, iflags);
   3752	if (bf_get(lpfc_mbx_read_top_pb, la))
   3753		vport->fc_flag |= FC_BYPASSED_MODE;
   3754	else
   3755		vport->fc_flag &= ~FC_BYPASSED_MODE;
   3756	spin_unlock_irqrestore(shost->host_lock, iflags);
   3757
   3758	if (phba->fc_eventTag <= la->eventTag) {
   3759		phba->fc_stat.LinkMultiEvent++;
   3760		if (attn_type == LPFC_ATT_LINK_UP)
   3761			if (phba->fc_eventTag != 0)
   3762				lpfc_linkdown(phba);
   3763	}
   3764
   3765	phba->fc_eventTag = la->eventTag;
   3766	if (phba->sli_rev < LPFC_SLI_REV4) {
   3767		spin_lock_irqsave(&phba->hbalock, iflags);
   3768		if (bf_get(lpfc_mbx_read_top_mm, la))
   3769			phba->sli.sli_flag |= LPFC_MENLO_MAINT;
   3770		else
   3771			phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
   3772		spin_unlock_irqrestore(&phba->hbalock, iflags);
   3773	}
   3774
   3775	phba->link_events++;
   3776	if ((attn_type == LPFC_ATT_LINK_UP) &&
   3777	    !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
   3778		phba->fc_stat.LinkUp++;
   3779		if (phba->link_flag & LS_LOOPBACK_MODE) {
   3780			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
   3781					"1306 Link Up Event in loop back mode "
   3782					"x%x received Data: x%x x%x x%x x%x\n",
   3783					la->eventTag, phba->fc_eventTag,
   3784					bf_get(lpfc_mbx_read_top_alpa_granted,
   3785					       la),
   3786					bf_get(lpfc_mbx_read_top_link_spd, la),
   3787					phba->alpa_map[0]);
   3788		} else {
   3789			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
   3790					"1303 Link Up Event x%x received "
   3791					"Data: x%x x%x x%x x%x x%x x%x %d\n",
   3792					la->eventTag, phba->fc_eventTag,
   3793					bf_get(lpfc_mbx_read_top_alpa_granted,
   3794					       la),
   3795					bf_get(lpfc_mbx_read_top_link_spd, la),
   3796					phba->alpa_map[0],
   3797					bf_get(lpfc_mbx_read_top_mm, la),
   3798					bf_get(lpfc_mbx_read_top_fa, la),
   3799					phba->wait_4_mlo_maint_flg);
   3800		}
   3801		lpfc_mbx_process_link_up(phba, la);
   3802
   3803		if (phba->cmf_active_mode != LPFC_CFG_OFF)
   3804			lpfc_cmf_signal_init(phba);
   3805
   3806	} else if (attn_type == LPFC_ATT_LINK_DOWN ||
   3807		   attn_type == LPFC_ATT_UNEXP_WWPN) {
   3808		phba->fc_stat.LinkDown++;
   3809		if (phba->link_flag & LS_LOOPBACK_MODE)
   3810			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
   3811				"1308 Link Down Event in loop back mode "
   3812				"x%x received "
   3813				"Data: x%x x%x x%x\n",
   3814				la->eventTag, phba->fc_eventTag,
   3815				phba->pport->port_state, vport->fc_flag);
   3816		else if (attn_type == LPFC_ATT_UNEXP_WWPN)
   3817			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
   3818				"1313 Link Down Unexpected FA WWPN Event x%x "
   3819				"received Data: x%x x%x x%x x%x x%x\n",
   3820				la->eventTag, phba->fc_eventTag,
   3821				phba->pport->port_state, vport->fc_flag,
   3822				bf_get(lpfc_mbx_read_top_mm, la),
   3823				bf_get(lpfc_mbx_read_top_fa, la));
   3824		else
   3825			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
   3826				"1305 Link Down Event x%x received "
   3827				"Data: x%x x%x x%x x%x x%x\n",
   3828				la->eventTag, phba->fc_eventTag,
   3829				phba->pport->port_state, vport->fc_flag,
   3830				bf_get(lpfc_mbx_read_top_mm, la),
   3831				bf_get(lpfc_mbx_read_top_fa, la));
   3832		lpfc_mbx_issue_link_down(phba);
   3833	}
   3834	if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
   3835	    attn_type == LPFC_ATT_LINK_UP) {
   3836		if (phba->link_state != LPFC_LINK_DOWN) {
   3837			phba->fc_stat.LinkDown++;
   3838			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
   3839				"1312 Link Down Event x%x received "
   3840				"Data: x%x x%x x%x\n",
   3841				la->eventTag, phba->fc_eventTag,
   3842				phba->pport->port_state, vport->fc_flag);
   3843			lpfc_mbx_issue_link_down(phba);
   3844		} else
   3845			lpfc_enable_la(phba);
   3846
   3847		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
   3848				"1310 Menlo Maint Mode Link up Event x%x rcvd "
   3849				"Data: x%x x%x x%x\n",
   3850				la->eventTag, phba->fc_eventTag,
   3851				phba->pport->port_state, vport->fc_flag);
   3852		/*
   3853		 * The cmnd that triggered this will be waiting for this
   3854		 * signal.
   3855		 */
   3856		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
   3857		if (phba->wait_4_mlo_maint_flg) {
   3858			phba->wait_4_mlo_maint_flg = 0;
   3859			wake_up_interruptible(&phba->wait_4_mlo_m_q);
   3860		}
   3861	}
   3862
   3863	if ((phba->sli_rev < LPFC_SLI_REV4) &&
   3864	    bf_get(lpfc_mbx_read_top_fa, la)) {
   3865		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
   3866			lpfc_issue_clear_la(phba, vport);
   3867		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
   3868				"1311 fa %d\n",
   3869				bf_get(lpfc_mbx_read_top_fa, la));
   3870	}
   3871
   3872lpfc_mbx_cmpl_read_topology_free_mbuf:
   3873	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
   3874}
   3875
   3876/*
   3877 * This routine handles processing a REG_LOGIN mailbox
    3878 * command upon completion. It is set up in the LPFC_MBOXQ
   3879 * as the completion routine when the command is
   3880 * handed off to the SLI layer.
   3881 */
   3882void
   3883lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
   3884{
   3885	struct lpfc_vport  *vport = pmb->vport;
   3886	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
   3887	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
   3888
   3889	/* The driver calls the state machine with the pmb pointer
   3890	 * but wants to make sure a stale ctx_buf isn't acted on.
   3891	 * The ctx_buf is restored later and cleaned up.
   3892	 */
   3893	pmb->ctx_buf = NULL;
   3894	pmb->ctx_ndlp = NULL;
   3895
   3896	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY,
   3897			 "0002 rpi:%x DID:%x flg:%x %d x%px\n",
   3898			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
   3899			 kref_read(&ndlp->kref),
   3900			 ndlp);
   3901	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
   3902		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
   3903
   3904	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
   3905	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
    3906		/* We received an RSCN after issuing this
    3907		 * mbox reg login. We may have cycled
    3908		 * back through the state and be
    3909		 * back at the reg login state, so this
    3910		 * mbox needs to be ignored because
    3911		 * there is another reg login in
    3912		 * process.
    3913		 */
   3914		spin_lock_irq(&ndlp->lock);
   3915		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
   3916		spin_unlock_irq(&ndlp->lock);
   3917
   3918		/*
   3919		 * We cannot leave the RPI registered because
   3920		 * if we go thru discovery again for this ndlp
   3921		 * a subsequent REG_RPI will fail.
   3922		 */
   3923		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
   3924		lpfc_unreg_rpi(vport, ndlp);
   3925	}
   3926
   3927	/* Call state machine */
   3928	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
   3929	pmb->ctx_buf = mp;
   3930	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
   3931
   3932	/* decrement the node reference count held for this callback
   3933	 * function.
   3934	 */
   3935	lpfc_nlp_put(ndlp);
   3936
   3937	return;
   3938}
   3939
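        /**
         * lpfc_mbx_cmpl_unreg_vpi - Completion handler for the unreg_vpi mbox command.
         * @phba: pointer to lpfc hba data structure.
         * @pmb: pointer to mailbox data structure.
         *
         * This function clears the VPI-registered state and marks the vport as
         * needing REG_VPI again. A busy VPI (status 0x9700) triggers an HBA
         * reset unless the driver is unloading.
         */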
   3940static void
   3941lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
   3942{
   3943	MAILBOX_t *mb = &pmb->u.mb;
   3944	struct lpfc_vport *vport = pmb->vport;
   3945	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
   3946
   3947	switch (mb->mbxStatus) {
   3948	case 0x0011:
   3949	case 0x0020:
   3950		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
   3951				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
   3952				 mb->mbxStatus);
   3953		break;
   3954	/* If VPI is busy, reset the HBA */
   3955	case 0x9700:
   3956		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   3957			"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
   3958			vport->vpi, mb->mbxStatus);
   3959		if (!(phba->pport->load_flag & FC_UNLOADING))
   3960			lpfc_workq_post_event(phba, NULL, NULL,
   3961				LPFC_EVT_RESET_HBA);
   3962	}
   3963	spin_lock_irq(shost->host_lock);
   3964	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
   3965	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
   3966	spin_unlock_irq(shost->host_lock);
   3967	mempool_free(pmb, phba->mbox_mem_pool);
   3968	lpfc_cleanup_vports_rrqs(vport, NULL);
   3969	/*
   3970	 * This shost reference might have been taken at the beginning of
   3971	 * lpfc_vport_delete()
   3972	 */
   3973	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
   3974		scsi_host_put(shost);
   3975}
   3976
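        /**
         * lpfc_mbx_unreg_vpi - Issue an unreg_vpi mailbox command for a vport.
         * @vport: pointer to lpfc_vport data structure.
         *
         * Returns 0 if the mailbox was issued, nonzero on allocation or
         * issue failure.
         */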
   3977int
   3978lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
   3979{
   3980	struct lpfc_hba  *phba = vport->phba;
   3981	LPFC_MBOXQ_t *mbox;
   3982	int rc;
   3983
   3984	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   3985	if (!mbox)
   3986		return 1;
   3987
   3988	lpfc_unreg_vpi(phba, vport->vpi, mbox);
   3989	mbox->vport = vport;
   3990	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
   3991	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
   3992	if (rc == MBX_NOT_FINISHED) {
   3993		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   3994				 "1800 Could not issue unreg_vpi\n");
   3995		mempool_free(mbox, phba->mbox_mem_pool);
   3996		return rc;
   3997	}
   3998	return 0;
   3999}
   4000
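        /**
         * lpfc_mbx_cmpl_reg_vpi - Completion handler for the reg_vpi mbox command.
         * @phba: pointer to lpfc hba data structure.
         * @pmb: pointer to mailbox data structure.
         *
         * On failure this function marks the vport failed and refreshes the
         * NVMe local/target port bindings. On success it marks the VPI
         * registered, issues PLOGIs to any NPR nodes, and sets the vport
         * state to ready.
         */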
   4001static void
   4002lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
   4003{
   4004	struct lpfc_vport *vport = pmb->vport;
   4005	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
   4006	MAILBOX_t *mb = &pmb->u.mb;
   4007
   4008	switch (mb->mbxStatus) {
   4009	case 0x0011:
   4010	case 0x9601:
   4011	case 0x9602:
   4012		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
   4013				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
   4014				 mb->mbxStatus);
   4015		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
   4016		spin_lock_irq(shost->host_lock);
   4017		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
   4018		spin_unlock_irq(shost->host_lock);
   4019		vport->fc_myDID = 0;
   4020
   4021		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
   4022		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
   4023			if (phba->nvmet_support)
   4024				lpfc_nvmet_update_targetport(phba);
   4025			else
   4026				lpfc_nvme_update_localport(vport);
   4027		}
   4028		goto out;
   4029	}
   4030
   4031	spin_lock_irq(shost->host_lock);
   4032	vport->vpi_state |= LPFC_VPI_REGISTERED;
   4033	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
   4034	spin_unlock_irq(shost->host_lock);
   4035	vport->num_disc_nodes = 0;
   4036	/* go thru NPR list and issue ELS PLOGIs */
   4037	if (vport->fc_npr_cnt)
   4038		lpfc_els_disc_plogi(vport);
   4039
   4040	if (!vport->num_disc_nodes) {
   4041		spin_lock_irq(shost->host_lock);
   4042		vport->fc_flag &= ~FC_NDISC_ACTIVE;
   4043		spin_unlock_irq(shost->host_lock);
   4044		lpfc_can_disctmo(vport);
   4045	}
   4046	vport->port_state = LPFC_VPORT_READY;
   4047
   4048out:
   4049	mempool_free(pmb, phba->mbox_mem_pool);
   4050	return;
   4051}
   4052
   4053/**
   4054 * lpfc_create_static_vport - Read HBA config region to create static vports.
   4055 * @phba: pointer to lpfc hba data structure.
   4056 *
    4057 * This routine issues a DUMP mailbox command for config region 22 to get
    4058 * the list of static vports to be created. The function creates vports
    4059 * based on the information returned from the HBA.
   4060 **/
   4061void
   4062lpfc_create_static_vport(struct lpfc_hba *phba)
   4063{
   4064	LPFC_MBOXQ_t *pmb = NULL;
   4065	MAILBOX_t *mb;
   4066	struct static_vport_info *vport_info;
   4067	int mbx_wait_rc = 0, i;
   4068	struct fc_vport_identifiers vport_id;
   4069	struct fc_vport *new_fc_vport;
   4070	struct Scsi_Host *shost;
   4071	struct lpfc_vport *vport;
   4072	uint16_t offset = 0;
   4073	uint8_t *vport_buff;
   4074	struct lpfc_dmabuf *mp;
   4075	uint32_t byte_count = 0;
   4076
   4077	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   4078	if (!pmb) {
   4079		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   4080				"0542 lpfc_create_static_vport failed to"
   4081				" allocate mailbox memory\n");
   4082		return;
   4083	}
   4084	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
   4085	mb = &pmb->u.mb;
   4086
   4087	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
   4088	if (!vport_info) {
   4089		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   4090				"0543 lpfc_create_static_vport failed to"
   4091				" allocate vport_info\n");
   4092		mempool_free(pmb, phba->mbox_mem_pool);
   4093		return;
   4094	}
   4095
   4096	vport_buff = (uint8_t *) vport_info;
   4097	do {
    4098		/* Each while-loop iteration frees the DMA buffer from
    4099		 * the previous iteration because the mbox is reused and
    4100		 * the dump routine is a single-use construct.
    4101		 */
   4102		if (pmb->ctx_buf) {
   4103			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
   4104			lpfc_mbuf_free(phba, mp->virt, mp->phys);
   4105			kfree(mp);
   4106			pmb->ctx_buf = NULL;
   4107		}
   4108		if (lpfc_dump_static_vport(phba, pmb, offset))
   4109			goto out;
   4110
   4111		pmb->vport = phba->pport;
   4112		mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
   4113							LPFC_MBOX_TMO);
   4114
   4115		if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
   4116			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
   4117				"0544 lpfc_create_static_vport failed to"
   4118				" issue dump mailbox command ret 0x%x "
   4119				"status 0x%x\n",
   4120				mbx_wait_rc, mb->mbxStatus);
   4121			goto out;
   4122		}
   4123
   4124		if (phba->sli_rev == LPFC_SLI_REV4) {
   4125			byte_count = pmb->u.mqe.un.mb_words[5];
   4126			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
   4127			if (byte_count > sizeof(struct static_vport_info) -
   4128					offset)
   4129				byte_count = sizeof(struct static_vport_info)
   4130					- offset;
   4131			memcpy(vport_buff + offset, mp->virt, byte_count);
   4132			offset += byte_count;
   4133		} else {
   4134			if (mb->un.varDmp.word_cnt >
   4135				sizeof(struct static_vport_info) - offset)
   4136				mb->un.varDmp.word_cnt =
   4137					sizeof(struct static_vport_info)
   4138						- offset;
   4139			byte_count = mb->un.varDmp.word_cnt;
   4140			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
   4141				vport_buff + offset,
   4142				byte_count);
   4143
   4144			offset += byte_count;
   4145		}
   4146
   4147	} while (byte_count &&
   4148		offset < sizeof(struct static_vport_info));
   4149
   4150
   4151	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
   4152		((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
   4153			!= VPORT_INFO_REV)) {
   4154		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   4155				"0545 lpfc_create_static_vport bad"
   4156				" information header 0x%x 0x%x\n",
   4157				le32_to_cpu(vport_info->signature),
   4158				le32_to_cpu(vport_info->rev) &
   4159				VPORT_INFO_REV_MASK);
   4160
   4161		goto out;
   4162	}
   4163
   4164	shost = lpfc_shost_from_vport(phba->pport);
   4165
   4166	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
   4167		memset(&vport_id, 0, sizeof(vport_id));
   4168		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
   4169		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
   4170		if (!vport_id.port_name || !vport_id.node_name)
   4171			continue;
   4172
   4173		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
   4174		vport_id.vport_type = FC_PORTTYPE_NPIV;
   4175		vport_id.disable = false;
   4176		new_fc_vport = fc_vport_create(shost, 0, &vport_id);
   4177
   4178		if (!new_fc_vport) {
   4179			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
   4180				"0546 lpfc_create_static_vport failed to"
   4181				" create vport\n");
   4182			continue;
   4183		}
   4184
   4185		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
   4186		vport->vport_flag |= STATIC_VPORT;
   4187	}
   4188
   4189out:
   4190	kfree(vport_info);
   4191	if (mbx_wait_rc != MBX_TIMEOUT)
   4192		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
   4193}
   4194
   4195/*
   4196 * This routine handles processing a Fabric REG_LOGIN mailbox
    4197 * command upon completion. It is set up in the LPFC_MBOXQ
   4198 * as the completion routine when the command is
   4199 * handed off to the SLI layer.
   4200 */
   4201void
   4202lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
   4203{
   4204	struct lpfc_vport *vport = pmb->vport;
   4205	MAILBOX_t *mb = &pmb->u.mb;
   4206	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
   4207	struct Scsi_Host *shost;
   4208
   4209	pmb->ctx_ndlp = NULL;
   4210
   4211	if (mb->mbxStatus) {
   4212		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   4213				 "0258 Register Fabric login error: 0x%x\n",
   4214				 mb->mbxStatus);
   4215		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
   4216		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
   4217			/* FLOGI failed, use loop map to make discovery list */
   4218			lpfc_disc_list_loopmap(vport);
   4219
   4220			/* Start discovery */
   4221			lpfc_disc_start(vport);
    4222			/* Decrement the reference count to the ndlp after
    4223			 * all references to the ndlp are done.
    4224			 */
   4225			lpfc_nlp_put(ndlp);
   4226			return;
   4227		}
   4228
   4229		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
    4230		/* Decrement the reference count to the ndlp after all
    4231		 * references to the ndlp are done.
    4232		 */
   4233		lpfc_nlp_put(ndlp);
   4234		return;
   4235	}
   4236
   4237	if (phba->sli_rev < LPFC_SLI_REV4)
   4238		ndlp->nlp_rpi = mb->un.varWords[0];
   4239	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
   4240	ndlp->nlp_type |= NLP_FABRIC;
   4241	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
   4242
   4243	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
    4244		/* When the physical port receives a LOGO, do not
    4245		 * start vport discovery */
   4246		if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
   4247			lpfc_start_fdiscs(phba);
   4248		else {
   4249			shost = lpfc_shost_from_vport(vport);
   4250			spin_lock_irq(shost->host_lock);
    4251			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
   4252			spin_unlock_irq(shost->host_lock);
   4253		}
   4254		lpfc_do_scr_ns_plogi(phba, vport);
   4255	}
   4256
   4257	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
   4258
    4259	/* Drop the reference count from the mbox at the end after
    4260	 * all the current references to the ndlp have been done.
    4261	 */
   4262	lpfc_nlp_put(ndlp);
   4263	return;
   4264}
   4265
   4266 /*
   4267  * This routine will issue a GID_FT for each FC4 Type supported
   4268  * by the driver. ALL GID_FTs must complete before discovery is started.
   4269  */
   4270int
   4271lpfc_issue_gidft(struct lpfc_vport *vport)
   4272{
   4273	/* Good status, issue CT Request to NameServer */
   4274	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
   4275	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
   4276		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
   4277			/* Cannot issue NameServer FCP Query, so finish up
   4278			 * discovery
   4279			 */
   4280			lpfc_printf_vlog(vport, KERN_ERR,
   4281					 LOG_TRACE_EVENT,
   4282					 "0604 %s FC TYPE %x %s\n",
   4283					 "Failed to issue GID_FT to ",
   4284					 FC_TYPE_FCP,
   4285					 "Finishing discovery.");
   4286			return 0;
   4287		}
   4288		vport->gidft_inp++;
   4289	}
   4290
   4291	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
   4292	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
   4293		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
   4294			/* Cannot issue NameServer NVME Query, so finish up
   4295			 * discovery
   4296			 */
   4297			lpfc_printf_vlog(vport, KERN_ERR,
   4298					 LOG_TRACE_EVENT,
   4299					 "0605 %s FC_TYPE %x %s %d\n",
   4300					 "Failed to issue GID_FT to ",
   4301					 FC_TYPE_NVME,
   4302					 "Finishing discovery: gidftinp ",
   4303					 vport->gidft_inp);
   4304			if (vport->gidft_inp == 0)
   4305				return 0;
   4306		} else
   4307			vport->gidft_inp++;
   4308	}
   4309	return vport->gidft_inp;
   4310}
   4311
   4312/**
   4313 * lpfc_issue_gidpt - issue a GID_PT for all N_Ports
   4314 * @vport: The virtual port for which this call is being executed.
   4315 *
   4316 * This routine will issue a GID_PT to get a list of all N_Ports
   4317 *
   4318 * Return value :
   4319 *   0 - Failure to issue a GID_PT
   4320 *   1 - GID_PT issued
   4321 **/
   4322int
   4323lpfc_issue_gidpt(struct lpfc_vport *vport)
   4324{
   4325	/* Good status, issue CT Request to NameServer */
   4326	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) {
   4327		/* Cannot issue NameServer FCP Query, so finish up
   4328		 * discovery
   4329		 */
   4330		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   4331				 "0606 %s Port TYPE %x %s\n",
   4332				 "Failed to issue GID_PT to ",
   4333				 GID_PT_N_PORT,
   4334				 "Finishing discovery.");
   4335		return 0;
   4336	}
   4337	vport->gidft_inp++;
   4338	return 1;
   4339}
   4340
   4341/*
   4342 * This routine handles processing a NameServer REG_LOGIN mailbox
    4343 * command upon completion. It is set up in the LPFC_MBOXQ
   4344 * as the completion routine when the command is
   4345 * handed off to the SLI layer.
   4346 */
   4347void
   4348lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
   4349{
   4350	MAILBOX_t *mb = &pmb->u.mb;
   4351	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
   4352	struct lpfc_vport *vport = pmb->vport;
   4353	int rc;
   4354
   4355	pmb->ctx_ndlp = NULL;
   4356	vport->gidft_inp = 0;
   4357
   4358	if (mb->mbxStatus) {
   4359		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   4360				 "0260 Register NameServer error: 0x%x\n",
   4361				 mb->mbxStatus);
   4362
   4363out:
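        		/* Note: this label sits inside the error path but is
        		 * also reached from below when lpfc_issue_gidft()
        		 * returns 0, so both paths share the same teardown.
        		 */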
   4364		/* decrement the node reference count held for this
   4365		 * callback function.
   4366		 */
   4367		lpfc_nlp_put(ndlp);
   4368		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
   4369
   4370		/* If the node is not registered with the scsi or nvme
   4371		 * transport, remove the fabric node.  The failed reg_login
   4372		 * is terminal.
   4373		 */
   4374		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
   4375			spin_lock_irq(&ndlp->lock);
   4376			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
   4377			spin_unlock_irq(&ndlp->lock);
   4378			lpfc_nlp_not_used(ndlp);
   4379		}
   4380
   4381		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
   4382			/*
   4383			 * RegLogin failed, use loop map to make discovery
   4384			 * list
   4385			 */
   4386			lpfc_disc_list_loopmap(vport);
   4387
   4388			/* Start discovery */
   4389			lpfc_disc_start(vport);
   4390			return;
   4391		}
   4392		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
   4393		return;
   4394	}
   4395
   4396	if (phba->sli_rev < LPFC_SLI_REV4)
   4397		ndlp->nlp_rpi = mb->un.varWords[0];
   4398	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
   4399	ndlp->nlp_type |= NLP_FABRIC;
   4400	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
   4401	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
   4402			 "0003 rpi:%x DID:%x flg:%x %d x%px\n",
   4403			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
   4404			 kref_read(&ndlp->kref),
   4405			 ndlp);
   4406
   4407	if (vport->port_state < LPFC_VPORT_READY) {
   4408		/* Link up discovery requires Fabric registration. */
   4409		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
   4410		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
   4411		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
   4412		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
   4413
   4414		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
   4415		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
   4416			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);
   4417
   4418		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
   4419		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
   4420			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
   4421				    FC_TYPE_NVME);
   4422
   4423		/* Issue SCR just before NameServer GID_FT Query */
   4424		lpfc_issue_els_scr(vport, 0);
   4425
    4426		/* Link was bounced or a Fabric LOGO occurred.  Start EDC
    4427		 * with initial FW values provided the congestion mode is
    4428		 * not off.  Note that signals may or may not be supported
    4429		 * by the adapter, but FPIN is provided by default when
    4430		 * support for one or both signals is missing.
    4431		 */
   4432		if (phba->cmf_active_mode != LPFC_CFG_OFF) {
   4433			phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
   4434			phba->cgn_reg_signal = phba->cgn_init_reg_signal;
   4435			rc = lpfc_issue_els_edc(vport, 0);
   4436			lpfc_printf_log(phba, KERN_INFO,
   4437					LOG_INIT | LOG_ELS | LOG_DISCOVERY,
   4438					"4220 EDC issue error x%x, Data: x%x\n",
   4439					rc, phba->cgn_init_reg_signal);
   4440		} else {
   4441			lpfc_issue_els_rdf(vport, 0);
   4442		}
   4443	}
   4444
   4445	vport->fc_ns_retry = 0;
   4446	if (lpfc_issue_gidft(vport) == 0)
   4447		goto out;
   4448
    4449	/*
    4450	 * At this point we may need to wait for multiple
    4451	 * SLI_CTNS_GID_FT CT commands to complete before we start discovery.
    4452	 *
    4453	 * Decrement the node reference count held for this
    4454	 * callback function.
    4455	 */
   4456	lpfc_nlp_put(ndlp);
   4457	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
   4458	return;
   4459}
   4460
   4461/*
   4462 * This routine handles processing a Fabric Controller REG_LOGIN mailbox
    4463 * command upon completion. It is set up in the LPFC_MBOXQ
   4464 * as the completion routine when the command is handed off to the SLI layer.
   4465 */
   4466void
   4467lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
   4468{
   4469	struct lpfc_vport *vport = pmb->vport;
   4470	MAILBOX_t *mb = &pmb->u.mb;
   4471	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
   4472
   4473	pmb->ctx_ndlp = NULL;
   4474	if (mb->mbxStatus) {
   4475		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   4476				 "0933 %s: Register FC login error: 0x%x\n",
   4477				 __func__, mb->mbxStatus);
   4478		goto out;
   4479	}
   4480
   4481	lpfc_check_nlp_post_devloss(vport, ndlp);
   4482
   4483	if (phba->sli_rev < LPFC_SLI_REV4)
   4484		ndlp->nlp_rpi = mb->un.varWords[0];
   4485
   4486	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
   4487			 "0934 %s: Complete FC x%x RegLogin rpi x%x ste x%x\n",
   4488			 __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
   4489			 ndlp->nlp_state);
   4490
   4491	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
   4492	ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
   4493	ndlp->nlp_type |= NLP_FABRIC;
   4494	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
   4495
   4496 out:
   4497	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
   4498
   4499	/* Drop the reference count from the mbox at the end after
    4500	 * all the current references to the ndlp have been done.
   4501	 */
   4502	lpfc_nlp_put(ndlp);
   4503}
   4504
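        /**
         * lpfc_register_remote_port - Register a node with the FC transport.
         * @vport: pointer to lpfc_vport data structure.
         * @ndlp: pointer to the node to register.
         *
         * This function registers @ndlp with the FC transport via
         * fc_remote_port_add(), fills in the rport's frame size, classes, and
         * roles, and caches the SCSI target id. It returns early in NVMe-only
         * mode or while the vport is unloading.
         */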
   4505static void
   4506lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
   4507{
   4508	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
   4509	struct fc_rport  *rport;
   4510	struct lpfc_rport_data *rdata;
   4511	struct fc_rport_identifiers rport_ids;
   4512	struct lpfc_hba  *phba = vport->phba;
   4513	unsigned long flags;
   4514
   4515	if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
   4516		return;
   4517
   4518	/* Remote port has reappeared. Re-register w/ FC transport */
   4519	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
   4520	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
   4521	rport_ids.port_id = ndlp->nlp_DID;
   4522	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
    4523
   4525	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
   4526			      "rport add:       did:x%x flg:x%x type x%x",
   4527			      ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
   4528
   4529	/* Don't add the remote port if unloading. */
   4530	if (vport->load_flag & FC_UNLOADING)
   4531		return;
   4532
   4533	/*
   4534	 * Disassociate any older association between this ndlp and rport
   4535	 */
   4536	if (ndlp->rport) {
   4537		rdata = ndlp->rport->dd_data;
   4538		rdata->pnode = NULL;
   4539	}
   4540
   4541	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
   4542	if (!rport) {
   4543		dev_printk(KERN_WARNING, &phba->pcidev->dev,
   4544			   "Warning: fc_remote_port_add failed\n");
   4545		return;
   4546	}
   4547
   4548	/* Successful port add.  Complete initializing node data */
   4549	rport->maxframe_size = ndlp->nlp_maxframe;
   4550	rport->supported_classes = ndlp->nlp_class_sup;
   4551	rdata = rport->dd_data;
   4552	rdata->pnode = lpfc_nlp_get(ndlp);
   4553	if (!rdata->pnode) {
   4554		dev_warn(&phba->pcidev->dev,
   4555			 "Warning - node ref failed. Unreg rport\n");
   4556		fc_remote_port_delete(rport);
   4557		ndlp->rport = NULL;
   4558		return;
   4559	}
   4560
   4561	spin_lock_irqsave(&ndlp->lock, flags);
   4562	ndlp->fc4_xpt_flags |= SCSI_XPT_REGD;
   4563	spin_unlock_irqrestore(&ndlp->lock, flags);
   4564
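        	/* Translate the ndlp's FC-4 types into FC transport roles and
        	 * report them with a role change; fc_remote_port_add() above
        	 * was issued with the roles still FC_RPORT_ROLE_UNKNOWN.
        	 */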
   4565	if (ndlp->nlp_type & NLP_FCP_TARGET)
   4566		rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
   4567	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
   4568		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
   4569	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
   4570		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
   4571	if (ndlp->nlp_type & NLP_NVME_TARGET)
   4572		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
   4573	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
   4574		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
   4575
    4576	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
   4577		fc_remote_port_rolechg(rport, rport_ids.roles);
   4578
   4579	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
   4580			 "3183 %s rport x%px DID x%x, role x%x refcnt %d\n",
   4581			 __func__, rport, rport->port_id, rport->roles,
   4582			 kref_read(&ndlp->kref));
   4583
   4584	if ((rport->scsi_target_id != -1) &&
   4585	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
   4586		ndlp->nlp_sid = rport->scsi_target_id;
   4587	}
   4588
   4589	return;
   4590}
   4591
   4592static void
   4593lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
   4594{
   4595	struct fc_rport *rport = ndlp->rport;
   4596	struct lpfc_vport *vport = ndlp->vport;
   4597
   4598	if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
   4599		return;
   4600
   4601	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
   4602		"rport delete:    did:x%x flg:x%x type x%x",
   4603		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
   4604
   4605	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
   4606			 "3184 rport unregister x%06x, rport x%px "
   4607			 "xptflg x%x refcnt %d\n",
   4608			 ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags,
   4609			 kref_read(&ndlp->kref));
   4610
   4611	fc_remote_port_delete(rport);
   4612	lpfc_nlp_put(ndlp);
   4613}
   4614
   4615static void
   4616lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
   4617{
   4618	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
   4619	unsigned long iflags;
   4620
   4621	spin_lock_irqsave(shost->host_lock, iflags);
   4622	switch (state) {
   4623	case NLP_STE_UNUSED_NODE:
   4624		vport->fc_unused_cnt += count;
   4625		break;
   4626	case NLP_STE_PLOGI_ISSUE:
   4627		vport->fc_plogi_cnt += count;
   4628		break;
   4629	case NLP_STE_ADISC_ISSUE:
   4630		vport->fc_adisc_cnt += count;
   4631		break;
   4632	case NLP_STE_REG_LOGIN_ISSUE:
   4633		vport->fc_reglogin_cnt += count;
   4634		break;
   4635	case NLP_STE_PRLI_ISSUE:
   4636		vport->fc_prli_cnt += count;
   4637		break;
   4638	case NLP_STE_UNMAPPED_NODE:
   4639		vport->fc_unmap_cnt += count;
   4640		break;
   4641	case NLP_STE_MAPPED_NODE:
   4642		vport->fc_map_cnt += count;
   4643		break;
   4644	case NLP_STE_NPR_NODE:
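        		/* Guard against underflow: never let the NPR count go
        		 * negative when decrementing an already-zero counter.
        		 */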
   4645		if (vport->fc_npr_cnt == 0 && count == -1)
   4646			vport->fc_npr_cnt = 0;
   4647		else
   4648			vport->fc_npr_cnt += count;
   4649		break;
   4650	}
   4651	spin_unlock_irqrestore(shost->host_lock, iflags);
   4652}
   4653
   4654/* Register a node with backend if not already done */
   4655void
   4656lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
   4657{
   4658	unsigned long iflags;
   4659
   4660	lpfc_check_nlp_post_devloss(vport, ndlp);
   4661
   4662	spin_lock_irqsave(&ndlp->lock, iflags);
   4663	if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
   4664		/* Already registered with backend, trigger rescan */
   4665		spin_unlock_irqrestore(&ndlp->lock, iflags);
   4666
   4667		if (ndlp->fc4_xpt_flags & NVME_XPT_REGD &&
   4668		    ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) {
   4669			lpfc_nvme_rescan_port(vport, ndlp);
   4670		}
   4671		return;
   4672	}
   4673
   4674	ndlp->fc4_xpt_flags |= NLP_XPT_REGD;
   4675	spin_unlock_irqrestore(&ndlp->lock, iflags);
   4676
   4677	if (lpfc_valid_xpt_node(ndlp)) {
   4678		vport->phba->nport_event_cnt++;
    4679		/* Tell the fc transport about the port, if we haven't
    4680		 * already. If we have, and it's a scsi entity, be sure
    4681		 * to unblock any attached scsi devices.
    4682		 */
   4683		lpfc_register_remote_port(vport, ndlp);
   4684	}
   4685
   4686	/* We are done if we do not have any NVME remote node */
   4687	if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME))
   4688		return;
   4689
   4690	/* Notify the NVME transport of this new rport. */
   4691	if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
   4692			ndlp->nlp_fc4_type & NLP_FC4_NVME) {
   4693		if (vport->phba->nvmet_support == 0) {
   4694			/* Register this rport with the transport.
   4695			 * Only NVME Target Rports are registered with
   4696			 * the transport.
   4697			 */
   4698			if (ndlp->nlp_type & NLP_NVME_TARGET) {
   4699				vport->phba->nport_event_cnt++;
   4700				lpfc_nvme_register_port(vport, ndlp);
   4701			}
   4702		} else {
   4703			/* Just take an NDLP ref count since the
   4704			 * target does not register rports.
   4705			 */
   4706			lpfc_nlp_get(ndlp);
   4707		}
   4708	}
   4709}
   4710
   4711/* Unregister a node with backend if not already done */
   4712void
   4713lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
   4714{
   4715	unsigned long iflags;
   4716
   4717	spin_lock_irqsave(&ndlp->lock, iflags);
   4718	if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
   4719		spin_unlock_irqrestore(&ndlp->lock, iflags);
   4720		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
   4721				 "0999 %s Not regd: ndlp x%px rport x%px DID "
   4722				 "x%x FLG x%x XPT x%x\n",
   4723				  __func__, ndlp, ndlp->rport, ndlp->nlp_DID,
   4724				  ndlp->nlp_flag, ndlp->fc4_xpt_flags);
   4725		return;
   4726	}
   4727
   4728	ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
   4729	spin_unlock_irqrestore(&ndlp->lock, iflags);
   4730
   4731	if (ndlp->rport &&
   4732	    ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
   4733		vport->phba->nport_event_cnt++;
   4734		lpfc_unregister_remote_port(ndlp);
   4735	} else if (!ndlp->rport) {
   4736		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
   4737				 "1999 %s NDLP in devloss x%px DID x%x FLG x%x"
   4738				 " XPT x%x refcnt %d\n",
   4739				 __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
   4740				 ndlp->fc4_xpt_flags,
   4741				 kref_read(&ndlp->kref));
   4742	}
   4743
   4744	if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
   4745		vport->phba->nport_event_cnt++;
   4746		if (vport->phba->nvmet_support == 0) {
   4747			/* Start devloss if target. */
   4748			if (ndlp->nlp_type & NLP_NVME_TARGET)
   4749				lpfc_nvme_unregister_port(vport, ndlp);
   4750		} else {
   4751			/* NVMET has no upcall. */
   4752			lpfc_nlp_put(ndlp);
   4753		}
   4754	}
    4756}
   4757
   4758/*
    4759 * ADISC state change handling
   4760 */
   4761static void
   4762lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
   4763		int new_state)
   4764{
   4765	switch (new_state) {
   4766	/*
   4767	 * Any state to ADISC_ISSUE
   4768	 * Do nothing, adisc cmpl handling will trigger state changes
   4769	 */
   4770	case NLP_STE_ADISC_ISSUE:
   4771		break;
   4772
   4773	/*
   4774	 * ADISC_ISSUE to mapped states
   4775	 * Trigger a registration with backend, it will be nop if
   4776	 * already registered
   4777	 */
   4778	case NLP_STE_UNMAPPED_NODE:
   4779		ndlp->nlp_type |= NLP_FC_NODE;
   4780		fallthrough;
   4781	case NLP_STE_MAPPED_NODE:
   4782		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
   4783		lpfc_nlp_reg_node(vport, ndlp);
   4784		break;
   4785
   4786	/*
   4787	 * ADISC_ISSUE to non-mapped states
   4788	 * We are moving from ADISC_ISSUE to a non-mapped state because
    4789	 * ADISC failed; we would have skipped unregistering with the
    4790	 * backend, so attempt it now.
   4791	 */
   4792	case NLP_STE_NPR_NODE:
   4793		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
   4794		fallthrough;
   4795	default:
   4796		lpfc_nlp_unreg_node(vport, ndlp);
   4797		break;
   4798	}
    4800}
   4801
   4802static void
   4803lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
   4804		       int old_state, int new_state)
   4805{
   4806	/* Trap ADISC changes here */
   4807	if (new_state == NLP_STE_ADISC_ISSUE ||
   4808	    old_state == NLP_STE_ADISC_ISSUE) {
   4809		lpfc_handle_adisc_state(vport, ndlp, new_state);
   4810		return;
   4811	}
   4812
   4813	if (new_state == NLP_STE_UNMAPPED_NODE) {
   4814		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
   4815		ndlp->nlp_type |= NLP_FC_NODE;
   4816	}
   4817	if (new_state == NLP_STE_MAPPED_NODE)
   4818		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
   4819	if (new_state == NLP_STE_NPR_NODE)
   4820		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
   4821
   4822	/* Reg/Unreg for FCP and NVME Transport interface */
   4823	if ((old_state == NLP_STE_MAPPED_NODE ||
   4824	     old_state == NLP_STE_UNMAPPED_NODE)) {
    4825		/* For nodes marked for ADISC, handle the unreg in the ADISC
    4826		 * cmpl if the link is up. On link down, do unreg_node.
   4827		 */
   4828		if (!(ndlp->nlp_flag & NLP_NPR_ADISC) ||
   4829		    !lpfc_is_link_up(vport->phba))
   4830			lpfc_nlp_unreg_node(vport, ndlp);
   4831	}
   4832
   4833	if (new_state ==  NLP_STE_MAPPED_NODE ||
   4834	    new_state == NLP_STE_UNMAPPED_NODE)
   4835		lpfc_nlp_reg_node(vport, ndlp);
   4836
   4837	if ((new_state ==  NLP_STE_MAPPED_NODE) &&
   4838		(vport->stat_data_enabled)) {
   4839		/*
    4840		 * A new target was discovered; if there is no buffer for
    4841		 * statistical data collection, allocate one.
   4842		 */
   4843		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
   4844					 sizeof(struct lpfc_scsicmd_bkt),
   4845					 GFP_KERNEL);
   4846
   4847		if (!ndlp->lat_data)
   4848			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   4849				"0286 lpfc_nlp_state_cleanup failed to "
   4850				"allocate statistical data buffer DID "
   4851				"0x%x\n", ndlp->nlp_DID);
   4852	}
   4853	/*
   4854	 * If the node just added to Mapped list was an FCP target,
   4855	 * but the remote port registration failed or assigned a target
   4856	 * id outside the presentable range - move the node to the
   4857	 * Unmapped List.
   4858	 */
   4859	if ((new_state == NLP_STE_MAPPED_NODE) &&
   4860	    (ndlp->nlp_type & NLP_FCP_TARGET) &&
   4861	    (!ndlp->rport ||
   4862	     ndlp->rport->scsi_target_id == -1 ||
   4863	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
   4864		spin_lock_irq(&ndlp->lock);
   4865		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
   4866		spin_unlock_irq(&ndlp->lock);
   4867		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
   4868	}
   4869}
   4870
   4871static char *
   4872lpfc_nlp_state_name(char *buffer, size_t size, int state)
   4873{
   4874	static char *states[] = {
   4875		[NLP_STE_UNUSED_NODE] = "UNUSED",
   4876		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
   4877		[NLP_STE_ADISC_ISSUE] = "ADISC",
   4878		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
   4879		[NLP_STE_PRLI_ISSUE] = "PRLI",
   4880		[NLP_STE_LOGO_ISSUE] = "LOGO",
   4881		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
   4882		[NLP_STE_MAPPED_NODE] = "MAPPED",
   4883		[NLP_STE_NPR_NODE] = "NPR",
   4884	};
   4885
   4886	if (state < NLP_STE_MAX_STATE && states[state])
   4887		strlcpy(buffer, states[state], size);
   4888	else
   4889		snprintf(buffer, size, "unknown (%d)", state);
   4890	return buffer;
   4891}
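        /* Usage sketch: given a local buffer,
         *	char name[16];
         *	lpfc_nlp_state_name(name, sizeof(name), NLP_STE_PLOGI_ISSUE);
         * name then holds "PLOGI"; out-of-range states format as
         * "unknown (<state>)".
         */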
   4892
   4893void
   4894lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
   4895		   int state)
   4896{
   4897	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
   4898	int  old_state = ndlp->nlp_state;
   4899	int node_dropped = ndlp->nlp_flag & NLP_DROPPED;
   4900	char name1[16], name2[16];
   4901
   4902	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
   4903			 "0904 NPort state transition x%06x, %s -> %s\n",
   4904			 ndlp->nlp_DID,
   4905			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
   4906			 lpfc_nlp_state_name(name2, sizeof(name2), state));
   4907
   4908	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
   4909		"node statechg    did:x%x old:%d ste:%d",
   4910		ndlp->nlp_DID, old_state, state);
   4911
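        	/* A node leaving the DROPPED/UNUSED state was down to its
        	 * final reference; take a fresh reference before it is put
        	 * back to work.
        	 */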
   4912	if (node_dropped && old_state == NLP_STE_UNUSED_NODE &&
   4913	    state != NLP_STE_UNUSED_NODE) {
   4914		ndlp->nlp_flag &= ~NLP_DROPPED;
   4915		lpfc_nlp_get(ndlp);
   4916	}
   4917
   4918	if (old_state == NLP_STE_NPR_NODE &&
   4919	    state != NLP_STE_NPR_NODE)
   4920		lpfc_cancel_retry_delay_tmo(vport, ndlp);
   4921	if (old_state == NLP_STE_UNMAPPED_NODE) {
   4922		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
   4923		ndlp->nlp_type &= ~NLP_FC_NODE;
   4924	}
   4925
   4926	if (list_empty(&ndlp->nlp_listp)) {
   4927		spin_lock_irq(shost->host_lock);
   4928		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
   4929		spin_unlock_irq(shost->host_lock);
   4930	} else if (old_state)
   4931		lpfc_nlp_counters(vport, old_state, -1);
   4932
   4933	ndlp->nlp_state = state;
   4934	lpfc_nlp_counters(vport, state, 1);
   4935	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
   4936}
   4937
   4938void
   4939lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
   4940{
   4941	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
   4942
   4943	if (list_empty(&ndlp->nlp_listp)) {
   4944		spin_lock_irq(shost->host_lock);
   4945		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
   4946		spin_unlock_irq(shost->host_lock);
   4947	}
   4948}
   4949
   4950void
   4951lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
   4952{
   4953	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
   4954
   4955	lpfc_cancel_retry_delay_tmo(vport, ndlp);
   4956	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
   4957		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
   4958	spin_lock_irq(shost->host_lock);
   4959	list_del_init(&ndlp->nlp_listp);
   4960	spin_unlock_irq(shost->host_lock);
   4961	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
   4962				NLP_STE_UNUSED_NODE);
   4963}
   4964
   4965/**
   4966 * lpfc_initialize_node - Initialize all fields of node object
   4967 * @vport: Pointer to Virtual Port object.
   4968 * @ndlp: Pointer to FC node object.
   4969 * @did: FC_ID of the node.
   4970 *
    4971 * This function is always called when a node object needs to be initialized.
    4972 * It initializes all the fields of the node object. Although the reference
    4973 * to phba from @ndlp can be obtained indirectly through its reference to
    4974 * @vport, a direct reference to phba is taken here by @ndlp. This is
    4975 * because the life-span of @ndlp may go beyond the existence of @vport,
    4976 * as the final release of ndlp is determined by its reference count, and
    4977 * operations on @ndlp need the reference to phba.
   4978 **/
   4979static inline void
   4980lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
   4981	uint32_t did)
   4982{
   4983	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
   4984	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
   4985	timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
   4986	INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp);
   4987
   4988	ndlp->nlp_DID = did;
   4989	ndlp->vport = vport;
   4990	ndlp->phba = vport->phba;
   4991	ndlp->nlp_sid = NLP_NO_SID;
   4992	ndlp->nlp_fc4_type = NLP_FC4_NONE;
   4993	kref_init(&ndlp->kref);
   4994	atomic_set(&ndlp->cmd_pending, 0);
   4995	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
   4996	ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
   4997}
   4998
   4999void
   5000lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
   5001{
   5002	/*
   5003	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
   5004	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
    5005	 * the ndlp from the vport. The ndlp stays marked as UNUSED on the
    5006	 * list until ALL other outstanding threads have completed. We check
    5007	 * that the ndlp is not already in the UNUSED state before we proceed.
   5008	 */
   5009	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
   5010		return;
   5011	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
   5012	ndlp->nlp_flag |= NLP_DROPPED;
   5013	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
   5014		lpfc_cleanup_vports_rrqs(vport, ndlp);
   5015		lpfc_unreg_rpi(vport, ndlp);
   5016	}
   5017
   5018	lpfc_nlp_put(ndlp);
   5019	return;
   5020}
   5021
   5022/*
    5023 * Start / restart the rescue timer for Discovery / RSCN handling
   5024 */
   5025void
   5026lpfc_set_disctmo(struct lpfc_vport *vport)
   5027{
   5028	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
   5029	struct lpfc_hba  *phba = vport->phba;
   5030	uint32_t tmo;
   5031
   5032	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
   5033		/* For FAN, timeout should be greater than edtov */
   5034		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
   5035	} else {
    5036		/* Normal discovery timeout should be greater than the ELS/CT
    5037		 * timeout; the FC spec states we need 3 * RATOV for CT requests.
   5038		 */
   5039		tmo = ((phba->fc_ratov * 3) + 3);
   5040	}
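        	/* e.g. with an fc_ratov of 10 seconds this yields a 33 second
        	 * discovery timeout: 3 * RATOV plus 3 seconds of slack.
        	 */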
    5041
   5043	if (!timer_pending(&vport->fc_disctmo)) {
   5044		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
   5045			"set disc timer:  tmo:x%x state:x%x flg:x%x",
   5046			tmo, vport->port_state, vport->fc_flag);
   5047	}
   5048
   5049	mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
   5050	spin_lock_irq(shost->host_lock);
   5051	vport->fc_flag |= FC_DISC_TMO;
   5052	spin_unlock_irq(shost->host_lock);
   5053
   5054	/* Start Discovery Timer state <hba_state> */
   5055	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
   5056			 "0247 Start Discovery Timer state x%x "
   5057			 "Data: x%x x%lx x%x x%x\n",
   5058			 vport->port_state, tmo,
   5059			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
   5060			 vport->fc_adisc_cnt);
   5061
   5062	return;
   5063}
   5064
   5065/*
   5066 * Cancel rescue timer for Discovery / RSCN handling
   5067 */
   5068int
   5069lpfc_can_disctmo(struct lpfc_vport *vport)
   5070{
   5071	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
   5072	unsigned long iflags;
   5073
   5074	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
   5075		"can disc timer:  state:x%x rtry:x%x flg:x%x",
   5076		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
   5077
    5078	/* Turn off the discovery timer if it's running */
   5079	if (vport->fc_flag & FC_DISC_TMO ||
   5080	    timer_pending(&vport->fc_disctmo)) {
   5081		spin_lock_irqsave(shost->host_lock, iflags);
   5082		vport->fc_flag &= ~FC_DISC_TMO;
   5083		spin_unlock_irqrestore(shost->host_lock, iflags);
   5084		del_timer_sync(&vport->fc_disctmo);
   5085		spin_lock_irqsave(&vport->work_port_lock, iflags);
   5086		vport->work_port_events &= ~WORKER_DISC_TMO;
   5087		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
   5088	}
   5089
   5090	/* Cancel Discovery Timer state <hba_state> */
   5091	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
   5092			 "0248 Cancel Discovery Timer state x%x "
   5093			 "Data: x%x x%x x%x\n",
   5094			 vport->port_state, vport->fc_flag,
   5095			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
   5096	return 0;
   5097}
   5098
   5099/*
   5100 * Check specified ring for outstanding IOCB on the SLI queue
   5101 * Return true if iocb matches the specified nport
   5102 */
   5103int
   5104lpfc_check_sli_ndlp(struct lpfc_hba *phba,
   5105		    struct lpfc_sli_ring *pring,
   5106		    struct lpfc_iocbq *iocb,
   5107		    struct lpfc_nodelist *ndlp)
   5108{
   5109	struct lpfc_vport *vport = ndlp->vport;
   5110	u8 ulp_command;
   5111	u16 ulp_context;
   5112	u32 remote_id;
   5113
   5114	if (iocb->vport != vport)
   5115		return 0;
   5116
   5117	ulp_command = get_job_cmnd(phba, iocb);
   5118	ulp_context = get_job_ulpcontext(phba, iocb);
   5119	remote_id = get_job_els_rsp64_did(phba, iocb);
   5120
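        	/* On the ELS ring an iocb matches by owning ndlp or by remote
        	 * DID; the fallthroughs below let the command types share those
        	 * checks.  On the FCP ring the match is by registered rpi
        	 * (ulp_context).
        	 */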
   5121	if (pring->ringno == LPFC_ELS_RING) {
   5122		switch (ulp_command) {
   5123		case CMD_GEN_REQUEST64_CR:
   5124			if (iocb->ndlp == ndlp)
   5125				return 1;
   5126			fallthrough;
   5127		case CMD_ELS_REQUEST64_CR:
   5128			if (remote_id == ndlp->nlp_DID)
   5129				return 1;
   5130			fallthrough;
   5131		case CMD_XMIT_ELS_RSP64_CX:
   5132			if (iocb->ndlp == ndlp)
   5133				return 1;
   5134		}
   5135	} else if (pring->ringno == LPFC_FCP_RING) {
   5136		/* Skip match check if waiting to relogin to FCP target */
   5137		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
   5138		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
   5139			return 0;
   5140		}
   5141		if (ulp_context == ndlp->nlp_rpi)
   5142			return 1;
   5143	}
   5144	return 0;
   5145}
   5146
   5147static void
   5148__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
   5149		struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
   5150		struct list_head *dequeue_list)
   5151{
   5152	struct lpfc_iocbq *iocb, *next_iocb;
   5153
   5154	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
   5155		/* Check to see if iocb matches the nport */
   5156		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
   5157			/* match, dequeue */
   5158			list_move_tail(&iocb->list, dequeue_list);
   5159	}
   5160}
   5161
   5162static void
   5163lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
   5164		struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
   5165{
   5166	struct lpfc_sli *psli = &phba->sli;
   5167	uint32_t i;
   5168
   5169	spin_lock_irq(&phba->hbalock);
   5170	for (i = 0; i < psli->num_rings; i++)
   5171		__lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
   5172						dequeue_list);
   5173	spin_unlock_irq(&phba->hbalock);
   5174}
   5175
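        /* The SLI4 variant below must also take each work queue's ring_lock,
         * since SLI4 keeps a per-WQ ring rather than the fixed SLI3 ring
         * array walked above.
         */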
   5176static void
   5177lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
   5178		struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
   5179{
   5180	struct lpfc_sli_ring *pring;
   5181	struct lpfc_queue *qp = NULL;
   5182
   5183	spin_lock_irq(&phba->hbalock);
   5184	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
   5185		pring = qp->pring;
   5186		if (!pring)
   5187			continue;
   5188		spin_lock(&pring->ring_lock);
   5189		__lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
   5190		spin_unlock(&pring->ring_lock);
   5191	}
   5192	spin_unlock_irq(&phba->hbalock);
   5193}
   5194
   5195/*
   5196 * Free resources / clean up outstanding I/Os
   5197 * associated with nlp_rpi in the LPFC_NODELIST entry.
   5198 */
   5199static int
   5200lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
   5201{
   5202	LIST_HEAD(completions);
   5203
   5204	lpfc_fabric_abort_nport(ndlp);
   5205
   5206	/*
   5207	 * Everything that matches on txcmplq will be returned
   5208	 * by firmware with a no rpi error.
   5209	 */
   5210	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
   5211		if (phba->sli_rev != LPFC_SLI_REV4)
   5212			lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
   5213		else
   5214			lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
   5215	}
   5216
   5217	/* Cancel all the IOCBs from the completions list */
   5218	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
   5219			      IOERR_SLI_ABORTED);
   5220
   5221	return 0;
   5222}
   5223
   5224/**
   5225 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
   5226 * @phba: Pointer to HBA context object.
   5227 * @pmb: Pointer to mailbox object.
   5228 *
   5229 * This function will issue an ELS LOGO command after completing
   5230 * the UNREG_RPI.
   5231 **/
   5232static void
   5233lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
   5234{
   5235	struct lpfc_vport  *vport = pmb->vport;
   5236	struct lpfc_nodelist *ndlp;
   5237
   5238	ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp);
   5239	if (!ndlp)
   5240		return;
   5241	lpfc_issue_els_logo(vport, ndlp, 0);
   5242
   5243	/* Check to see if there are any deferred events to process */
   5244	if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
   5245	    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
   5246		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
   5247				 "1434 UNREG cmpl deferred logo x%x "
   5248				 "on NPort x%x Data: x%x x%px\n",
   5249				 ndlp->nlp_rpi, ndlp->nlp_DID,
   5250				 ndlp->nlp_defer_did, ndlp);
   5251
   5252		ndlp->nlp_flag &= ~NLP_UNREG_INP;
   5253		ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
   5254		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
   5255	} else {
   5256		/* NLP_RELEASE_RPI is only set for SLI4 ports. */
   5257		if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
   5258			lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
   5259			spin_lock_irq(&ndlp->lock);
   5260			ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
   5261			ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
   5262			spin_unlock_irq(&ndlp->lock);
   5263		}
   5264		spin_lock_irq(&ndlp->lock);
   5265		ndlp->nlp_flag &= ~NLP_UNREG_INP;
   5266		spin_unlock_irq(&ndlp->lock);
   5267	}
   5268
   5269	/* The node has an outstanding reference for the unreg. Now
   5270	 * that the LOGO action and cleanup are finished, release
   5271	 * resources.
   5272	 */
   5273	lpfc_nlp_put(ndlp);
   5274	mempool_free(pmb, phba->mbox_mem_pool);
   5275}
   5276
   5277/*
   5278 * Sets the mailbox completion handler to be used for the
   5279 * unreg_rpi command. The handler varies based on the state of
   5280 * the port and what will be happening to the rpi next.
   5281 */
   5282static void
   5283lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
   5284	struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
   5285{
   5286	unsigned long iflags;
   5287
   5288	/* Driver always gets a reference on the mailbox job
   5289	 * in support of async jobs.
   5290	 */
   5291	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
   5292	if (!mbox->ctx_ndlp)
   5293		return;
   5294
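        	/* Completion handler selection, in priority order:
        	 *  1) a LOGO is pending -> lpfc_nlp_logo_unreg
        	 *  2) SLI4 IF_TYPE >= 2, not unloading, node still referenced
        	 *     -> lpfc_sli4_unreg_rpi_cmpl_clr
        	 *  3) otherwise -> lpfc_sli_def_mbox_cmpl, marking the rpi
        	 *     for release when unloading on an SLI4 port
        	 */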
   5295	if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
   5296		mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
   5297
   5298	} else if (phba->sli_rev == LPFC_SLI_REV4 &&
   5299		   (!(vport->load_flag & FC_UNLOADING)) &&
   5300		    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
   5301				      LPFC_SLI_INTF_IF_TYPE_2) &&
   5302		    (kref_read(&ndlp->kref) > 0)) {
   5303		mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
   5304	} else {
   5305		if (vport->load_flag & FC_UNLOADING) {
   5306			if (phba->sli_rev == LPFC_SLI_REV4) {
   5307				spin_lock_irqsave(&ndlp->lock, iflags);
   5308				ndlp->nlp_flag |= NLP_RELEASE_RPI;
   5309				spin_unlock_irqrestore(&ndlp->lock, iflags);
   5310			}
   5311		}
   5312		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
   5313	}
   5314}
   5315
   5316/*
   5317 * Free rpi associated with LPFC_NODELIST entry.
   5318 * This routine is called from lpfc_freenode(), when we are removing
   5319 * a LPFC_NODELIST entry. It is also called if the driver initiates a
   5320 * LOGO that completes successfully, and we are waiting to PLOGI back
   5321 * to the remote NPort. In addition, it is called after we receive
    5322 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
   5323 * we are waiting to PLOGI back to the remote NPort.
   5324 */
   5325int
   5326lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
   5327{
   5328	struct lpfc_hba *phba = vport->phba;
   5329	LPFC_MBOXQ_t    *mbox;
   5330	int rc, acc_plogi = 1;
   5331	uint16_t rpi;
   5332
   5333	if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
   5334	    ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
   5335		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
   5336			lpfc_printf_vlog(vport, KERN_INFO,
   5337					 LOG_NODE | LOG_DISCOVERY,
   5338					 "3366 RPI x%x needs to be "
   5339					 "unregistered nlp_flag x%x "
   5340					 "did x%x\n",
   5341					 ndlp->nlp_rpi, ndlp->nlp_flag,
   5342					 ndlp->nlp_DID);
   5343
   5344		/* If there is already an UNREG in progress for this ndlp,
   5345		 * no need to queue up another one.
   5346		 */
   5347		if (ndlp->nlp_flag & NLP_UNREG_INP) {
   5348			lpfc_printf_vlog(vport, KERN_INFO,
   5349					 LOG_NODE | LOG_DISCOVERY,
   5350					 "1436 unreg_rpi SKIP UNREG x%x on "
   5351					 "NPort x%x deferred x%x  flg x%x "
   5352					 "Data: x%px\n",
   5353					 ndlp->nlp_rpi, ndlp->nlp_DID,
   5354					 ndlp->nlp_defer_did,
   5355					 ndlp->nlp_flag, ndlp);
   5356			goto out;
   5357		}
   5358
   5359		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   5360		if (mbox) {
   5361			/* SLI4 ports require the physical rpi value. */
   5362			rpi = ndlp->nlp_rpi;
   5363			if (phba->sli_rev == LPFC_SLI_REV4)
   5364				rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
   5365
   5366			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
   5367			mbox->vport = vport;
   5368			lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
   5369			if (!mbox->ctx_ndlp) {
   5370				mempool_free(mbox, phba->mbox_mem_pool);
   5371				return 1;
   5372			}
   5373
   5374			if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
   5375				/*
   5376				 * accept PLOGIs after unreg_rpi_cmpl
    5377				 * Accept PLOGIs only after unreg_rpi_cmpl.
   5378				acc_plogi = 0;
   5379			if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
   5380			    Fabric_DID_MASK) &&
   5381			    (!(vport->fc_flag & FC_OFFLINE_MODE)))
   5382				ndlp->nlp_flag |= NLP_UNREG_INP;
   5383
   5384			lpfc_printf_vlog(vport, KERN_INFO,
   5385					 LOG_NODE | LOG_DISCOVERY,
   5386					 "1433 unreg_rpi UNREG x%x on "
   5387					 "NPort x%x deferred flg x%x "
   5388					 "Data:x%px\n",
   5389					 ndlp->nlp_rpi, ndlp->nlp_DID,
   5390					 ndlp->nlp_flag, ndlp);
   5391
   5392			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
   5393			if (rc == MBX_NOT_FINISHED) {
   5394				ndlp->nlp_flag &= ~NLP_UNREG_INP;
   5395				mempool_free(mbox, phba->mbox_mem_pool);
   5396				acc_plogi = 1;
   5397				lpfc_nlp_put(ndlp);
   5398			}
   5399		} else {
   5400			lpfc_printf_vlog(vport, KERN_INFO,
   5401					 LOG_NODE | LOG_DISCOVERY,
   5402					 "1444 Failed to allocate mempool "
   5403					 "unreg_rpi UNREG x%x, "
   5404					 "DID x%x, flag x%x, "
   5405					 "ndlp x%px\n",
   5406					 ndlp->nlp_rpi, ndlp->nlp_DID,
   5407					 ndlp->nlp_flag, ndlp);
   5408
   5409			/* Because mempool_alloc failed, we
   5410			 * will issue a LOGO here and keep the rpi alive if
   5411			 * not unloading.
   5412			 */
   5413			if (!(vport->load_flag & FC_UNLOADING)) {
   5414				ndlp->nlp_flag &= ~NLP_UNREG_INP;
   5415				lpfc_issue_els_logo(vport, ndlp, 0);
   5416				ndlp->nlp_prev_state = ndlp->nlp_state;
   5417				lpfc_nlp_set_state(vport, ndlp,
   5418						   NLP_STE_NPR_NODE);
   5419			}
   5420
   5421			return 1;
   5422		}
   5423		lpfc_no_rpi(phba, ndlp);
   5424out:
   5425		if (phba->sli_rev != LPFC_SLI_REV4)
   5426			ndlp->nlp_rpi = 0;
   5427		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
   5428		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
   5429		if (acc_plogi)
   5430			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
   5431		return 1;
   5432	}
   5433	ndlp->nlp_flag &= ~NLP_LOGO_ACC;
   5434	return 0;
   5435}
   5436
   5437/**
   5438 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
   5439 * @phba: pointer to lpfc hba data structure.
   5440 *
   5441 * This routine is invoked to unregister all the currently registered RPIs
   5442 * to the HBA.
   5443 **/
   5444void
   5445lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
   5446{
   5447	struct lpfc_vport **vports;
   5448	struct lpfc_nodelist *ndlp;
   5449	struct Scsi_Host *shost;
   5450	int i;
   5451
   5452	vports = lpfc_create_vport_work_array(phba);
   5453	if (!vports) {
   5454		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
    5455				"2884 Vport array allocation failed\n");
   5456		return;
   5457	}
   5458	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
   5459		shost = lpfc_shost_from_vport(vports[i]);
   5460		spin_lock_irq(shost->host_lock);
   5461		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
   5462			if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
    5463				/* lpfc_unreg_rpi's mempool_alloc might sleep; drop the lock */
   5464				spin_unlock_irq(shost->host_lock);
   5465				lpfc_unreg_rpi(vports[i], ndlp);
   5466				spin_lock_irq(shost->host_lock);
   5467			}
   5468		}
   5469		spin_unlock_irq(shost->host_lock);
   5470	}
   5471	lpfc_destroy_vport_work_array(phba, vports);
   5472}
   5473
   5474void
   5475lpfc_unreg_all_rpis(struct lpfc_vport *vport)
   5476{
   5477	struct lpfc_hba  *phba  = vport->phba;
   5478	LPFC_MBOXQ_t     *mbox;
   5479	int rc;
   5480
   5481	if (phba->sli_rev == LPFC_SLI_REV4) {
   5482		lpfc_sli4_unreg_all_rpis(vport);
   5483		return;
   5484	}
   5485
   5486	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   5487	if (mbox) {
   5488		lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
   5489				 mbox);
   5490		mbox->vport = vport;
   5491		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
   5492		mbox->ctx_ndlp = NULL;
   5493		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
   5494		if (rc != MBX_TIMEOUT)
   5495			mempool_free(mbox, phba->mbox_mem_pool);
   5496
   5497		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
   5498			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   5499					 "1836 Could not issue "
   5500					 "unreg_login(all_rpis) status %d\n",
   5501					 rc);
   5502	}
   5503}
   5504
   5505void
   5506lpfc_unreg_default_rpis(struct lpfc_vport *vport)
   5507{
   5508	struct lpfc_hba  *phba  = vport->phba;
   5509	LPFC_MBOXQ_t     *mbox;
   5510	int rc;
   5511
   5512	/* Unreg DID is an SLI3 operation. */
   5513	if (phba->sli_rev > LPFC_SLI_REV3)
   5514		return;
   5515
   5516	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   5517	if (mbox) {
   5518		lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
   5519			       mbox);
   5520		mbox->vport = vport;
   5521		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
   5522		mbox->ctx_ndlp = NULL;
   5523		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
   5524		if (rc != MBX_TIMEOUT)
   5525			mempool_free(mbox, phba->mbox_mem_pool);
   5526
   5527		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
   5528			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
   5529					 "1815 Could not issue "
   5530					 "unreg_did (default rpis) status %d\n",
   5531					 rc);
   5532	}
   5533}
   5534
   5535/*
   5536 * Free resources associated with LPFC_NODELIST entry
   5537 * so it can be freed.
   5538 */
   5539static int
   5540lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
   5541{
   5542	struct lpfc_hba  *phba = vport->phba;
   5543	LPFC_MBOXQ_t *mb, *nextmb;
   5544
   5545	/* Cleanup node for NPort <nlp_DID> */
   5546	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
   5547			 "0900 Cleanup node for NPort x%x "
   5548			 "Data: x%x x%x x%x\n",
   5549			 ndlp->nlp_DID, ndlp->nlp_flag,
   5550			 ndlp->nlp_state, ndlp->nlp_rpi);
   5551	lpfc_dequeue_node(vport, ndlp);
   5552
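        	/* A pending REG_LOGIN64 can reference this ndlp from three
        	 * places: the active mailbox, the completed-mailbox list and
        	 * the not-yet-issued mailbox queue.  The first two are detached
        	 * so the default completion handler runs; queued-but-unsent
        	 * commands are freed outright below.
        	 */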
   5553	/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
   5554
   5555	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
   5556	if ((mb = phba->sli.mbox_active)) {
   5557		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
   5558		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
   5559		   (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
   5560			mb->ctx_ndlp = NULL;
   5561			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
   5562		}
   5563	}
   5564
   5565	spin_lock_irq(&phba->hbalock);
   5566	/* Cleanup REG_LOGIN completions which are not yet processed */
   5567	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
   5568		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
   5569			(mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
   5570			(ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp))
   5571			continue;
   5572
   5573		mb->ctx_ndlp = NULL;
   5574		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
   5575	}
   5576
   5577	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
   5578		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
   5579		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
   5580		    (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
   5581			list_del(&mb->list);
   5582			lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
   5583
   5584			/* Don't invoke lpfc_nlp_put. The driver is in
   5585			 * lpfc_nlp_release context.
   5586			 */
   5587		}
   5588	}
   5589	spin_unlock_irq(&phba->hbalock);
   5590
   5591	lpfc_els_abort(phba, ndlp);
   5592
   5593	spin_lock_irq(&ndlp->lock);
   5594	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
   5595	spin_unlock_irq(&ndlp->lock);
   5596
   5597	ndlp->nlp_last_elscmd = 0;
   5598	del_timer_sync(&ndlp->nlp_delayfunc);
   5599
   5600	list_del_init(&ndlp->els_retry_evt.evt_listp);
   5601	list_del_init(&ndlp->dev_loss_evt.evt_listp);
   5602	list_del_init(&ndlp->recovery_evt.evt_listp);
   5603	lpfc_cleanup_vports_rrqs(vport, ndlp);
   5604
   5605	if (phba->sli_rev == LPFC_SLI_REV4)
   5606		ndlp->nlp_flag |= NLP_RELEASE_RPI;
   5607
   5608	return 0;
   5609}
   5610
   5611static int
   5612lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
   5613	      uint32_t did)
   5614{
   5615	D_ID mydid, ndlpdid, matchdid;
   5616
   5617	if (did == Bcast_DID)
   5618		return 0;
   5619
   5620	/* First check for Direct match */
   5621	if (ndlp->nlp_DID == did)
   5622		return 1;
   5623
    5624	/* Next check for an area/domain == 0 match */
   5625	mydid.un.word = vport->fc_myDID;
   5626	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
   5627		return 0;
   5628	}
   5629
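        	/* Example: with fc_myDID 0x0201EF on a loop topology, a node
        	 * stored with the bare ALPA 0x0000E8 matches a search for DID
        	 * 0x0201E8; the reverse pairing is handled symmetrically below.
        	 */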
   5630	matchdid.un.word = did;
   5631	ndlpdid.un.word = ndlp->nlp_DID;
   5632	if (matchdid.un.b.id == ndlpdid.un.b.id) {
   5633		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
   5634		    (mydid.un.b.area == matchdid.un.b.area)) {
   5635			/* This code is supposed to match the ID
   5636			 * for a private loop device that is
    5637			 * connected to an fl_port. But we need to
   5638			 * check that the port did not just go
   5639			 * from pt2pt to fabric or we could end
   5640			 * up matching ndlp->nlp_DID 000001 to
   5641			 * fabric DID 0x20101
   5642			 */
   5643			if ((ndlpdid.un.b.domain == 0) &&
   5644			    (ndlpdid.un.b.area == 0)) {
   5645				if (ndlpdid.un.b.id &&
   5646				    vport->phba->fc_topology ==
   5647				    LPFC_TOPOLOGY_LOOP)
   5648					return 1;
   5649			}
   5650			return 0;
   5651		}
   5652
   5653		matchdid.un.word = ndlp->nlp_DID;
   5654		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
   5655		    (mydid.un.b.area == ndlpdid.un.b.area)) {
   5656			if ((matchdid.un.b.domain == 0) &&
   5657			    (matchdid.un.b.area == 0)) {
   5658				if (matchdid.un.b.id)
   5659					return 1;
   5660			}
   5661		}
   5662	}
   5663	return 0;
   5664}
   5665
   5666/* Search for a nodelist entry */
   5667static struct lpfc_nodelist *
   5668__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
   5669{
   5670	struct lpfc_nodelist *ndlp;
   5671	uint32_t data1;
   5672
   5673	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
   5674		if (lpfc_matchdid(vport, ndlp, did)) {
   5675			data1 = (((uint32_t)ndlp->nlp_state << 24) |
   5676				 ((uint32_t)ndlp->nlp_xri << 16) |
   5677				 ((uint32_t)ndlp->nlp_type << 8)
   5678				 );
   5679			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
   5680					 "0929 FIND node DID "
   5681					 "Data: x%px x%x x%x x%x x%x x%px\n",
   5682					 ndlp, ndlp->nlp_DID,
   5683					 ndlp->nlp_flag, data1, ndlp->nlp_rpi,
   5684					 ndlp->active_rrqs_xri_bitmap);
   5685			return ndlp;
   5686		}
   5687	}
   5688
   5689	/* FIND node did <did> NOT FOUND */
   5690	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
   5691			 "0932 FIND node did x%x NOT FOUND.\n", did);
   5692	return NULL;
   5693}
   5694
   5695struct lpfc_nodelist *
   5696lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
   5697{
   5698	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
   5699	struct lpfc_nodelist *ndlp;
   5700	unsigned long iflags;
   5701
   5702	spin_lock_irqsave(shost->host_lock, iflags);
   5703	ndlp = __lpfc_findnode_did(vport, did);
   5704	spin_unlock_irqrestore(shost->host_lock, iflags);
   5705	return ndlp;
   5706}
   5707
   5708struct lpfc_nodelist *
   5709lpfc_findnode_mapped(struct lpfc_vport *vport)
   5710{
   5711	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
   5712	struct lpfc_nodelist *ndlp;
   5713	uint32_t data1;
   5714	unsigned long iflags;
   5715
   5716	spin_lock_irqsave(shost->host_lock, iflags);
   5717
   5718	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
   5719		if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
   5720		    ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
   5721			data1 = (((uint32_t)ndlp->nlp_state << 24) |
   5722				 ((uint32_t)ndlp->nlp_xri << 16) |
   5723				 ((uint32_t)ndlp->nlp_type << 8) |
   5724				 ((uint32_t)ndlp->nlp_rpi & 0xff));
   5725			spin_unlock_irqrestore(shost->host_lock, iflags);
   5726			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
   5727					 "2025 FIND node DID "
   5728					 "Data: x%px x%x x%x x%x x%px\n",
   5729					 ndlp, ndlp->nlp_DID,
   5730					 ndlp->nlp_flag, data1,
   5731					 ndlp->active_rrqs_xri_bitmap);
   5732			return ndlp;
   5733		}
   5734	}
   5735	spin_unlock_irqrestore(shost->host_lock, iflags);
   5736
    5737	/* FIND mapped node NOT FOUND */
   5738	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
   5739			 "2026 FIND mapped did NOT FOUND.\n");
   5740	return NULL;
   5741}
   5742
   5743struct lpfc_nodelist *
   5744lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
   5745{
   5746	struct lpfc_nodelist *ndlp;
   5747
   5748	ndlp = lpfc_findnode_did(vport, did);
   5749	if (!ndlp) {
   5750		if (vport->phba->nvmet_support)
   5751			return NULL;
   5752		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
   5753		    lpfc_rscn_payload_check(vport, did) == 0)
   5754			return NULL;
   5755		ndlp = lpfc_nlp_init(vport, did);
   5756		if (!ndlp)
   5757			return NULL;
   5758		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
   5759
   5760		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
   5761				 "6453 Setup New Node 2B_DISC x%x "
   5762				 "Data:x%x x%x x%x\n",
   5763				 ndlp->nlp_DID, ndlp->nlp_flag,
   5764				 ndlp->nlp_state, vport->fc_flag);
   5765
   5766		spin_lock_irq(&ndlp->lock);
   5767		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
   5768		spin_unlock_irq(&ndlp->lock);
   5769		return ndlp;
   5770	}
   5771
   5772	/* The NVME Target does not want to actively manage an rport.
   5773	 * The goal is to allow the target to reset its state and clear
   5774	 * pending IO in preparation for the initiator to recover.
   5775	 */
   5776	if ((vport->fc_flag & FC_RSCN_MODE) &&
   5777	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
   5778		if (lpfc_rscn_payload_check(vport, did)) {
   5779
   5780			/* Since this node is marked for discovery,
   5781			 * delay timeout is not needed.
   5782			 */
   5783			lpfc_cancel_retry_delay_tmo(vport, ndlp);
   5784
   5785			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
   5786					 "6455 Setup RSCN Node 2B_DISC x%x "
   5787					 "Data:x%x x%x x%x\n",
   5788					 ndlp->nlp_DID, ndlp->nlp_flag,
   5789					 ndlp->nlp_state, vport->fc_flag);
   5790
   5791			/* NVME Target mode waits until rport is known to be
   5792			 * impacted by the RSCN before it transitions.  No
   5793			 * active management - just go to NPR provided the
   5794			 * node had a valid login.
   5795			 */
   5796			if (vport->phba->nvmet_support)
   5797				return ndlp;
   5798
   5799			/* If we've already received a PLOGI from this NPort
   5800			 * we don't need to try to discover it again.
   5801			 */
   5802			if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
   5803			    !(ndlp->nlp_type &
   5804			     (NLP_FCP_TARGET | NLP_NVME_TARGET)))
   5805				return NULL;
   5806
   5807			ndlp->nlp_prev_state = ndlp->nlp_state;
   5808			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
   5809
   5810			spin_lock_irq(&ndlp->lock);
   5811			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
   5812			spin_unlock_irq(&ndlp->lock);
   5813		} else {
   5814			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
   5815					 "6456 Skip Setup RSCN Node x%x "
   5816					 "Data:x%x x%x x%x\n",
   5817					 ndlp->nlp_DID, ndlp->nlp_flag,
   5818					 ndlp->nlp_state, vport->fc_flag);
   5819			ndlp = NULL;
   5820		}
   5821	} else {
   5822		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
   5823				 "6457 Setup Active Node 2B_DISC x%x "
   5824				 "Data:x%x x%x x%x\n",
   5825				 ndlp->nlp_DID, ndlp->nlp_flag,
   5826				 ndlp->nlp_state, vport->fc_flag);
   5827
   5828		/* If the initiator received a PLOGI from this NPort or if the
   5829		 * initiator is already in the process of discovery on it,
   5830		 * there's no need to try to discover it again.
   5831		 */
   5832		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
   5833		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
   5834		    (!vport->phba->nvmet_support &&
   5835		     ndlp->nlp_flag & NLP_RCV_PLOGI))
   5836			return NULL;
   5837
   5838		if (vport->phba->nvmet_support)
   5839			return ndlp;
   5840
   5841		/* Moving to NPR state clears unsolicited flags and
   5842		 * allows for rediscovery
   5843		 */
   5844		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
   5845
   5846		spin_lock_irq(&ndlp->lock);
   5847		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
   5848		spin_unlock_irq(&ndlp->lock);
   5849	}
   5850	return ndlp;
   5851}
   5852
   5853/* Build a list of nodes to discover based on the loopmap */
   5854void
   5855lpfc_disc_list_loopmap(struct lpfc_vport *vport)
   5856{
   5857	struct lpfc_hba  *phba = vport->phba;
   5858	int j;
   5859	uint32_t alpa, index;
   5860
   5861	if (!lpfc_is_link_up(phba))
   5862		return;
   5863
   5864	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
   5865		return;
   5866
   5867	/* Check for loop map present or not */
   5868	if (phba->alpa_map[0]) {
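        		/* alpa_map[0] holds the count of valid ALPAs that
        		 * follow; skip our own ALPA and the reserved value 0.
        		 */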
   5869		for (j = 1; j <= phba->alpa_map[0]; j++) {
   5870			alpa = phba->alpa_map[j];
   5871			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
   5872				continue;
   5873			lpfc_setup_disc_node(vport, alpa);
   5874		}
   5875	} else {
    5876		/* No alpa map, so try all ALPAs */
   5877		for (j = 0; j < FC_MAXLOOP; j++) {
   5878			/* If cfg_scan_down is set, start from highest
   5879			 * ALPA (0xef) to lowest (0x1).
   5880			 */
   5881			if (vport->cfg_scan_down)
   5882				index = j;
   5883			else
   5884				index = FC_MAXLOOP - j - 1;
   5885			alpa = lpfcAlpaArray[index];
   5886			if ((vport->fc_myDID & 0xff) == alpa)
   5887				continue;
   5888			lpfc_setup_disc_node(vport, alpa);
   5889		}
   5890	}
   5891	return;
   5892}
   5893
   5894/* SLI3 only */
   5895void
   5896lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
   5897{
   5898	LPFC_MBOXQ_t *mbox;
   5899	struct lpfc_sli *psli = &phba->sli;
   5900	struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
   5901	struct lpfc_sli_ring *fcp_ring   = &psli->sli3_ring[LPFC_FCP_RING];
   5902	int  rc;
   5903
   5904	/*
    5905	 * if it's not a physical port or if we already sent
    5906	 * clear_la, then don't send it.
   5907	 */
   5908	if ((phba->link_state >= LPFC_CLEAR_LA) ||
   5909	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
   5910		(phba->sli_rev == LPFC_SLI_REV4))
   5911		return;
   5912
    5913	/* Link up discovery */
   5914	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
   5915		phba->link_state = LPFC_CLEAR_LA;
   5916		lpfc_clear_la(phba, mbox);
   5917		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
   5918		mbox->vport = vport;
   5919		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
   5920		if (rc == MBX_NOT_FINISHED) {
   5921			mempool_free(mbox, phba->mbox_mem_pool);
   5922			lpfc_disc_flush_list(vport);
   5923			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
   5924			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
   5925			phba->link_state = LPFC_HBA_ERROR;
   5926		}
   5927	}
   5928}
   5929
   5930/* Reg_vpi to tell firmware to resume normal operations */
   5931void
   5932lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
   5933{
   5934	LPFC_MBOXQ_t *regvpimbox;
   5935
   5936	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   5937	if (regvpimbox) {
   5938		lpfc_reg_vpi(vport, regvpimbox);
   5939		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
   5940		regvpimbox->vport = vport;
   5941		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
   5942					== MBX_NOT_FINISHED) {
   5943			mempool_free(regvpimbox, phba->mbox_mem_pool);
   5944		}
   5945	}
   5946}
   5947
   5948/* Start Link up / RSCN discovery on NPR nodes */
   5949void
   5950lpfc_disc_start(struct lpfc_vport *vport)
   5951{
   5952	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
   5953	struct lpfc_hba  *phba = vport->phba;
   5954	uint32_t num_sent;
   5955	uint32_t clear_la_pending;
   5956
   5957	if (!lpfc_is_link_up(phba)) {
   5958		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
   5959				 "3315 Link is not up %x\n",
   5960				 phba->link_state);
   5961		return;
   5962	}
   5963
   5964	if (phba->link_state == LPFC_CLEAR_LA)
   5965		clear_la_pending = 1;
   5966	else
   5967		clear_la_pending = 0;
   5968
   5969	if (vport->port_state < LPFC_VPORT_READY)
   5970		vport->port_state = LPFC_DISC_AUTH;
   5971
   5972	lpfc_set_disctmo(vport);
   5973
   5974	vport->fc_prevDID = vport->fc_myDID;
   5975	vport->num_disc_nodes = 0;
   5976
   5977	/* Start Discovery state <hba_state> */
   5978	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
   5979			 "0202 Start Discovery port state x%x "
   5980			 "flg x%x Data: x%x x%x x%x\n",
   5981			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
   5982			 vport->fc_adisc_cnt, vport->fc_npr_cnt);
   5983
   5984	/* First do ADISCs - if any */
   5985	num_sent = lpfc_els_disc_adisc(vport);
   5986
   5987	if (num_sent)
   5988		return;
   5989
   5990	/* Register the VPI for SLI3, NPIV only. */
   5991	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
   5992	    !(vport->fc_flag & FC_PT2PT) &&
   5993	    !(vport->fc_flag & FC_RSCN_MODE) &&
   5994	    (phba->sli_rev < LPFC_SLI_REV4)) {
   5995		lpfc_issue_clear_la(phba, vport);
   5996		lpfc_issue_reg_vpi(phba, vport);
   5997		return;
   5998	}
   5999
   6000	/*
   6001	 * For SLI2, we need to set port_state to READY and continue
   6002	 * discovery.
   6003	 */
   6004	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
   6005		/* If we get here, there is nothing to ADISC */
   6006		lpfc_issue_clear_la(phba, vport);
   6007
   6008		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
   6009			vport->num_disc_nodes = 0;
   6010			/* go thru NPR nodes and issue ELS PLOGIs */
   6011			if (vport->fc_npr_cnt)
   6012				lpfc_els_disc_plogi(vport);
   6013
   6014			if (!vport->num_disc_nodes) {
   6015				spin_lock_irq(shost->host_lock);
   6016				vport->fc_flag &= ~FC_NDISC_ACTIVE;
   6017				spin_unlock_irq(shost->host_lock);
   6018				lpfc_can_disctmo(vport);
   6019			}
   6020		}
   6021		vport->port_state = LPFC_VPORT_READY;
   6022	} else {
   6023		/* Next do PLOGIs - if any */
   6024		num_sent = lpfc_els_disc_plogi(vport);
   6025
   6026		if (num_sent)
   6027			return;
   6028
   6029		if (vport->fc_flag & FC_RSCN_MODE) {
   6030			/* Check to see if more RSCNs came in while we
   6031			 * were processing this one.
   6032			 */
   6033			if ((vport->fc_rscn_id_cnt == 0) &&
   6034			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
   6035				spin_lock_irq(shost->host_lock);
   6036				vport->fc_flag &= ~FC_RSCN_MODE;
   6037				spin_unlock_irq(shost->host_lock);
   6038				lpfc_can_disctmo(vport);
   6039			} else
   6040				lpfc_els_handle_rscn(vport);
   6041		}
   6042	}
   6043	return;
   6044}
   6045
   6046/*
    6047 *  Ignore completion for all IOCBs on the tx and txcmpl queues of
    6048 *  the ELS ring that match the specified nodelist.
   6049 */
   6050static void
   6051lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
   6052{
   6053	LIST_HEAD(completions);
   6054	struct lpfc_iocbq    *iocb, *next_iocb;
   6055	struct lpfc_sli_ring *pring;
   6056	u32 ulp_command;
   6057
   6058	pring = lpfc_phba_elsring(phba);
   6059	if (unlikely(!pring))
   6060		return;
   6061
    6062	/* Error out matching iocbs on the txq and abort those on the
    6063	 * txcmplq.  First check the txq.
   6064	 */
   6065	spin_lock_irq(&phba->hbalock);
   6066	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
   6067		if (iocb->ndlp != ndlp)
   6068			continue;
   6069
   6070		ulp_command = get_job_cmnd(phba, iocb);
   6071
   6072		if (ulp_command == CMD_ELS_REQUEST64_CR ||
   6073		    ulp_command == CMD_XMIT_ELS_RSP64_CX) {
   6074
   6075			list_move_tail(&iocb->list, &completions);
   6076		}
   6077	}
   6078
   6079	/* Next check the txcmplq */
   6080	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
   6081		if (iocb->ndlp != ndlp)
   6082			continue;
   6083
   6084		ulp_command = get_job_cmnd(phba, iocb);
   6085
   6086		if (ulp_command == CMD_ELS_REQUEST64_CR ||
   6087		    ulp_command == CMD_XMIT_ELS_RSP64_CX) {
   6088			lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
   6089		}
   6090	}
   6091	spin_unlock_irq(&phba->hbalock);
   6092
   6093	/* Make sure HBA is alive */
   6094	lpfc_issue_hb_tmo(phba);
   6095
   6096	/* Cancel all the IOCBs from the completions list */
   6097	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
   6098			      IOERR_SLI_ABORTED);
   6099}
   6100
   6101static void
   6102lpfc_disc_flush_list(struct lpfc_vport *vport)
   6103{
   6104	struct lpfc_nodelist *ndlp, *next_ndlp;
   6105	struct lpfc_hba *phba = vport->phba;
   6106
   6107	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
   6108		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
   6109					 nlp_listp) {
   6110			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
   6111			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
   6112				lpfc_free_tx(phba, ndlp);
   6113			}
   6114		}
   6115	}
   6116}
   6117
   6118/*
   6119 * lpfc_notify_xport_npr - notifies xport of node disappearance
   6120 * @vport: Pointer to Virtual Port object.
   6121 *
   6122 * Transitions all ndlps to NPR state.  When lpfc_nlp_set_state
   6123 * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered
    6124 * and the transport is notified that the node is gone.
   6125 * Return Code:
   6126 *	none
   6127 */
   6128static void
   6129lpfc_notify_xport_npr(struct lpfc_vport *vport)
   6130{
   6131	struct lpfc_nodelist *ndlp, *next_ndlp;
   6132
   6133	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
   6134				 nlp_listp) {
   6135		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
   6136	}
   6137}

    6138void
   6139lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
   6140{
   6141	lpfc_els_flush_rscn(vport);
   6142	lpfc_els_flush_cmd(vport);
   6143	lpfc_disc_flush_list(vport);
   6144	if (pci_channel_offline(vport->phba->pcidev))
   6145		lpfc_notify_xport_npr(vport);
   6146}
   6147
   6148/*****************************************************************************/
   6149/*
   6150 * NAME:     lpfc_disc_timeout
   6151 *
   6152 * FUNCTION: Fibre Channel driver discovery timeout routine.
   6153 *
   6154 * EXECUTION ENVIRONMENT: interrupt only
   6155 *
   6156 * CALLED FROM:
   6157 *      Timer function
   6158 *
   6159 * RETURNS:
   6160 *      none
   6161 */
   6162/*****************************************************************************/
   6163void
   6164lpfc_disc_timeout(struct timer_list *t)
   6165{
   6166	struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo);
   6167	struct lpfc_hba   *phba = vport->phba;
   6168	uint32_t tmo_posted;
   6169	unsigned long flags = 0;
   6170
   6171	if (unlikely(!phba))
   6172		return;
   6173
   6174	spin_lock_irqsave(&vport->work_port_lock, flags);
   6175	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
   6176	if (!tmo_posted)
   6177		vport->work_port_events |= WORKER_DISC_TMO;
   6178	spin_unlock_irqrestore(&vport->work_port_lock, flags);
   6179
   6180	if (!tmo_posted)
   6181		lpfc_worker_wake_up(phba);
   6182	return;
   6183}
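
        /* Deferral note (illustrative sketch, not the exact driver code):
         * the timer callback above only latches WORKER_DISC_TMO and wakes
         * the worker thread; the real work runs in process context.  A
         * consumer-side sketch, assuming the worker clears the event under
         * the same work_port_lock before dispatching:
         *
         *	spin_lock_irqsave(&vport->work_port_lock, flags);
         *	work_events = vport->work_port_events;
         *	vport->work_port_events &= ~WORKER_DISC_TMO;
         *	spin_unlock_irqrestore(&vport->work_port_lock, flags);
         *
         *	if (work_events & WORKER_DISC_TMO)
         *		lpfc_disc_timeout_handler(vport);
         */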
   6184
   6185static void
   6186lpfc_disc_timeout_handler(struct lpfc_vport *vport)
   6187{
   6188	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
   6189	struct lpfc_hba  *phba = vport->phba;
   6190	struct lpfc_sli  *psli = &phba->sli;
   6191	struct lpfc_nodelist *ndlp, *next_ndlp;
   6192	LPFC_MBOXQ_t *initlinkmbox;
   6193	int rc, clrlaerr = 0;
   6194
   6195	if (!(vport->fc_flag & FC_DISC_TMO))
   6196		return;
   6197
   6198	spin_lock_irq(shost->host_lock);
   6199	vport->fc_flag &= ~FC_DISC_TMO;
   6200	spin_unlock_irq(shost->host_lock);
   6201
   6202	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
   6203		"disc timeout:    state:x%x rtry:x%x flg:x%x",
   6204		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
   6205
   6206	switch (vport->port_state) {
   6207
   6208	case LPFC_LOCAL_CFG_LINK:
   6209		/*
    6210		 * port_state is identically LPFC_LOCAL_CFG_LINK while
   6211		 * waiting for FAN timeout
   6212		 */
   6213		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
   6214				 "0221 FAN timeout\n");
   6215
   6216		/* Start discovery by sending FLOGI, clean up old rpis */
   6217		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
   6218					 nlp_listp) {
   6219			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
   6220				continue;
   6221			if (ndlp->nlp_type & NLP_FABRIC) {
   6222				/* Clean up the ndlp on Fabric connections */
   6223				lpfc_drop_node(vport, ndlp);
   6224
   6225			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
   6226				/* Fail outstanding IO now since device
   6227				 * is marked for PLOGI.
   6228				 */
   6229				lpfc_unreg_rpi(vport, ndlp);
   6230			}
   6231		}
   6232		if (vport->port_state != LPFC_FLOGI) {
   6233			if (phba->sli_rev <= LPFC_SLI_REV3)
   6234				lpfc_initial_flogi(vport);
   6235			else
   6236				lpfc_issue_init_vfi(vport);
   6237			return;
   6238		}
   6239		break;
   6240
   6241	case LPFC_FDISC:
   6242	case LPFC_FLOGI:
   6243	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
   6244		/* Initial FLOGI timeout */
   6245		lpfc_printf_vlog(vport, KERN_ERR,
   6246				 LOG_TRACE_EVENT,
   6247				 "0222 Initial %s timeout\n",
   6248				 vport->vpi ? "FDISC" : "FLOGI");
   6249
   6250		/* Assume no Fabric and go on with discovery.
   6251		 * Check for outstanding ELS FLOGI to abort.
   6252		 */
   6253
   6254		/* FLOGI failed, so just use loop map to make discovery list */
   6255		lpfc_disc_list_loopmap(vport);
   6256
   6257		/* Start discovery */
   6258		lpfc_disc_start(vport);
   6259		break;
   6260
   6261	case LPFC_FABRIC_CFG_LINK:
    6262	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting
    6263	 * for NameServer login
        	 */
   6264		lpfc_printf_vlog(vport, KERN_ERR,
   6265				 LOG_TRACE_EVENT,
   6266				 "0223 Timeout while waiting for "
   6267				 "NameServer login\n");
   6268		/* Next look for NameServer ndlp */
   6269		ndlp = lpfc_findnode_did(vport, NameServer_DID);
   6270		if (ndlp)
   6271			lpfc_els_abort(phba, ndlp);
   6272
   6273		/* ReStart discovery */
   6274		goto restart_disc;
   6275
   6276	case LPFC_NS_QRY:
    6277	/* Timeout while waiting for the NameServer response */
   6278		lpfc_printf_vlog(vport, KERN_ERR,
   6279				 LOG_TRACE_EVENT,
   6280				 "0224 NameServer Query timeout "
   6281				 "Data: x%x x%x\n",
   6282				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
   6283
   6284		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
   6285			/* Try it one more time */
   6286			vport->fc_ns_retry++;
   6287			vport->gidft_inp = 0;
   6288			rc = lpfc_issue_gidft(vport);
   6289			if (rc == 0)
   6290				break;
   6291		}
   6292		vport->fc_ns_retry = 0;
   6293
   6294restart_disc:
   6295		/*
   6296		 * Discovery is over.
    6297		 * Set port_state to LPFC_VPORT_READY if SLI2.
   6298		 * cmpl_reg_vpi will set port_state to READY for SLI3.
   6299		 */
   6300		if (phba->sli_rev < LPFC_SLI_REV4) {
   6301			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
   6302				lpfc_issue_reg_vpi(phba, vport);
   6303			else  {
   6304				lpfc_issue_clear_la(phba, vport);
   6305				vport->port_state = LPFC_VPORT_READY;
   6306			}
   6307		}
   6308
   6309		/* Setup and issue mailbox INITIALIZE LINK command */
   6310		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   6311		if (!initlinkmbox) {
   6312			lpfc_printf_vlog(vport, KERN_ERR,
   6313					 LOG_TRACE_EVENT,
   6314					 "0206 Device Discovery "
   6315					 "completion error\n");
   6316			phba->link_state = LPFC_HBA_ERROR;
   6317			break;
   6318		}
   6319
   6320		lpfc_linkdown(phba);
   6321		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
   6322			       phba->cfg_link_speed);
   6323		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
   6324		initlinkmbox->vport = vport;
   6325		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
   6326		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
   6327		lpfc_set_loopback_flag(phba);
   6328		if (rc == MBX_NOT_FINISHED)
   6329			mempool_free(initlinkmbox, phba->mbox_mem_pool);
   6330
   6331		break;
   6332
   6333	case LPFC_DISC_AUTH:
   6334	/* Node Authentication timeout */
   6335		lpfc_printf_vlog(vport, KERN_ERR,
   6336				 LOG_TRACE_EVENT,
   6337				 "0227 Node Authentication timeout\n");
   6338		lpfc_disc_flush_list(vport);
   6339
   6340		/*
    6341		 * Set port_state to LPFC_VPORT_READY if SLI2.
   6342		 * cmpl_reg_vpi will set port_state to READY for SLI3.
   6343		 */
   6344		if (phba->sli_rev < LPFC_SLI_REV4) {
   6345			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
   6346				lpfc_issue_reg_vpi(phba, vport);
   6347			else  {	/* NPIV Not enabled */
   6348				lpfc_issue_clear_la(phba, vport);
   6349				vport->port_state = LPFC_VPORT_READY;
   6350			}
   6351		}
   6352		break;
   6353
   6354	case LPFC_VPORT_READY:
   6355		if (vport->fc_flag & FC_RSCN_MODE) {
   6356			lpfc_printf_vlog(vport, KERN_ERR,
   6357					 LOG_TRACE_EVENT,
   6358					 "0231 RSCN timeout Data: x%x "
   6359					 "x%x x%x x%x\n",
   6360					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY,
   6361					 vport->port_state, vport->gidft_inp);
   6362
   6363			/* Cleanup any outstanding ELS commands */
   6364			lpfc_els_flush_cmd(vport);
   6365
   6366			lpfc_els_flush_rscn(vport);
   6367			lpfc_disc_flush_list(vport);
   6368		}
   6369		break;
   6370
   6371	default:
   6372		lpfc_printf_vlog(vport, KERN_ERR,
   6373				 LOG_TRACE_EVENT,
   6374				 "0273 Unexpected discovery timeout, "
   6375				 "vport State x%x\n", vport->port_state);
   6376		break;
   6377	}
   6378
   6379	switch (phba->link_state) {
   6380	case LPFC_CLEAR_LA:
    6381		/* CLEAR LA timeout */
   6382		lpfc_printf_vlog(vport, KERN_ERR,
   6383				 LOG_TRACE_EVENT,
   6384				 "0228 CLEAR LA timeout\n");
   6385		clrlaerr = 1;
   6386		break;
   6387
   6388	case LPFC_LINK_UP:
   6389		lpfc_issue_clear_la(phba, vport);
   6390		fallthrough;
   6391	case LPFC_LINK_UNKNOWN:
   6392	case LPFC_WARM_START:
   6393	case LPFC_INIT_START:
   6394	case LPFC_INIT_MBX_CMDS:
   6395	case LPFC_LINK_DOWN:
   6396	case LPFC_HBA_ERROR:
   6397		lpfc_printf_vlog(vport, KERN_ERR,
   6398				 LOG_TRACE_EVENT,
   6399				 "0230 Unexpected timeout, hba link "
   6400				 "state x%x\n", phba->link_state);
   6401		clrlaerr = 1;
   6402		break;
   6403
   6404	case LPFC_HBA_READY:
   6405		break;
   6406	}
   6407
   6408	if (clrlaerr) {
   6409		lpfc_disc_flush_list(vport);
   6410		if (phba->sli_rev != LPFC_SLI_REV4) {
   6411			psli->sli3_ring[(LPFC_EXTRA_RING)].flag &=
   6412				~LPFC_STOP_IOCB_EVENT;
   6413			psli->sli3_ring[LPFC_FCP_RING].flag &=
   6414				~LPFC_STOP_IOCB_EVENT;
   6415		}
   6416		vport->port_state = LPFC_VPORT_READY;
   6417	}
   6418	return;
   6419}
   6420
   6421/*
    6422 * This routine handles processing an FDMI REG_LOGIN mailbox
    6423 * command upon completion. It is set up in the LPFC_MBOXQ
   6424 * as the completion routine when the command is
   6425 * handed off to the SLI layer.
   6426 */
   6427void
   6428lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
   6429{
   6430	MAILBOX_t *mb = &pmb->u.mb;
   6431	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
   6432	struct lpfc_vport    *vport = pmb->vport;
   6433
   6434	pmb->ctx_ndlp = NULL;
   6435
   6436	if (phba->sli_rev < LPFC_SLI_REV4)
   6437		ndlp->nlp_rpi = mb->un.varWords[0];
   6438	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
   6439	ndlp->nlp_type |= NLP_FABRIC;
   6440	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
   6441	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
   6442			 "0004 rpi:%x DID:%x flg:%x %d x%px\n",
   6443			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
   6444			 kref_read(&ndlp->kref),
   6445			 ndlp);
   6446	/*
    6447	 * Start issuing Fabric-Device Management Interface (FDMI) commands
    6448	 * to 0xfffffa (the FDMI well-known port).
   6449	 * DHBA -> DPRT -> RHBA -> RPA  (physical port)
   6450	 * DPRT -> RPRT (vports)
   6451	 */
   6452	if (vport->port_type == LPFC_PHYSICAL_PORT) {
   6453		phba->link_flag &= ~LS_CT_VEN_RPA; /* For extra Vendor RPA */
   6454		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
   6455	} else {
   6456		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
   6457	}
   6458
   6460	/* decrement the node reference count held for this callback
   6461	 * function.
   6462	 */
   6463	lpfc_nlp_put(ndlp);
   6464	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
   6465	return;
   6466}
   6467
   6468static int
   6469lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
   6470{
   6471	uint16_t *rpi = param;
   6472
   6473	return ndlp->nlp_rpi == *rpi;
   6474}
   6475
   6476static int
   6477lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
   6478{
   6479	return memcmp(&ndlp->nlp_portname, param,
   6480		      sizeof(ndlp->nlp_portname)) == 0;
   6481}
   6482
   6483static struct lpfc_nodelist *
   6484__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
   6485{
   6486	struct lpfc_nodelist *ndlp;
   6487
   6488	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
   6489		if (filter(ndlp, param)) {
   6490			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
   6491					 "3185 FIND node filter %ps DID "
   6492					 "ndlp x%px did x%x flg x%x st x%x "
   6493					 "xri x%x type x%x rpi x%x\n",
   6494					 filter, ndlp, ndlp->nlp_DID,
   6495					 ndlp->nlp_flag, ndlp->nlp_state,
   6496					 ndlp->nlp_xri, ndlp->nlp_type,
   6497					 ndlp->nlp_rpi);
   6498			return ndlp;
   6499		}
   6500	}
   6501	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
   6502			 "3186 FIND node filter %ps NOT FOUND.\n", filter);
   6503	return NULL;
   6504}
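
        /* Filter-callback sketch (illustrative): new lookups only need a small
         * node_filter matching the shape of lpfc_filter_by_rpi() above.  A
         * hypothetical DID-based search could look like:
         *
         *	static int
         *	lpfc_filter_by_did(struct lpfc_nodelist *ndlp, void *param)
         *	{
         *		uint32_t *did = param;
         *
         *		return ndlp->nlp_DID == *did;
         *	}
         *
         *	ndlp = __lpfc_find_node(vport, lpfc_filter_by_did, &did);
         */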
   6505
   6506/*
    6507 * This routine looks up the ndlp lists for the given RPI. If the RPI is
    6508 * found, it returns the node list element pointer, else it returns NULL.
   6509 */
   6510struct lpfc_nodelist *
   6511__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
   6512{
   6513	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
   6514}
   6515
   6516/*
    6517 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
    6518 * found, it returns the node list element pointer, else it returns NULL.
   6519 */
   6520struct lpfc_nodelist *
   6521lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
   6522{
   6523	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
   6524	struct lpfc_nodelist *ndlp;
   6525
   6526	spin_lock_irq(shost->host_lock);
   6527	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
   6528	spin_unlock_irq(shost->host_lock);
   6529	return ndlp;
   6530}
   6531
   6532/*
    6533 * This routine looks up the ndlp lists for the given RPI. If the RPI
    6534 * is found, the routine returns the node list element pointer, else
    6535 * it returns NULL.
   6536 */
   6537struct lpfc_nodelist *
   6538lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
   6539{
   6540	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
   6541	struct lpfc_nodelist *ndlp;
   6542	unsigned long flags;
   6543
   6544	spin_lock_irqsave(shost->host_lock, flags);
   6545	ndlp = __lpfc_findnode_rpi(vport, rpi);
   6546	spin_unlock_irqrestore(shost->host_lock, flags);
   6547	return ndlp;
   6548}
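
        /* Locking convention: the double-underscore variants such as
         * __lpfc_findnode_rpi() assume the caller already holds
         * shost->host_lock; lpfc_findnode_rpi() and lpfc_findnode_wwpn()
         * above are the self-locking wrappers.  New lookups should keep the
         * same split, e.g. (sketch):
         *
         *	spin_lock_irqsave(shost->host_lock, flags);
         *	ndlp = __lpfc_findnode_rpi(vport, rpi);
         *	spin_unlock_irqrestore(shost->host_lock, flags);
         */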
   6549
   6550/**
   6551 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
   6552 * @phba: pointer to lpfc hba data structure.
   6553 * @vpi: the physical host virtual N_Port identifier.
   6554 *
   6555 * This routine finds a vport on a HBA (referred by @phba) through a
   6556 * @vpi. The function walks the HBA's vport list and returns the address
   6557 * of the vport with the matching @vpi.
   6558 *
   6559 * Return code
   6560 *    NULL - No vport with the matching @vpi found
   6561 *    Otherwise - Address to the vport with the matching @vpi.
   6562 **/
   6563struct lpfc_vport *
   6564lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
   6565{
   6566	struct lpfc_vport *vport;
   6567	unsigned long flags;
   6568	int i = 0;
   6569
    6570	/* Physical ports are always vpi 0, so no translation is necessary. */
   6571	if (vpi > 0) {
   6572		/*
   6573		 * Translate the physical vpi to the logical vpi.  The
   6574		 * vport stores the logical vpi.
   6575		 */
   6576		for (i = 0; i <= phba->max_vpi; i++) {
   6577			if (vpi == phba->vpi_ids[i])
   6578				break;
   6579		}
   6580
   6581		if (i > phba->max_vpi) {
   6582			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   6583					"2936 Could not find Vport mapped "
   6584					"to vpi %d\n", vpi);
   6585			return NULL;
   6586		}
   6587	}
   6588
   6589	spin_lock_irqsave(&phba->port_list_lock, flags);
   6590	list_for_each_entry(vport, &phba->port_list, listentry) {
   6591		if (vport->vpi == i) {
   6592			spin_unlock_irqrestore(&phba->port_list_lock, flags);
   6593			return vport;
   6594		}
   6595	}
   6596	spin_unlock_irqrestore(&phba->port_list_lock, flags);
   6597	return NULL;
   6598}
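
        /* Worked example (illustrative): with phba->vpi_ids[] = { 0, 17, 23 }
         * and vpi == 23, the translation loop above ends with i == 2, so the
         * port list walk returns the vport whose vport->vpi == 2.
         */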
   6599
   6600struct lpfc_nodelist *
   6601lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
   6602{
   6603	struct lpfc_nodelist *ndlp;
   6604	int rpi = LPFC_RPI_ALLOC_ERROR;
   6605
   6606	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
   6607		rpi = lpfc_sli4_alloc_rpi(vport->phba);
   6608		if (rpi == LPFC_RPI_ALLOC_ERROR)
   6609			return NULL;
   6610	}
   6611
   6612	ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
   6613	if (!ndlp) {
   6614		if (vport->phba->sli_rev == LPFC_SLI_REV4)
   6615			lpfc_sli4_free_rpi(vport->phba, rpi);
   6616		return NULL;
   6617	}
   6618
    6619	memset(ndlp, 0, sizeof(struct lpfc_nodelist));
   6620
   6621	spin_lock_init(&ndlp->lock);
   6622
   6623	lpfc_initialize_node(vport, ndlp, did);
   6624	INIT_LIST_HEAD(&ndlp->nlp_listp);
   6625	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
   6626		ndlp->nlp_rpi = rpi;
   6627		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
   6628				 "0007 Init New ndlp x%px, rpi:x%x DID:%x "
   6629				 "flg:x%x refcnt:%d\n",
   6630				 ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
   6631				 ndlp->nlp_flag, kref_read(&ndlp->kref));
   6632
   6633		ndlp->active_rrqs_xri_bitmap =
   6634				mempool_alloc(vport->phba->active_rrq_pool,
   6635					      GFP_KERNEL);
   6636		if (ndlp->active_rrqs_xri_bitmap)
   6637			memset(ndlp->active_rrqs_xri_bitmap, 0,
   6638			       ndlp->phba->cfg_rrq_xri_bitmap_sz);
   6639	}
   6640
   6641
   6643	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
   6644		"node init:       did:x%x",
   6645		ndlp->nlp_DID, 0, 0);
   6646
   6647	return ndlp;
   6648}
   6649
    6650/* This routine releases all resources associated with a specific NPort's
    6651 * ndlp and frees the nodelist back to its mempool.
   6652 */
   6653static void
   6654lpfc_nlp_release(struct kref *kref)
   6655{
   6656	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
   6657						  kref);
   6658	struct lpfc_vport *vport = ndlp->vport;
   6659
   6660	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
   6661		"node release:    did:x%x flg:x%x type:x%x",
   6662		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
   6663
   6664	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
   6665			 "0279 %s: ndlp: x%px did %x refcnt:%d rpi:%x\n",
   6666			 __func__, ndlp, ndlp->nlp_DID,
   6667			 kref_read(&ndlp->kref), ndlp->nlp_rpi);
   6668
   6669	/* remove ndlp from action. */
   6670	lpfc_cancel_retry_delay_tmo(vport, ndlp);
   6671	lpfc_cleanup_node(vport, ndlp);
   6672
   6673	/* Not all ELS transactions have registered the RPI with the port.
   6674	 * In these cases the rpi usage is temporary and the node is
   6675	 * released when the WQE is completed.  Catch this case to free the
   6676	 * RPI to the pool.  Because this node is in the release path, a lock
   6677	 * is unnecessary.  All references are gone and the node has been
   6678	 * dequeued.
   6679	 */
   6680	if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
   6681		if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR &&
   6682		    !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) {
   6683			lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
   6684			ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
   6685		}
   6686	}
   6687
    6688	/* The node is not freed back to memory; it is released to a pool, so
   6689	 * the node fields need to be cleaned up.
   6690	 */
   6691	ndlp->vport = NULL;
   6692	ndlp->nlp_state = NLP_STE_FREED_NODE;
   6693	ndlp->nlp_flag = 0;
   6694	ndlp->fc4_xpt_flags = 0;
   6695
   6696	/* free ndlp memory for final ndlp release */
   6697	kfree(ndlp->lat_data);
   6698	if (ndlp->phba->sli_rev == LPFC_SLI_REV4)
   6699		mempool_free(ndlp->active_rrqs_xri_bitmap,
   6700				ndlp->phba->active_rrq_pool);
   6701	mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
   6702}
   6703
    6704/* This routine bumps the reference count for an ndlp structure to ensure
    6705 * that one discovery thread won't free an ndlp while another discovery
    6706 * thread is using it.
   6707 */
   6708struct lpfc_nodelist *
   6709lpfc_nlp_get(struct lpfc_nodelist *ndlp)
   6710{
   6711	unsigned long flags;
   6712
   6713	if (ndlp) {
   6714		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
   6715			"node get:        did:x%x flg:x%x refcnt:x%x",
   6716			ndlp->nlp_DID, ndlp->nlp_flag,
   6717			kref_read(&ndlp->kref));
   6718
    6719		/* Check ndlp usage so we do not increment the reference
    6720		 * count of an ndlp that is in the process of being
    6721		 * released.
   6722		 */
   6723		spin_lock_irqsave(&ndlp->lock, flags);
   6724		if (!kref_get_unless_zero(&ndlp->kref)) {
   6725			spin_unlock_irqrestore(&ndlp->lock, flags);
   6726			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
   6727				"0276 %s: ndlp:x%px refcnt:%d\n",
   6728				__func__, (void *)ndlp, kref_read(&ndlp->kref));
   6729			return NULL;
   6730		}
   6731		spin_unlock_irqrestore(&ndlp->lock, flags);
   6732	} else {
   6733		WARN_ONCE(!ndlp, "**** %s, get ref on NULL ndlp!", __func__);
   6734	}
   6735
   6736	return ndlp;
   6737}
   6738
    6739/* This routine decrements the reference count for an ndlp structure. If the
   6740 * count goes to 0, this indicates the associated nodelist should be freed.
   6741 */
   6742int
   6743lpfc_nlp_put(struct lpfc_nodelist *ndlp)
   6744{
   6745	if (ndlp) {
   6746		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
   6747				"node put:        did:x%x flg:x%x refcnt:x%x",
   6748				ndlp->nlp_DID, ndlp->nlp_flag,
   6749				kref_read(&ndlp->kref));
   6750	} else {
   6751		WARN_ONCE(!ndlp, "**** %s, put ref on NULL ndlp!", __func__);
   6752	}
   6753
   6754	return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
   6755}
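
        /* Reference pairing sketch (illustrative): every successful
         * lpfc_nlp_get() must be balanced by an lpfc_nlp_put() once the
         * caller is done with the node:
         *
         *	ndlp = lpfc_nlp_get(ndlp);
         *	if (!ndlp)
         *		return;
         *	... use the node ...
         *	lpfc_nlp_put(ndlp);
         *
         * A NULL return from lpfc_nlp_get() means the final reference was
         * already being dropped, so the node must not be touched.
         */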
   6756
    6757/* This routine frees the specified nodelist if it is not in use
    6758 * by any other discovery thread. This routine returns 1 if the
    6759 * ndlp has been freed. A return value of 0 indicates the ndlp has
    6760 * not yet been released.
   6761 */
   6762int
   6763lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
   6764{
   6765	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
   6766		"node not used:   did:x%x flg:x%x refcnt:x%x",
   6767		ndlp->nlp_DID, ndlp->nlp_flag,
   6768		kref_read(&ndlp->kref));
   6769
   6770	if (kref_read(&ndlp->kref) == 1)
   6771		if (lpfc_nlp_put(ndlp))
   6772			return 1;
   6773	return 0;
   6774}
   6775
   6776/**
   6777 * lpfc_fcf_inuse - Check if FCF can be unregistered.
   6778 * @phba: Pointer to hba context object.
   6779 *
    6780 * This function iterates through all FC nodes associated
    6781 * with all vports to check if there is any node with an
    6782 * fc_rport associated with it. If there is an fc_rport
   6783 * associated with the node, then the node is either in
   6784 * discovered state or its devloss_timer is pending.
   6785 */
   6786static int
   6787lpfc_fcf_inuse(struct lpfc_hba *phba)
   6788{
   6789	struct lpfc_vport **vports;
   6790	int i, ret = 0;
   6791	struct lpfc_nodelist *ndlp;
   6792	struct Scsi_Host  *shost;
   6793
   6794	vports = lpfc_create_vport_work_array(phba);
   6795
   6796	/* If driver cannot allocate memory, indicate fcf is in use */
   6797	if (!vports)
   6798		return 1;
   6799
   6800	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
   6801		shost = lpfc_shost_from_vport(vports[i]);
   6802		spin_lock_irq(shost->host_lock);
   6803		/*
    6804		 * If the CVL_RCVD bit is not set then we have sent the
    6805		 * FLOGI.
    6806		 * If dev_loss fires while we are waiting, we do not want
    6807		 * to unreg the fcf.
   6808		 */
   6809		if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
   6810			spin_unlock_irq(shost->host_lock);
   6811			ret =  1;
   6812			goto out;
   6813		}
   6814		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
   6815			if (ndlp->rport &&
   6816			  (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
   6817				ret = 1;
   6818				spin_unlock_irq(shost->host_lock);
   6819				goto out;
   6820			} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
   6821				ret = 1;
   6822				lpfc_printf_log(phba, KERN_INFO,
   6823						LOG_NODE | LOG_DISCOVERY,
   6824						"2624 RPI %x DID %x flag %x "
   6825						"still logged in\n",
   6826						ndlp->nlp_rpi, ndlp->nlp_DID,
   6827						ndlp->nlp_flag);
   6828			}
   6829		}
   6830		spin_unlock_irq(shost->host_lock);
   6831	}
   6832out:
   6833	lpfc_destroy_vport_work_array(phba, vports);
   6834	return ret;
   6835}
   6836
   6837/**
   6838 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
   6839 * @phba: Pointer to hba context object.
   6840 * @mboxq: Pointer to mailbox object.
   6841 *
   6842 * This function frees memory associated with the mailbox command.
   6843 */
   6844void
   6845lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
   6846{
   6847	struct lpfc_vport *vport = mboxq->vport;
   6848	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
   6849
   6850	if (mboxq->u.mb.mbxStatus) {
   6851		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   6852				"2555 UNREG_VFI mbxStatus error x%x "
   6853				"HBA state x%x\n",
   6854				mboxq->u.mb.mbxStatus, vport->port_state);
   6855	}
   6856	spin_lock_irq(shost->host_lock);
   6857	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
   6858	spin_unlock_irq(shost->host_lock);
   6859	mempool_free(mboxq, phba->mbox_mem_pool);
   6860	return;
   6861}
   6862
   6863/**
   6864 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
   6865 * @phba: Pointer to hba context object.
   6866 * @mboxq: Pointer to mailbox object.
   6867 *
   6868 * This function frees memory associated with the mailbox command.
   6869 */
   6870static void
   6871lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
   6872{
   6873	struct lpfc_vport *vport = mboxq->vport;
   6874
   6875	if (mboxq->u.mb.mbxStatus) {
   6876		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   6877				"2550 UNREG_FCFI mbxStatus error x%x "
   6878				"HBA state x%x\n",
   6879				mboxq->u.mb.mbxStatus, vport->port_state);
   6880	}
   6881	mempool_free(mboxq, phba->mbox_mem_pool);
   6882	return;
   6883}
   6884
   6885/**
   6886 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
   6887 * @phba: Pointer to hba context object.
   6888 *
    6889 * This function prepares the HBA for unregistering the currently registered
    6890 * FCF from the HBA. It unregisters, in order, RPIs, VPIs, and VFIs.
   6892 */
   6893int
   6894lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
   6895{
   6896	struct lpfc_vport **vports;
   6897	struct lpfc_nodelist *ndlp;
   6898	struct Scsi_Host *shost;
   6899	int i = 0, rc;
   6900
   6901	/* Unregister RPIs */
   6902	if (lpfc_fcf_inuse(phba))
   6903		lpfc_unreg_hba_rpis(phba);
   6904
   6905	/* At this point, all discovery is aborted */
   6906	phba->pport->port_state = LPFC_VPORT_UNKNOWN;
   6907
   6908	/* Unregister VPIs */
   6909	vports = lpfc_create_vport_work_array(phba);
   6910	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
   6911		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
   6912			/* Stop FLOGI/FDISC retries */
   6913			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
   6914			if (ndlp)
   6915				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
   6916			lpfc_cleanup_pending_mbox(vports[i]);
   6917			if (phba->sli_rev == LPFC_SLI_REV4)
   6918				lpfc_sli4_unreg_all_rpis(vports[i]);
   6919			lpfc_mbx_unreg_vpi(vports[i]);
   6920			shost = lpfc_shost_from_vport(vports[i]);
   6921			spin_lock_irq(shost->host_lock);
   6922			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
   6923			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
   6924			spin_unlock_irq(shost->host_lock);
   6925		}
   6926	lpfc_destroy_vport_work_array(phba, vports);
   6927	if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
   6928		ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
   6929		if (ndlp)
   6930			lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
   6931		lpfc_cleanup_pending_mbox(phba->pport);
   6932		if (phba->sli_rev == LPFC_SLI_REV4)
   6933			lpfc_sli4_unreg_all_rpis(phba->pport);
   6934		lpfc_mbx_unreg_vpi(phba->pport);
   6935		shost = lpfc_shost_from_vport(phba->pport);
   6936		spin_lock_irq(shost->host_lock);
   6937		phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
   6938		phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
   6939		spin_unlock_irq(shost->host_lock);
   6940	}
   6941
   6942	/* Cleanup any outstanding ELS commands */
   6943	lpfc_els_flush_all_cmd(phba);
   6944
   6945	/* Unregister the physical port VFI */
   6946	rc = lpfc_issue_unreg_vfi(phba->pport);
   6947	return rc;
   6948}
   6949
   6950/**
   6951 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
   6952 * @phba: Pointer to hba context object.
   6953 *
    6954 * This function issues an unregister FCF mailbox command to the HBA to
    6955 * unregister the currently registered FCF record. The command is issued
    6956 * with MBX_NOWAIT; the driver does not reset its FCF usage state flags.
    6957 *
    6958 * Return 0 if successfully issued, non-zero otherwise.
   6959 */
   6960int
   6961lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
   6962{
   6963	LPFC_MBOXQ_t *mbox;
   6964	int rc;
   6965
   6966	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
   6967	if (!mbox) {
   6968		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
    6969				"2551 UNREG_FCFI mbox allocation failed "
   6970				"HBA state x%x\n", phba->pport->port_state);
   6971		return -ENOMEM;
   6972	}
   6973	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
   6974	mbox->vport = phba->pport;
   6975	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
   6976	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
   6977
   6978	if (rc == MBX_NOT_FINISHED) {
   6979		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   6980				"2552 Unregister FCFI command failed rc x%x "
   6981				"HBA state x%x\n",
   6982				rc, phba->pport->port_state);
   6983		return -EINVAL;
   6984	}
   6985	return 0;
   6986}
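
        /* Mailbox ownership note (as seen above and in
         * lpfc_disc_timeout_handler()): a command issued with MBX_NOWAIT is
         * owned by the SLI layer on success and freed by its mbox_cmpl
         * handler; only on MBX_NOT_FINISHED does the caller still own the
         * LPFC_MBOXQ_t and must mempool_free() it itself.
         */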
   6987
   6988/**
   6989 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
   6990 * @phba: Pointer to hba context object.
   6991 *
    6992 * This function unregisters the currently registered FCF. This function
    6993 * also tries to find another FCF for discovery by rescanning the HBA FCF table.
   6994 */
   6995void
   6996lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
   6997{
   6998	int rc;
   6999
   7000	/* Preparation for unregistering fcf */
   7001	rc = lpfc_unregister_fcf_prep(phba);
   7002	if (rc) {
   7003		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   7004				"2748 Failed to prepare for unregistering "
   7005				"HBA's FCF record: rc=%d\n", rc);
   7006		return;
   7007	}
   7008
   7009	/* Now, unregister FCF record and reset HBA FCF state */
   7010	rc = lpfc_sli4_unregister_fcf(phba);
   7011	if (rc)
   7012		return;
   7013	/* Reset HBA FCF states after successful unregister FCF */
   7014	phba->fcf.fcf_flag = 0;
   7015	phba->fcf.current_rec.flag = 0;
   7016
   7017	/*
   7018	 * If driver is not unloading, check if there is any other
   7019	 * FCF record that can be used for discovery.
   7020	 */
   7021	if ((phba->pport->load_flag & FC_UNLOADING) ||
   7022	    (phba->link_state < LPFC_LINK_UP))
   7023		return;
   7024
    7025	/* This is considered the initial FCF discovery scan */
   7026	spin_lock_irq(&phba->hbalock);
   7027	phba->fcf.fcf_flag |= FCF_INIT_DISC;
   7028	spin_unlock_irq(&phba->hbalock);
   7029
   7030	/* Reset FCF roundrobin bmask for new discovery */
   7031	lpfc_sli4_clear_fcf_rr_bmask(phba);
   7032
   7033	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
   7034
   7035	if (rc) {
   7036		spin_lock_irq(&phba->hbalock);
   7037		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
   7038		spin_unlock_irq(&phba->hbalock);
   7039		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   7040				"2553 lpfc_unregister_unused_fcf failed "
   7041				"to read FCF record HBA state x%x\n",
   7042				phba->pport->port_state);
   7043	}
   7044}
   7045
   7046/**
   7047 * lpfc_unregister_fcf - Unregister the currently registered fcf record
   7048 * @phba: Pointer to hba context object.
   7049 *
    7050 * This function just unregisters the currently registered FCF. It does not
   7051 * try to find another FCF for discovery.
   7052 */
   7053void
   7054lpfc_unregister_fcf(struct lpfc_hba *phba)
   7055{
   7056	int rc;
   7057
   7058	/* Preparation for unregistering fcf */
   7059	rc = lpfc_unregister_fcf_prep(phba);
   7060	if (rc) {
   7061		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   7062				"2749 Failed to prepare for unregistering "
   7063				"HBA's FCF record: rc=%d\n", rc);
   7064		return;
   7065	}
   7066
   7067	/* Now, unregister FCF record and reset HBA FCF state */
   7068	rc = lpfc_sli4_unregister_fcf(phba);
   7069	if (rc)
   7070		return;
   7071	/* Set proper HBA FCF states after successful unregister FCF */
   7072	spin_lock_irq(&phba->hbalock);
   7073	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
   7074	spin_unlock_irq(&phba->hbalock);
   7075}
   7076
   7077/**
   7078 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
   7079 * @phba: Pointer to hba context object.
   7080 *
    7081 * This function checks if there are any connected remote ports for the FCF;
    7082 * if all the devices are disconnected, this function unregisters the FCFI.
   7083 * This function also tries to use another FCF for discovery.
   7084 */
   7085void
   7086lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
   7087{
   7088	/*
   7089	 * If HBA is not running in FIP mode, if HBA does not support
   7090	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
   7091	 * registered, do nothing.
   7092	 */
   7093	spin_lock_irq(&phba->hbalock);
   7094	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
   7095	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
   7096	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
   7097	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
   7098	    (phba->pport->port_state == LPFC_FLOGI)) {
   7099		spin_unlock_irq(&phba->hbalock);
   7100		return;
   7101	}
   7102	spin_unlock_irq(&phba->hbalock);
   7103
   7104	if (lpfc_fcf_inuse(phba))
   7105		return;
   7106
   7107	lpfc_unregister_fcf_rescan(phba);
   7108}
   7109
   7110/**
   7111 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
   7112 * @phba: Pointer to hba context object.
   7113 * @buff: Buffer containing the FCF connection table as in the config
   7114 *         region.
    7115 * This function creates the driver data structure for the FCF connection
   7116 * record table read from config region 23.
   7117 */
   7118static void
   7119lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
   7120	uint8_t *buff)
   7121{
   7122	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
   7123	struct lpfc_fcf_conn_hdr *conn_hdr;
   7124	struct lpfc_fcf_conn_rec *conn_rec;
   7125	uint32_t record_count;
   7126	int i;
   7127
   7128	/* Free the current connect table */
   7129	list_for_each_entry_safe(conn_entry, next_conn_entry,
   7130		&phba->fcf_conn_rec_list, list) {
   7131		list_del_init(&conn_entry->list);
   7132		kfree(conn_entry);
   7133	}
   7134
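        	/* conn_hdr->length is a count of 32-bit words; convert it to
        	 * bytes and divide by the connection record size to get the
        	 * number of records that follow the header.
        	 */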
   7135	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
    7136	record_count = conn_hdr->length * sizeof(uint32_t) /
   7137		sizeof(struct lpfc_fcf_conn_rec);
   7138
   7139	conn_rec = (struct lpfc_fcf_conn_rec *)
   7140		(buff + sizeof(struct lpfc_fcf_conn_hdr));
   7141
   7142	for (i = 0; i < record_count; i++) {
   7143		if (!(conn_rec[i].flags & FCFCNCT_VALID))
   7144			continue;
   7145		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
   7146			GFP_KERNEL);
   7147		if (!conn_entry) {
   7148			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   7149					"2566 Failed to allocate connection"
   7150					" table entry\n");
   7151			return;
   7152		}
   7153
   7154		memcpy(&conn_entry->conn_rec, &conn_rec[i],
   7155			sizeof(struct lpfc_fcf_conn_rec));
   7156		list_add_tail(&conn_entry->list,
   7157			&phba->fcf_conn_rec_list);
   7158	}
   7159
   7160	if (!list_empty(&phba->fcf_conn_rec_list)) {
   7161		i = 0;
   7162		list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
   7163				    list) {
   7164			conn_rec = &conn_entry->conn_rec;
   7165			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
   7166					"3345 FCF connection list rec[%02d]: "
   7167					"flags:x%04x, vtag:x%04x, "
   7168					"fabric_name:x%02x:%02x:%02x:%02x:"
   7169					"%02x:%02x:%02x:%02x, "
   7170					"switch_name:x%02x:%02x:%02x:%02x:"
   7171					"%02x:%02x:%02x:%02x\n", i++,
   7172					conn_rec->flags, conn_rec->vlan_tag,
   7173					conn_rec->fabric_name[0],
   7174					conn_rec->fabric_name[1],
   7175					conn_rec->fabric_name[2],
   7176					conn_rec->fabric_name[3],
   7177					conn_rec->fabric_name[4],
   7178					conn_rec->fabric_name[5],
   7179					conn_rec->fabric_name[6],
   7180					conn_rec->fabric_name[7],
   7181					conn_rec->switch_name[0],
   7182					conn_rec->switch_name[1],
   7183					conn_rec->switch_name[2],
   7184					conn_rec->switch_name[3],
   7185					conn_rec->switch_name[4],
   7186					conn_rec->switch_name[5],
   7187					conn_rec->switch_name[6],
   7188					conn_rec->switch_name[7]);
   7189		}
   7190	}
   7191}
   7192
   7193/**
    7194 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
   7195 * @phba: Pointer to hba context object.
   7196 * @buff: Buffer containing the FCoE parameter data structure.
   7197 *
    7198 * This function updates the driver data structure with config
    7199 * parameters read from config region 23.
   7200 */
   7201static void
   7202lpfc_read_fcoe_param(struct lpfc_hba *phba,
   7203			uint8_t *buff)
   7204{
   7205	struct lpfc_fip_param_hdr *fcoe_param_hdr;
   7206	struct lpfc_fcoe_params *fcoe_param;
   7207
   7208	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
   7209		buff;
   7210	fcoe_param = (struct lpfc_fcoe_params *)
   7211		(buff + sizeof(struct lpfc_fip_param_hdr));
   7212
   7213	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
   7214		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
   7215		return;
   7216
   7217	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
   7218		phba->valid_vlan = 1;
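        		/* The low 12 bits of the tag carry the VLAN ID; mask off
        		 * the priority/DEI bits.
        		 */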
   7219		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
   7220			0xFFF;
   7221	}
   7222
   7223	phba->fc_map[0] = fcoe_param->fc_map[0];
   7224	phba->fc_map[1] = fcoe_param->fc_map[1];
   7225	phba->fc_map[2] = fcoe_param->fc_map[2];
   7226	return;
   7227}
   7228
   7229/**
   7230 * lpfc_get_rec_conf23 - Get a record type in config region data.
   7231 * @buff: Buffer containing config region 23 data.
   7232 * @size: Size of the data buffer.
   7233 * @rec_type: Record type to be searched.
   7234 *
   7235 * This function searches config region data to find the beginning
    7236 * of the record specified by record_type. If the record is found, this
    7237 * function returns a pointer to the record, else it returns NULL.
   7238 */
   7239static uint8_t *
   7240lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
   7241{
   7242	uint32_t offset = 0, rec_length;
   7243
   7244	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
   7245		(size < sizeof(uint32_t)))
   7246		return NULL;
   7247
   7248	rec_length = buff[offset + 1];
   7249
   7250	/*
    7251	 * One TLV record has a one-word header plus the number of data
    7252	 * words specified in the rec_length field of the record header.
   7253	 */
   7254	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
   7255		<= size) {
   7256		if (buff[offset] == rec_type)
   7257			return &buff[offset];
   7258
   7259		if (buff[offset] == LPFC_REGION23_LAST_REC)
   7260			return NULL;
   7261
   7262		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
   7263		rec_length = buff[offset + 1];
   7264	}
   7265	return NULL;
   7266}
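
        /* Record layout sketch for the walk above: each TLV record is one
         * 32-bit header word, with the record type in byte 0 and rec_length
         * (counted in 32-bit data words) in byte 1, followed by the data:
         *
         *	buff[offset + 0]	record type
         *	buff[offset + 1]	rec_length (words)
         *	buff[offset + 4]	first of rec_length * 4 data bytes
         *
         * hence the loop stride of
         * rec_length * sizeof(uint32_t) + sizeof(uint32_t).
         */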
   7267
   7268/**
   7269 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
   7270 * @phba: Pointer to lpfc_hba data structure.
   7271 * @buff: Buffer containing config region 23 data.
   7272 * @size: Size of the data buffer.
   7273 *
   7274 * This function parses the FCoE config parameters in config region 23 and
    7275 * populates the driver data structure with the parameters.
   7276 */
   7277void
   7278lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
   7279		uint8_t *buff,
   7280		uint32_t size)
   7281{
   7282	uint32_t offset = 0;
   7283	uint8_t *rec_ptr;
   7284
   7285	/*
    7286	 * If the data size is less than 2 words, the signature and version
    7287	 * cannot be verified.
   7288	 */
   7289	if (size < 2*sizeof(uint32_t))
   7290		return;
   7291
   7292	/* Check the region signature first */
   7293	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
   7294		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   7295			"2567 Config region 23 has bad signature\n");
   7296		return;
   7297	}
   7298
   7299	offset += 4;
   7300
   7301	/* Check the data structure version */
   7302	if (buff[offset] != LPFC_REGION23_VERSION) {
   7303		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   7304				"2568 Config region 23 has bad version\n");
   7305		return;
   7306	}
   7307	offset += 4;
   7308
   7309	/* Read FCoE param record */
   7310	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
   7311			size - offset, FCOE_PARAM_TYPE);
   7312	if (rec_ptr)
   7313		lpfc_read_fcoe_param(phba, rec_ptr);
   7314
   7315	/* Read FCF connection table */
   7316	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
   7317		size - offset, FCOE_CONN_TBL_TYPE);
   7318	if (rec_ptr)
   7319		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
   7320
   7321}