lpfc_els.c (379857B)
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
				  struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);
static void lpfc_cmpl_els_edc(struct lpfc_hba *phba,
			      struct lpfc_iocbq *cmdiocb,
			      struct lpfc_iocbq *rspiocb);
static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_iocbq *);

static int lpfc_max_els_tries = 3;

static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport);
static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max);
static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid);

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register. If there is any host
 * link attention events during this @vport's discovery process, the @vport
 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
 * be issued if the link state is not already in host link cleared state,
 * and a return code shall indicate whether the host link attention event
 * had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport
 * state in LPFC_VPORT_READY, the request for checking host link attention
 * event will be ignored and a return code shall indicate no host link
 * attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}
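/*
 * Editorial sketch (not part of the driver source): ELS completion paths in
 * this file typically consult lpfc_els_chk_latt() before acting on a
 * response, as lpfc_cmpl_els_flogi() does further below.  A minimal caller
 * outline, with error handling elided, looks like:
 *
 *	if (lpfc_els_chk_latt(vport)) {
 *		// Link attention seen during discovery; the vport is now
 *		// flagged FC_ABORT_DISCOVERY, so drop this completion.
 *		goto out;
 *	}
 *	// ... continue normal completion handling ...
 */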
/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expect_rsp: flag indicating whether response is expected.
 * @cmd_size: size of the ELS command.
 * @retry: number of retries to the command when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine is used for allocating a lpfc-IOCB data structure from
 * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
 * passed into the routine for discovery state machine to issue an Extended
 * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation
 * and preparation routine that is used by all the discovery state machine
 * routines and the ELS command-specific fields will be later set up by
 * the individual discovery machine routines after calling this routine
 * allocating and preparing a generic IOCB data structure. It fills in the
 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
 * payload and response payload (if expected). The reference count on the
 * ndlp is incremented by 1 and the reference to the ndlp is put into
 * ndlp of the IOCB data structure for this IOCB to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
		   u16 cmd_size, u8 retry,
		   struct lpfc_nodelist *ndlp, u32 did,
		   u32 elscmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp;
	struct ulp_bde64_le *bpl;
	u32 timeout = 0;

	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);
	if (!elsiocb)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
	    (phba->hba_flag & HBA_FIP_SUPPORT) &&
	    ((elscmd == ELS_CMD_FLOGI) ||
	     (elscmd == ELS_CMD_FDISC) ||
	     (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->cmd_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->cmd_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->cmd_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expect_rsp) {
		prsp = kmalloc(sizeof(*prsp), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else {
		prsp = NULL;
	}

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	if (expect_rsp) {
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			timeout = FF_DEF_RATOV * 2;
			break;
		case ELS_CMD_LOGO:
			timeout = phba->fc_ratov;
			break;
		default:
			timeout = phba->fc_ratov * 2;
		}

		/* Fill SGE for the num bde count */
		elsiocb->num_bdes = 2;
	}

	if (phba->sli_rev == LPFC_SLI_REV4)
		bmp = pcmd;
	else
		bmp = pbuflist;

	lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did,
				  elscmd, timeout, expect_rsp);

	bpl = (struct ulp_bde64_le *)pbuflist->virt;
	bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys));
	bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys));
	bpl->type_size = cpu_to_le32(cmd_size);
	bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);

	if (expect_rsp) {
		bpl++;
		bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys));
		bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys));
		bpl->type_size = cpu_to_le32(FCELSSIZE);
		bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
	}

	elsiocb->cmd_dmabuf = pcmd;
	elsiocb->bpl_dmabuf = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp)
		list_add(&prsp->list, &pcmd->list);
	if (expect_rsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state:x%x "
				 "rpi x%x fc_flag:x%x\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state, ndlp->nlp_rpi,
				 vport->fc_flag);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x "
				 "port_state x%x rpi x%x fc_flag x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmd_size, vport->port_state,
				 ndlp->nlp_rpi, vport->fc_flag);
	}

	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expect_rsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}
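/*
 * Editorial sketch (not part of the driver source): the usual issue path
 * built on lpfc_prep_els_iocb(), modeled on lpfc_issue_els_flogi() later in
 * this file.  Error handling is abbreviated; cmdsize, retry and ndlp come
 * from the caller.
 *
 *	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 *				     ndlp->nlp_DID, ELS_CMD_FLOGI);
 *	if (!elsiocb)
 *		return 1;
 *	pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
 *	// ... fill in the ELS payload at pcmd ...
 *	elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi;
 *	elsiocb->ndlp = lpfc_nlp_get(ndlp);	// hold node for completion
 *	if (!elsiocb->ndlp) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		return 1;
 *	}
 *	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
 *	if (rc == IOCB_ERROR) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		lpfc_nlp_put(ndlp);
 *		return 1;
 *	}
 */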
/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
	if (!mbox->ctx_ndlp) {
		err = 6;
		goto fail_free_mbox;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 7;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
fail_free_mbox:
	lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0249 Cannot issue Register Fabric login: Err %d\n",
			 err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *dmabuf = NULL;
	int rc = 0;

	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
	    !(vport->fc_flag & FC_PT2PT)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp) {
			rc = -ENODEV;
			goto fail;
		}
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail;
	}

	/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
	if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
		rc = lpfc_mbox_rsrc_prep(phba, mboxq);
		if (rc) {
			rc = -ENOMEM;
			goto fail_mbox;
		}
		dmabuf = mboxq->ctx_buf;
		memcpy(dmabuf->virt, &phba->fc_fabparam,
		       sizeof(struct serv_parm));
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	if (dmabuf) {
		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
		/* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */
		mboxq->ctx_buf = dmabuf;
	} else {
		lpfc_reg_vfi(mboxq, vport, 0);
	}

	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail_mbox;
	}
	return 0;

fail_mbox:
	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost;
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2556 UNREG_VFI mbox allocation failed"
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mboxq, vport);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	return 0;
}

/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from FLOGI/FDISC completion handler functions.
 * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric
 * node nodename is changed in the completion service parameter else return
 * 0. This function also set flag in the vport data structure to delay
 * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit
 * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric
 * node nodename is changed in the completion service parameter.
 *
 * Return code
 *   0 - FCID and Fabric Nodename and Fabric portname is not changed.
 *   1 - FCID or Fabric Nodename or Fabric portname is changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
			  struct serv_parm *sp)
{
	struct lpfc_hba *phba = vport->phba;
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
	    memcmp(&vport->fabric_portname, &sp->portName,
		   sizeof(struct lpfc_name)) ||
	    memcmp(&vport->fabric_nodename, &sp->nodeName,
		   sizeof(struct lpfc_name)) ||
	    (vport->vport_flag & FAWWPN_PARAM_CHG)) {
		fabric_param_changed = 1;
		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
	}
	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 *  - vport->fc_prevDID != 0 (not initial discovery) OR
	 *  - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}

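/*
 * Editorial note on lpfc_check_clean_addr_bit() above: FC_DISC_DELAYED is
 * set only when all of the following hold:
 *	1. a fabric parameter changed (FCID, fabric WWPN/WWNN, or a FAWWPN
 *	   parameter change was flagged), and
 *	2. the Clean Address bit in the FLOGI/FDISC response is 0, and
 *	3. either this is not the initial discovery (fc_prevDID != 0) or the
 *	   lpfc_delay_discovery module parameter is set.
 * The return value reports only whether a fabric parameter changed.
 */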
/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @ulp_word4: command response value
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also check the newly assigned N_Port ID to the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 * is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, uint32_t ulp_word4)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	vport->fc_myDID = ulp_word4 & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			     sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	if (fabric_param_changed) {
		/* Reset FDMI attribute masks based on config parameter */
		if (phba->cfg_enable_SmartSAN ||
		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
			/* Setup appropriate attribute masks */
			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
			if (phba->cfg_enable_SmartSAN)
				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
			else
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
		} else {
			vport->fdmi_hba_mask = 0;
			vport->fdmi_port_mask = 0;
		}

	}
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			to call reg_vnpid at least for the physical host */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/*
	 * For FC we need to do some special processing because of the SLI
	 * Port's default settings of the Common Service Parameters.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
		if (fabric_param_changed)
			lpfc_unregister_fcf_prep(phba);

		/* This should just update the VFI CSPs*/
		if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_reg_vfi(vport);
	}

	if (fabric_param_changed &&
	    !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					 &vport->fc_nodes, nlp_listp) {
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
			    !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(&np->lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(&np->lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}

		/*
		 * For SLI3 and SLI4, the VPI needs to be reregistered in
		 * response to this fabric parameter change event.
		 */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		   !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
		/*
		 * Driver needs to re-reg VPI in order for f/w
		 * to update the MAC address.
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		lpfc_register_new_vport(phba, vport, ndlp);
		return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
		    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					 "3135 Need register VFI: (x%x/%x)\n",
					 vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}

/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
		lpfc_unregister_fcf_prep(phba);

		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_VFI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
		phba->fc_topology_changed = 0;
	}

	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));

	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set our Id to LocalID
		 * the other side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		/* If not registered with a transport, decrement ndlp reference
		 * count indicating that ndlp can be safely released when other
		 * references are removed.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
			lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(&ndlp->lock);

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
	} else {
		/* This side will wait for the PLOGI. If not registered with
		 * a transport, decrement node reference count indicating that
		 * ndlp can be released when other references are removed.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
			lpfc_nlp_put(ndlp);

		/* Start discovery - this should just do CLEAR_LA */
		lpfc_disc_start(vport);
	}

	return 0;
fail:
	return -ENXIO;
}
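/*
 * Editorial note on the pt2pt handling above: the local and received port
 * names are compared with memcmp() as byte strings.  When the local WWPN
 * compares greater than or equal, this side initiates the PLOGI; if the
 * names differ it also claims PT2PT_LocalID (1) and addresses the peer as
 * PT2PT_RemoteID (2).  Otherwise the port simply starts discovery and
 * waits for the peer's PLOGI.
 */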
/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
 * retry has been made (either immediately or delayed with lpfc_els_retry()
 * returning 1), the command IOCB will be released and function returned.
 * If the retry attempt has been given up (possibly reach the maximum
 * number of retries), one additional decrement of ndlp reference shall be
 * invoked before going out after releasing the command IOCB. This will
 * actually release the remote node (Note, lpfc_els_free_iocb() will also
 * invoke one decrement of ndlp reference count). If no error reported in
 * the IOCB status, the command Port ID field is used to determine whether
 * this is a point-to-point topology or a fabric topology: if the Port ID
 * field is assigned, it is a fabric topology; otherwise, it is a
 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
	IOCB_t *irsp;
	struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;
	u32 ulp_status, ulp_word4, tmo;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
			lpfc_nlp_put(ndlp);
		goto out;
	}

	ulp_status = get_job_ulpstatus(phba, rspiocb);
	ulp_word4 = get_job_word4(phba, rspiocb);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		tmo = get_wqe_tmo(cmdiocb);
	} else {
		irsp = &rspiocb->iocb;
		tmo = irsp->ulpTimeout;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "FLOGI cmpl: status:x%x/x%x state:x%x",
			      ulp_status, ulp_word4,
			      vport->port_state);

	if (ulp_status) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (ulp_status == IOSTAT_LOCAL_REJECT) &&
			    ((ulp_word4 & IOERR_PARAM_MASK) ==
			     IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					ulp_status, ulp_word4, tmo);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

stop_rr_fcf_flogi:
		/* FLOGI failure */
		if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
		      ((ulp_word4 & IOERR_PARAM_MASK) ==
		       IOERR_LOOP_OPEN_FAILURE)))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "2858 FLOGI failure Status:x%x/x%x TMO"
					 ":x%x Data x%x x%x\n",
					 ulp_status, ulp_word4, tmo,
					 phba->hba_flag, phba->fcf.fcf_flag);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
				 "0150 FLOGI failure Status:x%x/x%x "
				 "xri x%x TMO:x%x refcnt %d\n",
				 ulp_status, ulp_word4, cmdiocb->sli4_xritag,
				 tmo, kref_read(&ndlp->kref));

		/* If this is not a loop open failure, bail out */
		if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
		      ((ulp_word4 & IOERR_PARAM_MASK) ==
		       IOERR_LOOP_OPEN_FAILURE))) {
			/* FLOGI failure */
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0100 FLOGI failure Status:x%x/x%x "
					 "TMO:x%x\n",
					 ulp_status, ulp_word4, tmo);
			goto flogifail;
		}

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
				    FC_PT2PT_NO_NVME);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
		     (vport->fc_prevDID != vport->fc_myDID) ||
		     phba->fc_topology_changed)) {
			if (vport->fc_flag & FC_VFI_REGISTERED) {
				if (phba->fc_topology_changed) {
					lpfc_unregister_fcf_prep(phba);
					spin_lock_irq(shost->host_lock);
					vport->fc_flag &= ~FC_VFI_REGISTERED;
					spin_unlock_irq(shost->host_lock);
					phba->fc_topology_changed = 0;
				} else {
					lpfc_sli4_unreg_all_rpis(vport);
				}
			}

			/* Do not register VFI if the driver aborted FLOGI */
			if (!lpfc_error_lost_link(ulp_status, ulp_word4))
				lpfc_issue_reg_vfi(vport);

			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLogI succeeded.  Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully, I/O tag:x%x "
			 "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n",
			 cmdiocb->iotag, cmdiocb->sli4_xritag,
			 ulp_word4, sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
			 vport->port_state, vport->fc_flag,
			 sp->cmn.priority_tagging, kref_read(&ndlp->kref));

	if (sp->cmn.priority_tagging)
		vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
						  LPFC_VMID_TYPE_PRIO);

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp,
							ulp_word4);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);

			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						 LOG_ELS,
						 "2769 FLOGI to FCF (x%x) "
						 "completed successfully\n",
						 phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
	} else if (vport->port_state > LPFC_FLOGI &&
		   vport->fc_flag & FC_PT2PT) {
		/*
		 * In a p2p topology, it is possible that discovery has
		 * already progressed, and this completion can be ignored.
		 * Recheck the indicated topology.
		 */
		if (!sp->cmn.fPort)
			goto out;
	}

flogifail:
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
	spin_unlock_irq(&phba->hbalock);

	if (!lpfc_error_lost_link(ulp_status, ulp_word4)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((ulp_status != IOSTAT_LOCAL_REJECT) ||
		    (((ulp_word4 & IOERR_PARAM_MASK) !=
		      IOERR_SLI_ABORTED) &&
		     ((ulp_word4 & IOERR_PARAM_MASK) !=
		      IOERR_SLI_DOWN))) &&
		   (phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING;
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

/**
 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
 *                           aborted during a link down
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 */
static void
lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	uint32_t *pcmd;
	uint32_t cmd;
	u32 ulp_status, ulp_word4;

	pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
	cmd = *pcmd;

	ulp_status = get_job_ulpstatus(phba, rspiocb);
	ulp_word4 = get_job_word4(phba, rspiocb);

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"6445 ELS completes after LINK_DOWN: "
			" Status %x/%x cmd x%x flg x%x\n",
			ulp_status, ulp_word4, cmd,
			cmdiocb->cmd_flag);

	if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) {
		cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
		atomic_dec(&phba->fabric_iocb_count);
	}
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that the ndlp reference count will be incremented by 1 for holding the
 * ndlp and the reference to ndlp will be stored into the ndlp field of
 * the IOCB for the completion callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba  *phba = vport->phba;
	struct serv_parm *sp;
	union lpfc_wqe128 *wqe = NULL;
	IOCB_t *icmd = NULL;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_iocbq defer_flogi_acc;
	u8 *pcmd, ct;
	uint16_t cmdsize;
	uint32_t tmo, did;
	int rc;

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	wqe = &elsiocb->wqe;
	pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
	icmd = &elsiocb->iocb;

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	/* Determine if switch supports priority tagging */
	if (phba->cfg_vmid_priority_tagging) {
		sp->cmn.priority_tagging = 1;
		/* lpfc_vmid_host_uuid is combination of wwpn and wwnn */
		if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) {
			memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn,
			       sizeof(phba->wwpn));
			memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn,
			       sizeof(phba->wwnn));
		}
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			/* FLOGI needs to be 3 for WQE FCFI */
			ct = SLI4_CT_FCFI;
			bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);

			/* Set the fcfi to the fcfi we registered with */
			bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
			       phba->fcf.fcfi);
		}

		/* Can't do SLI4 class2 without support sequence coalescing */
		sp->cls2.classValid = 0;
		sp->cls2.seqDelivery = 0;
	} else {
		/* Historical, setting sequential-delivery bit for SLI3 */
		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else {
			sp->cmn.request_multiple_Nport = 0;
		}

		if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
			icmd->un.elsreq64.myID = 0;
			icmd->un.elsreq64.fl = 1;
		}
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue FLOGI: opt:x%x",
			      phba->sli3_options, 0, 0);

	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return 1;
	}

	phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);

	/* Clear external loopback plug detected flag */
	phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;

	/* Check for a deferred FLOGI ACC condition */
	if (phba->defer_flogi_acc_flag) {
		/* lookup ndlp for received FLOGI */
		ndlp = lpfc_findnode_did(vport, 0);
		if (!ndlp)
			return 0;

		did = vport->fc_myDID;
		vport->fc_myDID = Fabric_DID;

		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));

		if (phba->sli_rev == LPFC_SLI_REV4) {
			bf_set(wqe_ctxt_tag,
			       &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
			       phba->defer_flogi_acc_rx_id);
			bf_set(wqe_rcvoxid,
			       &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
			       phba->defer_flogi_acc_ox_id);
		} else {
			icmd = &defer_flogi_acc.iocb;
			icmd->ulpContext = phba->defer_flogi_acc_rx_id;
			icmd->unsli3.rcvsli3.ox_id =
				phba->defer_flogi_acc_ox_id;
		}

		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
				 " ox_id: x%x, hba_flag x%x\n",
				 phba->defer_flogi_acc_rx_id,
				 phba->defer_flogi_acc_ox_id, phba->hba_flag);

		/* Send deferred FLOGI ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
				 ndlp, NULL);

		phba->defer_flogi_acc_flag = false;
		vport->fc_myDID = did;

		/* Decrement ndlp reference count to indicate the node can be
		 * released when other references are removed.
		 */
		lpfc_nlp_put(ndlp);
	}

	return 0;
}
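/*
 * Editorial note on the deferred FLOGI ACC path above: when a FLOGI was
 * received from the point-to-point peer before the local FLOGI completed,
 * the driver has saved the rx_id/ox_id of that frame.  Once its own FLOGI
 * is outstanding it sends the deferred ACC with fc_myDID temporarily set
 * to Fabric_DID, then restores the original DID and drops a node reference
 * so the node can be released once other references go away.
 */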
1438 * 1439 * Return code 1440 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0) 1441 **/ 1442int 1443lpfc_els_abort_flogi(struct lpfc_hba *phba) 1444{ 1445 struct lpfc_sli_ring *pring; 1446 struct lpfc_iocbq *iocb, *next_iocb; 1447 struct lpfc_nodelist *ndlp; 1448 u32 ulp_command; 1449 1450 /* Abort outstanding I/O on NPort <nlp_DID> */ 1451 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1452 "0201 Abort outstanding I/O on NPort x%x\n", 1453 Fabric_DID); 1454 1455 pring = lpfc_phba_elsring(phba); 1456 if (unlikely(!pring)) 1457 return -EIO; 1458 1459 /* 1460 * Check the txcmplq for an iocb that matches the nport the driver is 1461 * searching for. 1462 */ 1463 spin_lock_irq(&phba->hbalock); 1464 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 1465 ulp_command = get_job_cmnd(phba, iocb); 1466 if (ulp_command == CMD_ELS_REQUEST64_CR) { 1467 ndlp = iocb->ndlp; 1468 if (ndlp && ndlp->nlp_DID == Fabric_DID) { 1469 if ((phba->pport->fc_flag & FC_PT2PT) && 1470 !(phba->pport->fc_flag & FC_PT2PT_PLOGI)) 1471 iocb->fabric_cmd_cmpl = 1472 lpfc_ignore_els_cmpl; 1473 lpfc_sli_issue_abort_iotag(phba, pring, iocb, 1474 NULL); 1475 } 1476 } 1477 } 1478 /* Make sure HBA is alive */ 1479 lpfc_issue_hb_tmo(phba); 1480 1481 spin_unlock_irq(&phba->hbalock); 1482 1483 return 0; 1484} 1485 1486/** 1487 * lpfc_initial_flogi - Issue an initial fabric login for a vport 1488 * @vport: pointer to a host virtual N_Port data structure. 1489 * 1490 * This routine issues an initial Fabric Login (FLOGI) for the @vport 1491 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from 1492 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1493 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1494 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine 1495 * is then invoked with the @vport and the ndlp to perform the FLOGI for the 1496 * @vport. 1497 * 1498 * Return code 1499 * 0 - failed to issue initial flogi for @vport 1500 * 1 - successfully issued initial flogi for @vport 1501 **/ 1502int 1503lpfc_initial_flogi(struct lpfc_vport *vport) 1504{ 1505 struct lpfc_nodelist *ndlp; 1506 1507 vport->port_state = LPFC_FLOGI; 1508 lpfc_set_disctmo(vport); 1509 1510 /* First look for the Fabric ndlp */ 1511 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1512 if (!ndlp) { 1513 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1514 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1515 if (!ndlp) 1516 return 0; 1517 /* Set the node type */ 1518 ndlp->nlp_type |= NLP_FABRIC; 1519 1520 /* Put ndlp onto node list */ 1521 lpfc_enqueue_node(vport, ndlp); 1522 } 1523 1524 /* Reset the Fabric flag, topology change may have happened */ 1525 vport->fc_flag &= ~FC_FABRIC; 1526 if (lpfc_issue_els_flogi(vport, ndlp, 0)) { 1527 /* A node reference should be retained while registered with a 1528 * transport or dev-loss-evt work is pending. 1529 * Otherwise, decrement node reference to trigger release. 1530 */ 1531 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 1532 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 1533 lpfc_nlp_put(ndlp); 1534 return 0; 1535 } 1536 return 1; 1537} 1538 1539/** 1540 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport 1541 * @vport: pointer to a host virtual N_Port data structure. 1542 * 1543 * This routine issues an initial Fabric Discover (FDISC) for the @vport 1544 * specified. 
It first searches the ndlp with the Fabric_DID (0xfffffe) from 1545 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1546 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1547 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine 1548 * is then invoked with the @vport and the ndlp to perform the FDISC for the 1549 * @vport. 1550 * 1551 * Return code 1552 * 0 - failed to issue initial fdisc for @vport 1553 * 1 - successfully issued initial fdisc for @vport 1554 **/ 1555int 1556lpfc_initial_fdisc(struct lpfc_vport *vport) 1557{ 1558 struct lpfc_nodelist *ndlp; 1559 1560 /* First look for the Fabric ndlp */ 1561 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1562 if (!ndlp) { 1563 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1564 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1565 if (!ndlp) 1566 return 0; 1567 1568 /* NPIV is only supported in Fabrics. */ 1569 ndlp->nlp_type |= NLP_FABRIC; 1570 1571 /* Put ndlp onto node list */ 1572 lpfc_enqueue_node(vport, ndlp); 1573 } 1574 1575 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { 1576 /* A node reference should be retained while registered with a 1577 * transport or dev-loss-evt work is pending. 1578 * Otherwise, decrement node reference to trigger release. 1579 */ 1580 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 1581 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 1582 lpfc_nlp_put(ndlp); 1583 return 0; 1584 } 1585 return 1; 1586} 1587 1588/** 1589 * lpfc_more_plogi - Check and issue remaining plogis for a vport 1590 * @vport: pointer to a host virtual N_Port data structure. 1591 * 1592 * This routine checks whether there are more remaining Port Logins 1593 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine 1594 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes 1595 * to issue ELS PLOGIs up to the configured discover threads with the 1596 * @vport (@vport->cfg_discovery_threads). The function also decrement 1597 * the @vport's num_disc_node by 1 if it is not already 0. 1598 **/ 1599void 1600lpfc_more_plogi(struct lpfc_vport *vport) 1601{ 1602 if (vport->num_disc_nodes) 1603 vport->num_disc_nodes--; 1604 1605 /* Continue discovery with <num_disc_nodes> PLOGIs to go */ 1606 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1607 "0232 Continue discovery with %d PLOGIs to go " 1608 "Data: x%x x%x x%x\n", 1609 vport->num_disc_nodes, vport->fc_plogi_cnt, 1610 vport->fc_flag, vport->port_state); 1611 /* Check to see if there are more PLOGIs to be sent */ 1612 if (vport->fc_flag & FC_NLP_MORE) 1613 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 1614 lpfc_els_disc_plogi(vport); 1615 1616 return; 1617} 1618 1619/** 1620 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp 1621 * @phba: pointer to lpfc hba data structure. 1622 * @prsp: pointer to response IOCB payload. 1623 * @ndlp: pointer to a node-list data structure. 1624 * 1625 * This routine checks and indicates whether the WWPN of an N_Port, retrieved 1626 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_POrt. 1627 * The following cases are considered N_Port confirmed: 1628 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches 1629 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but 1630 * it does not have WWPN assigned either. If the WWPN is confirmed, the 1631 * pointer to the @ndlp will be returned. 
If the WWPN is not confirmed: 1632 * 1) if there is a node on vport list other than the @ndlp with the same 1633 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked 1634 * on that node to release the RPI associated with the node; 2) if there is 1635 * no node found on vport list with the same WWPN of the N_Port PLOGI logged 1636 * into, a new node shall be allocated (or activated). In either case, the 1637 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall 1638 * be released and the new_ndlp shall be put on to the vport node list and 1639 * its pointer returned as the confirmed node. 1640 * 1641 * Note that before the @ndlp got "released", the keepDID from not-matching 1642 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID 1643 * of the @ndlp. This is because the release of @ndlp is actually to put it 1644 * into an inactive state on the vport node list and the vport node list 1645 * management algorithm does not allow two node with a same DID. 1646 * 1647 * Return code 1648 * pointer to the PLOGI N_Port @ndlp 1649 **/ 1650static struct lpfc_nodelist * 1651lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, 1652 struct lpfc_nodelist *ndlp) 1653{ 1654 struct lpfc_vport *vport = ndlp->vport; 1655 struct lpfc_nodelist *new_ndlp; 1656 struct serv_parm *sp; 1657 uint8_t name[sizeof(struct lpfc_name)]; 1658 uint32_t keepDID = 0, keep_nlp_flag = 0; 1659 uint32_t keep_new_nlp_flag = 0; 1660 uint16_t keep_nlp_state; 1661 u32 keep_nlp_fc4_type = 0; 1662 struct lpfc_nvme_rport *keep_nrport = NULL; 1663 unsigned long *active_rrqs_xri_bitmap = NULL; 1664 1665 /* Fabric nodes can have the same WWPN so we don't bother searching 1666 * by WWPN. Just return the ndlp that was given to us. 1667 */ 1668 if (ndlp->nlp_type & NLP_FABRIC) 1669 return ndlp; 1670 1671 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t)); 1672 memset(name, 0, sizeof(struct lpfc_name)); 1673 1674 /* Now we find out if the NPort we are logging into, matches the WWPN 1675 * we have for that ndlp. If not, we have some work to do. 1676 */ 1677 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); 1678 1679 /* return immediately if the WWPN matches ndlp */ 1680 if (!new_ndlp || (new_ndlp == ndlp)) 1681 return ndlp; 1682 1683 /* 1684 * Unregister from backend if not done yet. Could have been skipped 1685 * due to ADISC 1686 */ 1687 lpfc_nlp_unreg_node(vport, new_ndlp); 1688 1689 if (phba->sli_rev == LPFC_SLI_REV4) { 1690 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, 1691 GFP_KERNEL); 1692 if (active_rrqs_xri_bitmap) 1693 memset(active_rrqs_xri_bitmap, 0, 1694 phba->cfg_rrq_xri_bitmap_sz); 1695 } 1696 1697 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1698 "3178 PLOGI confirm: ndlp x%x x%x x%x: " 1699 "new_ndlp x%x x%x x%x\n", 1700 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, 1701 (new_ndlp ? new_ndlp->nlp_DID : 0), 1702 (new_ndlp ? new_ndlp->nlp_flag : 0), 1703 (new_ndlp ? new_ndlp->nlp_fc4_type : 0)); 1704 1705 keepDID = new_ndlp->nlp_DID; 1706 1707 if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap) 1708 memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap, 1709 phba->cfg_rrq_xri_bitmap_sz); 1710 1711 /* At this point in this routine, we know new_ndlp will be 1712 * returned. however, any previous GID_FTs that were done 1713 * would have updated nlp_fc4_type in ndlp, so we must ensure 1714 * new_ndlp has the right value. 
1715 */ 1716 if (vport->fc_flag & FC_FABRIC) { 1717 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type; 1718 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; 1719 } 1720 1721 lpfc_unreg_rpi(vport, new_ndlp); 1722 new_ndlp->nlp_DID = ndlp->nlp_DID; 1723 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1724 if (phba->sli_rev == LPFC_SLI_REV4) 1725 memcpy(new_ndlp->active_rrqs_xri_bitmap, 1726 ndlp->active_rrqs_xri_bitmap, 1727 phba->cfg_rrq_xri_bitmap_sz); 1728 1729 /* Lock both ndlps */ 1730 spin_lock_irq(&ndlp->lock); 1731 spin_lock_irq(&new_ndlp->lock); 1732 keep_new_nlp_flag = new_ndlp->nlp_flag; 1733 keep_nlp_flag = ndlp->nlp_flag; 1734 new_ndlp->nlp_flag = ndlp->nlp_flag; 1735 1736 /* if new_ndlp had NLP_UNREG_INP set, keep it */ 1737 if (keep_new_nlp_flag & NLP_UNREG_INP) 1738 new_ndlp->nlp_flag |= NLP_UNREG_INP; 1739 else 1740 new_ndlp->nlp_flag &= ~NLP_UNREG_INP; 1741 1742 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ 1743 if (keep_new_nlp_flag & NLP_RPI_REGISTERED) 1744 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1745 else 1746 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1747 1748 /* 1749 * Retain the DROPPED flag. This will take care of the init 1750 * refcount when affecting the state change 1751 */ 1752 if (keep_new_nlp_flag & NLP_DROPPED) 1753 new_ndlp->nlp_flag |= NLP_DROPPED; 1754 else 1755 new_ndlp->nlp_flag &= ~NLP_DROPPED; 1756 1757 ndlp->nlp_flag = keep_new_nlp_flag; 1758 1759 /* if ndlp had NLP_UNREG_INP set, keep it */ 1760 if (keep_nlp_flag & NLP_UNREG_INP) 1761 ndlp->nlp_flag |= NLP_UNREG_INP; 1762 else 1763 ndlp->nlp_flag &= ~NLP_UNREG_INP; 1764 1765 /* if ndlp had NLP_RPI_REGISTERED set, keep it */ 1766 if (keep_nlp_flag & NLP_RPI_REGISTERED) 1767 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1768 else 1769 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1770 1771 /* 1772 * Retain the DROPPED flag. This will take care of the init 1773 * refcount when affecting the state change 1774 */ 1775 if (keep_nlp_flag & NLP_DROPPED) 1776 ndlp->nlp_flag |= NLP_DROPPED; 1777 else 1778 ndlp->nlp_flag &= ~NLP_DROPPED; 1779 1780 spin_unlock_irq(&new_ndlp->lock); 1781 spin_unlock_irq(&ndlp->lock); 1782 1783 /* Set nlp_states accordingly */ 1784 keep_nlp_state = new_ndlp->nlp_state; 1785 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1786 1787 /* interchange the nvme remoteport structs */ 1788 keep_nrport = new_ndlp->nrport; 1789 new_ndlp->nrport = ndlp->nrport; 1790 1791 /* Move this back to NPR state */ 1792 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1793 /* The new_ndlp is replacing ndlp totally, so we need 1794 * to put ndlp on UNUSED list and try to free it. 1795 */ 1796 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1797 "3179 PLOGI confirm NEW: %x %x\n", 1798 new_ndlp->nlp_DID, keepDID); 1799 1800 /* Two ndlps cannot have the same did on the nodelist. 1801 * Note: for this case, ndlp has a NULL WWPN so setting 1802 * the nlp_fc4_type isn't required. 1803 */ 1804 ndlp->nlp_DID = keepDID; 1805 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1806 if (phba->sli_rev == LPFC_SLI_REV4 && 1807 active_rrqs_xri_bitmap) 1808 memcpy(ndlp->active_rrqs_xri_bitmap, 1809 active_rrqs_xri_bitmap, 1810 phba->cfg_rrq_xri_bitmap_sz); 1811 1812 } else { 1813 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1814 "3180 PLOGI confirm SWAP: %x %x\n", 1815 new_ndlp->nlp_DID, keepDID); 1816 1817 lpfc_unreg_rpi(vport, ndlp); 1818 1819 /* Two ndlps cannot have the same did and the fc4 1820 * type must be transferred because the ndlp is in 1821 * flight. 
1822 */ 1823 ndlp->nlp_DID = keepDID; 1824 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1825 1826 if (phba->sli_rev == LPFC_SLI_REV4 && 1827 active_rrqs_xri_bitmap) 1828 memcpy(ndlp->active_rrqs_xri_bitmap, 1829 active_rrqs_xri_bitmap, 1830 phba->cfg_rrq_xri_bitmap_sz); 1831 1832 /* Since we are switching over to the new_ndlp, 1833 * reset the old ndlp state 1834 */ 1835 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1836 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1837 keep_nlp_state = NLP_STE_NPR_NODE; 1838 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1839 ndlp->nrport = keep_nrport; 1840 } 1841 1842 /* 1843 * If ndlp is not associated with any rport we can drop it here else 1844 * let dev_loss_tmo_callbk trigger DEVICE_RM event 1845 */ 1846 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) 1847 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 1848 1849 if (phba->sli_rev == LPFC_SLI_REV4 && 1850 active_rrqs_xri_bitmap) 1851 mempool_free(active_rrqs_xri_bitmap, 1852 phba->active_rrq_pool); 1853 1854 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1855 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", 1856 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1857 new_ndlp->nlp_fc4_type); 1858 1859 return new_ndlp; 1860} 1861 1862/** 1863 * lpfc_end_rscn - Check and handle more rscn for a vport 1864 * @vport: pointer to a host virtual N_Port data structure. 1865 * 1866 * This routine checks whether more Registration State Change 1867 * Notifications (RSCNs) came in while the discovery state machine was in 1868 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1869 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1870 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1871 * handling the RSCNs. 1872 **/ 1873void 1874lpfc_end_rscn(struct lpfc_vport *vport) 1875{ 1876 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1877 1878 if (vport->fc_flag & FC_RSCN_MODE) { 1879 /* 1880 * Check to see if more RSCNs came in while we were 1881 * processing this one. 1882 */ 1883 if (vport->fc_rscn_id_cnt || 1884 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 1885 lpfc_els_handle_rscn(vport); 1886 else { 1887 spin_lock_irq(shost->host_lock); 1888 vport->fc_flag &= ~FC_RSCN_MODE; 1889 vport->fc_flag |= FC_RSCN_MEMENTO; 1890 spin_unlock_irq(shost->host_lock); 1891 } 1892 } 1893} 1894 1895/** 1896 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1897 * @phba: pointer to lpfc hba data structure. 1898 * @cmdiocb: pointer to lpfc command iocb data structure. 1899 * @rspiocb: pointer to lpfc response iocb data structure. 1900 * 1901 * This routine will call the clear rrq function to free the rrq and 1902 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1903 * exist then the clear_rrq is still called because the rrq needs to 1904 * be freed. 
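 *
 * As an informal outline of the body below (not a separate code path),
 * the cleanup runs unconditionally once any failure has been logged:
 *
 *     lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
 *     lpfc_els_free_iocb(phba, cmdiocb);
 *     lpfc_nlp_put(ndlp);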
1905 **/ 1906 1907static void 1908lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1909 struct lpfc_iocbq *rspiocb) 1910{ 1911 struct lpfc_vport *vport = cmdiocb->vport; 1912 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 1913 struct lpfc_node_rrq *rrq; 1914 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 1915 u32 ulp_word4 = get_job_word4(phba, rspiocb); 1916 1917 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1918 rrq = cmdiocb->context_un.rrq; 1919 cmdiocb->rsp_iocb = rspiocb; 1920 1921 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1922 "RRQ cmpl: status:x%x/x%x did:x%x", 1923 ulp_status, ulp_word4, 1924 get_job_els_rsp64_did(phba, cmdiocb)); 1925 1926 1927 /* rrq completes to NPort <nlp_DID> */ 1928 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1929 "2880 RRQ completes to DID x%x " 1930 "Data: x%x x%x x%x x%x x%x\n", 1931 ndlp->nlp_DID, ulp_status, ulp_word4, 1932 get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid); 1933 1934 if (ulp_status) { 1935 /* Check for retry */ 1936 /* RRQ failed Don't print the vport to vport rjts */ 1937 if (ulp_status != IOSTAT_LS_RJT || 1938 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 1939 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 1940 (phba)->pport->cfg_log_verbose & LOG_ELS) 1941 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1942 "2881 RRQ failure DID:%06X Status:" 1943 "x%x/x%x\n", 1944 ndlp->nlp_DID, ulp_status, 1945 ulp_word4); 1946 } 1947 1948 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1949 lpfc_els_free_iocb(phba, cmdiocb); 1950 lpfc_nlp_put(ndlp); 1951 return; 1952} 1953/** 1954 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1955 * @phba: pointer to lpfc hba data structure. 1956 * @cmdiocb: pointer to lpfc command iocb data structure. 1957 * @rspiocb: pointer to lpfc response iocb data structure. 1958 * 1959 * This routine is the completion callback function for issuing the Port 1960 * Login (PLOGI) command. For PLOGI completion, there must be an active 1961 * ndlp on the vport node list that matches the remote node ID from the 1962 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply 1963 * ignored and command IOCB released. The PLOGI response IOCB status is 1964 * checked for error conditions. If there is error status reported, PLOGI 1965 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1966 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on 1967 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine 1968 * (DSM) is set for this PLOGI completion. Finally, it checks whether 1969 * there are additional N_Port nodes with the vport that need to perform 1970 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition 1971 * PLOGIs. 
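 *
 * A condensed, illustrative sketch of that flow (locking and most error
 * handling elided; the body below is the authoritative logic):
 *
 *     ndlp = lpfc_findnode_did(vport, did);
 *     if (ulp_status) {
 *             if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 *                     goto out;
 *             ... otherwise run the DSM and, if the node is no longer
 *             registered with a transport, start device removal ...
 *     } else {
 *             ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
 *             lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 *                                     NLP_EVT_CMPL_PLOGI);
 *     }
 *     if (disc && vport->num_disc_nodes)
 *             lpfc_more_plogi(vport);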
1972 **/ 1973static void 1974lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1975 struct lpfc_iocbq *rspiocb) 1976{ 1977 struct lpfc_vport *vport = cmdiocb->vport; 1978 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1979 IOCB_t *irsp; 1980 struct lpfc_nodelist *ndlp, *free_ndlp; 1981 struct lpfc_dmabuf *prsp; 1982 int disc; 1983 struct serv_parm *sp = NULL; 1984 u32 ulp_status, ulp_word4, did, iotag; 1985 bool release_node = false; 1986 1987 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1988 cmdiocb->rsp_iocb = rspiocb; 1989 1990 ulp_status = get_job_ulpstatus(phba, rspiocb); 1991 ulp_word4 = get_job_word4(phba, rspiocb); 1992 did = get_job_els_rsp64_did(phba, cmdiocb); 1993 1994 if (phba->sli_rev == LPFC_SLI_REV4) { 1995 iotag = get_wqe_reqtag(cmdiocb); 1996 } else { 1997 irsp = &rspiocb->iocb; 1998 iotag = irsp->ulpIoTag; 1999 } 2000 2001 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2002 "PLOGI cmpl: status:x%x/x%x did:x%x", 2003 ulp_status, ulp_word4, did); 2004 2005 ndlp = lpfc_findnode_did(vport, did); 2006 if (!ndlp) { 2007 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2008 "0136 PLOGI completes to NPort x%x " 2009 "with no ndlp. Data: x%x x%x x%x\n", 2010 did, ulp_status, ulp_word4, iotag); 2011 goto out_freeiocb; 2012 } 2013 2014 /* Since ndlp can be freed in the disc state machine, note if this node 2015 * is being used during discovery. 2016 */ 2017 spin_lock_irq(&ndlp->lock); 2018 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2019 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2020 spin_unlock_irq(&ndlp->lock); 2021 2022 /* PLOGI completes to NPort <nlp_DID> */ 2023 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2024 "0102 PLOGI completes to NPort x%06x " 2025 "Data: x%x x%x x%x x%x x%x\n", 2026 ndlp->nlp_DID, ndlp->nlp_fc4_type, 2027 ulp_status, ulp_word4, 2028 disc, vport->num_disc_nodes); 2029 2030 /* Check to see if link went down during discovery */ 2031 if (lpfc_els_chk_latt(vport)) { 2032 spin_lock_irq(&ndlp->lock); 2033 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2034 spin_unlock_irq(&ndlp->lock); 2035 goto out; 2036 } 2037 2038 if (ulp_status) { 2039 /* Check for retry */ 2040 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2041 /* ELS command is being retried */ 2042 if (disc) { 2043 spin_lock_irq(&ndlp->lock); 2044 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2045 spin_unlock_irq(&ndlp->lock); 2046 } 2047 goto out; 2048 } 2049 /* PLOGI failed Don't print the vport to vport rjts */ 2050 if (ulp_status != IOSTAT_LS_RJT || 2051 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 2052 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 2053 (phba)->pport->cfg_log_verbose & LOG_ELS) 2054 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2055 "2753 PLOGI failure DID:%06X " 2056 "Status:x%x/x%x\n", 2057 ndlp->nlp_DID, ulp_status, 2058 ulp_word4); 2059 2060 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2061 if (!lpfc_error_lost_link(ulp_status, ulp_word4)) 2062 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2063 NLP_EVT_CMPL_PLOGI); 2064 2065 /* If a PLOGI collision occurred, the node needs to continue 2066 * with the reglogin process. 2067 */ 2068 spin_lock_irq(&ndlp->lock); 2069 if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && 2070 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 2071 spin_unlock_irq(&ndlp->lock); 2072 goto out; 2073 } 2074 2075 /* No PLOGI collision and the node is not registered with the 2076 * scsi or nvme transport. It is no longer an active node. Just 2077 * start the device remove process. 
2078 */ 2079 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2080 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2081 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2082 release_node = true; 2083 } 2084 spin_unlock_irq(&ndlp->lock); 2085 2086 if (release_node) 2087 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2088 NLP_EVT_DEVICE_RM); 2089 } else { 2090 /* Good status, call state machine */ 2091 prsp = list_entry(cmdiocb->cmd_dmabuf->list.next, 2092 struct lpfc_dmabuf, list); 2093 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 2094 2095 sp = (struct serv_parm *)((u8 *)prsp->virt + 2096 sizeof(u32)); 2097 2098 ndlp->vmid_support = 0; 2099 if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) || 2100 (phba->cfg_vmid_priority_tagging && 2101 sp->cmn.priority_tagging)) { 2102 lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS, 2103 "4018 app_hdr_support %d tagging %d DID x%x\n", 2104 sp->cmn.app_hdr_support, 2105 sp->cmn.priority_tagging, 2106 ndlp->nlp_DID); 2107 /* if the dest port supports VMID, mark it in ndlp */ 2108 ndlp->vmid_support = 1; 2109 } 2110 2111 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2112 NLP_EVT_CMPL_PLOGI); 2113 } 2114 2115 if (disc && vport->num_disc_nodes) { 2116 /* Check to see if there are more PLOGIs to be sent */ 2117 lpfc_more_plogi(vport); 2118 2119 if (vport->num_disc_nodes == 0) { 2120 spin_lock_irq(shost->host_lock); 2121 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2122 spin_unlock_irq(shost->host_lock); 2123 2124 lpfc_can_disctmo(vport); 2125 lpfc_end_rscn(vport); 2126 } 2127 } 2128 2129out: 2130 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2131 "PLOGI Cmpl PUT: did:x%x refcnt %d", 2132 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2133 2134out_freeiocb: 2135 /* Release the reference on the original I/O request. */ 2136 free_ndlp = cmdiocb->ndlp; 2137 2138 lpfc_els_free_iocb(phba, cmdiocb); 2139 lpfc_nlp_put(free_ndlp); 2140 return; 2141} 2142 2143/** 2144 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 2145 * @vport: pointer to a host virtual N_Port data structure. 2146 * @did: destination port identifier. 2147 * @retry: number of retries to the command IOCB. 2148 * 2149 * This routine issues a Port Login (PLOGI) command to a remote N_Port 2150 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 2151 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 2152 * This routine constructs the proper fields of the PLOGI IOCB and invokes 2153 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 2154 * 2155 * Note that the ndlp reference count will be incremented by 1 for holding 2156 * the ndlp and the reference to ndlp will be stored into the ndlp field 2157 * of the IOCB for the completion callback function to the PLOGI ELS command. 2158 * 2159 * Return code 2160 * 0 - Successfully issued a plogi for @vport 2161 * 1 - failed to issue a plogi for @vport 2162 **/ 2163int 2164lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 2165{ 2166 struct lpfc_hba *phba = vport->phba; 2167 struct serv_parm *sp; 2168 struct lpfc_nodelist *ndlp; 2169 struct lpfc_iocbq *elsiocb; 2170 uint8_t *pcmd; 2171 uint16_t cmdsize; 2172 int ret; 2173 2174 ndlp = lpfc_findnode_did(vport, did); 2175 if (!ndlp) 2176 return 1; 2177 2178 /* Defer the processing of the issue PLOGI until after the 2179 * outstanding UNREG_RPI mbox command completes, unless we 2180 * are going offline. 
This logic does not apply for Fabric DIDs 2181 */ 2182 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2183 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2184 !(vport->fc_flag & FC_OFFLINE_MODE)) { 2185 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2186 "4110 Issue PLOGI x%x deferred " 2187 "on NPort x%x rpi x%x Data: x%px\n", 2188 ndlp->nlp_defer_did, ndlp->nlp_DID, 2189 ndlp->nlp_rpi, ndlp); 2190 2191 /* We can only defer 1st PLOGI */ 2192 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2193 ndlp->nlp_defer_did = did; 2194 return 0; 2195 } 2196 2197 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2198 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2199 ELS_CMD_PLOGI); 2200 if (!elsiocb) 2201 return 1; 2202 2203 spin_lock_irq(&ndlp->lock); 2204 ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT; 2205 spin_unlock_irq(&ndlp->lock); 2206 2207 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2208 2209 /* For PLOGI request, remainder of payload is service parameters */ 2210 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2211 pcmd += sizeof(uint32_t); 2212 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2213 sp = (struct serv_parm *) pcmd; 2214 2215 /* 2216 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2217 * to device on remote loops work. 2218 */ 2219 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 2220 sp->cmn.altBbCredit = 1; 2221 2222 if (sp->cmn.fcphLow < FC_PH_4_3) 2223 sp->cmn.fcphLow = FC_PH_4_3; 2224 2225 if (sp->cmn.fcphHigh < FC_PH3) 2226 sp->cmn.fcphHigh = FC_PH3; 2227 2228 sp->cmn.valid_vendor_ver_level = 0; 2229 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2230 sp->cmn.bbRcvSizeMsb &= 0xF; 2231 2232 /* Check if the destination port supports VMID */ 2233 ndlp->vmid_support = 0; 2234 if (vport->vmid_priority_tagging) 2235 sp->cmn.priority_tagging = 1; 2236 else if (phba->cfg_vmid_app_header && 2237 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags)) 2238 sp->cmn.app_hdr_support = 1; 2239 2240 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2241 "Issue PLOGI: did:x%x", 2242 did, 0, 0); 2243 2244 /* If our firmware supports this feature, convey that 2245 * information to the target using the vendor specific field. 2246 */ 2247 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2248 sp->cmn.valid_vendor_ver_level = 1; 2249 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2250 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2251 } 2252 2253 phba->fc_stat.elsXmitPLOGI++; 2254 elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi; 2255 2256 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2257 "Issue PLOGI: did:x%x refcnt %d", 2258 did, kref_read(&ndlp->kref), 0); 2259 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2260 if (!elsiocb->ndlp) { 2261 lpfc_els_free_iocb(phba, elsiocb); 2262 return 1; 2263 } 2264 2265 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2266 if (ret) { 2267 lpfc_els_free_iocb(phba, elsiocb); 2268 lpfc_nlp_put(ndlp); 2269 return 1; 2270 } 2271 2272 return 0; 2273} 2274 2275/** 2276 * lpfc_cmpl_els_prli - Completion callback function for prli 2277 * @phba: pointer to lpfc hba data structure. 2278 * @cmdiocb: pointer to lpfc command iocb data structure. 2279 * @rspiocb: pointer to lpfc response iocb data structure. 2280 * 2281 * This routine is the completion callback function for a Process Login 2282 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2283 * status. 
If there is error status reported, PRLI retry shall be attempted 2284 * by invoking the lpfc_els_retry() routine. Otherwise, the state 2285 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2286 * ndlp to mark the PRLI completion. 2287 **/ 2288static void 2289lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2290 struct lpfc_iocbq *rspiocb) 2291{ 2292 struct lpfc_vport *vport = cmdiocb->vport; 2293 struct lpfc_nodelist *ndlp; 2294 char *mode; 2295 u32 loglevel; 2296 u32 ulp_status; 2297 u32 ulp_word4; 2298 bool release_node = false; 2299 2300 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2301 cmdiocb->rsp_iocb = rspiocb; 2302 2303 ndlp = cmdiocb->ndlp; 2304 2305 ulp_status = get_job_ulpstatus(phba, rspiocb); 2306 ulp_word4 = get_job_word4(phba, rspiocb); 2307 2308 spin_lock_irq(&ndlp->lock); 2309 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2310 2311 /* Driver supports multiple FC4 types. Counters matter. */ 2312 vport->fc_prli_sent--; 2313 ndlp->fc4_prli_sent--; 2314 spin_unlock_irq(&ndlp->lock); 2315 2316 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2317 "PRLI cmpl: status:x%x/x%x did:x%x", 2318 ulp_status, ulp_word4, 2319 ndlp->nlp_DID); 2320 2321 /* PRLI completes to NPort <nlp_DID> */ 2322 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2323 "0103 PRLI completes to NPort x%06x " 2324 "Data: x%x x%x x%x x%x\n", 2325 ndlp->nlp_DID, ulp_status, ulp_word4, 2326 vport->num_disc_nodes, ndlp->fc4_prli_sent); 2327 2328 /* Check to see if link went down during discovery */ 2329 if (lpfc_els_chk_latt(vport)) 2330 goto out; 2331 2332 if (ulp_status) { 2333 /* Check for retry */ 2334 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2335 /* ELS command is being retried */ 2336 goto out; 2337 } 2338 2339 /* If we don't send GFT_ID to Fabric, a PRLI error 2340 * could be expected. 2341 */ 2342 if ((vport->fc_flag & FC_FABRIC) || 2343 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { 2344 mode = KERN_ERR; 2345 loglevel = LOG_TRACE_EVENT; 2346 } else { 2347 mode = KERN_INFO; 2348 loglevel = LOG_ELS; 2349 } 2350 2351 /* PRLI failed */ 2352 lpfc_printf_vlog(vport, mode, loglevel, 2353 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2354 "data: x%x\n", 2355 ndlp->nlp_DID, ulp_status, 2356 ulp_word4, ndlp->fc4_prli_sent); 2357 2358 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2359 if (!lpfc_error_lost_link(ulp_status, ulp_word4)) 2360 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2361 NLP_EVT_CMPL_PRLI); 2362 2363 /* 2364 * For P2P topology, retain the node so that PLOGI can be 2365 * attempted on it again. 2366 */ 2367 if (vport->fc_flag & FC_PT2PT) 2368 goto out; 2369 2370 /* As long as this node is not registered with the SCSI 2371 * or NVMe transport and no other PRLIs are outstanding, 2372 * it is no longer an active node. Otherwise devloss 2373 * handles the final cleanup. 2374 */ 2375 spin_lock_irq(&ndlp->lock); 2376 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 2377 !ndlp->fc4_prli_sent) { 2378 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2379 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2380 release_node = true; 2381 } 2382 spin_unlock_irq(&ndlp->lock); 2383 2384 if (release_node) 2385 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2386 NLP_EVT_DEVICE_RM); 2387 } else { 2388 /* Good status, call state machine. However, if another 2389 * PRLI is outstanding, don't call the state machine 2390 * because final disposition to Mapped or Unmapped is 2391 * completed there. 
2392 */ 2393 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2394 NLP_EVT_CMPL_PRLI); 2395 } 2396 2397out: 2398 lpfc_els_free_iocb(phba, cmdiocb); 2399 lpfc_nlp_put(ndlp); 2400 return; 2401} 2402 2403/** 2404 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2405 * @vport: pointer to a host virtual N_Port data structure. 2406 * @ndlp: pointer to a node-list data structure. 2407 * @retry: number of retries to the command IOCB. 2408 * 2409 * This routine issues a Process Login (PRLI) ELS command for the 2410 * @vport. The PRLI service parameters are set up in the payload of the 2411 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2412 * is put to the IOCB completion callback func field before invoking the 2413 * routine lpfc_sli_issue_iocb() to send out PRLI command. 2414 * 2415 * Note that the ndlp reference count will be incremented by 1 for holding the 2416 * ndlp and the reference to ndlp will be stored into the ndlp field of 2417 * the IOCB for the completion callback function to the PRLI ELS command. 2418 * 2419 * Return code 2420 * 0 - successfully issued prli iocb command for @vport 2421 * 1 - failed to issue prli iocb command for @vport 2422 **/ 2423int 2424lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2425 uint8_t retry) 2426{ 2427 int rc = 0; 2428 struct lpfc_hba *phba = vport->phba; 2429 PRLI *npr; 2430 struct lpfc_nvme_prli *npr_nvme; 2431 struct lpfc_iocbq *elsiocb; 2432 uint8_t *pcmd; 2433 uint16_t cmdsize; 2434 u32 local_nlp_type, elscmd; 2435 2436 /* 2437 * If discovery was kicked off from RSCN mode, 2438 * the FC4 types supported from a 2439 * previous GFT_ID command may not be accurate. So, if we 2440 * are a NVME Initiator, always look for the possibility of 2441 * the remote NPort beng a NVME Target. 2442 */ 2443 if (phba->sli_rev == LPFC_SLI_REV4 && 2444 vport->fc_flag & (FC_RSCN_MODE | FC_RSCN_MEMENTO) && 2445 vport->nvmei_support) 2446 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2447 local_nlp_type = ndlp->nlp_fc4_type; 2448 2449 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp 2450 * fields here before any of them can complete. 2451 */ 2452 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2453 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2454 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2455 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); 2456 ndlp->nvme_fb_size = 0; 2457 2458 send_next_prli: 2459 if (local_nlp_type & NLP_FC4_FCP) { 2460 /* Payload is 4 + 16 = 20 x14 bytes. */ 2461 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2462 elscmd = ELS_CMD_PRLI; 2463 } else if (local_nlp_type & NLP_FC4_NVME) { 2464 /* Payload is 4 + 20 = 24 x18 bytes. */ 2465 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2466 elscmd = ELS_CMD_NVMEPRLI; 2467 } else { 2468 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2469 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2470 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2471 return 1; 2472 } 2473 2474 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2475 * FC4 type, implicitly LOGO. 
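 * (Here a "strict NVME" rport means nlp_fc4_type is exactly NLP_FC4_NVME,
 * i.e. no FCP bit is set. In that case the check below gives up on the
 * PRLI and posts NLP_EVT_DEVICE_RM to the node state machine instead.)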
2476 */ 2477 if (phba->sli_rev == LPFC_SLI_REV3 && 2478 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2479 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2480 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2481 ndlp->nlp_type); 2482 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2483 return 1; 2484 } 2485 2486 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2487 ndlp->nlp_DID, elscmd); 2488 if (!elsiocb) 2489 return 1; 2490 2491 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2492 2493 /* For PRLI request, remainder of payload is service parameters */ 2494 memset(pcmd, 0, cmdsize); 2495 2496 if (local_nlp_type & NLP_FC4_FCP) { 2497 /* Remainder of payload is FCP PRLI parameter page. 2498 * Note: this data structure is defined as 2499 * BE/LE in the structure definition so no 2500 * byte swap call is made. 2501 */ 2502 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2503 pcmd += sizeof(uint32_t); 2504 npr = (PRLI *)pcmd; 2505 2506 /* 2507 * If our firmware version is 3.20 or later, 2508 * set the following bits for FC-TAPE support. 2509 */ 2510 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2511 npr->ConfmComplAllowed = 1; 2512 npr->Retry = 1; 2513 npr->TaskRetryIdReq = 1; 2514 } 2515 npr->estabImagePair = 1; 2516 npr->readXferRdyDis = 1; 2517 if (vport->cfg_first_burst_size) 2518 npr->writeXferRdyDis = 1; 2519 2520 /* For FCP support */ 2521 npr->prliType = PRLI_FCP_TYPE; 2522 npr->initiatorFunc = 1; 2523 elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ; 2524 2525 /* Remove FCP type - processed. */ 2526 local_nlp_type &= ~NLP_FC4_FCP; 2527 } else if (local_nlp_type & NLP_FC4_NVME) { 2528 /* Remainder of payload is NVME PRLI parameter page. 2529 * This data structure is the newer definition that 2530 * uses bf macros so a byte swap is required. 2531 */ 2532 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2533 pcmd += sizeof(uint32_t); 2534 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2535 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2536 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2537 if (phba->nsler) { 2538 bf_set(prli_nsler, npr_nvme, 1); 2539 bf_set(prli_conf, npr_nvme, 1); 2540 } 2541 2542 /* Only initiators request first burst. */ 2543 if ((phba->cfg_nvme_enable_fb) && 2544 !phba->nvmet_support) 2545 bf_set(prli_fba, npr_nvme, 1); 2546 2547 if (phba->nvmet_support) { 2548 bf_set(prli_tgt, npr_nvme, 1); 2549 bf_set(prli_disc, npr_nvme, 1); 2550 } else { 2551 bf_set(prli_init, npr_nvme, 1); 2552 bf_set(prli_conf, npr_nvme, 1); 2553 } 2554 2555 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2556 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2557 elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ; 2558 2559 /* Remove NVME type - processed. */ 2560 local_nlp_type &= ~NLP_FC4_NVME; 2561 } 2562 2563 phba->fc_stat.elsXmitPRLI++; 2564 elsiocb->cmd_cmpl = lpfc_cmpl_els_prli; 2565 2566 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2567 "Issue PRLI: did:x%x refcnt %d", 2568 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2569 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2570 if (!elsiocb->ndlp) { 2571 lpfc_els_free_iocb(phba, elsiocb); 2572 return 1; 2573 } 2574 2575 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2576 if (rc == IOCB_ERROR) { 2577 lpfc_els_free_iocb(phba, elsiocb); 2578 lpfc_nlp_put(ndlp); 2579 return 1; 2580 } 2581 2582 /* The vport counters are used for lpfc_scan_finished, but 2583 * the ndlp is used to track outstanding PRLIs for different 2584 * FC4 types. 
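 *
 * For example (an informal illustration of the logic in this routine): on
 * an SLI4 port with both NLP_FC4_FCP and NLP_FC4_NVME set, the first pass
 * sends the FCP PRLI and clears NLP_FC4_FCP from local_nlp_type, then the
 * goto send_next_prli below issues a second, NVME PRLI; fc4_prli_sent is
 * bumped once per PRLI here and decremented again in lpfc_cmpl_els_prli.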
2585 */ 2586 spin_lock_irq(&ndlp->lock); 2587 ndlp->nlp_flag |= NLP_PRLI_SND; 2588 vport->fc_prli_sent++; 2589 ndlp->fc4_prli_sent++; 2590 spin_unlock_irq(&ndlp->lock); 2591 2592 /* The driver supports 2 FC4 types. Make sure 2593 * a PRLI is issued for all types before exiting. 2594 */ 2595 if (phba->sli_rev == LPFC_SLI_REV4 && 2596 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2597 goto send_next_prli; 2598 else 2599 return 0; 2600} 2601 2602/** 2603 * lpfc_rscn_disc - Perform rscn discovery for a vport 2604 * @vport: pointer to a host virtual N_Port data structure. 2605 * 2606 * This routine performs Registration State Change Notification (RSCN) 2607 * discovery for a @vport. If the @vport's node port recovery count is not 2608 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2609 * the nodes that need recovery. If none of the PLOGI were needed through 2610 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2611 * invoked to check and handle possible more RSCN came in during the period 2612 * of processing the current ones. 2613 **/ 2614static void 2615lpfc_rscn_disc(struct lpfc_vport *vport) 2616{ 2617 lpfc_can_disctmo(vport); 2618 2619 /* RSCN discovery */ 2620 /* go thru NPR nodes and issue ELS PLOGIs */ 2621 if (vport->fc_npr_cnt) 2622 if (lpfc_els_disc_plogi(vport)) 2623 return; 2624 2625 lpfc_end_rscn(vport); 2626} 2627 2628/** 2629 * lpfc_adisc_done - Complete the adisc phase of discovery 2630 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2631 * 2632 * This function is called when the final ADISC is completed during discovery. 2633 * This function handles clearing link attention or issuing reg_vpi depending 2634 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2635 * discovery. 2636 * This function is called with no locks held. 2637 **/ 2638static void 2639lpfc_adisc_done(struct lpfc_vport *vport) 2640{ 2641 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2642 struct lpfc_hba *phba = vport->phba; 2643 2644 /* 2645 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2646 * and continue discovery. 2647 */ 2648 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2649 !(vport->fc_flag & FC_RSCN_MODE) && 2650 (phba->sli_rev < LPFC_SLI_REV4)) { 2651 2652 /* 2653 * If link is down, clear_la and reg_vpi will be done after 2654 * flogi following a link up event 2655 */ 2656 if (!lpfc_is_link_up(phba)) 2657 return; 2658 2659 /* The ADISCs are complete. Doesn't matter if they 2660 * succeeded or failed because the ADISC completion 2661 * routine guarantees to call the state machine and 2662 * the RPI is either unregistered (failed ADISC response) 2663 * or the RPI is still valid and the node is marked 2664 * mapped for a target. The exchanges should be in the 2665 * correct state. This code is specific to SLI3. 2666 */ 2667 lpfc_issue_clear_la(phba, vport); 2668 lpfc_issue_reg_vpi(phba, vport); 2669 return; 2670 } 2671 /* 2672 * For SLI2, we need to set port_state to READY 2673 * and continue discovery. 
2674 */ 2675 if (vport->port_state < LPFC_VPORT_READY) { 2676 /* If we get here, there is nothing to ADISC */ 2677 lpfc_issue_clear_la(phba, vport); 2678 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2679 vport->num_disc_nodes = 0; 2680 /* go thru NPR list, issue ELS PLOGIs */ 2681 if (vport->fc_npr_cnt) 2682 lpfc_els_disc_plogi(vport); 2683 if (!vport->num_disc_nodes) { 2684 spin_lock_irq(shost->host_lock); 2685 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2686 spin_unlock_irq(shost->host_lock); 2687 lpfc_can_disctmo(vport); 2688 lpfc_end_rscn(vport); 2689 } 2690 } 2691 vport->port_state = LPFC_VPORT_READY; 2692 } else 2693 lpfc_rscn_disc(vport); 2694} 2695 2696/** 2697 * lpfc_more_adisc - Issue more adisc as needed 2698 * @vport: pointer to a host virtual N_Port data structure. 2699 * 2700 * This routine determines whether there are more ndlps on a @vport 2701 * node list need to have Address Discover (ADISC) issued. If so, it will 2702 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2703 * remaining nodes which need to have ADISC sent. 2704 **/ 2705void 2706lpfc_more_adisc(struct lpfc_vport *vport) 2707{ 2708 if (vport->num_disc_nodes) 2709 vport->num_disc_nodes--; 2710 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2711 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2712 "0210 Continue discovery with %d ADISCs to go " 2713 "Data: x%x x%x x%x\n", 2714 vport->num_disc_nodes, vport->fc_adisc_cnt, 2715 vport->fc_flag, vport->port_state); 2716 /* Check to see if there are more ADISCs to be sent */ 2717 if (vport->fc_flag & FC_NLP_MORE) { 2718 lpfc_set_disctmo(vport); 2719 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2720 lpfc_els_disc_adisc(vport); 2721 } 2722 if (!vport->num_disc_nodes) 2723 lpfc_adisc_done(vport); 2724 return; 2725} 2726 2727/** 2728 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2729 * @phba: pointer to lpfc hba data structure. 2730 * @cmdiocb: pointer to lpfc command iocb data structure. 2731 * @rspiocb: pointer to lpfc response iocb data structure. 2732 * 2733 * This routine is the completion function for issuing the Address Discover 2734 * (ADISC) command. It first checks to see whether link went down during 2735 * the discovery process. If so, the node will be marked as node port 2736 * recovery for issuing discover IOCB by the link attention handler and 2737 * exit. Otherwise, the response status is checked. If error was reported 2738 * in the response status, the ADISC command shall be retried by invoking 2739 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2740 * the response status, the state machine is invoked to set transition 2741 * with respect to NLP_EVT_CMPL_ADISC event. 
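 *
 * As with PLOGI completion, discovery is continued from here; a rough
 * sketch of the tail of the routine below:
 *
 *     if (disc && vport->num_disc_nodes)
 *             lpfc_more_adisc(vport);
 *
 * where lpfc_more_adisc() in turn calls lpfc_adisc_done() once
 * num_disc_nodes reaches zero.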
2742 **/ 2743static void 2744lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2745 struct lpfc_iocbq *rspiocb) 2746{ 2747 struct lpfc_vport *vport = cmdiocb->vport; 2748 IOCB_t *irsp; 2749 struct lpfc_nodelist *ndlp; 2750 int disc; 2751 u32 ulp_status, ulp_word4, tmo; 2752 bool release_node = false; 2753 2754 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2755 cmdiocb->rsp_iocb = rspiocb; 2756 2757 ndlp = cmdiocb->ndlp; 2758 2759 ulp_status = get_job_ulpstatus(phba, rspiocb); 2760 ulp_word4 = get_job_word4(phba, rspiocb); 2761 2762 if (phba->sli_rev == LPFC_SLI_REV4) { 2763 tmo = get_wqe_tmo(cmdiocb); 2764 } else { 2765 irsp = &rspiocb->iocb; 2766 tmo = irsp->ulpTimeout; 2767 } 2768 2769 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2770 "ADISC cmpl: status:x%x/x%x did:x%x", 2771 ulp_status, ulp_word4, 2772 ndlp->nlp_DID); 2773 2774 /* Since ndlp can be freed in the disc state machine, note if this node 2775 * is being used during discovery. 2776 */ 2777 spin_lock_irq(&ndlp->lock); 2778 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2779 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2780 spin_unlock_irq(&ndlp->lock); 2781 /* ADISC completes to NPort <nlp_DID> */ 2782 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2783 "0104 ADISC completes to NPort x%x " 2784 "Data: x%x x%x x%x x%x x%x\n", 2785 ndlp->nlp_DID, ulp_status, ulp_word4, 2786 tmo, disc, vport->num_disc_nodes); 2787 /* Check to see if link went down during discovery */ 2788 if (lpfc_els_chk_latt(vport)) { 2789 spin_lock_irq(&ndlp->lock); 2790 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2791 spin_unlock_irq(&ndlp->lock); 2792 goto out; 2793 } 2794 2795 if (ulp_status) { 2796 /* Check for retry */ 2797 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2798 /* ELS command is being retried */ 2799 if (disc) { 2800 spin_lock_irq(&ndlp->lock); 2801 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2802 spin_unlock_irq(&ndlp->lock); 2803 lpfc_set_disctmo(vport); 2804 } 2805 goto out; 2806 } 2807 /* ADISC failed */ 2808 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2809 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2810 ndlp->nlp_DID, ulp_status, 2811 ulp_word4); 2812 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2813 NLP_EVT_CMPL_ADISC); 2814 2815 /* As long as this node is not registered with the SCSI or NVMe 2816 * transport, it is no longer an active node. Otherwise 2817 * devloss handles the final cleanup. 2818 */ 2819 spin_lock_irq(&ndlp->lock); 2820 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2821 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2822 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2823 release_node = true; 2824 } 2825 spin_unlock_irq(&ndlp->lock); 2826 2827 if (release_node) 2828 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2829 NLP_EVT_DEVICE_RM); 2830 } else 2831 /* Good status, call state machine */ 2832 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2833 NLP_EVT_CMPL_ADISC); 2834 2835 /* Check to see if there are more ADISCs to be sent */ 2836 if (disc && vport->num_disc_nodes) 2837 lpfc_more_adisc(vport); 2838out: 2839 lpfc_els_free_iocb(phba, cmdiocb); 2840 lpfc_nlp_put(ndlp); 2841 return; 2842} 2843 2844/** 2845 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2846 * @vport: pointer to a virtual N_Port data structure. 2847 * @ndlp: pointer to a node-list data structure. 2848 * @retry: number of retries to the command IOCB. 2849 * 2850 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2851 * @vport. 
It prepares the payload of the ADISC ELS command, updates the 2852 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine 2853 * to issue the ADISC ELS command. 2854 * 2855 * Note that the ndlp reference count will be incremented by 1 for holding the 2856 * ndlp and the reference to ndlp will be stored into the ndlp field of 2857 * the IOCB for the completion callback function to the ADISC ELS command. 2858 * 2859 * Return code 2860 * 0 - successfully issued adisc 2861 * 1 - failed to issue adisc 2862 **/ 2863int 2864lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2865 uint8_t retry) 2866{ 2867 int rc = 0; 2868 struct lpfc_hba *phba = vport->phba; 2869 ADISC *ap; 2870 struct lpfc_iocbq *elsiocb; 2871 uint8_t *pcmd; 2872 uint16_t cmdsize; 2873 2874 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 2875 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2876 ndlp->nlp_DID, ELS_CMD_ADISC); 2877 if (!elsiocb) 2878 return 1; 2879 2880 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2881 2882 /* For ADISC request, remainder of payload is service parameters */ 2883 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 2884 pcmd += sizeof(uint32_t); 2885 2886 /* Fill in ADISC payload */ 2887 ap = (ADISC *) pcmd; 2888 ap->hardAL_PA = phba->fc_pref_ALPA; 2889 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2890 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2891 ap->DID = be32_to_cpu(vport->fc_myDID); 2892 2893 phba->fc_stat.elsXmitADISC++; 2894 elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc; 2895 spin_lock_irq(&ndlp->lock); 2896 ndlp->nlp_flag |= NLP_ADISC_SND; 2897 spin_unlock_irq(&ndlp->lock); 2898 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2899 if (!elsiocb->ndlp) { 2900 lpfc_els_free_iocb(phba, elsiocb); 2901 goto err; 2902 } 2903 2904 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2905 "Issue ADISC: did:x%x refcnt %d", 2906 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2907 2908 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2909 if (rc == IOCB_ERROR) { 2910 lpfc_els_free_iocb(phba, elsiocb); 2911 lpfc_nlp_put(ndlp); 2912 goto err; 2913 } 2914 2915 return 0; 2916 2917err: 2918 spin_lock_irq(&ndlp->lock); 2919 ndlp->nlp_flag &= ~NLP_ADISC_SND; 2920 spin_unlock_irq(&ndlp->lock); 2921 return 1; 2922} 2923 2924/** 2925 * lpfc_cmpl_els_logo - Completion callback function for logo 2926 * @phba: pointer to lpfc hba data structure. 2927 * @cmdiocb: pointer to lpfc command iocb data structure. 2928 * @rspiocb: pointer to lpfc response iocb data structure. 2929 * 2930 * This routine is the completion function for issuing the ELS Logout (LOGO) 2931 * command. If no error status was reported from the LOGO response, the 2932 * state machine of the associated ndlp shall be invoked for transition with 2933 * respect to NLP_EVT_CMPL_LOGO event. 
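 *
 * Unlike PLOGI/PRLI/ADISC, a failed LOGO is not retried here; the routine
 * logs the failure, runs the state machine, and then either starts
 * recovery of the remote port or removes the node. An illustrative
 * outline of the tail of the routine:
 *
 *     lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
 *     if ((ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) &&
 *         !skip_recovery) {
 *             ndlp->nlp_flag |= NLP_NPR_2B_DISC;
 *             lpfc_disc_start(vport);
 *     }
 *
 * so a LOGO'ed target is put back through discovery with a fresh PLOGI.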
2934 **/ 2935static void 2936lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2937 struct lpfc_iocbq *rspiocb) 2938{ 2939 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 2940 struct lpfc_vport *vport = ndlp->vport; 2941 IOCB_t *irsp; 2942 unsigned long flags; 2943 uint32_t skip_recovery = 0; 2944 int wake_up_waiter = 0; 2945 u32 ulp_status; 2946 u32 ulp_word4; 2947 u32 tmo; 2948 2949 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2950 cmdiocb->rsp_iocb = rspiocb; 2951 2952 ulp_status = get_job_ulpstatus(phba, rspiocb); 2953 ulp_word4 = get_job_word4(phba, rspiocb); 2954 2955 if (phba->sli_rev == LPFC_SLI_REV4) { 2956 tmo = get_wqe_tmo(cmdiocb); 2957 } else { 2958 irsp = &rspiocb->iocb; 2959 tmo = irsp->ulpTimeout; 2960 } 2961 2962 spin_lock_irq(&ndlp->lock); 2963 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2964 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 2965 wake_up_waiter = 1; 2966 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 2967 } 2968 spin_unlock_irq(&ndlp->lock); 2969 2970 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2971 "LOGO cmpl: status:x%x/x%x did:x%x", 2972 ulp_status, ulp_word4, 2973 ndlp->nlp_DID); 2974 2975 /* LOGO completes to NPort <nlp_DID> */ 2976 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2977 "0105 LOGO completes to NPort x%x " 2978 "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n", 2979 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 2980 ulp_status, ulp_word4, 2981 tmo, vport->num_disc_nodes); 2982 2983 if (lpfc_els_chk_latt(vport)) { 2984 skip_recovery = 1; 2985 goto out; 2986 } 2987 2988 /* The LOGO will not be retried on failure. A LOGO was 2989 * issued to the remote rport and a ACC or RJT or no Answer are 2990 * all acceptable. Note the failure and move forward with 2991 * discovery. The PLOGI will retry. 2992 */ 2993 if (ulp_status) { 2994 /* LOGO failed */ 2995 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2996 "2756 LOGO failure, No Retry DID:%06X " 2997 "Status:x%x/x%x\n", 2998 ndlp->nlp_DID, ulp_status, 2999 ulp_word4); 3000 3001 if (lpfc_error_lost_link(ulp_status, ulp_word4)) { 3002 skip_recovery = 1; 3003 goto out; 3004 } 3005 } 3006 3007 /* Call state machine. This will unregister the rpi if needed. */ 3008 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 3009 3010 /* The driver sets this flag for an NPIV instance that doesn't want to 3011 * log into the remote port. 3012 */ 3013 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 3014 spin_lock_irq(&ndlp->lock); 3015 if (phba->sli_rev == LPFC_SLI_REV4) 3016 ndlp->nlp_flag |= NLP_RELEASE_RPI; 3017 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3018 spin_unlock_irq(&ndlp->lock); 3019 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3020 NLP_EVT_DEVICE_RM); 3021 goto out_rsrc_free; 3022 } 3023 3024out: 3025 /* At this point, the LOGO processing is complete. NOTE: For a 3026 * pt2pt topology, we are assuming the NPortID will only change 3027 * on link up processing. For a LOGO / PLOGI initiated by the 3028 * Initiator, we are assuming the NPortID is not going to change. 3029 */ 3030 3031 if (wake_up_waiter && ndlp->logo_waitq) 3032 wake_up(ndlp->logo_waitq); 3033 /* 3034 * If the node is a target, the handling attempts to recover the port. 3035 * For any other port type, the rpi is unregistered as an implicit 3036 * LOGO. 
3037 */ 3038 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) && 3039 skip_recovery == 0) { 3040 lpfc_cancel_retry_delay_tmo(vport, ndlp); 3041 spin_lock_irqsave(&ndlp->lock, flags); 3042 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 3043 spin_unlock_irqrestore(&ndlp->lock, flags); 3044 3045 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3046 "3187 LOGO completes to NPort x%x: Start " 3047 "Recovery Data: x%x x%x x%x x%x\n", 3048 ndlp->nlp_DID, ulp_status, 3049 ulp_word4, tmo, 3050 vport->num_disc_nodes); 3051 3052 lpfc_els_free_iocb(phba, cmdiocb); 3053 lpfc_nlp_put(ndlp); 3054 3055 lpfc_disc_start(vport); 3056 return; 3057 } 3058 3059 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the 3060 * driver sends a LOGO to the rport to cleanup. For fabric and 3061 * initiator ports, clean up the node as long as the node is not 3062 * registered with the transport. 3063 */ 3064 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 3065 spin_lock_irq(&ndlp->lock); 3066 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3067 spin_unlock_irq(&ndlp->lock); 3068 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3069 NLP_EVT_DEVICE_RM); 3070 } 3071out_rsrc_free: 3072 /* Driver is done with the I/O. */ 3073 lpfc_els_free_iocb(phba, cmdiocb); 3074 lpfc_nlp_put(ndlp); 3075} 3076 3077/** 3078 * lpfc_issue_els_logo - Issue a logo to a node on a vport 3079 * @vport: pointer to a virtual N_Port data structure. 3080 * @ndlp: pointer to a node-list data structure. 3081 * @retry: number of retries to the command IOCB. 3082 * 3083 * This routine constructs and issues an ELS Logout (LOGO) iocb command 3084 * to a remote node, referred to by an @ndlp on a @vport. It constructs the 3085 * payload of the IOCB, properly sets up the @ndlp state, and invokes the 3086 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command. 3087 * 3088 * Note that the ndlp reference count will be incremented by 1 for holding the 3089 * ndlp and the reference to ndlp will be stored into the ndlp field of 3090 * the IOCB for the completion callback function to the LOGO ELS command.
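 *
 * The reference handling follows the same pattern as the other issue
 * routines in this file; shown here only as an illustrative outline:
 *
 *     elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *     if (!elsiocb->ndlp) {
 *             lpfc_els_free_iocb(phba, elsiocb);
 *             goto err;
 *     }
 *     rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *     if (rc == IOCB_ERROR) {
 *             lpfc_els_free_iocb(phba, elsiocb);
 *             lpfc_nlp_put(ndlp);
 *             goto err;
 *     }
 *
 * i.e. the reference taken by lpfc_nlp_get() is dropped immediately if
 * the IOCB cannot be submitted, otherwise it is released by the
 * completion handler.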
3091 * 3092 * Callers of this routine are expected to unregister the RPI first 3093 * 3094 * Return code 3095 * 0 - successfully issued logo 3096 * 1 - failed to issue logo 3097 **/ 3098int 3099lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3100 uint8_t retry) 3101{ 3102 struct lpfc_hba *phba = vport->phba; 3103 struct lpfc_iocbq *elsiocb; 3104 uint8_t *pcmd; 3105 uint16_t cmdsize; 3106 int rc; 3107 3108 spin_lock_irq(&ndlp->lock); 3109 if (ndlp->nlp_flag & NLP_LOGO_SND) { 3110 spin_unlock_irq(&ndlp->lock); 3111 return 0; 3112 } 3113 spin_unlock_irq(&ndlp->lock); 3114 3115 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 3116 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3117 ndlp->nlp_DID, ELS_CMD_LOGO); 3118 if (!elsiocb) 3119 return 1; 3120 3121 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3122 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 3123 pcmd += sizeof(uint32_t); 3124 3125 /* Fill in LOGO payload */ 3126 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 3127 pcmd += sizeof(uint32_t); 3128 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 3129 3130 phba->fc_stat.elsXmitLOGO++; 3131 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo; 3132 spin_lock_irq(&ndlp->lock); 3133 ndlp->nlp_flag |= NLP_LOGO_SND; 3134 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 3135 spin_unlock_irq(&ndlp->lock); 3136 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3137 if (!elsiocb->ndlp) { 3138 lpfc_els_free_iocb(phba, elsiocb); 3139 goto err; 3140 } 3141 3142 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3143 "Issue LOGO: did:x%x refcnt %d", 3144 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3145 3146 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3147 if (rc == IOCB_ERROR) { 3148 lpfc_els_free_iocb(phba, elsiocb); 3149 lpfc_nlp_put(ndlp); 3150 goto err; 3151 } 3152 3153 spin_lock_irq(&ndlp->lock); 3154 ndlp->nlp_prev_state = ndlp->nlp_state; 3155 spin_unlock_irq(&ndlp->lock); 3156 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3157 return 0; 3158 3159err: 3160 spin_lock_irq(&ndlp->lock); 3161 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3162 spin_unlock_irq(&ndlp->lock); 3163 return 1; 3164} 3165 3166/** 3167 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 3168 * @phba: pointer to lpfc hba data structure. 3169 * @cmdiocb: pointer to lpfc command iocb data structure. 3170 * @rspiocb: pointer to lpfc response iocb data structure. 3171 * 3172 * This routine is a generic completion callback function for ELS commands. 3173 * Specifically, it is the callback function which does not need to perform 3174 * any command specific operations. It is currently used by the ELS command 3175 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel 3176 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). 3177 * Other than certain debug loggings, this callback function simply invokes the 3178 * lpfc_els_chk_latt() routine to check whether link went down during the 3179 * discovery process. 
3180 **/ 3181static void 3182lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3183 struct lpfc_iocbq *rspiocb) 3184{ 3185 struct lpfc_vport *vport = cmdiocb->vport; 3186 struct lpfc_nodelist *free_ndlp; 3187 IOCB_t *irsp; 3188 u32 ulp_status, ulp_word4, tmo, did, iotag; 3189 3190 ulp_status = get_job_ulpstatus(phba, rspiocb); 3191 ulp_word4 = get_job_word4(phba, rspiocb); 3192 did = get_job_els_rsp64_did(phba, cmdiocb); 3193 3194 if (phba->sli_rev == LPFC_SLI_REV4) { 3195 tmo = get_wqe_tmo(cmdiocb); 3196 iotag = get_wqe_reqtag(cmdiocb); 3197 } else { 3198 irsp = &rspiocb->iocb; 3199 tmo = irsp->ulpTimeout; 3200 iotag = irsp->ulpIoTag; 3201 } 3202 3203 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3204 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3205 ulp_status, ulp_word4, did); 3206 3207 /* ELS cmd tag <ulpIoTag> completes */ 3208 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3209 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 3210 iotag, ulp_status, ulp_word4, tmo); 3211 3212 /* Check to see if link went down during discovery */ 3213 lpfc_els_chk_latt(vport); 3214 3215 free_ndlp = cmdiocb->ndlp; 3216 3217 lpfc_els_free_iocb(phba, cmdiocb); 3218 lpfc_nlp_put(free_ndlp); 3219} 3220 3221/** 3222 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node. 3223 * @vport: pointer to lpfc_vport data structure. 3224 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node. 3225 * 3226 * This routine registers the rpi assigned to the fabric controller 3227 * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED 3228 * state triggering a registration with the SCSI transport. 3229 * 3230 * This routine is single out because the fabric controller node 3231 * does not receive a PLOGI. This routine is consumed by the 3232 * SCR and RDF ELS commands. Callers are expected to qualify 3233 * with SLI4 first. 3234 **/ 3235static int 3236lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) 3237{ 3238 int rc = 0; 3239 struct lpfc_hba *phba = vport->phba; 3240 struct lpfc_nodelist *ns_ndlp; 3241 LPFC_MBOXQ_t *mbox; 3242 3243 if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) 3244 return rc; 3245 3246 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); 3247 if (!ns_ndlp) 3248 return -ENODEV; 3249 3250 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3251 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n", 3252 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID, 3253 ns_ndlp->nlp_state); 3254 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 3255 return -ENODEV; 3256 3257 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3258 if (!mbox) { 3259 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3260 "0936 %s: no memory for reg_login " 3261 "Data: x%x x%x x%x x%x\n", __func__, 3262 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3263 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3264 return -ENOMEM; 3265 } 3266 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, 3267 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); 3268 if (rc) { 3269 rc = -EACCES; 3270 goto out; 3271 } 3272 3273 fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 3274 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; 3275 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); 3276 if (!mbox->ctx_ndlp) { 3277 rc = -ENOMEM; 3278 goto out; 3279 } 3280 3281 mbox->vport = vport; 3282 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3283 if (rc == MBX_NOT_FINISHED) { 3284 rc = -ENODEV; 3285 lpfc_nlp_put(fc_ndlp); 3286 goto out; 3287 } 3288 /* Success path. Exit. 
*/ 3289 lpfc_nlp_set_state(vport, fc_ndlp, 3290 NLP_STE_REG_LOGIN_ISSUE); 3291 return 0; 3292 3293 out: 3294 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 3295 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3296 "0938 %s: failed to format reg_login " 3297 "Data: x%x x%x x%x x%x\n", __func__, 3298 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3299 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3300 return rc; 3301} 3302 3303/** 3304 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd 3305 * @phba: pointer to lpfc hba data structure. 3306 * @cmdiocb: pointer to lpfc command iocb data structure. 3307 * @rspiocb: pointer to lpfc response iocb data structure. 3308 * 3309 * This routine is a generic completion callback function for Discovery ELS cmd. 3310 * Currently used by the ELS command issuing routines for the ELS State Change 3311 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 3312 * These commands will be retried once only for ELS timeout errors. 3313 **/ 3314static void 3315lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3316 struct lpfc_iocbq *rspiocb) 3317{ 3318 struct lpfc_vport *vport = cmdiocb->vport; 3319 IOCB_t *irsp; 3320 struct lpfc_els_rdf_rsp *prdf; 3321 struct lpfc_dmabuf *pcmd, *prsp; 3322 u32 *pdata; 3323 u32 cmd; 3324 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 3325 u32 ulp_status, ulp_word4, tmo, did, iotag; 3326 3327 ulp_status = get_job_ulpstatus(phba, rspiocb); 3328 ulp_word4 = get_job_word4(phba, rspiocb); 3329 did = get_job_els_rsp64_did(phba, cmdiocb); 3330 3331 if (phba->sli_rev == LPFC_SLI_REV4) { 3332 tmo = get_wqe_tmo(cmdiocb); 3333 iotag = get_wqe_reqtag(cmdiocb); 3334 } else { 3335 irsp = &rspiocb->iocb; 3336 tmo = irsp->ulpTimeout; 3337 iotag = irsp->ulpIoTag; 3338 } 3339 3340 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3341 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3342 ulp_status, ulp_word4, did); 3343 3344 /* ELS cmd tag <ulpIoTag> completes */ 3345 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3346 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n", 3347 iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry); 3348 3349 pcmd = cmdiocb->cmd_dmabuf; 3350 if (!pcmd) 3351 goto out; 3352 3353 pdata = (u32 *)pcmd->virt; 3354 if (!pdata) 3355 goto out; 3356 cmd = *pdata; 3357 3358 /* Only 1 retry for ELS Timeout only */ 3359 if (ulp_status == IOSTAT_LOCAL_REJECT && 3360 ((ulp_word4 & IOERR_PARAM_MASK) == 3361 IOERR_SEQUENCE_TIMEOUT)) { 3362 cmdiocb->retry++; 3363 if (cmdiocb->retry <= 1) { 3364 switch (cmd) { 3365 case ELS_CMD_SCR: 3366 lpfc_issue_els_scr(vport, cmdiocb->retry); 3367 break; 3368 case ELS_CMD_EDC: 3369 lpfc_issue_els_edc(vport, cmdiocb->retry); 3370 break; 3371 case ELS_CMD_RDF: 3372 lpfc_issue_els_rdf(vport, cmdiocb->retry); 3373 break; 3374 } 3375 goto out; 3376 } 3377 phba->fc_stat.elsRetryExceeded++; 3378 } 3379 if (cmd == ELS_CMD_EDC) { 3380 /* must be called before checking uplStatus and returning */ 3381 lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb); 3382 return; 3383 } 3384 if (ulp_status) { 3385 /* ELS discovery cmd completes with error */ 3386 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 3387 "4203 ELS cmd x%x error: x%x x%X\n", cmd, 3388 ulp_status, ulp_word4); 3389 goto out; 3390 } 3391 3392 /* The RDF response doesn't have any impact on the running driver 3393 * but the notification descriptors are dumped here for support. 
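 * Each granted descriptor tag in the response is simply logged, together
 * with the congestion signal and FPIN registrations the driver currently
 * holds (phba->cgn_reg_signal and phba->cgn_reg_fpin).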
3394 */ 3395 if (cmd == ELS_CMD_RDF) { 3396 int i; 3397 3398 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3399 if (!prsp) 3400 goto out; 3401 3402 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; 3403 if (!prdf) 3404 goto out; 3405 3406 for (i = 0; i < ELS_RDF_REG_TAG_CNT && 3407 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) 3408 lpfc_printf_vlog(vport, KERN_INFO, 3409 LOG_ELS | LOG_CGN_MGMT, 3410 "4677 Fabric RDF Notification Grant " 3411 "Data: 0x%08x Reg: %x %x\n", 3412 be32_to_cpu( 3413 prdf->reg_d1.desc_tags[i]), 3414 phba->cgn_reg_signal, 3415 phba->cgn_reg_fpin); 3416 } 3417 3418out: 3419 /* Check to see if link went down during discovery */ 3420 lpfc_els_chk_latt(vport); 3421 lpfc_els_free_iocb(phba, cmdiocb); 3422 lpfc_nlp_put(ndlp); 3423 return; 3424} 3425 3426/** 3427 * lpfc_issue_els_scr - Issue a scr to an node on a vport 3428 * @vport: pointer to a host virtual N_Port data structure. 3429 * @retry: retry counter for the command IOCB. 3430 * 3431 * This routine issues a State Change Request (SCR) to a fabric node 3432 * on a @vport. The remote node is Fabric Controller (0xfffffd). It 3433 * first search the @vport node list to find the matching ndlp. If no such 3434 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 3435 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 3436 * routine is invoked to send the SCR IOCB. 3437 * 3438 * Note that the ndlp reference count will be incremented by 1 for holding the 3439 * ndlp and the reference to ndlp will be stored into the ndlp field of 3440 * the IOCB for the completion callback function to the SCR ELS command. 3441 * 3442 * Return code 3443 * 0 - Successfully issued scr command 3444 * 1 - Failed to issue scr command 3445 **/ 3446int 3447lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) 3448{ 3449 int rc = 0; 3450 struct lpfc_hba *phba = vport->phba; 3451 struct lpfc_iocbq *elsiocb; 3452 uint8_t *pcmd; 3453 uint16_t cmdsize; 3454 struct lpfc_nodelist *ndlp; 3455 3456 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3457 3458 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3459 if (!ndlp) { 3460 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3461 if (!ndlp) 3462 return 1; 3463 lpfc_enqueue_node(vport, ndlp); 3464 } 3465 3466 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3467 ndlp->nlp_DID, ELS_CMD_SCR); 3468 if (!elsiocb) 3469 return 1; 3470 3471 if (phba->sli_rev == LPFC_SLI_REV4) { 3472 rc = lpfc_reg_fab_ctrl_node(vport, ndlp); 3473 if (rc) { 3474 lpfc_els_free_iocb(phba, elsiocb); 3475 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3476 "0937 %s: Failed to reg fc node, rc %d\n", 3477 __func__, rc); 3478 return 1; 3479 } 3480 } 3481 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3482 3483 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3484 pcmd += sizeof(uint32_t); 3485 3486 /* For SCR, remainder of payload is SCR parameter page */ 3487 memset(pcmd, 0, sizeof(SCR)); 3488 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3489 3490 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3491 "Issue SCR: did:x%x", 3492 ndlp->nlp_DID, 0, 0); 3493 3494 phba->fc_stat.elsXmitSCR++; 3495 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3496 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3497 if (!elsiocb->ndlp) { 3498 lpfc_els_free_iocb(phba, elsiocb); 3499 return 1; 3500 } 3501 3502 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3503 "Issue SCR: did:x%x refcnt %d", 3504 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3505 3506 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3507 if (rc == 
IOCB_ERROR) { 3508 lpfc_els_free_iocb(phba, elsiocb); 3509 lpfc_nlp_put(ndlp); 3510 return 1; 3511 } 3512 3513 return 0; 3514} 3515 3516/** 3517 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) 3518 * or the other nport (pt2pt). 3519 * @vport: pointer to a host virtual N_Port data structure. 3520 * @retry: number of retries to the command IOCB. 3521 * 3522 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) 3523 * when connected to a fabric, or to the remote port when connected 3524 * in point-to-point mode. When sent to the Fabric Controller, it will 3525 * replay the RSCN to registered recipients. 3526 * 3527 * Note that the ndlp reference count will be incremented by 1 for holding the 3528 * ndlp and the reference to ndlp will be stored into the ndlp field of 3529 * the IOCB for the completion callback function to the RSCN ELS command. 3530 * 3531 * Return code 3532 * 0 - Successfully issued RSCN command 3533 * 1 - Failed to issue RSCN command 3534 **/ 3535int 3536lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) 3537{ 3538 int rc = 0; 3539 struct lpfc_hba *phba = vport->phba; 3540 struct lpfc_iocbq *elsiocb; 3541 struct lpfc_nodelist *ndlp; 3542 struct { 3543 struct fc_els_rscn rscn; 3544 struct fc_els_rscn_page portid; 3545 } *event; 3546 uint32_t nportid; 3547 uint16_t cmdsize = sizeof(*event); 3548 3549 /* Not supported for private loop */ 3550 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3551 !(vport->fc_flag & FC_PUBLIC_LOOP)) 3552 return 1; 3553 3554 if (vport->fc_flag & FC_PT2PT) { 3555 /* find any mapped nport - that would be the other nport */ 3556 ndlp = lpfc_findnode_mapped(vport); 3557 if (!ndlp) 3558 return 1; 3559 } else { 3560 nportid = FC_FID_FCTRL; 3561 /* find the fabric controller node */ 3562 ndlp = lpfc_findnode_did(vport, nportid); 3563 if (!ndlp) { 3564 /* if one didn't exist, make one */ 3565 ndlp = lpfc_nlp_init(vport, nportid); 3566 if (!ndlp) 3567 return 1; 3568 lpfc_enqueue_node(vport, ndlp); 3569 } 3570 } 3571 3572 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3573 ndlp->nlp_DID, ELS_CMD_RSCN_XMT); 3574 3575 if (!elsiocb) 3576 return 1; 3577 3578 event = elsiocb->cmd_dmabuf->virt; 3579 3580 event->rscn.rscn_cmd = ELS_RSCN; 3581 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); 3582 event->rscn.rscn_plen = cpu_to_be16(cmdsize); 3583 3584 nportid = vport->fc_myDID; 3585 /* appears that page flags must be 0 for fabric to broadcast RSCN */ 3586 event->portid.rscn_page_flags = 0; 3587 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; 3588 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; 3589 event->portid.rscn_fid[2] = nportid & 0x000000FF; 3590 3591 phba->fc_stat.elsXmitRSCN++; 3592 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3593 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3594 if (!elsiocb->ndlp) { 3595 lpfc_els_free_iocb(phba, elsiocb); 3596 return 1; 3597 } 3598 3599 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3600 "Issue RSCN: did:x%x", 3601 ndlp->nlp_DID, 0, 0); 3602 3603 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3604 if (rc == IOCB_ERROR) { 3605 lpfc_els_free_iocb(phba, elsiocb); 3606 lpfc_nlp_put(ndlp); 3607 return 1; 3608 } 3609 3610 return 0; 3611} 3612 3613/** 3614 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 3615 * @vport: pointer to a host virtual N_Port data structure. 3616 * @nportid: N_Port identifier to the remote node. 3617 * @retry: number of retries to the command IOCB. 
3618 * 3619 * This routine issues a Fibre Channel Address Resolution Response 3620 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 3621 * is passed into the function. It first search the @vport node list to find 3622 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 3623 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 3624 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 3625 * 3626 * Note that the ndlp reference count will be incremented by 1 for holding the 3627 * ndlp and the reference to ndlp will be stored into the ndlp field of 3628 * the IOCB for the completion callback function to the FARPR ELS command. 3629 * 3630 * Return code 3631 * 0 - Successfully issued farpr command 3632 * 1 - Failed to issue farpr command 3633 **/ 3634static int 3635lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 3636{ 3637 int rc = 0; 3638 struct lpfc_hba *phba = vport->phba; 3639 struct lpfc_iocbq *elsiocb; 3640 FARP *fp; 3641 uint8_t *pcmd; 3642 uint32_t *lp; 3643 uint16_t cmdsize; 3644 struct lpfc_nodelist *ondlp; 3645 struct lpfc_nodelist *ndlp; 3646 3647 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 3648 3649 ndlp = lpfc_findnode_did(vport, nportid); 3650 if (!ndlp) { 3651 ndlp = lpfc_nlp_init(vport, nportid); 3652 if (!ndlp) 3653 return 1; 3654 lpfc_enqueue_node(vport, ndlp); 3655 } 3656 3657 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3658 ndlp->nlp_DID, ELS_CMD_FARPR); 3659 if (!elsiocb) 3660 return 1; 3661 3662 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3663 3664 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 3665 pcmd += sizeof(uint32_t); 3666 3667 /* Fill in FARPR payload */ 3668 fp = (FARP *) (pcmd); 3669 memset(fp, 0, sizeof(FARP)); 3670 lp = (uint32_t *) pcmd; 3671 *lp++ = be32_to_cpu(nportid); 3672 *lp++ = be32_to_cpu(vport->fc_myDID); 3673 fp->Rflags = 0; 3674 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 3675 3676 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 3677 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3678 ondlp = lpfc_findnode_did(vport, nportid); 3679 if (ondlp) { 3680 memcpy(&fp->OportName, &ondlp->nlp_portname, 3681 sizeof(struct lpfc_name)); 3682 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 3683 sizeof(struct lpfc_name)); 3684 } 3685 3686 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3687 "Issue FARPR: did:x%x", 3688 ndlp->nlp_DID, 0, 0); 3689 3690 phba->fc_stat.elsXmitFARPR++; 3691 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3692 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3693 if (!elsiocb->ndlp) { 3694 lpfc_els_free_iocb(phba, elsiocb); 3695 return 1; 3696 } 3697 3698 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3699 if (rc == IOCB_ERROR) { 3700 /* The additional lpfc_nlp_put will cause the following 3701 * lpfc_els_free_iocb routine to trigger the release of 3702 * the node. 3703 */ 3704 lpfc_els_free_iocb(phba, elsiocb); 3705 lpfc_nlp_put(ndlp); 3706 return 1; 3707 } 3708 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3709 * trigger the release of the node. 3710 */ 3711 /* Don't release reference count as RDF is likely outstanding */ 3712 return 0; 3713} 3714 3715/** 3716 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric. 3717 * @vport: pointer to a host virtual N_Port data structure. 3718 * @retry: retry counter for the command IOCB. 
3719 * 3720 * This routine issues an ELS RDF to the Fabric Controller to register 3721 * for diagnostic functions. 3722 * 3723 * Note that the ndlp reference count will be incremented by 1 for holding the 3724 * ndlp and the reference to ndlp will be stored into the ndlp field of 3725 * the IOCB for the completion callback function to the RDF ELS command. 3726 * 3727 * Return code 3728 * 0 - Successfully issued rdf command 3729 * 1 - Failed to issue rdf command 3730 **/ 3731int 3732lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) 3733{ 3734 struct lpfc_hba *phba = vport->phba; 3735 struct lpfc_iocbq *elsiocb; 3736 struct lpfc_els_rdf_req *prdf; 3737 struct lpfc_nodelist *ndlp; 3738 uint16_t cmdsize; 3739 int rc; 3740 3741 cmdsize = sizeof(*prdf); 3742 3743 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3744 if (!ndlp) { 3745 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3746 if (!ndlp) 3747 return -ENODEV; 3748 lpfc_enqueue_node(vport, ndlp); 3749 } 3750 3751 /* RDF ELS is not required on an NPIV VN_Port. */ 3752 if (vport->port_type == LPFC_NPIV_PORT) 3753 return -EACCES; 3754 3755 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3756 ndlp->nlp_DID, ELS_CMD_RDF); 3757 if (!elsiocb) 3758 return -ENOMEM; 3759 3760 /* Configure the payload for the supported FPIN events. */ 3761 prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt; 3762 memset(prdf, 0, cmdsize); 3763 prdf->rdf.fpin_cmd = ELS_RDF; 3764 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) - 3765 sizeof(struct fc_els_rdf)); 3766 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER); 3767 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32( 3768 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); 3769 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); 3770 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); 3771 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); 3772 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); 3773 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); 3774 3775 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3776 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n", 3777 ndlp->nlp_DID, phba->cgn_reg_signal, 3778 phba->cgn_reg_fpin); 3779 3780 phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ; 3781 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3782 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3783 if (!elsiocb->ndlp) { 3784 lpfc_els_free_iocb(phba, elsiocb); 3785 return -EIO; 3786 } 3787 3788 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3789 "Issue RDF: did:x%x refcnt %d", 3790 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3791 3792 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3793 if (rc == IOCB_ERROR) { 3794 lpfc_els_free_iocb(phba, elsiocb); 3795 lpfc_nlp_put(ndlp); 3796 return -EIO; 3797 } 3798 return 0; 3799} 3800 3801 /** 3802 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric. 3803 * @vport: pointer to a host virtual N_Port data structure. 3804 * @cmdiocb: pointer to lpfc command iocb data structure. 3805 * @ndlp: pointer to a node-list data structure. 3806 * 3807 * A received RDF implies a possible change to fabric supported diagnostic 3808 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new 3809 * RDF request to reregister for supported diagnostic functions. 
3810 * 3811 * Return code 3812 * 0 - Success 3813 * -EIO - Failed to process received RDF 3814 **/ 3815static int 3816lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3817 struct lpfc_nodelist *ndlp) 3818{ 3819 /* Send LS_ACC */ 3820 if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) { 3821 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3822 "1623 Failed to RDF_ACC from x%x for x%x\n", 3823 ndlp->nlp_DID, vport->fc_myDID); 3824 return -EIO; 3825 } 3826 3827 /* Issue new RDF for reregistering */ 3828 if (lpfc_issue_els_rdf(vport, 0)) { 3829 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3830 "2623 Failed to re register RDF for x%x\n", 3831 vport->fc_myDID); 3832 return -EIO; 3833 } 3834 3835 return 0; 3836} 3837 3838/** 3839 * lpfc_least_capable_settings - helper function for EDC rsp processing 3840 * @phba: pointer to lpfc hba data structure. 3841 * @pcgd: pointer to congestion detection descriptor in EDC rsp. 3842 * 3843 * This helper routine determines the least capable setting for 3844 * congestion signals, signal freq, including scale, from the 3845 * congestion detection descriptor in the EDC rsp. The routine 3846 * sets @phba values in preparation for a set_featues mailbox. 3847 **/ 3848static void 3849lpfc_least_capable_settings(struct lpfc_hba *phba, 3850 struct fc_diag_cg_sig_desc *pcgd) 3851{ 3852 u32 rsp_sig_cap = 0, drv_sig_cap = 0; 3853 u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0; 3854 3855 /* Get rsp signal and frequency capabilities. */ 3856 rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability); 3857 rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count); 3858 rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units); 3859 3860 /* If the Fport does not support signals. Set FPIN only */ 3861 if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED) 3862 goto out_no_support; 3863 3864 /* Apply the xmt scale to the xmt cycle to get the correct frequency. 3865 * Adapter default is 100 millisSeconds. Convert all xmt cycle values 3866 * to milliSeconds. 3867 */ 3868 switch (rsp_sig_freq_scale) { 3869 case EDC_CG_SIGFREQ_SEC: 3870 rsp_sig_freq_cyc *= MSEC_PER_SEC; 3871 break; 3872 case EDC_CG_SIGFREQ_MSEC: 3873 rsp_sig_freq_cyc = 1; 3874 break; 3875 default: 3876 goto out_no_support; 3877 } 3878 3879 /* Convenient shorthand. */ 3880 drv_sig_cap = phba->cgn_reg_signal; 3881 3882 /* Choose the least capable frequency. */ 3883 if (rsp_sig_freq_cyc > phba->cgn_sig_freq) 3884 phba->cgn_sig_freq = rsp_sig_freq_cyc; 3885 3886 /* Should be some common signals support. Settle on least capable 3887 * signal and adjust FPIN values. Initialize defaults to ease the 3888 * decision. 
3889 */ 3890 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 3891 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3892 if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY && 3893 (drv_sig_cap == EDC_CG_SIG_WARN_ONLY || 3894 drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) { 3895 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3896 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3897 } 3898 if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3899 if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3900 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM; 3901 phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE; 3902 } 3903 if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) { 3904 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3905 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3906 } 3907 } 3908 3909 /* We are NOT recording signal frequency in congestion info buffer */ 3910 return; 3911 3912out_no_support: 3913 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3914 phba->cgn_sig_freq = 0; 3915 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 3916} 3917 3918DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, 3919 FC_LS_TLV_DTAG_INIT); 3920 3921/** 3922 * lpfc_cmpl_els_edc - Completion callback function for EDC 3923 * @phba: pointer to lpfc hba data structure. 3924 * @cmdiocb: pointer to lpfc command iocb data structure. 3925 * @rspiocb: pointer to lpfc response iocb data structure. 3926 * 3927 * This routine is the completion callback function for issuing the Exchange 3928 * Diagnostic Capabilities (EDC) command. The driver issues an EDC to 3929 * notify the FPort of its Congestion and Link Fault capabilities. This 3930 * routine parses the FPort's response and decides on the least common 3931 * values applicable to both FPort and NPort for Warnings and Alarms that 3932 * are communicated via hardware signals. 3933 **/ 3934static void 3935lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3936 struct lpfc_iocbq *rspiocb) 3937{ 3938 IOCB_t *irsp_iocb; 3939 struct fc_els_edc_resp *edc_rsp; 3940 struct fc_tlv_desc *tlv; 3941 struct fc_diag_cg_sig_desc *pcgd; 3942 struct fc_diag_lnkflt_desc *plnkflt; 3943 struct lpfc_dmabuf *pcmd, *prsp; 3944 const char *dtag_nm; 3945 u32 *pdata, dtag; 3946 int desc_cnt = 0, bytes_remain; 3947 bool rcv_cap_desc = false; 3948 struct lpfc_nodelist *ndlp; 3949 u32 ulp_status, ulp_word4, tmo, did, iotag; 3950 3951 ndlp = cmdiocb->ndlp; 3952 3953 ulp_status = get_job_ulpstatus(phba, rspiocb); 3954 ulp_word4 = get_job_word4(phba, rspiocb); 3955 did = get_job_els_rsp64_did(phba, rspiocb); 3956 3957 if (phba->sli_rev == LPFC_SLI_REV4) { 3958 tmo = get_wqe_tmo(rspiocb); 3959 iotag = get_wqe_reqtag(rspiocb); 3960 } else { 3961 irsp_iocb = &rspiocb->iocb; 3962 tmo = irsp_iocb->ulpTimeout; 3963 iotag = irsp_iocb->ulpIoTag; 3964 } 3965 3966 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 3967 "EDC cmpl: status:x%x/x%x did:x%x", 3968 ulp_status, ulp_word4, did); 3969 3970 /* ELS cmd tag <ulpIoTag> completes */ 3971 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3972 "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n", 3973 iotag, ulp_status, ulp_word4, tmo); 3974 3975 pcmd = cmdiocb->cmd_dmabuf; 3976 if (!pcmd) 3977 goto out; 3978 3979 pdata = (u32 *)pcmd->virt; 3980 if (!pdata) 3981 goto out; 3982 3983 /* Need to clear signal values, send features MB and RDF with FPIN. 
*/ 3984 if (ulp_status) 3985 goto out; 3986 3987 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3988 if (!prsp) 3989 goto out; 3990 3991 edc_rsp = prsp->virt; 3992 if (!edc_rsp) 3993 goto out; 3994 3995 /* ELS cmd tag <ulpIoTag> completes */ 3996 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3997 "4676 Fabric EDC Rsp: " 3998 "0x%02x, 0x%08x\n", 3999 edc_rsp->acc_hdr.la_cmd, 4000 be32_to_cpu(edc_rsp->desc_list_len)); 4001 4002 /* 4003 * Payload length in bytes is the response descriptor list 4004 * length minus the 12 bytes of Link Service Request 4005 * Information descriptor in the reply. 4006 */ 4007 bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) - 4008 sizeof(struct fc_els_lsri_desc); 4009 if (bytes_remain <= 0) 4010 goto out; 4011 4012 tlv = edc_rsp->desc; 4013 4014 /* 4015 * cycle through EDC diagnostic descriptors to find the 4016 * congestion signaling capability descriptor 4017 */ 4018 while (bytes_remain) { 4019 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 4020 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4021 "6461 Truncated TLV hdr on " 4022 "Diagnostic descriptor[%d]\n", 4023 desc_cnt); 4024 goto out; 4025 } 4026 4027 dtag = be32_to_cpu(tlv->desc_tag); 4028 switch (dtag) { 4029 case ELS_DTAG_LNK_FAULT_CAP: 4030 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4031 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4032 sizeof(struct fc_diag_lnkflt_desc)) { 4033 lpfc_printf_log( 4034 phba, KERN_WARNING, LOG_CGN_MGMT, 4035 "6462 Truncated Link Fault Diagnostic " 4036 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4037 desc_cnt, bytes_remain, 4038 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4039 sizeof(struct fc_diag_cg_sig_desc)); 4040 goto out; 4041 } 4042 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 4043 lpfc_printf_log( 4044 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4045 "4617 Link Fault Desc Data: 0x%08x 0x%08x " 4046 "0x%08x 0x%08x 0x%08x\n", 4047 be32_to_cpu(plnkflt->desc_tag), 4048 be32_to_cpu(plnkflt->desc_len), 4049 be32_to_cpu( 4050 plnkflt->degrade_activate_threshold), 4051 be32_to_cpu( 4052 plnkflt->degrade_deactivate_threshold), 4053 be32_to_cpu(plnkflt->fec_degrade_interval)); 4054 break; 4055 case ELS_DTAG_CG_SIGNAL_CAP: 4056 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4057 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4058 sizeof(struct fc_diag_cg_sig_desc)) { 4059 lpfc_printf_log( 4060 phba, KERN_WARNING, LOG_CGN_MGMT, 4061 "6463 Truncated Cgn Signal Diagnostic " 4062 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4063 desc_cnt, bytes_remain, 4064 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4065 sizeof(struct fc_diag_cg_sig_desc)); 4066 goto out; 4067 } 4068 4069 pcgd = (struct fc_diag_cg_sig_desc *)tlv; 4070 lpfc_printf_log( 4071 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4072 "4616 CGN Desc Data: 0x%08x 0x%08x " 4073 "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n", 4074 be32_to_cpu(pcgd->desc_tag), 4075 be32_to_cpu(pcgd->desc_len), 4076 be32_to_cpu(pcgd->xmt_signal_capability), 4077 be16_to_cpu(pcgd->xmt_signal_frequency.count), 4078 be16_to_cpu(pcgd->xmt_signal_frequency.units), 4079 be32_to_cpu(pcgd->rcv_signal_capability), 4080 be16_to_cpu(pcgd->rcv_signal_frequency.count), 4081 be16_to_cpu(pcgd->rcv_signal_frequency.units)); 4082 4083 /* Compare driver and Fport capabilities and choose 4084 * least common. 
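* For example, an FPort that grants WARN_ONLY while the driver registered WARN_ALARM settles on WARN_ONLY hardware signals, with alarms still delivered as FPINs.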
4085 */ 4086 lpfc_least_capable_settings(phba, pcgd); 4087 rcv_cap_desc = true; 4088 break; 4089 default: 4090 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 4091 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4092 "4919 unknown Diagnostic " 4093 "Descriptor[%d]: tag x%x (%s)\n", 4094 desc_cnt, dtag, dtag_nm); 4095 } 4096 4097 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 4098 tlv = fc_tlv_next_desc(tlv); 4099 desc_cnt++; 4100 } 4101 4102out: 4103 if (!rcv_cap_desc) { 4104 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 4105 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4106 phba->cgn_sig_freq = 0; 4107 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 4108 "4202 EDC rsp error - sending RDF " 4109 "for FPIN only.\n"); 4110 } 4111 4112 lpfc_config_cgn_signal(phba); 4113 4114 /* Check to see if link went down during discovery */ 4115 lpfc_els_chk_latt(phba->pport); 4116 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4117 "EDC Cmpl: did:x%x refcnt %d", 4118 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4119 lpfc_els_free_iocb(phba, cmdiocb); 4120 lpfc_nlp_put(ndlp); 4121} 4122 4123static void 4124lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_diag_cg_sig_desc *cgd) 4125{ 4126 /* We are assuming cgd was zero'ed before calling this routine */ 4127 4128 /* Configure the congestion detection capability */ 4129 cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP); 4130 4131 /* Descriptor len doesn't include the tag or len fields. */ 4132 cgd->desc_len = cpu_to_be32( 4133 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc)); 4134 4135 /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4136 * xmt_signal_frequency.count already set to 0. 4137 * xmt_signal_frequency.units already set to 0. 4138 */ 4139 4140 if (phba->cmf_active_mode == LPFC_CFG_OFF) { 4141 /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4142 * rcv_signal_frequency.count already set to 0. 4143 * rcv_signal_frequency.units already set to 0. 4144 */ 4145 phba->cgn_sig_freq = 0; 4146 return; 4147 } 4148 switch (phba->cgn_reg_signal) { 4149 case EDC_CG_SIG_WARN_ONLY: 4150 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY); 4151 break; 4152 case EDC_CG_SIG_WARN_ALARM: 4153 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM); 4154 break; 4155 default: 4156 /* rcv_signal_capability left 0 thus no support */ 4157 break; 4158 } 4159 4160 /* We start negotiation with lpfc_fabric_cgn_frequency, after 4161 * the completion we settle on the higher frequency. 4162 */ 4163 cgd->rcv_signal_frequency.count = 4164 cpu_to_be16(lpfc_fabric_cgn_frequency); 4165 cgd->rcv_signal_frequency.units = 4166 cpu_to_be16(EDC_CG_SIGFREQ_MSEC); 4167} 4168 4169 /** 4170 * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric. 4171 * @vport: pointer to a host virtual N_Port data structure. 4172 * @retry: retry counter for the command iocb. 4173 * 4174 * This routine issues an ELS EDC to the F-Port Controller to communicate 4175 * this N_Port's support of hardware signals in its Congestion 4176 * Capabilities Descriptor. 4177 * 4178 * Note: This routine does not check if one or more signals are 4179 * set in the cgn_reg_signal parameter. The caller makes the 4180 * decision to enforce cgn_reg_signal as nonzero or zero depending 4181 * on the conditions. During Fabric requests, the driver 4182 * requires cgn_reg_signals to be nonzero. 
But a dynamic request 4183 * to set the congestion mode to OFF from Monitor or Manage 4184 * would correctly issue an EDC with no signals enabled to 4185 * turn off switch functionality and then update the FW. 4186 * 4187 * Return code 4188 * 0 - Successfully issued edc command 4189 * 1 - Failed to issue edc command 4190 **/ 4191int 4192lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry) 4193{ 4194 struct lpfc_hba *phba = vport->phba; 4195 struct lpfc_iocbq *elsiocb; 4196 struct lpfc_els_edc_req *edc_req; 4197 struct fc_diag_cg_sig_desc *cgn_desc; 4198 u16 cmdsize; 4199 struct lpfc_nodelist *ndlp; 4200 u8 *pcmd = NULL; 4201 u32 edc_req_size, cgn_desc_size; 4202 int rc; 4203 4204 if (vport->port_type == LPFC_NPIV_PORT) 4205 return -EACCES; 4206 4207 ndlp = lpfc_findnode_did(vport, Fabric_DID); 4208 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 4209 return -ENODEV; 4210 4211 /* If HBA doesn't support signals, drop into RDF */ 4212 if (!phba->cgn_init_reg_signal) 4213 goto try_rdf; 4214 4215 edc_req_size = sizeof(struct fc_els_edc); 4216 cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc); 4217 cmdsize = edc_req_size + cgn_desc_size; 4218 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 4219 ndlp->nlp_DID, ELS_CMD_EDC); 4220 if (!elsiocb) 4221 goto try_rdf; 4222 4223 /* Configure the payload for the supported Diagnostics capabilities. */ 4224 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 4225 memset(pcmd, 0, cmdsize); 4226 edc_req = (struct lpfc_els_edc_req *)pcmd; 4227 edc_req->edc.desc_len = cpu_to_be32(cgn_desc_size); 4228 edc_req->edc.edc_cmd = ELS_EDC; 4229 4230 cgn_desc = &edc_req->cgn_desc; 4231 4232 lpfc_format_edc_cgn_desc(phba, cgn_desc); 4233 4234 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 4235 4236 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4237 "4623 Xmit EDC to remote " 4238 "NPORT x%x reg_sig x%x reg_fpin:x%x\n", 4239 ndlp->nlp_DID, phba->cgn_reg_signal, 4240 phba->cgn_reg_fpin); 4241 4242 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 4243 elsiocb->ndlp = lpfc_nlp_get(ndlp); 4244 if (!elsiocb->ndlp) { 4245 lpfc_els_free_iocb(phba, elsiocb); 4246 return -EIO; 4247 } 4248 4249 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4250 "Issue EDC: did:x%x refcnt %d", 4251 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4252 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4253 if (rc == IOCB_ERROR) { 4254 /* The additional lpfc_nlp_put will cause the following 4255 * lpfc_els_free_iocb routine to trigger the rlease of 4256 * the node. 4257 */ 4258 lpfc_els_free_iocb(phba, elsiocb); 4259 lpfc_nlp_put(ndlp); 4260 goto try_rdf; 4261 } 4262 return 0; 4263try_rdf: 4264 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 4265 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4266 rc = lpfc_issue_els_rdf(vport, 0); 4267 return rc; 4268} 4269 4270/** 4271 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry 4272 * @vport: pointer to a host virtual N_Port data structure. 4273 * @nlp: pointer to a node-list data structure. 4274 * 4275 * This routine cancels the timer with a delayed IOCB-command retry for 4276 * a @vport's @ndlp. It stops the timer for the delayed function retrial and 4277 * removes the ELS retry event if it presents. In addition, if the 4278 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB 4279 * commands are sent for the @vport's nodes that require issuing discovery 4280 * ADISC. 
4281 **/ 4282void 4283lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 4284{ 4285 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4286 struct lpfc_work_evt *evtp; 4287 4288 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 4289 return; 4290 spin_lock_irq(&nlp->lock); 4291 nlp->nlp_flag &= ~NLP_DELAY_TMO; 4292 spin_unlock_irq(&nlp->lock); 4293 del_timer_sync(&nlp->nlp_delayfunc); 4294 nlp->nlp_last_elscmd = 0; 4295 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 4296 list_del_init(&nlp->els_retry_evt.evt_listp); 4297 /* Decrement nlp reference count held for the delayed retry */ 4298 evtp = &nlp->els_retry_evt; 4299 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 4300 } 4301 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 4302 spin_lock_irq(&nlp->lock); 4303 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 4304 spin_unlock_irq(&nlp->lock); 4305 if (vport->num_disc_nodes) { 4306 if (vport->port_state < LPFC_VPORT_READY) { 4307 /* Check if there are more ADISCs to be sent */ 4308 lpfc_more_adisc(vport); 4309 } else { 4310 /* Check if there are more PLOGIs to be sent */ 4311 lpfc_more_plogi(vport); 4312 if (vport->num_disc_nodes == 0) { 4313 spin_lock_irq(shost->host_lock); 4314 vport->fc_flag &= ~FC_NDISC_ACTIVE; 4315 spin_unlock_irq(shost->host_lock); 4316 lpfc_can_disctmo(vport); 4317 lpfc_end_rscn(vport); 4318 } 4319 } 4320 } 4321 } 4322 return; 4323} 4324 4325/** 4326 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 4327 * @t: pointer to the timer function associated data (ndlp). 4328 * 4329 * This routine is invoked by the ndlp delayed-function timer to check 4330 * whether there is any pending ELS retry event(s) with the node. If not, it 4331 * simply returns. Otherwise, if there is at least one ELS delayed event, it 4332 * adds the delayed events to the HBA work list and invokes the 4333 * lpfc_worker_wake_up() routine to wake up worker thread to process the 4334 * event. Note that lpfc_nlp_get() is called before posting the event to 4335 * the work list to hold reference count of ndlp so that it guarantees the 4336 * reference to ndlp will still be available when the worker thread gets 4337 * to the event associated with the ndlp. 4338 **/ 4339void 4340lpfc_els_retry_delay(struct timer_list *t) 4341{ 4342 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 4343 struct lpfc_vport *vport = ndlp->vport; 4344 struct lpfc_hba *phba = vport->phba; 4345 unsigned long flags; 4346 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 4347 4348 spin_lock_irqsave(&phba->hbalock, flags); 4349 if (!list_empty(&evtp->evt_listp)) { 4350 spin_unlock_irqrestore(&phba->hbalock, flags); 4351 return; 4352 } 4353 4354 /* We need to hold the node by incrementing the reference 4355 * count until the queued work is done 4356 */ 4357 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 4358 if (evtp->evt_arg1) { 4359 evtp->evt = LPFC_EVT_ELS_RETRY; 4360 list_add_tail(&evtp->evt_listp, &phba->work_list); 4361 lpfc_worker_wake_up(phba); 4362 } 4363 spin_unlock_irqrestore(&phba->hbalock, flags); 4364 return; 4365} 4366 4367/** 4368 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 4369 * @ndlp: pointer to a node-list data structure. 4370 * 4371 * This routine is the worker-thread handler for processing the @ndlp delayed 4372 * event(s), posted by the lpfc_els_retry_delay() routine. 
It simply retrieves 4373 * the last ELS command from the associated ndlp and invokes the proper ELS 4374 * function according to the delayed ELS command to retry the command. 4375 **/ 4376void 4377lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 4378{ 4379 struct lpfc_vport *vport = ndlp->vport; 4380 uint32_t cmd, retry; 4381 4382 spin_lock_irq(&ndlp->lock); 4383 cmd = ndlp->nlp_last_elscmd; 4384 ndlp->nlp_last_elscmd = 0; 4385 4386 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 4387 spin_unlock_irq(&ndlp->lock); 4388 return; 4389 } 4390 4391 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 4392 spin_unlock_irq(&ndlp->lock); 4393 /* 4394 * If a discovery event readded nlp_delayfunc after timer 4395 * firing and before processing the timer, cancel the 4396 * nlp_delayfunc. 4397 */ 4398 del_timer_sync(&ndlp->nlp_delayfunc); 4399 retry = ndlp->nlp_retry; 4400 ndlp->nlp_retry = 0; 4401 4402 switch (cmd) { 4403 case ELS_CMD_FLOGI: 4404 lpfc_issue_els_flogi(vport, ndlp, retry); 4405 break; 4406 case ELS_CMD_PLOGI: 4407 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 4408 ndlp->nlp_prev_state = ndlp->nlp_state; 4409 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4410 } 4411 break; 4412 case ELS_CMD_ADISC: 4413 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 4414 ndlp->nlp_prev_state = ndlp->nlp_state; 4415 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4416 } 4417 break; 4418 case ELS_CMD_PRLI: 4419 case ELS_CMD_NVMEPRLI: 4420 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 4421 ndlp->nlp_prev_state = ndlp->nlp_state; 4422 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4423 } 4424 break; 4425 case ELS_CMD_LOGO: 4426 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 4427 ndlp->nlp_prev_state = ndlp->nlp_state; 4428 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4429 } 4430 break; 4431 case ELS_CMD_FDISC: 4432 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 4433 lpfc_issue_els_fdisc(vport, ndlp, retry); 4434 break; 4435 } 4436 return; 4437} 4438 4439/** 4440 * lpfc_link_reset - Issue link reset 4441 * @vport: pointer to a virtual N_Port data structure. 4442 * 4443 * This routine performs link reset by sending INIT_LINK mailbox command. 4444 * For SLI-3 adapter, link attention interrupt is enabled before issuing 4445 * INIT_LINK mailbox command. 
4446 * 4447 * Return code 4448 * 0 - Link reset initiated successfully 4449 * 1 - Failed to initiate link reset 4450 **/ 4451int 4452lpfc_link_reset(struct lpfc_vport *vport) 4453{ 4454 struct lpfc_hba *phba = vport->phba; 4455 LPFC_MBOXQ_t *mbox; 4456 uint32_t control; 4457 int rc; 4458 4459 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4460 "2851 Attempt link reset\n"); 4461 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4462 if (!mbox) { 4463 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4464 "2852 Failed to allocate mbox memory"); 4465 return 1; 4466 } 4467 4468 /* Enable Link attention interrupts */ 4469 if (phba->sli_rev <= LPFC_SLI_REV3) { 4470 spin_lock_irq(&phba->hbalock); 4471 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4472 control = readl(phba->HCregaddr); 4473 control |= HC_LAINT_ENA; 4474 writel(control, phba->HCregaddr); 4475 readl(phba->HCregaddr); /* flush */ 4476 spin_unlock_irq(&phba->hbalock); 4477 } 4478 4479 lpfc_init_link(phba, mbox, phba->cfg_topology, 4480 phba->cfg_link_speed); 4481 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4482 mbox->vport = vport; 4483 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4484 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 4485 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4486 "2853 Failed to issue INIT_LINK " 4487 "mbox command, rc:x%x\n", rc); 4488 mempool_free(mbox, phba->mbox_mem_pool); 4489 return 1; 4490 } 4491 4492 return 0; 4493} 4494 4495/** 4496 * lpfc_els_retry - Make retry decision on an els command iocb 4497 * @phba: pointer to lpfc hba data structure. 4498 * @cmdiocb: pointer to lpfc command iocb data structure. 4499 * @rspiocb: pointer to lpfc response iocb data structure. 4500 * 4501 * This routine makes a retry decision on an ELS command IOCB, which has 4502 * failed. The following ELS IOCBs use this function for retrying the command 4503 * when previously issued command responsed with error status: FLOGI, PLOGI, 4504 * PRLI, ADISC and FDISC. Based on the ELS command type and the 4505 * returned error status, it makes the decision whether a retry shall be 4506 * issued for the command, and whether a retry shall be made immediately or 4507 * delayed. In the former case, the corresponding ELS command issuing-function 4508 * is called to retry the command. In the later case, the ELS command shall 4509 * be posted to the ndlp delayed event and delayed function timer set to the 4510 * ndlp for the delayed command issusing. 4511 * 4512 * Return code 4513 * 0 - No retry of els command is made 4514 * 1 - Immediate or delayed retry of els command is made 4515 **/ 4516static int 4517lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4518 struct lpfc_iocbq *rspiocb) 4519{ 4520 struct lpfc_vport *vport = cmdiocb->vport; 4521 union lpfc_wqe128 *irsp = &rspiocb->wqe; 4522 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 4523 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; 4524 uint32_t *elscmd; 4525 struct ls_rjt stat; 4526 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; 4527 int logerr = 0; 4528 uint32_t cmd = 0; 4529 uint32_t did; 4530 int link_reset = 0, rc; 4531 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 4532 u32 ulp_word4 = get_job_word4(phba, rspiocb); 4533 4534 4535 /* Note: cmd_dmabuf may be 0 for internal driver abort 4536 * of delays ELS command. 
4537 */ 4538 4539 if (pcmd && pcmd->virt) { 4540 elscmd = (uint32_t *) (pcmd->virt); 4541 cmd = *elscmd++; 4542 } 4543 4544 if (ndlp) 4545 did = ndlp->nlp_DID; 4546 else { 4547 /* We should only hit this case for retrying PLOGI */ 4548 did = get_job_els_rsp64_did(phba, rspiocb); 4549 ndlp = lpfc_findnode_did(vport, did); 4550 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 4551 return 0; 4552 } 4553 4554 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4555 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 4556 *(((uint32_t *)irsp) + 7), ulp_word4, did); 4557 4558 switch (ulp_status) { 4559 case IOSTAT_FCP_RSP_ERROR: 4560 break; 4561 case IOSTAT_REMOTE_STOP: 4562 if (phba->sli_rev == LPFC_SLI_REV4) { 4563 /* This IO was aborted by the target, we don't 4564 * know the rxid and because we did not send the 4565 * ABTS we cannot generate and RRQ. 4566 */ 4567 lpfc_set_rrq_active(phba, ndlp, 4568 cmdiocb->sli4_lxritag, 0, 0); 4569 } 4570 break; 4571 case IOSTAT_LOCAL_REJECT: 4572 switch ((ulp_word4 & IOERR_PARAM_MASK)) { 4573 case IOERR_LOOP_OPEN_FAILURE: 4574 if (cmd == ELS_CMD_FLOGI) { 4575 if (PCI_DEVICE_ID_HORNET == 4576 phba->pcidev->device) { 4577 phba->fc_topology = LPFC_TOPOLOGY_LOOP; 4578 phba->pport->fc_myDID = 0; 4579 phba->alpa_map[0] = 0; 4580 phba->alpa_map[1] = 0; 4581 } 4582 } 4583 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 4584 delay = 1000; 4585 retry = 1; 4586 break; 4587 4588 case IOERR_ILLEGAL_COMMAND: 4589 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4590 "0124 Retry illegal cmd x%x " 4591 "retry:x%x delay:x%x\n", 4592 cmd, cmdiocb->retry, delay); 4593 retry = 1; 4594 /* All command's retry policy */ 4595 maxretry = 8; 4596 if (cmdiocb->retry > 2) 4597 delay = 1000; 4598 break; 4599 4600 case IOERR_NO_RESOURCES: 4601 logerr = 1; /* HBA out of resources */ 4602 retry = 1; 4603 if (cmdiocb->retry > 100) 4604 delay = 100; 4605 maxretry = 250; 4606 break; 4607 4608 case IOERR_ILLEGAL_FRAME: 4609 delay = 100; 4610 retry = 1; 4611 break; 4612 4613 case IOERR_INVALID_RPI: 4614 if (cmd == ELS_CMD_PLOGI && 4615 did == NameServer_DID) { 4616 /* Continue forever if plogi to */ 4617 /* the nameserver fails */ 4618 maxretry = 0; 4619 delay = 100; 4620 } 4621 retry = 1; 4622 break; 4623 4624 case IOERR_SEQUENCE_TIMEOUT: 4625 if (cmd == ELS_CMD_PLOGI && 4626 did == NameServer_DID && 4627 (cmdiocb->retry + 1) == maxretry) { 4628 /* Reset the Link */ 4629 link_reset = 1; 4630 break; 4631 } 4632 retry = 1; 4633 delay = 100; 4634 break; 4635 case IOERR_SLI_ABORTED: 4636 /* Retry ELS PLOGI command? 4637 * Possibly the rport just wasn't ready. 
4638 */ 4639 if (cmd == ELS_CMD_PLOGI) { 4640 /* No retry if state change */ 4641 if (ndlp && 4642 ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) 4643 goto out_retry; 4644 retry = 1; 4645 maxretry = 2; 4646 } 4647 break; 4648 } 4649 break; 4650 4651 case IOSTAT_NPORT_RJT: 4652 case IOSTAT_FABRIC_RJT: 4653 if (ulp_word4 & RJT_UNAVAIL_TEMP) { 4654 retry = 1; 4655 break; 4656 } 4657 break; 4658 4659 case IOSTAT_NPORT_BSY: 4660 case IOSTAT_FABRIC_BSY: 4661 logerr = 1; /* Fabric / Remote NPort out of resources */ 4662 retry = 1; 4663 break; 4664 4665 case IOSTAT_LS_RJT: 4666 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 4667 /* Added for Vendor specifc support 4668 * Just keep retrying for these Rsn / Exp codes 4669 */ 4670 if ((vport->fc_flag & FC_PT2PT) && 4671 cmd == ELS_CMD_NVMEPRLI) { 4672 switch (stat.un.b.lsRjtRsnCode) { 4673 case LSRJT_UNABLE_TPC: 4674 case LSRJT_INVALID_CMD: 4675 case LSRJT_LOGICAL_ERR: 4676 case LSRJT_CMD_UNSUPPORTED: 4677 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 4678 "0168 NVME PRLI LS_RJT " 4679 "reason %x port doesn't " 4680 "support NVME, disabling NVME\n", 4681 stat.un.b.lsRjtRsnCode); 4682 retry = 0; 4683 vport->fc_flag |= FC_PT2PT_NO_NVME; 4684 goto out_retry; 4685 } 4686 } 4687 switch (stat.un.b.lsRjtRsnCode) { 4688 case LSRJT_UNABLE_TPC: 4689 /* The driver has a VALID PLOGI but the rport has 4690 * rejected the PRLI - can't do it now. Delay 4691 * for 1 second and try again. 4692 * 4693 * However, if explanation is REQ_UNSUPPORTED there's 4694 * no point to retry PRLI. 4695 */ 4696 if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) && 4697 stat.un.b.lsRjtRsnCodeExp != 4698 LSEXP_REQ_UNSUPPORTED) { 4699 delay = 1000; 4700 maxretry = lpfc_max_els_tries + 1; 4701 retry = 1; 4702 break; 4703 } 4704 4705 /* Legacy bug fix code for targets with PLOGI delays. */ 4706 if (stat.un.b.lsRjtRsnCodeExp == 4707 LSEXP_CMD_IN_PROGRESS) { 4708 if (cmd == ELS_CMD_PLOGI) { 4709 delay = 1000; 4710 maxretry = 48; 4711 } 4712 retry = 1; 4713 break; 4714 } 4715 if (stat.un.b.lsRjtRsnCodeExp == 4716 LSEXP_CANT_GIVE_DATA) { 4717 if (cmd == ELS_CMD_PLOGI) { 4718 delay = 1000; 4719 maxretry = 48; 4720 } 4721 retry = 1; 4722 break; 4723 } 4724 if (cmd == ELS_CMD_PLOGI) { 4725 delay = 1000; 4726 maxretry = lpfc_max_els_tries + 1; 4727 retry = 1; 4728 break; 4729 } 4730 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4731 (cmd == ELS_CMD_FDISC) && 4732 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 4733 lpfc_printf_vlog(vport, KERN_ERR, 4734 LOG_TRACE_EVENT, 4735 "0125 FDISC Failed (x%x). " 4736 "Fabric out of resources\n", 4737 stat.un.lsRjtError); 4738 lpfc_vport_set_state(vport, 4739 FC_VPORT_NO_FABRIC_RSCS); 4740 } 4741 break; 4742 4743 case LSRJT_LOGICAL_BSY: 4744 if ((cmd == ELS_CMD_PLOGI) || 4745 (cmd == ELS_CMD_PRLI) || 4746 (cmd == ELS_CMD_NVMEPRLI)) { 4747 delay = 1000; 4748 maxretry = 48; 4749 } else if (cmd == ELS_CMD_FDISC) { 4750 /* FDISC retry policy */ 4751 maxretry = 48; 4752 if (cmdiocb->retry >= 32) 4753 delay = 1000; 4754 } 4755 retry = 1; 4756 break; 4757 4758 case LSRJT_LOGICAL_ERR: 4759 /* There are some cases where switches return this 4760 * error when they are not ready and should be returning 4761 * Logical Busy. We should delay every time. 
4762 */ 4763 if (cmd == ELS_CMD_FDISC && 4764 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 4765 maxretry = 3; 4766 delay = 1000; 4767 retry = 1; 4768 } else if (cmd == ELS_CMD_FLOGI && 4769 stat.un.b.lsRjtRsnCodeExp == 4770 LSEXP_NOTHING_MORE) { 4771 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4772 retry = 1; 4773 lpfc_printf_vlog(vport, KERN_ERR, 4774 LOG_TRACE_EVENT, 4775 "0820 FLOGI Failed (x%x). " 4776 "BBCredit Not Supported\n", 4777 stat.un.lsRjtError); 4778 } 4779 break; 4780 4781 case LSRJT_PROTOCOL_ERR: 4782 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4783 (cmd == ELS_CMD_FDISC) && 4784 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4785 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4786 ) { 4787 lpfc_printf_vlog(vport, KERN_ERR, 4788 LOG_TRACE_EVENT, 4789 "0122 FDISC Failed (x%x). " 4790 "Fabric Detected Bad WWN\n", 4791 stat.un.lsRjtError); 4792 lpfc_vport_set_state(vport, 4793 FC_VPORT_FABRIC_REJ_WWN); 4794 } 4795 break; 4796 case LSRJT_VENDOR_UNIQUE: 4797 if ((stat.un.b.vendorUnique == 0x45) && 4798 (cmd == ELS_CMD_FLOGI)) { 4799 goto out_retry; 4800 } 4801 break; 4802 case LSRJT_CMD_UNSUPPORTED: 4803 /* lpfc nvmet returns this type of LS_RJT when it 4804 * receives an FCP PRLI because lpfc nvmet only 4805 * support NVME. ELS request is terminated for FCP4 4806 * on this rport. 4807 */ 4808 if (stat.un.b.lsRjtRsnCodeExp == 4809 LSEXP_REQ_UNSUPPORTED) { 4810 if (cmd == ELS_CMD_PRLI) { 4811 spin_lock_irq(&ndlp->lock); 4812 ndlp->nlp_flag |= NLP_FCP_PRLI_RJT; 4813 spin_unlock_irq(&ndlp->lock); 4814 retry = 0; 4815 goto out_retry; 4816 } 4817 } 4818 break; 4819 } 4820 break; 4821 4822 case IOSTAT_INTERMED_RSP: 4823 case IOSTAT_BA_RJT: 4824 break; 4825 4826 default: 4827 break; 4828 } 4829 4830 if (link_reset) { 4831 rc = lpfc_link_reset(vport); 4832 if (rc) { 4833 /* Do not give up. Retry PLOGI one more time and attempt 4834 * link reset if PLOGI fails again. 
4835 */ 4836 retry = 1; 4837 delay = 100; 4838 goto out_retry; 4839 } 4840 return 1; 4841 } 4842 4843 if (did == FDMI_DID) 4844 retry = 1; 4845 4846 if ((cmd == ELS_CMD_FLOGI) && 4847 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 4848 !lpfc_error_lost_link(ulp_status, ulp_word4)) { 4849 /* FLOGI retry policy */ 4850 retry = 1; 4851 /* retry FLOGI forever */ 4852 if (phba->link_flag != LS_LOOPBACK_MODE) 4853 maxretry = 0; 4854 else 4855 maxretry = 2; 4856 4857 if (cmdiocb->retry >= 100) 4858 delay = 5000; 4859 else if (cmdiocb->retry >= 32) 4860 delay = 1000; 4861 } else if ((cmd == ELS_CMD_FDISC) && 4862 !lpfc_error_lost_link(ulp_status, ulp_word4)) { 4863 /* retry FDISCs every second up to devloss */ 4864 retry = 1; 4865 maxretry = vport->cfg_devloss_tmo; 4866 delay = 1000; 4867 } 4868 4869 cmdiocb->retry++; 4870 if (maxretry && (cmdiocb->retry >= maxretry)) { 4871 phba->fc_stat.elsRetryExceeded++; 4872 retry = 0; 4873 } 4874 4875 if ((vport->load_flag & FC_UNLOADING) != 0) 4876 retry = 0; 4877 4878out_retry: 4879 if (retry) { 4880 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 4881 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 4882 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4883 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4884 "2849 Stop retry ELS command " 4885 "x%x to remote NPORT x%x, " 4886 "Data: x%x x%x\n", cmd, did, 4887 cmdiocb->retry, delay); 4888 return 0; 4889 } 4890 } 4891 4892 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 4893 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4894 "0107 Retry ELS command x%x to remote " 4895 "NPORT x%x Data: x%x x%x\n", 4896 cmd, did, cmdiocb->retry, delay); 4897 4898 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 4899 ((ulp_status != IOSTAT_LOCAL_REJECT) || 4900 ((ulp_word4 & IOERR_PARAM_MASK) != 4901 IOERR_NO_RESOURCES))) { 4902 /* Don't reset timer for no resources */ 4903 4904 /* If discovery / RSCN timer is running, reset it */ 4905 if (timer_pending(&vport->fc_disctmo) || 4906 (vport->fc_flag & FC_RSCN_MODE)) 4907 lpfc_set_disctmo(vport); 4908 } 4909 4910 phba->fc_stat.elsXmitRetry++; 4911 if (ndlp && delay) { 4912 phba->fc_stat.elsDelayRetry++; 4913 ndlp->nlp_retry = cmdiocb->retry; 4914 4915 /* delay is specified in milliseconds */ 4916 mod_timer(&ndlp->nlp_delayfunc, 4917 jiffies + msecs_to_jiffies(delay)); 4918 spin_lock_irq(&ndlp->lock); 4919 ndlp->nlp_flag |= NLP_DELAY_TMO; 4920 spin_unlock_irq(&ndlp->lock); 4921 4922 ndlp->nlp_prev_state = ndlp->nlp_state; 4923 if ((cmd == ELS_CMD_PRLI) || 4924 (cmd == ELS_CMD_NVMEPRLI)) 4925 lpfc_nlp_set_state(vport, ndlp, 4926 NLP_STE_PRLI_ISSUE); 4927 else if (cmd != ELS_CMD_ADISC) 4928 lpfc_nlp_set_state(vport, ndlp, 4929 NLP_STE_NPR_NODE); 4930 ndlp->nlp_last_elscmd = cmd; 4931 4932 return 1; 4933 } 4934 switch (cmd) { 4935 case ELS_CMD_FLOGI: 4936 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 4937 return 1; 4938 case ELS_CMD_FDISC: 4939 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 4940 return 1; 4941 case ELS_CMD_PLOGI: 4942 if (ndlp) { 4943 ndlp->nlp_prev_state = ndlp->nlp_state; 4944 lpfc_nlp_set_state(vport, ndlp, 4945 NLP_STE_PLOGI_ISSUE); 4946 } 4947 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 4948 return 1; 4949 case ELS_CMD_ADISC: 4950 ndlp->nlp_prev_state = ndlp->nlp_state; 4951 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4952 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 4953 return 1; 4954 case ELS_CMD_PRLI: 4955 case ELS_CMD_NVMEPRLI: 4956 ndlp->nlp_prev_state = ndlp->nlp_state; 4957 lpfc_nlp_set_state(vport, ndlp, 
NLP_STE_PRLI_ISSUE); 4958 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 4959 return 1; 4960 case ELS_CMD_LOGO: 4961 ndlp->nlp_prev_state = ndlp->nlp_state; 4962 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4963 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 4964 return 1; 4965 } 4966 } 4967 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 4968 if (logerr) { 4969 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4970 "0137 No retry ELS command x%x to remote " 4971 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 4972 cmd, did, ulp_status, 4973 ulp_word4); 4974 } 4975 else { 4976 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4977 "0108 No retry ELS command x%x to remote " 4978 "NPORT x%x Retried:%d Error:x%x/%x\n", 4979 cmd, did, cmdiocb->retry, ulp_status, 4980 ulp_word4); 4981 } 4982 return 0; 4983} 4984 4985/** 4986 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 4987 * @phba: pointer to lpfc hba data structure. 4988 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 4989 * 4990 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 4991 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 4992 * checks to see whether there is a lpfc DMA buffer associated with the 4993 * response of the command IOCB. If so, it will be released before releasing 4994 * the lpfc DMA buffer associated with the IOCB itself. 4995 * 4996 * Return code 4997 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 4998 **/ 4999static int 5000lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 5001{ 5002 struct lpfc_dmabuf *buf_ptr; 5003 5004 /* Free the response before processing the command. */ 5005 if (!list_empty(&buf_ptr1->list)) { 5006 list_remove_head(&buf_ptr1->list, buf_ptr, 5007 struct lpfc_dmabuf, 5008 list); 5009 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5010 kfree(buf_ptr); 5011 } 5012 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 5013 kfree(buf_ptr1); 5014 return 0; 5015} 5016 5017/** 5018 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 5019 * @phba: pointer to lpfc hba data structure. 5020 * @buf_ptr: pointer to the lpfc dma buffer data structure. 5021 * 5022 * This routine releases the lpfc Direct Memory Access (DMA) buffer 5023 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 5024 * pool. 5025 * 5026 * Return code 5027 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5028 **/ 5029static int 5030lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 5031{ 5032 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5033 kfree(buf_ptr); 5034 return 0; 5035} 5036 5037/** 5038 * lpfc_els_free_iocb - Free a command iocb and its associated resources 5039 * @phba: pointer to lpfc hba data structure. 5040 * @elsiocb: pointer to lpfc els command iocb data structure. 5041 * 5042 * This routine frees a command IOCB and its associated resources. The 5043 * command IOCB data structure contains the reference to various associated 5044 * resources, these fields must be set to NULL if the associated reference 5045 * not present: 5046 * cmd_dmabuf - reference to cmd. 5047 * cmd_dmabuf->next - reference to rsp 5048 * rsp_dmabuf - unused 5049 * bpl_dmabuf - reference to bpl 5050 * 5051 * It first properly decrements the reference count held on ndlp for the 5052 * IOCB completion callback function. 
If LPFC_DELAY_MEM_FREE flag is not 5053 * set, it invokes the lpfc_els_free_data() routine to release the Direct 5054 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 5055 * adds the DMA buffer to the @phba data structure for the delayed release. 5056 * If reference to the Buffer Pointer List (BPL) is present, the 5057 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 5058 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is 5059 * invoked to release the IOCB data structure back to @phba IOCBQ list. 5060 * 5061 * Return code 5062 * 0 - Success (currently, always return 0) 5063 **/ 5064int 5065lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 5066{ 5067 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 5068 5069 /* The I/O iocb is complete. Clear the node and first dmabuf */ 5070 elsiocb->ndlp = NULL; 5071 5072 /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */ 5073 if (elsiocb->cmd_dmabuf) { 5074 if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) { 5075 /* Firmware could still be in progress of DMAing 5076 * payload, so don't free data buffer till after 5077 * a hbeat. 5078 */ 5079 elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE; 5080 buf_ptr = elsiocb->cmd_dmabuf; 5081 elsiocb->cmd_dmabuf = NULL; 5082 if (buf_ptr) { 5083 buf_ptr1 = NULL; 5084 spin_lock_irq(&phba->hbalock); 5085 if (!list_empty(&buf_ptr->list)) { 5086 list_remove_head(&buf_ptr->list, 5087 buf_ptr1, struct lpfc_dmabuf, 5088 list); 5089 INIT_LIST_HEAD(&buf_ptr1->list); 5090 list_add_tail(&buf_ptr1->list, 5091 &phba->elsbuf); 5092 phba->elsbuf_cnt++; 5093 } 5094 INIT_LIST_HEAD(&buf_ptr->list); 5095 list_add_tail(&buf_ptr->list, &phba->elsbuf); 5096 phba->elsbuf_cnt++; 5097 spin_unlock_irq(&phba->hbalock); 5098 } 5099 } else { 5100 buf_ptr1 = elsiocb->cmd_dmabuf; 5101 lpfc_els_free_data(phba, buf_ptr1); 5102 elsiocb->cmd_dmabuf = NULL; 5103 } 5104 } 5105 5106 if (elsiocb->bpl_dmabuf) { 5107 buf_ptr = elsiocb->bpl_dmabuf; 5108 lpfc_els_free_bpl(phba, buf_ptr); 5109 elsiocb->bpl_dmabuf = NULL; 5110 } 5111 lpfc_sli_release_iocbq(phba, elsiocb); 5112 return 0; 5113} 5114 5115/** 5116 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 5117 * @phba: pointer to lpfc hba data structure. 5118 * @cmdiocb: pointer to lpfc command iocb data structure. 5119 * @rspiocb: pointer to lpfc response iocb data structure. 5120 * 5121 * This routine is the completion callback function to the Logout (LOGO) 5122 * Accept (ACC) Response ELS command. This routine is invoked to indicate 5123 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to 5124 * release the ndlp if it has the last reference remaining (reference count 5125 * is 1). If it succeeds (meaning the ndlp was released), it sets the iocb ndlp 5126 * field to NULL to inform the following lpfc_els_free_iocb() routine no 5127 * ndlp reference count needs to be decremented. Otherwise, the ndlp 5128 * reference use-count shall be decremented by the lpfc_els_free_iocb() 5129 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the 5130 * IOCB data structure. 
5131 **/
5132static void
5133lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
5134 struct lpfc_iocbq *rspiocb)
5135{
5136 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
5137 struct lpfc_vport *vport = cmdiocb->vport;
5138 u32 ulp_status, ulp_word4;
5139
5140 ulp_status = get_job_ulpstatus(phba, rspiocb);
5141 ulp_word4 = get_job_word4(phba, rspiocb);
5142
5143 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5144 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
5145 ulp_status, ulp_word4, ndlp->nlp_DID);
5146 /* ACC to LOGO completes to NPort <nlp_DID> */
5147 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5148 "0109 ACC to LOGO completes to NPort x%x refcnt %d "
5149 "Data: x%x x%x x%x\n",
5150 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
5151 ndlp->nlp_state, ndlp->nlp_rpi);
5152
5153 /* This clause allows the LOGO ACC to complete and free resources
5154 * for the Fabric Domain Controller. It deliberately skips
5155 * the unreg_rpi and rpi release because some fabrics send RDP
5156 * requests after logging out from the initiator.
5157 */
5158 if (ndlp->nlp_type & NLP_FABRIC &&
5159 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK))
5160 goto out;
5161
5162 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
5163 /* If PLOGI is being retried, PLOGI completion will cleanup the
5164 * node. The NLP_NPR_2B_DISC flag needs to be retained to make
5165 * progress on nodes discovered from last RSCN.
5166 */
5167 if ((ndlp->nlp_flag & NLP_DELAY_TMO) &&
5168 (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
5169 goto out;
5170
5171 /* NPort Recovery mode or node is just allocated */
5172 if (!lpfc_nlp_not_used(ndlp)) {
5173 /* A LOGO is completing and the node is in NPR state.
5174 * Just unregister the RPI because the node is still
5175 * required.
5176 */
5177 lpfc_unreg_rpi(vport, ndlp);
5178 } else {
5179 /* Indicate the node has already been released; it should
5180 * not be referenced from within lpfc_els_free_iocb.
5181 */
5182 cmdiocb->ndlp = NULL;
5183 }
5184 }
5185 out:
5186 /*
5187 * The driver received a LOGO from the rport and has ACK'd it.
5188 * At this point, the driver is done so release the IOCB
5189 */
5190 lpfc_els_free_iocb(phba, cmdiocb);
5191 lpfc_nlp_put(ndlp);
5192}
5193
5194/**
5195 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
5196 * @phba: pointer to lpfc hba data structure.
5197 * @pmb: pointer to the driver internal queue element for mailbox command.
5198 *
5199 * This routine is the completion callback function for the unregister
5200 * default RPI (Remote Port Index) mailbox command for the @phba. If an
5201 * ndlp is attached to the mailbox, the routine clears the
5202 * NLP_REG_LOGIN_SEND flag (and NLP_UNREG_INP when the command is an
5203 * UNREG_LOGIN), drops the ndlp reference held for this completion
5204 * callback, and drops the node. Finally, the mailbox resources are
5205 * released back to the pool.
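 *
 * This completion handler is installed, for example, from
 * lpfc_cmpl_els_rsp() when the node is flagged with NLP_RM_DFLT_RPI;
 * the mailbox is then marked LPFC_MBX_IMED_UNREG.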
5206 **/
5207void
5208lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5209{
5210 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
5211 u32 mbx_flag = pmb->mbox_flag;
5212 u32 mbx_cmd = pmb->u.mb.mbxCommand;
5213
5214 if (ndlp) {
5215 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
5216 "0006 rpi x%x DID:%x flg:%x %d x%px "
5217 "mbx_cmd x%x mbx_flag x%x x%px\n",
5218 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
5219 kref_read(&ndlp->kref), ndlp, mbx_cmd,
5220 mbx_flag, pmb);
5221
5222 /* This ends the default/temporary RPI cleanup logic for this
5223 * ndlp, and the node and rpi need to be released. Free the rpi
5224 * first on an UNREG_LOGIN and then release the final
5225 * references.
5226 */
5227 spin_lock_irq(&ndlp->lock);
5228 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
5229 if (mbx_cmd == MBX_UNREG_LOGIN)
5230 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5231 spin_unlock_irq(&ndlp->lock);
5232 lpfc_nlp_put(ndlp);
5233 lpfc_drop_node(ndlp->vport, ndlp);
5234 }
5235
5236 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
5237}
5238
5239/**
5240 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
5241 * @phba: pointer to lpfc hba data structure.
5242 * @cmdiocb: pointer to lpfc command iocb data structure.
5243 * @rspiocb: pointer to lpfc response iocb data structure.
5244 *
5245 * This routine is the completion callback function for an ELS Response IOCB
5246 * command. In the normal case, this callback function just updates the
5247 * nlp_flag bits in the ndlp data structure; if the mbox command reference
5248 * field in the command IOCB is not NULL, the referenced mailbox command is
5249 * sent out, and then the lpfc_els_free_iocb() routine is invoked to release
5250 * the IOCB.
5251 **/
5252static void
5253lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
5254 struct lpfc_iocbq *rspiocb)
5255{
5256 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
5257 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
5258 struct Scsi_Host *shost = vport ?
lpfc_shost_from_vport(vport) : NULL; 5259 IOCB_t *irsp; 5260 LPFC_MBOXQ_t *mbox = NULL; 5261 u32 ulp_status, ulp_word4, tmo, did, iotag; 5262 5263 if (!vport) { 5264 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5265 "3177 ELS response failed\n"); 5266 goto out; 5267 } 5268 if (cmdiocb->context_un.mbox) 5269 mbox = cmdiocb->context_un.mbox; 5270 5271 ulp_status = get_job_ulpstatus(phba, rspiocb); 5272 ulp_word4 = get_job_word4(phba, rspiocb); 5273 did = get_job_els_rsp64_did(phba, cmdiocb); 5274 5275 if (phba->sli_rev == LPFC_SLI_REV4) { 5276 tmo = get_wqe_tmo(cmdiocb); 5277 iotag = get_wqe_reqtag(cmdiocb); 5278 } else { 5279 irsp = &rspiocb->iocb; 5280 tmo = irsp->ulpTimeout; 5281 iotag = irsp->ulpIoTag; 5282 } 5283 5284 /* Check to see if link went down during discovery */ 5285 if (!ndlp || lpfc_els_chk_latt(vport)) { 5286 if (mbox) 5287 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5288 goto out; 5289 } 5290 5291 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5292 "ELS rsp cmpl: status:x%x/x%x did:x%x", 5293 ulp_status, ulp_word4, did); 5294 /* ELS response tag <ulpIoTag> completes */ 5295 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5296 "0110 ELS response tag x%x completes " 5297 "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n", 5298 iotag, ulp_status, ulp_word4, tmo, 5299 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5300 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp); 5301 if (mbox) { 5302 if (ulp_status == 0 5303 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 5304 if (!lpfc_unreg_rpi(vport, ndlp) && 5305 (!(vport->fc_flag & FC_PT2PT))) { 5306 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 5307 ndlp->nlp_state == 5308 NLP_STE_REG_LOGIN_ISSUE) { 5309 lpfc_printf_vlog(vport, KERN_INFO, 5310 LOG_DISCOVERY, 5311 "0314 PLOGI recov " 5312 "DID x%x " 5313 "Data: x%x x%x x%x\n", 5314 ndlp->nlp_DID, 5315 ndlp->nlp_state, 5316 ndlp->nlp_rpi, 5317 ndlp->nlp_flag); 5318 goto out_free_mbox; 5319 } 5320 } 5321 5322 /* Increment reference count to ndlp to hold the 5323 * reference to ndlp for the callback function. 5324 */ 5325 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 5326 if (!mbox->ctx_ndlp) 5327 goto out_free_mbox; 5328 5329 mbox->vport = vport; 5330 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 5331 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 5332 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 5333 } 5334 else { 5335 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 5336 ndlp->nlp_prev_state = ndlp->nlp_state; 5337 lpfc_nlp_set_state(vport, ndlp, 5338 NLP_STE_REG_LOGIN_ISSUE); 5339 } 5340 5341 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 5342 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 5343 != MBX_NOT_FINISHED) 5344 goto out; 5345 5346 /* Decrement the ndlp reference count we 5347 * set for this failed mailbox command. 5348 */ 5349 lpfc_nlp_put(ndlp); 5350 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5351 5352 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 5353 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5354 "0138 ELS rsp: Cannot issue reg_login for x%x " 5355 "Data: x%x x%x x%x\n", 5356 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5357 ndlp->nlp_rpi); 5358 } 5359out_free_mbox: 5360 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5361 } 5362out: 5363 if (ndlp && shost) { 5364 spin_lock_irq(&ndlp->lock); 5365 if (mbox) 5366 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 5367 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; 5368 spin_unlock_irq(&ndlp->lock); 5369 } 5370 5371 /* An SLI4 NPIV instance wants to drop the node at this point under 5372 * these conditions and release the RPI. 
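 * The conditions checked below are: the node is not registered with the
 * SCSI transport, NLP_RELEASE_RPI is set, and the node is not in the
 * PLOGI_ISSUE or REG_LOGIN_ISSUE state.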
5373 */ 5374 if (phba->sli_rev == LPFC_SLI_REV4 && 5375 (vport && vport->port_type == LPFC_NPIV_PORT) && 5376 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) && 5377 ndlp->nlp_flag & NLP_RELEASE_RPI) { 5378 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && 5379 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { 5380 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 5381 spin_lock_irq(&ndlp->lock); 5382 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 5383 ndlp->nlp_flag &= ~NLP_RELEASE_RPI; 5384 spin_unlock_irq(&ndlp->lock); 5385 lpfc_drop_node(vport, ndlp); 5386 } 5387 } 5388 5389 /* Release the originating I/O reference. */ 5390 lpfc_els_free_iocb(phba, cmdiocb); 5391 lpfc_nlp_put(ndlp); 5392 return; 5393} 5394 5395/** 5396 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 5397 * @vport: pointer to a host virtual N_Port data structure. 5398 * @flag: the els command code to be accepted. 5399 * @oldiocb: pointer to the original lpfc command iocb data structure. 5400 * @ndlp: pointer to a node-list data structure. 5401 * @mbox: pointer to the driver internal queue element for mailbox command. 5402 * 5403 * This routine prepares and issues an Accept (ACC) response IOCB 5404 * command. It uses the @flag to properly set up the IOCB field for the 5405 * specific ACC response command to be issued and invokes the 5406 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 5407 * @mbox pointer is passed in, it will be put into the context_un.mbox 5408 * field of the IOCB for the completion callback function to issue the 5409 * mailbox command to the HBA later when callback is invoked. 5410 * 5411 * Note that the ndlp reference count will be incremented by 1 for holding the 5412 * ndlp and the reference to ndlp will be stored into the ndlp field of 5413 * the IOCB for the completion callback function to the corresponding 5414 * response ELS IOCB command. 
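 *
 * For example, the unsolicited PLOGI handling in the discovery state
 * machine typically accepts the login with a call of the form
 * lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, oldiocb, ndlp, mbox), where the
 * mailbox carries the REG_LOGIN command to be issued from the completion
 * handler.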
5415 * 5416 * Return code 5417 * 0 - Successfully issued acc response 5418 * 1 - Failed to issue acc response 5419 **/ 5420int 5421lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 5422 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5423 LPFC_MBOXQ_t *mbox) 5424{ 5425 struct lpfc_hba *phba = vport->phba; 5426 IOCB_t *icmd; 5427 IOCB_t *oldcmd; 5428 union lpfc_wqe128 *wqe; 5429 union lpfc_wqe128 *oldwqe = &oldiocb->wqe; 5430 struct lpfc_iocbq *elsiocb; 5431 uint8_t *pcmd; 5432 struct serv_parm *sp; 5433 uint16_t cmdsize; 5434 int rc; 5435 ELS_PKT *els_pkt_ptr; 5436 struct fc_els_rdf_resp *rdf_resp; 5437 5438 switch (flag) { 5439 case ELS_CMD_ACC: 5440 cmdsize = sizeof(uint32_t); 5441 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5442 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5443 if (!elsiocb) { 5444 spin_lock_irq(&ndlp->lock); 5445 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5446 spin_unlock_irq(&ndlp->lock); 5447 return 1; 5448 } 5449 5450 if (phba->sli_rev == LPFC_SLI_REV4) { 5451 wqe = &elsiocb->wqe; 5452 /* XRI / rx_id */ 5453 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5454 bf_get(wqe_ctxt_tag, 5455 &oldwqe->xmit_els_rsp.wqe_com)); 5456 5457 /* oxid */ 5458 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5459 bf_get(wqe_rcvoxid, 5460 &oldwqe->xmit_els_rsp.wqe_com)); 5461 } else { 5462 icmd = &elsiocb->iocb; 5463 oldcmd = &oldiocb->iocb; 5464 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5465 icmd->unsli3.rcvsli3.ox_id = 5466 oldcmd->unsli3.rcvsli3.ox_id; 5467 } 5468 5469 pcmd = elsiocb->cmd_dmabuf->virt; 5470 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5471 pcmd += sizeof(uint32_t); 5472 5473 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5474 "Issue ACC: did:x%x flg:x%x", 5475 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5476 break; 5477 case ELS_CMD_FLOGI: 5478 case ELS_CMD_PLOGI: 5479 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 5480 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5481 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5482 if (!elsiocb) 5483 return 1; 5484 5485 if (phba->sli_rev == LPFC_SLI_REV4) { 5486 wqe = &elsiocb->wqe; 5487 /* XRI / rx_id */ 5488 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5489 bf_get(wqe_ctxt_tag, 5490 &oldwqe->xmit_els_rsp.wqe_com)); 5491 5492 /* oxid */ 5493 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5494 bf_get(wqe_rcvoxid, 5495 &oldwqe->xmit_els_rsp.wqe_com)); 5496 } else { 5497 icmd = &elsiocb->iocb; 5498 oldcmd = &oldiocb->iocb; 5499 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5500 icmd->unsli3.rcvsli3.ox_id = 5501 oldcmd->unsli3.rcvsli3.ox_id; 5502 } 5503 5504 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5505 5506 if (mbox) 5507 elsiocb->context_un.mbox = mbox; 5508 5509 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5510 pcmd += sizeof(uint32_t); 5511 sp = (struct serv_parm *)pcmd; 5512 5513 if (flag == ELS_CMD_FLOGI) { 5514 /* Copy the received service parameters back */ 5515 memcpy(sp, &phba->fc_fabparam, 5516 sizeof(struct serv_parm)); 5517 5518 /* Clear the F_Port bit */ 5519 sp->cmn.fPort = 0; 5520 5521 /* Mark all class service parameters as invalid */ 5522 sp->cls1.classValid = 0; 5523 sp->cls2.classValid = 0; 5524 sp->cls3.classValid = 0; 5525 sp->cls4.classValid = 0; 5526 5527 /* Copy our worldwide names */ 5528 memcpy(&sp->portName, &vport->fc_sparam.portName, 5529 sizeof(struct lpfc_name)); 5530 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 5531 sizeof(struct lpfc_name)); 5532 } else { 5533 memcpy(pcmd, &vport->fc_sparam, 5534 sizeof(struct serv_parm)); 5535 5536 
sp->cmn.valid_vendor_ver_level = 0; 5537 memset(sp->un.vendorVersion, 0, 5538 sizeof(sp->un.vendorVersion)); 5539 sp->cmn.bbRcvSizeMsb &= 0xF; 5540 5541 /* If our firmware supports this feature, convey that 5542 * info to the target using the vendor specific field. 5543 */ 5544 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 5545 sp->cmn.valid_vendor_ver_level = 1; 5546 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 5547 sp->un.vv.flags = 5548 cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 5549 } 5550 } 5551 5552 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5553 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", 5554 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5555 break; 5556 case ELS_CMD_PRLO: 5557 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 5558 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5559 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 5560 if (!elsiocb) 5561 return 1; 5562 5563 if (phba->sli_rev == LPFC_SLI_REV4) { 5564 wqe = &elsiocb->wqe; 5565 /* XRI / rx_id */ 5566 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5567 bf_get(wqe_ctxt_tag, 5568 &oldwqe->xmit_els_rsp.wqe_com)); 5569 5570 /* oxid */ 5571 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5572 bf_get(wqe_rcvoxid, 5573 &oldwqe->xmit_els_rsp.wqe_com)); 5574 } else { 5575 icmd = &elsiocb->iocb; 5576 oldcmd = &oldiocb->iocb; 5577 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5578 icmd->unsli3.rcvsli3.ox_id = 5579 oldcmd->unsli3.rcvsli3.ox_id; 5580 } 5581 5582 pcmd = (u8 *) elsiocb->cmd_dmabuf->virt; 5583 5584 memcpy(pcmd, oldiocb->cmd_dmabuf->virt, 5585 sizeof(uint32_t) + sizeof(PRLO)); 5586 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 5587 els_pkt_ptr = (ELS_PKT *) pcmd; 5588 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 5589 5590 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5591 "Issue ACC PRLO: did:x%x flg:x%x", 5592 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5593 break; 5594 case ELS_CMD_RDF: 5595 cmdsize = sizeof(*rdf_resp); 5596 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5597 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5598 if (!elsiocb) 5599 return 1; 5600 5601 if (phba->sli_rev == LPFC_SLI_REV4) { 5602 wqe = &elsiocb->wqe; 5603 /* XRI / rx_id */ 5604 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5605 bf_get(wqe_ctxt_tag, 5606 &oldwqe->xmit_els_rsp.wqe_com)); 5607 5608 /* oxid */ 5609 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5610 bf_get(wqe_rcvoxid, 5611 &oldwqe->xmit_els_rsp.wqe_com)); 5612 } else { 5613 icmd = &elsiocb->iocb; 5614 oldcmd = &oldiocb->iocb; 5615 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5616 icmd->unsli3.rcvsli3.ox_id = 5617 oldcmd->unsli3.rcvsli3.ox_id; 5618 } 5619 5620 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5621 rdf_resp = (struct fc_els_rdf_resp *)pcmd; 5622 memset(rdf_resp, 0, sizeof(*rdf_resp)); 5623 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC; 5624 5625 /* FC-LS-5 specifies desc_list_len shall be set to 12 */ 5626 rdf_resp->desc_list_len = cpu_to_be32(12); 5627 5628 /* FC-LS-5 specifies LS REQ Information descriptor */ 5629 rdf_resp->lsri.desc_tag = cpu_to_be32(1); 5630 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32)); 5631 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF; 5632 break; 5633 default: 5634 return 1; 5635 } 5636 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 5637 spin_lock_irq(&ndlp->lock); 5638 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || 5639 ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) 5640 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5641 spin_unlock_irq(&ndlp->lock); 5642 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc; 5643 } else { 5644 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5645 } 
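	/* All ACC variants share the transmit path below. The completion
	 * handler chosen above (LOGO ACC vs. the generic ELS response
	 * handler) determines how the node reference and any deferred
	 * mailbox command are cleaned up.
	 */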
5646 5647 phba->fc_stat.elsXmitACC++; 5648 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5649 if (!elsiocb->ndlp) { 5650 lpfc_els_free_iocb(phba, elsiocb); 5651 return 1; 5652 } 5653 5654 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5655 if (rc == IOCB_ERROR) { 5656 lpfc_els_free_iocb(phba, elsiocb); 5657 lpfc_nlp_put(ndlp); 5658 return 1; 5659 } 5660 5661 /* Xmit ELS ACC response tag <ulpIoTag> */ 5662 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5663 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 5664 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5665 "RPI: x%x, fc_flag x%x refcnt %d\n", 5666 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5667 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5668 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref)); 5669 return 0; 5670} 5671 5672/** 5673 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command 5674 * @vport: pointer to a virtual N_Port data structure. 5675 * @rejectError: reject response to issue 5676 * @oldiocb: pointer to the original lpfc command iocb data structure. 5677 * @ndlp: pointer to a node-list data structure. 5678 * @mbox: pointer to the driver internal queue element for mailbox command. 5679 * 5680 * This routine prepares and issue an Reject (RJT) response IOCB 5681 * command. If a @mbox pointer is passed in, it will be put into the 5682 * context_un.mbox field of the IOCB for the completion callback function 5683 * to issue to the HBA later. 5684 * 5685 * Note that the ndlp reference count will be incremented by 1 for holding the 5686 * ndlp and the reference to ndlp will be stored into the ndlp field of 5687 * the IOCB for the completion callback function to the reject response 5688 * ELS IOCB command. 5689 * 5690 * Return code 5691 * 0 - Successfully issued reject response 5692 * 1 - Failed to issue reject response 5693 **/ 5694int 5695lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 5696 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5697 LPFC_MBOXQ_t *mbox) 5698{ 5699 int rc; 5700 struct lpfc_hba *phba = vport->phba; 5701 IOCB_t *icmd; 5702 IOCB_t *oldcmd; 5703 union lpfc_wqe128 *wqe; 5704 struct lpfc_iocbq *elsiocb; 5705 uint8_t *pcmd; 5706 uint16_t cmdsize; 5707 5708 cmdsize = 2 * sizeof(uint32_t); 5709 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5710 ndlp->nlp_DID, ELS_CMD_LS_RJT); 5711 if (!elsiocb) 5712 return 1; 5713 5714 if (phba->sli_rev == LPFC_SLI_REV4) { 5715 wqe = &elsiocb->wqe; 5716 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5717 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 5718 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5719 get_job_rcvoxid(phba, oldiocb)); 5720 } else { 5721 icmd = &elsiocb->iocb; 5722 oldcmd = &oldiocb->iocb; 5723 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5724 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5725 } 5726 5727 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 5728 5729 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 5730 pcmd += sizeof(uint32_t); 5731 *((uint32_t *) (pcmd)) = rejectError; 5732 5733 if (mbox) 5734 elsiocb->context_un.mbox = mbox; 5735 5736 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 5737 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5738 "0129 Xmit ELS RJT x%x response tag x%x " 5739 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 5740 "rpi x%x\n", 5741 rejectError, elsiocb->iotag, 5742 get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID, 5743 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 5744 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 
5745 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 5746 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 5747 5748 phba->fc_stat.elsXmitLSRJT++; 5749 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5750 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5751 if (!elsiocb->ndlp) { 5752 lpfc_els_free_iocb(phba, elsiocb); 5753 return 1; 5754 } 5755 5756 /* The NPIV instance is rejecting this unsolicited ELS. Make sure the 5757 * node's assigned RPI gets released provided this node is not already 5758 * registered with the transport. 5759 */ 5760 if (phba->sli_rev == LPFC_SLI_REV4 && 5761 vport->port_type == LPFC_NPIV_PORT && 5762 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5763 spin_lock_irq(&ndlp->lock); 5764 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5765 spin_unlock_irq(&ndlp->lock); 5766 } 5767 5768 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5769 if (rc == IOCB_ERROR) { 5770 lpfc_els_free_iocb(phba, elsiocb); 5771 lpfc_nlp_put(ndlp); 5772 return 1; 5773 } 5774 5775 return 0; 5776} 5777 5778 /** 5779 * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric. 5780 * @vport: pointer to a host virtual N_Port data structure. 5781 * @cmdiocb: pointer to the original lpfc command iocb data structure. 5782 * @ndlp: NPort to where rsp is directed 5783 * 5784 * This routine issues an EDC ACC RSP to the F-Port Controller to communicate 5785 * this N_Port's support of hardware signals in its Congestion 5786 * Capabilities Descriptor. 5787 * 5788 * Return code 5789 * 0 - Successfully issued edc rsp command 5790 * 1 - Failed to issue edc rsp command 5791 **/ 5792static int 5793lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5794 struct lpfc_nodelist *ndlp) 5795{ 5796 struct lpfc_hba *phba = vport->phba; 5797 struct lpfc_els_edc_rsp *edc_rsp; 5798 struct lpfc_iocbq *elsiocb; 5799 IOCB_t *icmd, *cmd; 5800 union lpfc_wqe128 *wqe; 5801 uint8_t *pcmd; 5802 int cmdsize, rc; 5803 5804 cmdsize = sizeof(struct lpfc_els_edc_rsp); 5805 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry, 5806 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5807 if (!elsiocb) 5808 return 1; 5809 5810 if (phba->sli_rev == LPFC_SLI_REV4) { 5811 wqe = &elsiocb->wqe; 5812 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5813 get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */ 5814 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5815 get_job_rcvoxid(phba, cmdiocb)); 5816 } else { 5817 icmd = &elsiocb->iocb; 5818 cmd = &cmdiocb->iocb; 5819 icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */ 5820 icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id; 5821 } 5822 5823 pcmd = elsiocb->cmd_dmabuf->virt; 5824 memset(pcmd, 0, cmdsize); 5825 5826 edc_rsp = (struct lpfc_els_edc_rsp *)pcmd; 5827 edc_rsp->edc_rsp.acc_hdr.la_cmd = ELS_LS_ACC; 5828 edc_rsp->edc_rsp.desc_list_len = cpu_to_be32( 5829 FC_TLV_DESC_LENGTH_FROM_SZ(struct lpfc_els_edc_rsp)); 5830 edc_rsp->edc_rsp.lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO); 5831 edc_rsp->edc_rsp.lsri.desc_len = cpu_to_be32( 5832 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc)); 5833 edc_rsp->edc_rsp.lsri.rqst_w0.cmd = ELS_EDC; 5834 lpfc_format_edc_cgn_desc(phba, &edc_rsp->cgn_desc); 5835 5836 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5837 "Issue EDC ACC: did:x%x flg:x%x refcnt %d", 5838 ndlp->nlp_DID, ndlp->nlp_flag, 5839 kref_read(&ndlp->kref)); 5840 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5841 5842 phba->fc_stat.elsXmitACC++; 5843 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5844 if (!elsiocb->ndlp) { 5845 lpfc_els_free_iocb(phba, elsiocb); 5846 return 1; 5847 } 5848 5849 rc 
= lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5850 if (rc == IOCB_ERROR) { 5851 lpfc_els_free_iocb(phba, elsiocb); 5852 lpfc_nlp_put(ndlp); 5853 return 1; 5854 } 5855 5856 /* Xmit ELS ACC response tag <ulpIoTag> */ 5857 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5858 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " 5859 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5860 "RPI: x%x, fc_flag x%x\n", 5861 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5862 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5863 ndlp->nlp_rpi, vport->fc_flag); 5864 5865 return 0; 5866} 5867 5868/** 5869 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 5870 * @vport: pointer to a virtual N_Port data structure. 5871 * @oldiocb: pointer to the original lpfc command iocb data structure. 5872 * @ndlp: pointer to a node-list data structure. 5873 * 5874 * This routine prepares and issues an Accept (ACC) response to Address 5875 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 5876 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5877 * 5878 * Note that the ndlp reference count will be incremented by 1 for holding the 5879 * ndlp and the reference to ndlp will be stored into the ndlp field of 5880 * the IOCB for the completion callback function to the ADISC Accept response 5881 * ELS IOCB command. 5882 * 5883 * Return code 5884 * 0 - Successfully issued acc adisc response 5885 * 1 - Failed to issue adisc acc response 5886 **/ 5887int 5888lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5889 struct lpfc_nodelist *ndlp) 5890{ 5891 struct lpfc_hba *phba = vport->phba; 5892 ADISC *ap; 5893 IOCB_t *icmd, *oldcmd; 5894 union lpfc_wqe128 *wqe; 5895 struct lpfc_iocbq *elsiocb; 5896 uint8_t *pcmd; 5897 uint16_t cmdsize; 5898 int rc; 5899 u32 ulp_context; 5900 5901 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 5902 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5903 ndlp->nlp_DID, ELS_CMD_ACC); 5904 if (!elsiocb) 5905 return 1; 5906 5907 if (phba->sli_rev == LPFC_SLI_REV4) { 5908 wqe = &elsiocb->wqe; 5909 /* XRI / rx_id */ 5910 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5911 get_job_ulpcontext(phba, oldiocb)); 5912 ulp_context = get_job_ulpcontext(phba, elsiocb); 5913 /* oxid */ 5914 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5915 get_job_rcvoxid(phba, oldiocb)); 5916 } else { 5917 icmd = &elsiocb->iocb; 5918 oldcmd = &oldiocb->iocb; 5919 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5920 ulp_context = elsiocb->iocb.ulpContext; 5921 icmd->unsli3.rcvsli3.ox_id = 5922 oldcmd->unsli3.rcvsli3.ox_id; 5923 } 5924 5925 /* Xmit ADISC ACC response tag <ulpIoTag> */ 5926 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5927 "0130 Xmit ADISC ACC response iotag x%x xri: " 5928 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 5929 elsiocb->iotag, ulp_context, 5930 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5931 ndlp->nlp_rpi); 5932 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 5933 5934 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5935 pcmd += sizeof(uint32_t); 5936 5937 ap = (ADISC *) (pcmd); 5938 ap->hardAL_PA = phba->fc_pref_ALPA; 5939 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 5940 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 5941 ap->DID = be32_to_cpu(vport->fc_myDID); 5942 5943 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5944 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", 5945 ndlp->nlp_DID, ndlp->nlp_flag, 
kref_read(&ndlp->kref)); 5946 5947 phba->fc_stat.elsXmitACC++; 5948 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5949 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5950 if (!elsiocb->ndlp) { 5951 lpfc_els_free_iocb(phba, elsiocb); 5952 return 1; 5953 } 5954 5955 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5956 if (rc == IOCB_ERROR) { 5957 lpfc_els_free_iocb(phba, elsiocb); 5958 lpfc_nlp_put(ndlp); 5959 return 1; 5960 } 5961 5962 return 0; 5963} 5964 5965/** 5966 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 5967 * @vport: pointer to a virtual N_Port data structure. 5968 * @oldiocb: pointer to the original lpfc command iocb data structure. 5969 * @ndlp: pointer to a node-list data structure. 5970 * 5971 * This routine prepares and issues an Accept (ACC) response to Process 5972 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 5973 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5974 * 5975 * Note that the ndlp reference count will be incremented by 1 for holding the 5976 * ndlp and the reference to ndlp will be stored into the ndlp field of 5977 * the IOCB for the completion callback function to the PRLI Accept response 5978 * ELS IOCB command. 5979 * 5980 * Return code 5981 * 0 - Successfully issued acc prli response 5982 * 1 - Failed to issue acc prli response 5983 **/ 5984int 5985lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5986 struct lpfc_nodelist *ndlp) 5987{ 5988 struct lpfc_hba *phba = vport->phba; 5989 PRLI *npr; 5990 struct lpfc_nvme_prli *npr_nvme; 5991 lpfc_vpd_t *vpd; 5992 IOCB_t *icmd; 5993 IOCB_t *oldcmd; 5994 union lpfc_wqe128 *wqe; 5995 struct lpfc_iocbq *elsiocb; 5996 uint8_t *pcmd; 5997 uint16_t cmdsize; 5998 uint32_t prli_fc4_req, *req_payload; 5999 struct lpfc_dmabuf *req_buf; 6000 int rc; 6001 u32 elsrspcmd, ulp_context; 6002 6003 /* Need the incoming PRLI payload to determine if the ACC is for an 6004 * FC4 or NVME PRLI type. The PRLI type is at word 1. 6005 */ 6006 req_buf = oldiocb->cmd_dmabuf; 6007 req_payload = (((uint32_t *)req_buf->virt) + 1); 6008 6009 /* PRLI type payload is at byte 3 for FCP or NVME. 
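	 * Word 1 is byte-swapped below and the FC-4 TYPE is taken from its
	 * most significant byte (hence the shift by 24).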
*/ 6010 prli_fc4_req = be32_to_cpu(*req_payload); 6011 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 6012 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6013 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 6014 prli_fc4_req, *((uint32_t *)req_payload)); 6015 6016 if (prli_fc4_req == PRLI_FCP_TYPE) { 6017 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 6018 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 6019 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 6020 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 6021 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 6022 } else { 6023 return 1; 6024 } 6025 6026 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6027 ndlp->nlp_DID, elsrspcmd); 6028 if (!elsiocb) 6029 return 1; 6030 6031 if (phba->sli_rev == LPFC_SLI_REV4) { 6032 wqe = &elsiocb->wqe; 6033 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6034 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6035 ulp_context = get_job_ulpcontext(phba, elsiocb); 6036 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6037 get_job_rcvoxid(phba, oldiocb)); 6038 } else { 6039 icmd = &elsiocb->iocb; 6040 oldcmd = &oldiocb->iocb; 6041 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6042 ulp_context = elsiocb->iocb.ulpContext; 6043 icmd->unsli3.rcvsli3.ox_id = 6044 oldcmd->unsli3.rcvsli3.ox_id; 6045 } 6046 6047 /* Xmit PRLI ACC response tag <ulpIoTag> */ 6048 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6049 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 6050 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 6051 elsiocb->iotag, ulp_context, 6052 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6053 ndlp->nlp_rpi); 6054 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6055 memset(pcmd, 0, cmdsize); 6056 6057 *((uint32_t *)(pcmd)) = elsrspcmd; 6058 pcmd += sizeof(uint32_t); 6059 6060 /* For PRLI, remainder of payload is PRLI parameter page */ 6061 vpd = &phba->vpd; 6062 6063 if (prli_fc4_req == PRLI_FCP_TYPE) { 6064 /* 6065 * If the remote port is a target and our firmware version 6066 * is 3.20 or later, set the following bits for FC-TAPE 6067 * support. 6068 */ 6069 npr = (PRLI *) pcmd; 6070 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 6071 (vpd->rev.feaLevelHigh >= 0x02)) { 6072 npr->ConfmComplAllowed = 1; 6073 npr->Retry = 1; 6074 npr->TaskRetryIdReq = 1; 6075 } 6076 npr->acceptRspCode = PRLI_REQ_EXECUTED; 6077 npr->estabImagePair = 1; 6078 npr->readXferRdyDis = 1; 6079 npr->ConfmComplAllowed = 1; 6080 npr->prliType = PRLI_FCP_TYPE; 6081 npr->initiatorFunc = 1; 6082 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 6083 /* Respond with an NVME PRLI Type */ 6084 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 6085 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 6086 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 6087 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 6088 if (phba->nvmet_support) { 6089 bf_set(prli_tgt, npr_nvme, 1); 6090 bf_set(prli_disc, npr_nvme, 1); 6091 if (phba->cfg_nvme_enable_fb) { 6092 bf_set(prli_fba, npr_nvme, 1); 6093 6094 /* TBD. Target mode needs to post buffers 6095 * that support the configured first burst 6096 * byte size. 
6097 */ 6098 bf_set(prli_fb_sz, npr_nvme, 6099 phba->cfg_nvmet_fb_size); 6100 } 6101 } else { 6102 bf_set(prli_init, npr_nvme, 1); 6103 } 6104 6105 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 6106 "6015 NVME issue PRLI ACC word1 x%08x " 6107 "word4 x%08x word5 x%08x flag x%x, " 6108 "fcp_info x%x nlp_type x%x\n", 6109 npr_nvme->word1, npr_nvme->word4, 6110 npr_nvme->word5, ndlp->nlp_flag, 6111 ndlp->nlp_fcp_info, ndlp->nlp_type); 6112 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 6113 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 6114 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 6115 } else 6116 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6117 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 6118 prli_fc4_req, ndlp->nlp_fc4_type, 6119 ndlp->nlp_DID); 6120 6121 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6122 "Issue ACC PRLI: did:x%x flg:x%x", 6123 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6124 6125 phba->fc_stat.elsXmitACC++; 6126 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6127 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6128 if (!elsiocb->ndlp) { 6129 lpfc_els_free_iocb(phba, elsiocb); 6130 return 1; 6131 } 6132 6133 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6134 if (rc == IOCB_ERROR) { 6135 lpfc_els_free_iocb(phba, elsiocb); 6136 lpfc_nlp_put(ndlp); 6137 return 1; 6138 } 6139 6140 return 0; 6141} 6142 6143/** 6144 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 6145 * @vport: pointer to a virtual N_Port data structure. 6146 * @format: rnid command format. 6147 * @oldiocb: pointer to the original lpfc command iocb data structure. 6148 * @ndlp: pointer to a node-list data structure. 6149 * 6150 * This routine issues a Request Node Identification Data (RNID) Accept 6151 * (ACC) response. It constructs the RNID ACC response command according to 6152 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 6153 * issue the response. 6154 * 6155 * Note that the ndlp reference count will be incremented by 1 for holding the 6156 * ndlp and the reference to ndlp will be stored into the ndlp field of 6157 * the IOCB for the completion callback function. 
6158 * 6159 * Return code 6160 * 0 - Successfully issued acc rnid response 6161 * 1 - Failed to issue acc rnid response 6162 **/ 6163static int 6164lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 6165 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6166{ 6167 struct lpfc_hba *phba = vport->phba; 6168 RNID *rn; 6169 IOCB_t *icmd, *oldcmd; 6170 union lpfc_wqe128 *wqe; 6171 struct lpfc_iocbq *elsiocb; 6172 uint8_t *pcmd; 6173 uint16_t cmdsize; 6174 int rc; 6175 u32 ulp_context; 6176 6177 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 6178 + (2 * sizeof(struct lpfc_name)); 6179 if (format) 6180 cmdsize += sizeof(RNID_TOP_DISC); 6181 6182 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6183 ndlp->nlp_DID, ELS_CMD_ACC); 6184 if (!elsiocb) 6185 return 1; 6186 6187 if (phba->sli_rev == LPFC_SLI_REV4) { 6188 wqe = &elsiocb->wqe; 6189 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6190 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6191 ulp_context = get_job_ulpcontext(phba, elsiocb); 6192 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6193 get_job_rcvoxid(phba, oldiocb)); 6194 } else { 6195 icmd = &elsiocb->iocb; 6196 oldcmd = &oldiocb->iocb; 6197 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6198 ulp_context = elsiocb->iocb.ulpContext; 6199 icmd->unsli3.rcvsli3.ox_id = 6200 oldcmd->unsli3.rcvsli3.ox_id; 6201 } 6202 6203 /* Xmit RNID ACC response tag <ulpIoTag> */ 6204 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6205 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 6206 elsiocb->iotag, ulp_context); 6207 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6208 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6209 pcmd += sizeof(uint32_t); 6210 6211 memset(pcmd, 0, sizeof(RNID)); 6212 rn = (RNID *) (pcmd); 6213 rn->Format = format; 6214 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 6215 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6216 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6217 switch (format) { 6218 case 0: 6219 rn->SpecificLen = 0; 6220 break; 6221 case RNID_TOPOLOGY_DISC: 6222 rn->SpecificLen = sizeof(RNID_TOP_DISC); 6223 memcpy(&rn->un.topologyDisc.portName, 6224 &vport->fc_portname, sizeof(struct lpfc_name)); 6225 rn->un.topologyDisc.unitType = RNID_HBA; 6226 rn->un.topologyDisc.physPort = 0; 6227 rn->un.topologyDisc.attachedNodes = 0; 6228 break; 6229 default: 6230 rn->CommonLen = 0; 6231 rn->SpecificLen = 0; 6232 break; 6233 } 6234 6235 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6236 "Issue ACC RNID: did:x%x flg:x%x refcnt %d", 6237 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6238 6239 phba->fc_stat.elsXmitACC++; 6240 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6241 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6242 if (!elsiocb->ndlp) { 6243 lpfc_els_free_iocb(phba, elsiocb); 6244 return 1; 6245 } 6246 6247 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6248 if (rc == IOCB_ERROR) { 6249 lpfc_els_free_iocb(phba, elsiocb); 6250 lpfc_nlp_put(ndlp); 6251 return 1; 6252 } 6253 6254 return 0; 6255} 6256 6257/** 6258 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 6259 * @vport: pointer to a virtual N_Port data structure. 6260 * @iocb: pointer to the lpfc command iocb data structure. 6261 * @ndlp: pointer to a node-list data structure. 
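 *
 * This routine handles a received Reinstate Recovery Qualifier (RRQ). It
 * decodes the RRQ payload, selects the exchange to clear (the OX_ID when
 * the RRQ DID matches this port's DID, otherwise the RX_ID), looks up the
 * matching active RRQ for the node, and clears it.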
6262 * 6263 * Return 6264 **/ 6265static void 6266lpfc_els_clear_rrq(struct lpfc_vport *vport, 6267 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 6268{ 6269 struct lpfc_hba *phba = vport->phba; 6270 uint8_t *pcmd; 6271 struct RRQ *rrq; 6272 uint16_t rxid; 6273 uint16_t xri; 6274 struct lpfc_node_rrq *prrq; 6275 6276 6277 pcmd = (uint8_t *)iocb->cmd_dmabuf->virt; 6278 pcmd += sizeof(uint32_t); 6279 rrq = (struct RRQ *)pcmd; 6280 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 6281 rxid = bf_get(rrq_rxid, rrq); 6282 6283 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6284 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 6285 " x%x x%x\n", 6286 be32_to_cpu(bf_get(rrq_did, rrq)), 6287 bf_get(rrq_oxid, rrq), 6288 rxid, 6289 get_wqe_reqtag(iocb), 6290 get_job_ulpcontext(phba, iocb)); 6291 6292 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6293 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 6294 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 6295 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 6296 xri = bf_get(rrq_oxid, rrq); 6297 else 6298 xri = rxid; 6299 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 6300 if (prrq) 6301 lpfc_clr_rrq_active(phba, xri, prrq); 6302 return; 6303} 6304 6305/** 6306 * lpfc_els_rsp_echo_acc - Issue echo acc response 6307 * @vport: pointer to a virtual N_Port data structure. 6308 * @data: pointer to echo data to return in the accept. 6309 * @oldiocb: pointer to the original lpfc command iocb data structure. 6310 * @ndlp: pointer to a node-list data structure. 6311 * 6312 * Return code 6313 * 0 - Successfully issued acc echo response 6314 * 1 - Failed to issue acc echo response 6315 **/ 6316static int 6317lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 6318 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6319{ 6320 struct lpfc_hba *phba = vport->phba; 6321 IOCB_t *icmd, *oldcmd; 6322 union lpfc_wqe128 *wqe; 6323 struct lpfc_iocbq *elsiocb; 6324 uint8_t *pcmd; 6325 uint16_t cmdsize; 6326 int rc; 6327 u32 ulp_context; 6328 6329 if (phba->sli_rev == LPFC_SLI_REV4) 6330 cmdsize = oldiocb->wcqe_cmpl.total_data_placed; 6331 else 6332 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 6333 6334 /* The accumulated length can exceed the BPL_SIZE. 
For 6335 * now, use this as the limit
6336 */
6337 if (cmdsize > LPFC_BPL_SIZE)
6338 cmdsize = LPFC_BPL_SIZE;
6339 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
6340 ndlp->nlp_DID, ELS_CMD_ACC);
6341 if (!elsiocb)
6342 return 1;
6343
6344 if (phba->sli_rev == LPFC_SLI_REV4) {
6345 wqe = &elsiocb->wqe;
6346 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
6347 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
6348 ulp_context = get_job_ulpcontext(phba, elsiocb);
6349 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6350 get_job_rcvoxid(phba, oldiocb));
6351 } else {
6352 icmd = &elsiocb->iocb;
6353 oldcmd = &oldiocb->iocb;
6354 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6355 ulp_context = elsiocb->iocb.ulpContext;
6356 icmd->unsli3.rcvsli3.ox_id =
6357 oldcmd->unsli3.rcvsli3.ox_id;
6358 }
6359
6360 /* Xmit ECHO ACC response tag <ulpIoTag> */
6361 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6362 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
6363 elsiocb->iotag, ulp_context);
6364 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
6365 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6366 pcmd += sizeof(uint32_t);
6367 memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
6368
6369 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6370 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d",
6371 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
6372
6373 phba->fc_stat.elsXmitACC++;
6374 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
6375 elsiocb->ndlp = lpfc_nlp_get(ndlp);
6376 if (!elsiocb->ndlp) {
6377 lpfc_els_free_iocb(phba, elsiocb);
6378 return 1;
6379 }
6380
6381 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6382 if (rc == IOCB_ERROR) {
6383 lpfc_els_free_iocb(phba, elsiocb);
6384 lpfc_nlp_put(ndlp);
6385 return 1;
6386 }
6387
6388 return 0;
6389}
6390
6391/**
6392 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
6393 * @vport: pointer to a host virtual N_Port data structure.
6394 *
6395 * This routine issues Address Discover (ADISC) ELS commands to those
6396 * N_Ports of the @vport that are in node port recovery state and are
6397 * still marked for ADISC. Each time an ELS ADISC IOCB is issued by
6398 * invoking the lpfc_issue_els_adisc() routine, the per-@vport discover
6399 * count (num_disc_nodes) is incremented. If num_disc_nodes reaches the
6400 * pre-configured threshold (cfg_discovery_threads), the FC_NLP_MORE bit is
6401 * set in the @vport fc_flag and issuing of the remaining ADISC IOCBs is
6402 * deferred to a later pass. Otherwise, after walking through all the
6403 * ndlps of the @vport without issuing any ADISC IOCB, the FC_NLP_MORE bit
6404 * is cleared from the @vport fc_flag to indicate that no more ADISCs need
6405 * to be sent.
6406 *
6407 * Return code
6408 * The number of N_Ports with adisc issued.
6409 **/
6410int
6411lpfc_els_disc_adisc(struct lpfc_vport *vport)
6412{
6413 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6414 struct lpfc_nodelist *ndlp, *next_ndlp;
6415 int sentadisc = 0;
6416
6417 /* go thru NPR nodes and issue any remaining ELS ADISCs */
6418 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
6419
6420 if (ndlp->nlp_state != NLP_STE_NPR_NODE ||
6421 !(ndlp->nlp_flag & NLP_NPR_ADISC))
6422 continue;
6423
6424 spin_lock_irq(&ndlp->lock);
6425 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
6426 spin_unlock_irq(&ndlp->lock);
6427
6428 if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
6429 /* This node was marked for ADISC but was not picked
6430 * for discovery. This is possible if the node was
6431 * missing in the GID_FT response.
6432 *
6433 * At the time the node was marked for ADISC, the unreg
6434 * from the backend was skipped.
6435 */
6436 lpfc_nlp_unreg_node(vport, ndlp);
6437 lpfc_unreg_rpi(vport, ndlp);
6438 continue;
6439 }
6440
6441 ndlp->nlp_prev_state = ndlp->nlp_state;
6442 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
6443 lpfc_issue_els_adisc(vport, ndlp, 0);
6444 sentadisc++;
6445 vport->num_disc_nodes++;
6446 if (vport->num_disc_nodes >=
6447 vport->cfg_discovery_threads) {
6448 spin_lock_irq(shost->host_lock);
6449 vport->fc_flag |= FC_NLP_MORE;
6450 spin_unlock_irq(shost->host_lock);
6451 break;
6452 }
6453
6454 }
6455 if (sentadisc == 0) {
6456 spin_lock_irq(shost->host_lock);
6457 vport->fc_flag &= ~FC_NLP_MORE;
6458 spin_unlock_irq(shost->host_lock);
6459 }
6460 return sentadisc;
6461}
6462
6463/**
6464 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
6465 * @vport: pointer to a host virtual N_Port data structure.
6466 *
6467 * This routine issues Port Login (PLOGI) ELS commands to the N_Ports of
6468 * the @vport that are in node port recovery state, are marked for
6469 * discovery, are not waiting on a delayed retry, and are not marked for
6470 * ADISC. Each time an ELS PLOGI IOCB is issued by invoking the
6471 * lpfc_issue_els_plogi() routine, the per-@vport discover count
6472 * (num_disc_nodes) is incremented. If num_disc_nodes reaches the
6473 * pre-configured threshold (cfg_discovery_threads), the FC_NLP_MORE bit is
6474 * set in the @vport fc_flag and issuing of the remaining PLOGI IOCBs is
6475 * deferred to a later pass. Otherwise, after walking through all the
6476 * ndlps of the @vport without issuing any PLOGI IOCB, the FC_NLP_MORE bit
6477 * is cleared from the @vport fc_flag to indicate that no more PLOGIs need to be sent.
6478 *
6479 * Return code
6480 * The number of N_Ports with plogi issued.
6481 **/ 6482int 6483lpfc_els_disc_plogi(struct lpfc_vport *vport) 6484{ 6485 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6486 struct lpfc_nodelist *ndlp, *next_ndlp; 6487 int sentplogi = 0; 6488 6489 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 6490 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6491 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 6492 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 6493 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 6494 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 6495 ndlp->nlp_prev_state = ndlp->nlp_state; 6496 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 6497 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 6498 sentplogi++; 6499 vport->num_disc_nodes++; 6500 if (vport->num_disc_nodes >= 6501 vport->cfg_discovery_threads) { 6502 spin_lock_irq(shost->host_lock); 6503 vport->fc_flag |= FC_NLP_MORE; 6504 spin_unlock_irq(shost->host_lock); 6505 break; 6506 } 6507 } 6508 } 6509 6510 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6511 "6452 Discover PLOGI %d flag x%x\n", 6512 sentplogi, vport->fc_flag); 6513 6514 if (sentplogi) { 6515 lpfc_set_disctmo(vport); 6516 } 6517 else { 6518 spin_lock_irq(shost->host_lock); 6519 vport->fc_flag &= ~FC_NLP_MORE; 6520 spin_unlock_irq(shost->host_lock); 6521 } 6522 return sentplogi; 6523} 6524 6525static uint32_t 6526lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 6527 uint32_t word0) 6528{ 6529 6530 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); 6531 desc->payload.els_req = word0; 6532 desc->length = cpu_to_be32(sizeof(desc->payload)); 6533 6534 return sizeof(struct fc_rdp_link_service_desc); 6535} 6536 6537static uint32_t 6538lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 6539 uint8_t *page_a0, uint8_t *page_a2) 6540{ 6541 uint16_t wavelength; 6542 uint16_t temperature; 6543 uint16_t rx_power; 6544 uint16_t tx_bias; 6545 uint16_t tx_power; 6546 uint16_t vcc; 6547 uint16_t flag = 0; 6548 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; 6549 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 6550 6551 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 6552 6553 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 6554 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 6555 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 6556 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 6557 6558 if ((trasn_code_byte4->fc_sw_laser) || 6559 (trasn_code_byte5->fc_sw_laser_sl) || 6560 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 6561 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 6562 } else if (trasn_code_byte4->fc_lw_laser) { 6563 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 6564 page_a0[SSF_WAVELENGTH_B0]; 6565 if (wavelength == SFP_WAVELENGTH_LC1310) 6566 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 6567 if (wavelength == SFP_WAVELENGTH_LL1550) 6568 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 6569 } 6570 /* check if its SFP+ */ 6571 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 6572 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 6573 << SFP_FLAG_CT_SHIFT; 6574 6575 /* check if its OPTICAL */ 6576 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
6577 SFP_FLAG_IS_OPTICAL_PORT : 0) 6578 << SFP_FLAG_IS_OPTICAL_SHIFT; 6579 6580 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 6581 page_a2[SFF_TEMPERATURE_B0]); 6582 vcc = (page_a2[SFF_VCC_B1] << 8 | 6583 page_a2[SFF_VCC_B0]); 6584 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 6585 page_a2[SFF_TXPOWER_B0]); 6586 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 6587 page_a2[SFF_TX_BIAS_CURRENT_B0]); 6588 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 6589 page_a2[SFF_RXPOWER_B0]); 6590 desc->sfp_info.temperature = cpu_to_be16(temperature); 6591 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 6592 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 6593 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 6594 desc->sfp_info.vcc = cpu_to_be16(vcc); 6595 6596 desc->sfp_info.flags = cpu_to_be16(flag); 6597 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 6598 6599 return sizeof(struct fc_rdp_sfp_desc); 6600} 6601 6602static uint32_t 6603lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 6604 READ_LNK_VAR *stat) 6605{ 6606 uint32_t type; 6607 6608 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 6609 6610 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 6611 6612 desc->info.port_type = cpu_to_be32(type); 6613 6614 desc->info.link_status.link_failure_cnt = 6615 cpu_to_be32(stat->linkFailureCnt); 6616 desc->info.link_status.loss_of_synch_cnt = 6617 cpu_to_be32(stat->lossSyncCnt); 6618 desc->info.link_status.loss_of_signal_cnt = 6619 cpu_to_be32(stat->lossSignalCnt); 6620 desc->info.link_status.primitive_seq_proto_err = 6621 cpu_to_be32(stat->primSeqErrCnt); 6622 desc->info.link_status.invalid_trans_word = 6623 cpu_to_be32(stat->invalidXmitWord); 6624 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 6625 6626 desc->length = cpu_to_be32(sizeof(desc->info)); 6627 6628 return sizeof(struct fc_rdp_link_error_status_desc); 6629} 6630 6631static uint32_t 6632lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 6633 struct lpfc_vport *vport) 6634{ 6635 uint32_t bbCredit; 6636 6637 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 6638 6639 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 6640 (vport->fc_sparam.cmn.bbCreditMsb << 8); 6641 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 6642 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 6643 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | 6644 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 6645 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 6646 } else { 6647 desc->bbc_info.attached_port_bbc = 0; 6648 } 6649 6650 desc->bbc_info.rtt = 0; 6651 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 6652 6653 return sizeof(struct fc_rdp_bbc_desc); 6654} 6655 6656static uint32_t 6657lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 6658 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 6659{ 6660 uint32_t flags = 0; 6661 6662 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6663 6664 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 6665 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 6666 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 6667 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 6668 6669 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6670 flags |= RDP_OET_HIGH_ALARM; 6671 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6672 flags |= RDP_OET_LOW_ALARM; 6673 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6674 flags |= RDP_OET_HIGH_WARNING; 6675 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6676 flags |= 
RDP_OET_LOW_WARNING; 6677 6678 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 6679 desc->oed_info.function_flags = cpu_to_be32(flags); 6680 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6681 return sizeof(struct fc_rdp_oed_sfp_desc); 6682} 6683 6684static uint32_t 6685lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 6686 struct fc_rdp_oed_sfp_desc *desc, 6687 uint8_t *page_a2) 6688{ 6689 uint32_t flags = 0; 6690 6691 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6692 6693 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 6694 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 6695 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 6696 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 6697 6698 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6699 flags |= RDP_OET_HIGH_ALARM; 6700 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6701 flags |= RDP_OET_LOW_ALARM; 6702 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6703 flags |= RDP_OET_HIGH_WARNING; 6704 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6705 flags |= RDP_OET_LOW_WARNING; 6706 6707 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 6708 desc->oed_info.function_flags = cpu_to_be32(flags); 6709 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6710 return sizeof(struct fc_rdp_oed_sfp_desc); 6711} 6712 6713static uint32_t 6714lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 6715 struct fc_rdp_oed_sfp_desc *desc, 6716 uint8_t *page_a2) 6717{ 6718 uint32_t flags = 0; 6719 6720 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6721 6722 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 6723 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 6724 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 6725 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 6726 6727 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6728 flags |= RDP_OET_HIGH_ALARM; 6729 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 6730 flags |= RDP_OET_LOW_ALARM; 6731 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6732 flags |= RDP_OET_HIGH_WARNING; 6733 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 6734 flags |= RDP_OET_LOW_WARNING; 6735 6736 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 6737 desc->oed_info.function_flags = cpu_to_be32(flags); 6738 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6739 return sizeof(struct fc_rdp_oed_sfp_desc); 6740} 6741 6742static uint32_t 6743lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 6744 struct fc_rdp_oed_sfp_desc *desc, 6745 uint8_t *page_a2) 6746{ 6747 uint32_t flags = 0; 6748 6749 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6750 6751 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 6752 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 6753 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 6754 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 6755 6756 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6757 flags |= RDP_OET_HIGH_ALARM; 6758 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 6759 flags |= RDP_OET_LOW_ALARM; 6760 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6761 flags |= RDP_OET_HIGH_WARNING; 6762 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 6763 flags |= RDP_OET_LOW_WARNING; 6764 6765 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 6766 desc->oed_info.function_flags = cpu_to_be32(flags); 6767 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6768 return 
sizeof(struct fc_rdp_oed_sfp_desc); 6769} 6770 6771 6772static uint32_t 6773lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 6774 struct fc_rdp_oed_sfp_desc *desc, 6775 uint8_t *page_a2) 6776{ 6777 uint32_t flags = 0; 6778 6779 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6780 6781 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 6782 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 6783 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 6784 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 6785 6786 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6787 flags |= RDP_OET_HIGH_ALARM; 6788 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 6789 flags |= RDP_OET_LOW_ALARM; 6790 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6791 flags |= RDP_OET_HIGH_WARNING; 6792 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 6793 flags |= RDP_OET_LOW_WARNING; 6794 6795 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 6796 desc->oed_info.function_flags = cpu_to_be32(flags); 6797 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6798 return sizeof(struct fc_rdp_oed_sfp_desc); 6799} 6800 6801static uint32_t 6802lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 6803 uint8_t *page_a0, struct lpfc_vport *vport) 6804{ 6805 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 6806 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 6807 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 6808 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 6809 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 6810 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 6811 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 6812 return sizeof(struct fc_rdp_opd_sfp_desc); 6813} 6814 6815static uint32_t 6816lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 6817{ 6818 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 6819 return 0; 6820 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 6821 6822 desc->info.CorrectedBlocks = 6823 cpu_to_be32(stat->fecCorrBlkCount); 6824 desc->info.UncorrectableBlocks = 6825 cpu_to_be32(stat->fecUncorrBlkCount); 6826 6827 desc->length = cpu_to_be32(sizeof(desc->info)); 6828 6829 return sizeof(struct fc_fec_rdp_desc); 6830} 6831 6832static uint32_t 6833lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 6834{ 6835 uint16_t rdp_cap = 0; 6836 uint16_t rdp_speed; 6837 6838 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 6839 6840 switch (phba->fc_linkspeed) { 6841 case LPFC_LINK_SPEED_1GHZ: 6842 rdp_speed = RDP_PS_1GB; 6843 break; 6844 case LPFC_LINK_SPEED_2GHZ: 6845 rdp_speed = RDP_PS_2GB; 6846 break; 6847 case LPFC_LINK_SPEED_4GHZ: 6848 rdp_speed = RDP_PS_4GB; 6849 break; 6850 case LPFC_LINK_SPEED_8GHZ: 6851 rdp_speed = RDP_PS_8GB; 6852 break; 6853 case LPFC_LINK_SPEED_10GHZ: 6854 rdp_speed = RDP_PS_10GB; 6855 break; 6856 case LPFC_LINK_SPEED_16GHZ: 6857 rdp_speed = RDP_PS_16GB; 6858 break; 6859 case LPFC_LINK_SPEED_32GHZ: 6860 rdp_speed = RDP_PS_32GB; 6861 break; 6862 case LPFC_LINK_SPEED_64GHZ: 6863 rdp_speed = RDP_PS_64GB; 6864 break; 6865 case LPFC_LINK_SPEED_128GHZ: 6866 rdp_speed = RDP_PS_128GB; 6867 break; 6868 case LPFC_LINK_SPEED_256GHZ: 6869 rdp_speed = RDP_PS_256GB; 6870 break; 6871 default: 6872 rdp_speed = RDP_PS_UNKNOWN; 6873 break; 6874 } 6875 6876 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 6877 6878 if (phba->lmt & LMT_256Gb) 6879 rdp_cap |= RDP_PS_256GB; 6880 if (phba->lmt & LMT_128Gb) 6881 
rdp_cap |= RDP_PS_128GB; 6882 if (phba->lmt & LMT_64Gb) 6883 rdp_cap |= RDP_PS_64GB; 6884 if (phba->lmt & LMT_32Gb) 6885 rdp_cap |= RDP_PS_32GB; 6886 if (phba->lmt & LMT_16Gb) 6887 rdp_cap |= RDP_PS_16GB; 6888 if (phba->lmt & LMT_10Gb) 6889 rdp_cap |= RDP_PS_10GB; 6890 if (phba->lmt & LMT_8Gb) 6891 rdp_cap |= RDP_PS_8GB; 6892 if (phba->lmt & LMT_4Gb) 6893 rdp_cap |= RDP_PS_4GB; 6894 if (phba->lmt & LMT_2Gb) 6895 rdp_cap |= RDP_PS_2GB; 6896 if (phba->lmt & LMT_1Gb) 6897 rdp_cap |= RDP_PS_1GB; 6898 6899 if (rdp_cap == 0) 6900 rdp_cap = RDP_CAP_UNKNOWN; 6901 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 6902 rdp_cap |= RDP_CAP_USER_CONFIGURED; 6903 6904 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 6905 desc->length = cpu_to_be32(sizeof(desc->info)); 6906 return sizeof(struct fc_rdp_port_speed_desc); 6907} 6908 6909static uint32_t 6910lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 6911 struct lpfc_vport *vport) 6912{ 6913 6914 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6915 6916 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 6917 sizeof(desc->port_names.wwnn)); 6918 6919 memcpy(desc->port_names.wwpn, &vport->fc_portname, 6920 sizeof(desc->port_names.wwpn)); 6921 6922 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6923 return sizeof(struct fc_rdp_port_name_desc); 6924} 6925 6926static uint32_t 6927lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 6928 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 6929{ 6930 6931 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6932 if (vport->fc_flag & FC_FABRIC) { 6933 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 6934 sizeof(desc->port_names.wwnn)); 6935 6936 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 6937 sizeof(desc->port_names.wwpn)); 6938 } else { /* Point to Point */ 6939 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 6940 sizeof(desc->port_names.wwnn)); 6941 6942 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 6943 sizeof(desc->port_names.wwpn)); 6944 } 6945 6946 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6947 return sizeof(struct fc_rdp_port_name_desc); 6948} 6949 6950static void 6951lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 6952 int status) 6953{ 6954 struct lpfc_nodelist *ndlp = rdp_context->ndlp; 6955 struct lpfc_vport *vport = ndlp->vport; 6956 struct lpfc_iocbq *elsiocb; 6957 struct ulp_bde64 *bpl; 6958 IOCB_t *icmd; 6959 union lpfc_wqe128 *wqe; 6960 uint8_t *pcmd; 6961 struct ls_rjt *stat; 6962 struct fc_rdp_res_frame *rdp_res; 6963 uint32_t cmdsize, len; 6964 uint16_t *flag_ptr; 6965 int rc; 6966 u32 ulp_context; 6967 6968 if (status != SUCCESS) 6969 goto error; 6970 6971 /* This will change once we know the true size of the RDP payload */ 6972 cmdsize = sizeof(struct fc_rdp_res_frame); 6973 6974 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 6975 lpfc_max_els_tries, rdp_context->ndlp, 6976 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 6977 if (!elsiocb) 6978 goto free_rdp_context; 6979 6980 ulp_context = get_job_ulpcontext(phba, elsiocb); 6981 if (phba->sli_rev == LPFC_SLI_REV4) { 6982 wqe = &elsiocb->wqe; 6983 /* ox-id of the frame */ 6984 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6985 rdp_context->ox_id); 6986 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 6987 rdp_context->rx_id); 6988 } else { 6989 icmd = &elsiocb->iocb; 6990 icmd->ulpContext = rdp_context->rx_id; 6991 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 6992 } 6993 6994 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 
6995 "2171 Xmit RDP response tag x%x xri x%x, " 6996 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", 6997 elsiocb->iotag, ulp_context, 6998 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6999 ndlp->nlp_rpi); 7000 rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt; 7001 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7002 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 7003 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7004 7005 /* Update Alarm and Warning */ 7006 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 7007 phba->sfp_alarm |= *flag_ptr; 7008 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 7009 phba->sfp_warning |= *flag_ptr; 7010 7011 /* For RDP payload */ 7012 len = 8; 7013 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 7014 (len + pcmd), ELS_CMD_RDP); 7015 7016 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 7017 rdp_context->page_a0, rdp_context->page_a2); 7018 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 7019 phba); 7020 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 7021 (len + pcmd), &rdp_context->link_stat); 7022 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 7023 (len + pcmd), vport); 7024 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 7025 (len + pcmd), vport, ndlp); 7026 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 7027 &rdp_context->link_stat); 7028 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 7029 &rdp_context->link_stat, vport); 7030 len += lpfc_rdp_res_oed_temp_desc(phba, 7031 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7032 rdp_context->page_a2); 7033 len += lpfc_rdp_res_oed_voltage_desc(phba, 7034 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7035 rdp_context->page_a2); 7036 len += lpfc_rdp_res_oed_txbias_desc(phba, 7037 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7038 rdp_context->page_a2); 7039 len += lpfc_rdp_res_oed_txpower_desc(phba, 7040 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7041 rdp_context->page_a2); 7042 len += lpfc_rdp_res_oed_rxpower_desc(phba, 7043 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7044 rdp_context->page_a2); 7045 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 7046 rdp_context->page_a0, vport); 7047 7048 rdp_res->length = cpu_to_be32(len - 8); 7049 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7050 7051 /* Now that we know the true size of the payload, update the BPL */ 7052 bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt; 7053 bpl->tus.f.bdeSize = len; 7054 bpl->tus.f.bdeFlags = 0; 7055 bpl->tus.w = le32_to_cpu(bpl->tus.w); 7056 7057 phba->fc_stat.elsXmitACC++; 7058 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7059 if (!elsiocb->ndlp) { 7060 lpfc_els_free_iocb(phba, elsiocb); 7061 goto free_rdp_context; 7062 } 7063 7064 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7065 if (rc == IOCB_ERROR) { 7066 lpfc_els_free_iocb(phba, elsiocb); 7067 lpfc_nlp_put(ndlp); 7068 } 7069 7070 goto free_rdp_context; 7071 7072error: 7073 cmdsize = 2 * sizeof(uint32_t); 7074 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 7075 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 7076 if (!elsiocb) 7077 goto free_rdp_context; 7078 7079 if (phba->sli_rev == LPFC_SLI_REV4) { 7080 wqe = &elsiocb->wqe; 7081 /* ox-id of the frame */ 7082 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7083 rdp_context->ox_id); 7084 bf_set(wqe_ctxt_tag, 7085 &wqe->xmit_els_rsp.wqe_com, 7086 rdp_context->rx_id); 7087 } else { 
7088 icmd = &elsiocb->iocb; 7089 icmd->ulpContext = rdp_context->rx_id; 7090 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 7091 } 7092 7093 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7094 7095 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 7096 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7097 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7098 7099 phba->fc_stat.elsXmitLSRJT++; 7100 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7101 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7102 if (!elsiocb->ndlp) { 7103 lpfc_els_free_iocb(phba, elsiocb); 7104 goto free_rdp_context; 7105 } 7106 7107 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7108 if (rc == IOCB_ERROR) { 7109 lpfc_els_free_iocb(phba, elsiocb); 7110 lpfc_nlp_put(ndlp); 7111 } 7112 7113free_rdp_context: 7114 /* This reference put is for the original unsolicited RDP. If the 7115 * prep failed, there is no reference to remove. 7116 */ 7117 lpfc_nlp_put(ndlp); 7118 kfree(rdp_context); 7119} 7120 7121static int 7122lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 7123{ 7124 LPFC_MBOXQ_t *mbox = NULL; 7125 int rc; 7126 7127 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7128 if (!mbox) { 7129 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 7130 "7105 failed to allocate mailbox memory"); 7131 return 1; 7132 } 7133 7134 if (lpfc_sli4_dump_page_a0(phba, mbox)) 7135 goto rdp_fail; 7136 mbox->vport = rdp_context->ndlp->vport; 7137 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 7138 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 7139 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7140 if (rc == MBX_NOT_FINISHED) { 7141 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 7142 return 1; 7143 } 7144 7145 return 0; 7146 7147rdp_fail: 7148 mempool_free(mbox, phba->mbox_mem_pool); 7149 return 1; 7150} 7151 7152/* 7153 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. 7154 * @vport: pointer to a host virtual N_Port data structure. 7155 * @cmdiocb: pointer to lpfc command iocb data structure. 7156 * @ndlp: pointer to a node-list data structure. 7157 * 7158 * This routine processes an unsolicited RDP(Read Diagnostic Parameters) 7159 * IOCB. First, the payload of the unsolicited RDP is checked. 7160 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 7161 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, 7162 * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl 7163 * gather all data and send RDP response. 7164 * 7165 * Return code 7166 * 0 - Sent the acc response 7167 * 1 - Sent the reject response. 
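 *
 * Note: RDP is only serviced on SLI-4 ports with interface type 2 or
 * later and only in FC (non-FCoE) mode; any other configuration is
 * rejected with LSRJT_UNABLE_TPC / LSEXP_REQ_UNSUPPORTED.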
7168 */ 7169static int 7170lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7171 struct lpfc_nodelist *ndlp) 7172{ 7173 struct lpfc_hba *phba = vport->phba; 7174 struct lpfc_dmabuf *pcmd; 7175 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 7176 struct fc_rdp_req_frame *rdp_req; 7177 struct lpfc_rdp_context *rdp_context; 7178 union lpfc_wqe128 *cmd = NULL; 7179 struct ls_rjt stat; 7180 7181 if (phba->sli_rev < LPFC_SLI_REV4 || 7182 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7183 LPFC_SLI_INTF_IF_TYPE_2) { 7184 rjt_err = LSRJT_UNABLE_TPC; 7185 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7186 goto error; 7187 } 7188 7189 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 7190 rjt_err = LSRJT_UNABLE_TPC; 7191 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7192 goto error; 7193 } 7194 7195 pcmd = cmdiocb->cmd_dmabuf; 7196 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 7197 7198 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7199 "2422 ELS RDP Request " 7200 "dec len %d tag x%x port_id %d len %d\n", 7201 be32_to_cpu(rdp_req->rdp_des_length), 7202 be32_to_cpu(rdp_req->nport_id_desc.tag), 7203 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 7204 be32_to_cpu(rdp_req->nport_id_desc.length)); 7205 7206 if (sizeof(struct fc_rdp_nport_desc) != 7207 be32_to_cpu(rdp_req->rdp_des_length)) 7208 goto rjt_logerr; 7209 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 7210 goto rjt_logerr; 7211 if (RDP_NPORT_ID_SIZE != 7212 be32_to_cpu(rdp_req->nport_id_desc.length)) 7213 goto rjt_logerr; 7214 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 7215 if (!rdp_context) { 7216 rjt_err = LSRJT_UNABLE_TPC; 7217 goto error; 7218 } 7219 7220 cmd = &cmdiocb->wqe; 7221 rdp_context->ndlp = lpfc_nlp_get(ndlp); 7222 if (!rdp_context->ndlp) { 7223 kfree(rdp_context); 7224 rjt_err = LSRJT_UNABLE_TPC; 7225 goto error; 7226 } 7227 rdp_context->ox_id = bf_get(wqe_rcvoxid, 7228 &cmd->xmit_els_rsp.wqe_com); 7229 rdp_context->rx_id = bf_get(wqe_ctxt_tag, 7230 &cmd->xmit_els_rsp.wqe_com); 7231 rdp_context->cmpl = lpfc_els_rdp_cmpl; 7232 if (lpfc_get_rdp_info(phba, rdp_context)) { 7233 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 7234 "2423 Unable to send mailbox"); 7235 kfree(rdp_context); 7236 rjt_err = LSRJT_UNABLE_TPC; 7237 lpfc_nlp_put(ndlp); 7238 goto error; 7239 } 7240 7241 return 0; 7242 7243rjt_logerr: 7244 rjt_err = LSRJT_LOGICAL_ERR; 7245 7246error: 7247 memset(&stat, 0, sizeof(stat)); 7248 stat.un.b.lsRjtRsnCode = rjt_err; 7249 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 7250 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7251 return 1; 7252} 7253 7254 7255static void 7256lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7257{ 7258 MAILBOX_t *mb; 7259 IOCB_t *icmd; 7260 union lpfc_wqe128 *wqe; 7261 uint8_t *pcmd; 7262 struct lpfc_iocbq *elsiocb; 7263 struct lpfc_nodelist *ndlp; 7264 struct ls_rjt *stat; 7265 union lpfc_sli4_cfg_shdr *shdr; 7266 struct lpfc_lcb_context *lcb_context; 7267 struct fc_lcb_res_frame *lcb_res; 7268 uint32_t cmdsize, shdr_status, shdr_add_status; 7269 int rc; 7270 7271 mb = &pmb->u.mb; 7272 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp; 7273 ndlp = lcb_context->ndlp; 7274 pmb->ctx_ndlp = NULL; 7275 pmb->ctx_buf = NULL; 7276 7277 shdr = (union lpfc_sli4_cfg_shdr *) 7278 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 7279 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7280 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7281 7282 lpfc_printf_log(phba, 
KERN_INFO, LOG_MBOX, 7283 "0194 SET_BEACON_CONFIG mailbox " 7284 "completed with status x%x add_status x%x," 7285 " mbx status x%x\n", 7286 shdr_status, shdr_add_status, mb->mbxStatus); 7287 7288 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 7289 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 7290 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 7291 mempool_free(pmb, phba->mbox_mem_pool); 7292 goto error; 7293 } 7294 7295 mempool_free(pmb, phba->mbox_mem_pool); 7296 cmdsize = sizeof(struct fc_lcb_res_frame); 7297 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7298 lpfc_max_els_tries, ndlp, 7299 ndlp->nlp_DID, ELS_CMD_ACC); 7300 7301 /* Decrement the ndlp reference count from previous mbox command */ 7302 lpfc_nlp_put(ndlp); 7303 7304 if (!elsiocb) 7305 goto free_lcb_context; 7306 7307 lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt; 7308 7309 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 7310 7311 if (phba->sli_rev == LPFC_SLI_REV4) { 7312 wqe = &elsiocb->wqe; 7313 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7314 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7315 lcb_context->ox_id); 7316 } else { 7317 icmd = &elsiocb->iocb; 7318 icmd->ulpContext = lcb_context->rx_id; 7319 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7320 } 7321 7322 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7323 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 7324 lcb_res->lcb_sub_command = lcb_context->sub_command; 7325 lcb_res->lcb_type = lcb_context->type; 7326 lcb_res->capability = lcb_context->capability; 7327 lcb_res->lcb_frequency = lcb_context->frequency; 7328 lcb_res->lcb_duration = lcb_context->duration; 7329 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7330 phba->fc_stat.elsXmitACC++; 7331 7332 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7333 if (!elsiocb->ndlp) { 7334 lpfc_els_free_iocb(phba, elsiocb); 7335 goto out; 7336 } 7337 7338 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7339 if (rc == IOCB_ERROR) { 7340 lpfc_els_free_iocb(phba, elsiocb); 7341 lpfc_nlp_put(ndlp); 7342 } 7343 out: 7344 kfree(lcb_context); 7345 return; 7346 7347error: 7348 cmdsize = sizeof(struct fc_lcb_res_frame); 7349 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7350 lpfc_max_els_tries, ndlp, 7351 ndlp->nlp_DID, ELS_CMD_LS_RJT); 7352 lpfc_nlp_put(ndlp); 7353 if (!elsiocb) 7354 goto free_lcb_context; 7355 7356 if (phba->sli_rev == LPFC_SLI_REV4) { 7357 wqe = &elsiocb->wqe; 7358 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7359 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7360 lcb_context->ox_id); 7361 } else { 7362 icmd = &elsiocb->iocb; 7363 icmd->ulpContext = lcb_context->rx_id; 7364 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7365 } 7366 7367 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7368 7369 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 7370 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7371 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7372 7373 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 7374 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 7375 7376 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7377 phba->fc_stat.elsXmitLSRJT++; 7378 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7379 if (!elsiocb->ndlp) { 7380 lpfc_els_free_iocb(phba, elsiocb); 7381 goto free_lcb_context; 7382 } 7383 7384 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7385 if (rc == IOCB_ERROR) { 7386 lpfc_els_free_iocb(phba, elsiocb); 7387 lpfc_nlp_put(ndlp); 7388 } 7389free_lcb_context: 7390 kfree(lcb_context); 7391} 7392 7393static int 
7394lpfc_sli4_set_beacon(struct lpfc_vport *vport, 7395 struct lpfc_lcb_context *lcb_context, 7396 uint32_t beacon_state) 7397{ 7398 struct lpfc_hba *phba = vport->phba; 7399 union lpfc_sli4_cfg_shdr *cfg_shdr; 7400 LPFC_MBOXQ_t *mbox = NULL; 7401 uint32_t len; 7402 int rc; 7403 7404 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7405 if (!mbox) 7406 return 1; 7407 7408 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 7409 len = sizeof(struct lpfc_mbx_set_beacon_config) - 7410 sizeof(struct lpfc_sli4_cfg_mhdr); 7411 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7412 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 7413 LPFC_SLI4_MBX_EMBED); 7414 mbox->ctx_ndlp = (void *)lcb_context; 7415 mbox->vport = phba->pport; 7416 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 7417 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 7418 phba->sli4_hba.physical_port); 7419 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 7420 beacon_state); 7421 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ 7422 7423 /* 7424 * Check bv1s bit before issuing the mailbox 7425 * if bv1s == 1, LCB V1 supported 7426 * else, LCB V0 supported 7427 */ 7428 7429 if (phba->sli4_hba.pc_sli4_params.bv1s) { 7430 /* COMMON_SET_BEACON_CONFIG_V1 */ 7431 cfg_shdr->request.word9 = BEACON_VERSION_V1; 7432 lcb_context->capability |= LCB_CAPABILITY_DURATION; 7433 bf_set(lpfc_mbx_set_beacon_port_type, 7434 &mbox->u.mqe.un.beacon_config, 0); 7435 bf_set(lpfc_mbx_set_beacon_duration_v1, 7436 &mbox->u.mqe.un.beacon_config, 7437 be16_to_cpu(lcb_context->duration)); 7438 } else { 7439 /* COMMON_SET_BEACON_CONFIG_V0 */ 7440 if (be16_to_cpu(lcb_context->duration) != 0) { 7441 mempool_free(mbox, phba->mbox_mem_pool); 7442 return 1; 7443 } 7444 cfg_shdr->request.word9 = BEACON_VERSION_V0; 7445 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); 7446 bf_set(lpfc_mbx_set_beacon_state, 7447 &mbox->u.mqe.un.beacon_config, beacon_state); 7448 bf_set(lpfc_mbx_set_beacon_port_type, 7449 &mbox->u.mqe.un.beacon_config, 1); 7450 bf_set(lpfc_mbx_set_beacon_duration, 7451 &mbox->u.mqe.un.beacon_config, 7452 be16_to_cpu(lcb_context->duration)); 7453 } 7454 7455 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7456 if (rc == MBX_NOT_FINISHED) { 7457 mempool_free(mbox, phba->mbox_mem_pool); 7458 return 1; 7459 } 7460 7461 return 0; 7462} 7463 7464 7465/** 7466 * lpfc_els_rcv_lcb - Process an unsolicited LCB 7467 * @vport: pointer to a host virtual N_Port data structure. 7468 * @cmdiocb: pointer to lpfc command iocb data structure. 7469 * @ndlp: pointer to a node-list data structure. 7470 * 7471 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 7472 * First, the payload of the unsolicited LCB is checked. 7473 * Then based on Subcommand beacon will either turn on or off. 7474 * 7475 * Return code 7476 * 0 - Sent the acc response 7477 * 1 - Sent the reject response. 
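 *
 * Note: a non-zero beacon duration is only honored when the adapter
 * reports LCB V1 support (pc_sli4_params.bv1s); on V0-only adapters the
 * request fails and a reject response is sent.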
7478 **/ 7479static int 7480lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7481 struct lpfc_nodelist *ndlp) 7482{ 7483 struct lpfc_hba *phba = vport->phba; 7484 struct lpfc_dmabuf *pcmd; 7485 uint8_t *lp; 7486 struct fc_lcb_request_frame *beacon; 7487 struct lpfc_lcb_context *lcb_context; 7488 u8 state, rjt_err = 0; 7489 struct ls_rjt stat; 7490 7491 pcmd = cmdiocb->cmd_dmabuf; 7492 lp = (uint8_t *)pcmd->virt; 7493 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 7494 7495 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7496 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 7497 "type x%x frequency %x duration x%x\n", 7498 lp[0], lp[1], lp[2], 7499 beacon->lcb_command, 7500 beacon->lcb_sub_command, 7501 beacon->lcb_type, 7502 beacon->lcb_frequency, 7503 be16_to_cpu(beacon->lcb_duration)); 7504 7505 if (beacon->lcb_sub_command != LPFC_LCB_ON && 7506 beacon->lcb_sub_command != LPFC_LCB_OFF) { 7507 rjt_err = LSRJT_CMD_UNSUPPORTED; 7508 goto rjt; 7509 } 7510 7511 if (phba->sli_rev < LPFC_SLI_REV4 || 7512 phba->hba_flag & HBA_FCOE_MODE || 7513 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7514 LPFC_SLI_INTF_IF_TYPE_2)) { 7515 rjt_err = LSRJT_CMD_UNSUPPORTED; 7516 goto rjt; 7517 } 7518 7519 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 7520 if (!lcb_context) { 7521 rjt_err = LSRJT_UNABLE_TPC; 7522 goto rjt; 7523 } 7524 7525 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 7526 lcb_context->sub_command = beacon->lcb_sub_command; 7527 lcb_context->capability = 0; 7528 lcb_context->type = beacon->lcb_type; 7529 lcb_context->frequency = beacon->lcb_frequency; 7530 lcb_context->duration = beacon->lcb_duration; 7531 lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb); 7532 lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb); 7533 lcb_context->ndlp = lpfc_nlp_get(ndlp); 7534 if (!lcb_context->ndlp) { 7535 rjt_err = LSRJT_UNABLE_TPC; 7536 goto rjt_free; 7537 } 7538 7539 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 7540 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 7541 "0193 failed to send mail box"); 7542 lpfc_nlp_put(ndlp); 7543 rjt_err = LSRJT_UNABLE_TPC; 7544 goto rjt_free; 7545 } 7546 return 0; 7547 7548rjt_free: 7549 kfree(lcb_context); 7550rjt: 7551 memset(&stat, 0, sizeof(stat)); 7552 stat.un.b.lsRjtRsnCode = rjt_err; 7553 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7554 return 1; 7555} 7556 7557 7558/** 7559 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 7560 * @vport: pointer to a host virtual N_Port data structure. 7561 * 7562 * This routine cleans up any Registration State Change Notification 7563 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 7564 * @vport together with the host_lock is used to prevent multiple thread 7565 * trying to access the RSCN array on a same @vport at the same time. 
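 * If another thread already holds fc_rscn_flush, this routine returns
 * immediately and leaves the list for that walker to clean up.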
7566 **/ 7567void 7568lpfc_els_flush_rscn(struct lpfc_vport *vport) 7569{ 7570 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7571 struct lpfc_hba *phba = vport->phba; 7572 int i; 7573 7574 spin_lock_irq(shost->host_lock); 7575 if (vport->fc_rscn_flush) { 7576 /* Another thread is walking fc_rscn_id_list on this vport */ 7577 spin_unlock_irq(shost->host_lock); 7578 return; 7579 } 7580 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 7581 vport->fc_rscn_flush = 1; 7582 spin_unlock_irq(shost->host_lock); 7583 7584 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7585 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 7586 vport->fc_rscn_id_list[i] = NULL; 7587 } 7588 spin_lock_irq(shost->host_lock); 7589 vport->fc_rscn_id_cnt = 0; 7590 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 7591 spin_unlock_irq(shost->host_lock); 7592 lpfc_can_disctmo(vport); 7593 /* Indicate we are done walking this fc_rscn_id_list */ 7594 vport->fc_rscn_flush = 0; 7595} 7596 7597/** 7598 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 7599 * @vport: pointer to a host virtual N_Port data structure. 7600 * @did: remote destination port identifier. 7601 * 7602 * This routine checks whether there is any pending Registration State 7603 * Configuration Notification (RSCN) to a @did on @vport. 7604 * 7605 * Return code 7606 * None zero - The @did matched with a pending rscn 7607 * 0 - not able to match @did with a pending rscn 7608 **/ 7609int 7610lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 7611{ 7612 D_ID ns_did; 7613 D_ID rscn_did; 7614 uint32_t *lp; 7615 uint32_t payload_len, i; 7616 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7617 7618 ns_did.un.word = did; 7619 7620 /* Never match fabric nodes for RSCNs */ 7621 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 7622 return 0; 7623 7624 /* If we are doing a FULL RSCN rediscovery, match everything */ 7625 if (vport->fc_flag & FC_RSCN_DISCOVERY) 7626 return did; 7627 7628 spin_lock_irq(shost->host_lock); 7629 if (vport->fc_rscn_flush) { 7630 /* Another thread is walking fc_rscn_id_list on this vport */ 7631 spin_unlock_irq(shost->host_lock); 7632 return 0; 7633 } 7634 /* Indicate we are walking fc_rscn_id_list on this vport */ 7635 vport->fc_rscn_flush = 1; 7636 spin_unlock_irq(shost->host_lock); 7637 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7638 lp = vport->fc_rscn_id_list[i]->virt; 7639 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7640 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7641 while (payload_len) { 7642 rscn_did.un.word = be32_to_cpu(*lp++); 7643 payload_len -= sizeof(uint32_t); 7644 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 7645 case RSCN_ADDRESS_FORMAT_PORT: 7646 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7647 && (ns_did.un.b.area == rscn_did.un.b.area) 7648 && (ns_did.un.b.id == rscn_did.un.b.id)) 7649 goto return_did_out; 7650 break; 7651 case RSCN_ADDRESS_FORMAT_AREA: 7652 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7653 && (ns_did.un.b.area == rscn_did.un.b.area)) 7654 goto return_did_out; 7655 break; 7656 case RSCN_ADDRESS_FORMAT_DOMAIN: 7657 if (ns_did.un.b.domain == rscn_did.un.b.domain) 7658 goto return_did_out; 7659 break; 7660 case RSCN_ADDRESS_FORMAT_FABRIC: 7661 goto return_did_out; 7662 } 7663 } 7664 } 7665 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 7666 vport->fc_rscn_flush = 0; 7667 return 0; 7668return_did_out: 7669 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 
7670 vport->fc_rscn_flush = 0; 7671 return did; 7672} 7673 7674/** 7675 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 7676 * @vport: pointer to a host virtual N_Port data structure. 7677 * 7678 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 7679 * state machine for a @vport's nodes that are with pending RSCN (Registration 7680 * State Change Notification). 7681 * 7682 * Return code 7683 * 0 - Successful (currently alway return 0) 7684 **/ 7685static int 7686lpfc_rscn_recovery_check(struct lpfc_vport *vport) 7687{ 7688 struct lpfc_nodelist *ndlp = NULL, *n; 7689 7690 /* Move all affected nodes by pending RSCNs to NPR state. */ 7691 list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) { 7692 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 7693 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 7694 continue; 7695 7696 /* NVME Target mode does not do RSCN Recovery. */ 7697 if (vport->phba->nvmet_support) 7698 continue; 7699 7700 /* If we are in the process of doing discovery on this 7701 * NPort, let it continue on its own. 7702 */ 7703 switch (ndlp->nlp_state) { 7704 case NLP_STE_PLOGI_ISSUE: 7705 case NLP_STE_ADISC_ISSUE: 7706 case NLP_STE_REG_LOGIN_ISSUE: 7707 case NLP_STE_PRLI_ISSUE: 7708 case NLP_STE_LOGO_ISSUE: 7709 continue; 7710 } 7711 7712 lpfc_disc_state_machine(vport, ndlp, NULL, 7713 NLP_EVT_DEVICE_RECOVERY); 7714 lpfc_cancel_retry_delay_tmo(vport, ndlp); 7715 } 7716 return 0; 7717} 7718 7719/** 7720 * lpfc_send_rscn_event - Send an RSCN event to management application 7721 * @vport: pointer to a host virtual N_Port data structure. 7722 * @cmdiocb: pointer to lpfc command iocb data structure. 7723 * 7724 * lpfc_send_rscn_event sends an RSCN netlink event to management 7725 * applications. 7726 */ 7727static void 7728lpfc_send_rscn_event(struct lpfc_vport *vport, 7729 struct lpfc_iocbq *cmdiocb) 7730{ 7731 struct lpfc_dmabuf *pcmd; 7732 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7733 uint32_t *payload_ptr; 7734 uint32_t payload_len; 7735 struct lpfc_rscn_event_header *rscn_event_data; 7736 7737 pcmd = cmdiocb->cmd_dmabuf; 7738 payload_ptr = (uint32_t *) pcmd->virt; 7739 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 7740 7741 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 7742 payload_len, GFP_KERNEL); 7743 if (!rscn_event_data) { 7744 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7745 "0147 Failed to allocate memory for RSCN event\n"); 7746 return; 7747 } 7748 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 7749 rscn_event_data->payload_length = payload_len; 7750 memcpy(rscn_event_data->rscn_payload, payload_ptr, 7751 payload_len); 7752 7753 fc_host_post_vendor_event(shost, 7754 fc_get_event_number(), 7755 sizeof(struct lpfc_rscn_event_header) + payload_len, 7756 (char *)rscn_event_data, 7757 LPFC_NL_VENDOR_ID); 7758 7759 kfree(rscn_event_data); 7760} 7761 7762/** 7763 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 7764 * @vport: pointer to a host virtual N_Port data structure. 7765 * @cmdiocb: pointer to lpfc command iocb data structure. 7766 * @ndlp: pointer to a node-list data structure. 7767 * 7768 * This routine processes an unsolicited RSCN (Registration State Change 7769 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked 7770 * to invoke fc_host_post_event() routine to the FC transport layer. 
If the 7771 * discover state machine is about to begin discovery, it just accepts the 7772 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 7773 * contains N_Port IDs for other vports on this HBA, it just accepts the 7774 * RSCN and ignore processing it. If the state machine is in the recovery 7775 * state, the fc_rscn_id_list of this @vport is walked and the 7776 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 7777 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 7778 * routine is invoked to handle the RSCN event. 7779 * 7780 * Return code 7781 * 0 - Just sent the acc response 7782 * 1 - Sent the acc response and waited for name server completion 7783 **/ 7784static int 7785lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7786 struct lpfc_nodelist *ndlp) 7787{ 7788 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7789 struct lpfc_hba *phba = vport->phba; 7790 struct lpfc_dmabuf *pcmd; 7791 uint32_t *lp, *datap; 7792 uint32_t payload_len, length, nportid, *cmd; 7793 int rscn_cnt; 7794 int rscn_id = 0, hba_id = 0; 7795 int i, tmo; 7796 7797 pcmd = cmdiocb->cmd_dmabuf; 7798 lp = (uint32_t *) pcmd->virt; 7799 7800 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7801 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7802 /* RSCN received */ 7803 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7804 "0214 RSCN received Data: x%x x%x x%x x%x\n", 7805 vport->fc_flag, payload_len, *lp, 7806 vport->fc_rscn_id_cnt); 7807 7808 /* Send an RSCN event to the management application */ 7809 lpfc_send_rscn_event(vport, cmdiocb); 7810 7811 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 7812 fc_host_post_event(shost, fc_get_event_number(), 7813 FCH_EVT_RSCN, lp[i]); 7814 7815 /* Check if RSCN is coming from a direct-connected remote NPort */ 7816 if (vport->fc_flag & FC_PT2PT) { 7817 /* If so, just ACC it, no other action needed for now */ 7818 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7819 "2024 pt2pt RSCN %08x Data: x%x x%x\n", 7820 *lp, vport->fc_flag, payload_len); 7821 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7822 7823 /* Check to see if we need to NVME rescan this target 7824 * remoteport. 7825 */ 7826 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 7827 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 7828 lpfc_nvme_rescan_port(vport, ndlp); 7829 return 0; 7830 } 7831 7832 /* If we are about to begin discovery, just ACC the RSCN. 7833 * Discovery processing will satisfy it. 7834 */ 7835 if (vport->port_state <= LPFC_NS_QRY) { 7836 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7837 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 7838 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7839 7840 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7841 return 0; 7842 } 7843 7844 /* If this RSCN just contains NPortIDs for other vports on this HBA, 7845 * just ACC and ignore it. 
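 * If a discovery timeout is already running, it is simply restarted.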
7846 */ 7847 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 7848 !(vport->cfg_peer_port_login)) { 7849 i = payload_len; 7850 datap = lp; 7851 while (i > 0) { 7852 nportid = *datap++; 7853 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 7854 i -= sizeof(uint32_t); 7855 rscn_id++; 7856 if (lpfc_find_vport_by_did(phba, nportid)) 7857 hba_id++; 7858 } 7859 if (rscn_id == hba_id) { 7860 /* ALL NPortIDs in RSCN are on HBA */ 7861 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7862 "0219 Ignore RSCN " 7863 "Data: x%x x%x x%x x%x\n", 7864 vport->fc_flag, payload_len, 7865 *lp, vport->fc_rscn_id_cnt); 7866 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7867 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 7868 ndlp->nlp_DID, vport->port_state, 7869 ndlp->nlp_flag); 7870 7871 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 7872 ndlp, NULL); 7873 /* Restart disctmo if its already running */ 7874 if (vport->fc_flag & FC_DISC_TMO) { 7875 tmo = ((phba->fc_ratov * 3) + 3); 7876 mod_timer(&vport->fc_disctmo, 7877 jiffies + 7878 msecs_to_jiffies(1000 * tmo)); 7879 } 7880 return 0; 7881 } 7882 } 7883 7884 spin_lock_irq(shost->host_lock); 7885 if (vport->fc_rscn_flush) { 7886 /* Another thread is walking fc_rscn_id_list on this vport */ 7887 vport->fc_flag |= FC_RSCN_DISCOVERY; 7888 spin_unlock_irq(shost->host_lock); 7889 /* Send back ACC */ 7890 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7891 return 0; 7892 } 7893 /* Indicate we are walking fc_rscn_id_list on this vport */ 7894 vport->fc_rscn_flush = 1; 7895 spin_unlock_irq(shost->host_lock); 7896 /* Get the array count after successfully have the token */ 7897 rscn_cnt = vport->fc_rscn_id_cnt; 7898 /* If we are already processing an RSCN, save the received 7899 * RSCN payload buffer, cmdiocb->cmd_dmabuf to process later. 7900 */ 7901 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 7902 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7903 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 7904 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7905 7906 spin_lock_irq(shost->host_lock); 7907 vport->fc_flag |= FC_RSCN_DEFERRED; 7908 7909 /* Restart disctmo if its already running */ 7910 if (vport->fc_flag & FC_DISC_TMO) { 7911 tmo = ((phba->fc_ratov * 3) + 3); 7912 mod_timer(&vport->fc_disctmo, 7913 jiffies + msecs_to_jiffies(1000 * tmo)); 7914 } 7915 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 7916 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 7917 vport->fc_flag |= FC_RSCN_MODE; 7918 vport->fc_flag &= ~FC_RSCN_MEMENTO; 7919 spin_unlock_irq(shost->host_lock); 7920 if (rscn_cnt) { 7921 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 7922 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 7923 } 7924 if ((rscn_cnt) && 7925 (payload_len + length <= LPFC_BPL_SIZE)) { 7926 *cmd &= ELS_CMD_MASK; 7927 *cmd |= cpu_to_be32(payload_len + length); 7928 memcpy(((uint8_t *)cmd) + length, lp, 7929 payload_len); 7930 } else { 7931 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 7932 vport->fc_rscn_id_cnt++; 7933 /* If we zero, cmdiocb->cmd_dmabuf, the calling 7934 * routine will not try to free it. 
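 * The payload buffer now belongs to fc_rscn_id_list and is freed
 * later by lpfc_els_flush_rscn().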
7935 */ 7936 cmdiocb->cmd_dmabuf = NULL; 7937 } 7938 /* Deferred RSCN */ 7939 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7940 "0235 Deferred RSCN " 7941 "Data: x%x x%x x%x\n", 7942 vport->fc_rscn_id_cnt, vport->fc_flag, 7943 vport->port_state); 7944 } else { 7945 vport->fc_flag |= FC_RSCN_DISCOVERY; 7946 spin_unlock_irq(shost->host_lock); 7947 /* ReDiscovery RSCN */ 7948 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7949 "0234 ReDiscovery RSCN " 7950 "Data: x%x x%x x%x\n", 7951 vport->fc_rscn_id_cnt, vport->fc_flag, 7952 vport->port_state); 7953 } 7954 /* Indicate we are done walking fc_rscn_id_list on this vport */ 7955 vport->fc_rscn_flush = 0; 7956 /* Send back ACC */ 7957 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7958 /* send RECOVERY event for ALL nodes that match RSCN payload */ 7959 lpfc_rscn_recovery_check(vport); 7960 return 0; 7961 } 7962 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7963 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 7964 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7965 7966 spin_lock_irq(shost->host_lock); 7967 vport->fc_flag |= FC_RSCN_MODE; 7968 vport->fc_flag &= ~FC_RSCN_MEMENTO; 7969 spin_unlock_irq(shost->host_lock); 7970 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 7971 /* Indicate we are done walking fc_rscn_id_list on this vport */ 7972 vport->fc_rscn_flush = 0; 7973 /* 7974 * If we zero, cmdiocb->cmd_dmabuf, the calling routine will 7975 * not try to free it. 7976 */ 7977 cmdiocb->cmd_dmabuf = NULL; 7978 lpfc_set_disctmo(vport); 7979 /* Send back ACC */ 7980 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7981 /* send RECOVERY event for ALL nodes that match RSCN payload */ 7982 lpfc_rscn_recovery_check(vport); 7983 return lpfc_els_handle_rscn(vport); 7984} 7985 7986/** 7987 * lpfc_els_handle_rscn - Handle rscn for a vport 7988 * @vport: pointer to a host virtual N_Port data structure. 7989 * 7990 * This routine handles the Registration State Configuration Notification 7991 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 7992 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 7993 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 7994 * NameServer shall be issued. If CT command to the NameServer fails to be 7995 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 7996 * RSCN activities with the @vport. 7997 * 7998 * Return code 7999 * 0 - Cleaned up rscn on the @vport 8000 * 1 - Wait for plogi to name server before proceed 8001 **/ 8002int 8003lpfc_els_handle_rscn(struct lpfc_vport *vport) 8004{ 8005 struct lpfc_nodelist *ndlp; 8006 struct lpfc_hba *phba = vport->phba; 8007 8008 /* Ignore RSCN if the port is being torn down. */ 8009 if (vport->load_flag & FC_UNLOADING) { 8010 lpfc_els_flush_rscn(vport); 8011 return 0; 8012 } 8013 8014 /* Start timer for RSCN processing */ 8015 lpfc_set_disctmo(vport); 8016 8017 /* RSCN processed */ 8018 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8019 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", 8020 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 8021 vport->port_state, vport->num_disc_nodes, 8022 vport->gidft_inp); 8023 8024 /* To process RSCN, first compare RSCN data with NameServer */ 8025 vport->fc_ns_retry = 0; 8026 vport->num_disc_nodes = 0; 8027 8028 ndlp = lpfc_findnode_did(vport, NameServer_DID); 8029 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 8030 /* Good ndlp, issue CT Request to NameServer. 
Need to 8031 * know how many gidfts were issued. If none, then just 8032 * flush the RSCN. Otherwise, the outstanding requests 8033 * need to complete. 8034 */ 8035 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) { 8036 if (lpfc_issue_gidft(vport) > 0) 8037 return 1; 8038 } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) { 8039 if (lpfc_issue_gidpt(vport) > 0) 8040 return 1; 8041 } else { 8042 return 1; 8043 } 8044 } else { 8045 /* Nameserver login in question. Revalidate. */ 8046 if (ndlp) { 8047 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 8048 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 8049 } else { 8050 ndlp = lpfc_nlp_init(vport, NameServer_DID); 8051 if (!ndlp) { 8052 lpfc_els_flush_rscn(vport); 8053 return 0; 8054 } 8055 ndlp->nlp_prev_state = ndlp->nlp_state; 8056 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 8057 } 8058 ndlp->nlp_type |= NLP_FABRIC; 8059 lpfc_issue_els_plogi(vport, NameServer_DID, 0); 8060 /* Wait for NameServer login cmpl before we can 8061 * continue 8062 */ 8063 return 1; 8064 } 8065 8066 lpfc_els_flush_rscn(vport); 8067 return 0; 8068} 8069 8070/** 8071 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb 8072 * @vport: pointer to a host virtual N_Port data structure. 8073 * @cmdiocb: pointer to lpfc command iocb data structure. 8074 * @ndlp: pointer to a node-list data structure. 8075 * 8076 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS 8077 * unsolicited event. An unsolicited FLOGI can be received in a point-to- 8078 * point topology. As an unsolicited FLOGI should not be received in a loop 8079 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The 8080 * lpfc_check_sparm() routine is invoked to check the parameters in the 8081 * unsolicited FLOGI. If parameters validation failed, the routine 8082 * lpfc_els_rsp_reject() shall be called with reject reason code set to 8083 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the 8084 * FLOGI shall be compared with the Port WWN of the @vport to determine who 8085 * will initiate PLOGI. The higher lexicographical value party shall has 8086 * higher priority (as the winning port) and will initiate PLOGI and 8087 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result 8088 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI 8089 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI. 
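 * If the two Port WWNs are identical, the FLOGI is treated as the local
 * port's own frame returned through an external loopback plug and is not
 * accepted. The ACC may also be deferred until after the local FLOGI has
 * been issued (see the defer_flogi_acc handling below).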
8090 * 8091 * Return code 8092 * 0 - Successfully processed the unsolicited flogi 8093 * 1 - Failed to process the unsolicited flogi 8094 **/ 8095static int 8096lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8097 struct lpfc_nodelist *ndlp) 8098{ 8099 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8100 struct lpfc_hba *phba = vport->phba; 8101 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; 8102 uint32_t *lp = (uint32_t *) pcmd->virt; 8103 union lpfc_wqe128 *wqe = &cmdiocb->wqe; 8104 struct serv_parm *sp; 8105 LPFC_MBOXQ_t *mbox; 8106 uint32_t cmd, did; 8107 int rc; 8108 uint32_t fc_flag = 0; 8109 uint32_t port_state = 0; 8110 8111 /* Clear external loopback plug detected flag */ 8112 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; 8113 8114 cmd = *lp++; 8115 sp = (struct serv_parm *) lp; 8116 8117 /* FLOGI received */ 8118 8119 lpfc_set_disctmo(vport); 8120 8121 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8122 /* We should never receive a FLOGI in loop mode, ignore it */ 8123 did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest); 8124 8125 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 8126 Loop Mode */ 8127 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8128 "0113 An FLOGI ELS command x%x was " 8129 "received from DID x%x in Loop Mode\n", 8130 cmd, did); 8131 return 1; 8132 } 8133 8134 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 8135 8136 /* 8137 * If our portname is greater than the remote portname, 8138 * then we initiate Nport login. 8139 */ 8140 8141 rc = memcmp(&vport->fc_portname, &sp->portName, 8142 sizeof(struct lpfc_name)); 8143 8144 if (!rc) { 8145 if (phba->sli_rev < LPFC_SLI_REV4) { 8146 mbox = mempool_alloc(phba->mbox_mem_pool, 8147 GFP_KERNEL); 8148 if (!mbox) 8149 return 1; 8150 lpfc_linkdown(phba); 8151 lpfc_init_link(phba, mbox, 8152 phba->cfg_topology, 8153 phba->cfg_link_speed); 8154 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 8155 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 8156 mbox->vport = vport; 8157 rc = lpfc_sli_issue_mbox(phba, mbox, 8158 MBX_NOWAIT); 8159 lpfc_set_loopback_flag(phba); 8160 if (rc == MBX_NOT_FINISHED) 8161 mempool_free(mbox, phba->mbox_mem_pool); 8162 return 1; 8163 } 8164 8165 /* External loopback plug insertion detected */ 8166 phba->link_flag |= LS_EXTERNAL_LOOPBACK; 8167 8168 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC, 8169 "1119 External Loopback plug detected\n"); 8170 8171 /* abort the flogi coming back to ourselves 8172 * due to external loopback on the port. 8173 */ 8174 lpfc_els_abort_flogi(phba); 8175 return 0; 8176 8177 } else if (rc > 0) { /* greater than */ 8178 spin_lock_irq(shost->host_lock); 8179 vport->fc_flag |= FC_PT2PT_PLOGI; 8180 spin_unlock_irq(shost->host_lock); 8181 8182 /* If we have the high WWPN we can assign our own 8183 * myDID; otherwise, we have to WAIT for a PLOGI 8184 * from the remote NPort to find out what it 8185 * will be. 8186 */ 8187 vport->fc_myDID = PT2PT_LocalID; 8188 } else { 8189 vport->fc_myDID = PT2PT_RemoteID; 8190 } 8191 8192 /* 8193 * The vport state should go to LPFC_FLOGI only 8194 * AFTER we issue a FLOGI, not receive one. 8195 */ 8196 spin_lock_irq(shost->host_lock); 8197 fc_flag = vport->fc_flag; 8198 port_state = vport->port_state; 8199 vport->fc_flag |= FC_PT2PT; 8200 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 8201 8202 /* Acking an unsol FLOGI. Count 1 for link bounce 8203 * work-around. 
8204 */ 8205 vport->rcv_flogi_cnt++; 8206 spin_unlock_irq(shost->host_lock); 8207 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8208 "3311 Rcv Flogi PS x%x new PS x%x " 8209 "fc_flag x%x new fc_flag x%x\n", 8210 port_state, vport->port_state, 8211 fc_flag, vport->fc_flag); 8212 8213 /* 8214 * We temporarily set fc_myDID to make it look like we are 8215 * a Fabric. This is done just so we end up with the right 8216 * did / sid on the FLOGI ACC rsp. 8217 */ 8218 did = vport->fc_myDID; 8219 vport->fc_myDID = Fabric_DID; 8220 8221 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 8222 8223 /* Defer ACC response until AFTER we issue a FLOGI */ 8224 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { 8225 phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag, 8226 &wqe->xmit_els_rsp.wqe_com); 8227 phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid, 8228 &wqe->xmit_els_rsp.wqe_com); 8229 8230 vport->fc_myDID = did; 8231 8232 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8233 "3344 Deferring FLOGI ACC: rx_id: x%x," 8234 " ox_id: x%x, hba_flag x%x\n", 8235 phba->defer_flogi_acc_rx_id, 8236 phba->defer_flogi_acc_ox_id, phba->hba_flag); 8237 8238 phba->defer_flogi_acc_flag = true; 8239 8240 return 0; 8241 } 8242 8243 /* Send back ACC */ 8244 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 8245 8246 /* Now lets put fc_myDID back to what its supposed to be */ 8247 vport->fc_myDID = did; 8248 8249 return 0; 8250} 8251 8252/** 8253 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 8254 * @vport: pointer to a host virtual N_Port data structure. 8255 * @cmdiocb: pointer to lpfc command iocb data structure. 8256 * @ndlp: pointer to a node-list data structure. 8257 * 8258 * This routine processes Request Node Identification Data (RNID) IOCB 8259 * received as an ELS unsolicited event. Only when the RNID specified format 8260 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 8261 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 8262 * Accept (ACC) the RNID ELS command. All the other RNID formats are 8263 * rejected by invoking the lpfc_els_rsp_reject() routine. 8264 * 8265 * Return code 8266 * 0 - Successfully processed rnid iocb (currently always return 0) 8267 **/ 8268static int 8269lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8270 struct lpfc_nodelist *ndlp) 8271{ 8272 struct lpfc_dmabuf *pcmd; 8273 uint32_t *lp; 8274 RNID *rn; 8275 struct ls_rjt stat; 8276 8277 pcmd = cmdiocb->cmd_dmabuf; 8278 lp = (uint32_t *) pcmd->virt; 8279 8280 lp++; 8281 rn = (RNID *) lp; 8282 8283 /* RNID received */ 8284 8285 switch (rn->Format) { 8286 case 0: 8287 case RNID_TOPOLOGY_DISC: 8288 /* Send back ACC */ 8289 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 8290 break; 8291 default: 8292 /* Reject this request because format not supported */ 8293 stat.un.b.lsRjtRsvd0 = 0; 8294 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8295 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8296 stat.un.b.vendorUnique = 0; 8297 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8298 NULL); 8299 } 8300 return 0; 8301} 8302 8303/** 8304 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 8305 * @vport: pointer to a host virtual N_Port data structure. 8306 * @cmdiocb: pointer to lpfc command iocb data structure. 8307 * @ndlp: pointer to a node-list data structure. 
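 *
 * This routine processes an ECHO IOCB received as an ELS unsolicited
 * event and echoes the received payload (minus the command word) back to
 * the sender via lpfc_els_rsp_echo_acc().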
8308 * 8309 * Return code 8310 * 0 - Successfully processed echo iocb (currently always return 0) 8311 **/ 8312static int 8313lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8314 struct lpfc_nodelist *ndlp) 8315{ 8316 uint8_t *pcmd; 8317 8318 pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt; 8319 8320 /* skip over first word of echo command to find echo data */ 8321 pcmd += sizeof(uint32_t); 8322 8323 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 8324 return 0; 8325} 8326 8327/** 8328 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 8329 * @vport: pointer to a host virtual N_Port data structure. 8330 * @cmdiocb: pointer to lpfc command iocb data structure. 8331 * @ndlp: pointer to a node-list data structure. 8332 * 8333 * This routine processes a Link Incident Report Registration(LIRR) IOCB 8334 * received as an ELS unsolicited event. Currently, this function just invokes 8335 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 8336 * 8337 * Return code 8338 * 0 - Successfully processed lirr iocb (currently always return 0) 8339 **/ 8340static int 8341lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8342 struct lpfc_nodelist *ndlp) 8343{ 8344 struct ls_rjt stat; 8345 8346 /* For now, unconditionally reject this command */ 8347 stat.un.b.lsRjtRsvd0 = 0; 8348 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8349 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8350 stat.un.b.vendorUnique = 0; 8351 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8352 return 0; 8353} 8354 8355/** 8356 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 8357 * @vport: pointer to a host virtual N_Port data structure. 8358 * @cmdiocb: pointer to lpfc command iocb data structure. 8359 * @ndlp: pointer to a node-list data structure. 8360 * 8361 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 8362 * received as an ELS unsolicited event. A request to RRQ shall only 8363 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 8364 * Nx_Port N_Port_ID of the target Exchange is the same as the 8365 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 8366 * not accepted, an LS_RJT with reason code "Unable to perform 8367 * command request" and reason code explanation "Invalid Originator 8368 * S_ID" shall be returned. For now, we just unconditionally accept 8369 * RRQ from the target. 8370 **/ 8371static void 8372lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8373 struct lpfc_nodelist *ndlp) 8374{ 8375 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8376 if (vport->phba->sli_rev == LPFC_SLI_REV4) 8377 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 8378} 8379 8380/** 8381 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 8382 * @phba: pointer to lpfc hba data structure. 8383 * @pmb: pointer to the driver internal queue element for mailbox command. 8384 * 8385 * This routine is the completion callback function for the MBX_READ_LNK_STAT 8386 * mailbox command. This callback function is to actually send the Accept 8387 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It 8388 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 8389 * mailbox command, constructs the RLS response with the link statistics 8390 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 8391 * response to the RLS. 
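 * The RX_ID and OX_ID of the original unsolicited RLS were packed into
 * pmb->ctx_buf by lpfc_els_rcv_rls() (OX_ID in the upper 16 bits) and are
 * unpacked here to address the ACC.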
8392 * 8393 * Note that the ndlp reference count will be incremented by 1 for holding the 8394 * ndlp and the reference to ndlp will be stored into the ndlp field of 8395 * the IOCB for the completion callback function to the RLS Accept Response 8396 * ELS IOCB command. 8397 * 8398 **/ 8399static void 8400lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 8401{ 8402 int rc = 0; 8403 MAILBOX_t *mb; 8404 IOCB_t *icmd; 8405 union lpfc_wqe128 *wqe; 8406 struct RLS_RSP *rls_rsp; 8407 uint8_t *pcmd; 8408 struct lpfc_iocbq *elsiocb; 8409 struct lpfc_nodelist *ndlp; 8410 uint16_t oxid; 8411 uint16_t rxid; 8412 uint32_t cmdsize; 8413 u32 ulp_context; 8414 8415 mb = &pmb->u.mb; 8416 8417 ndlp = pmb->ctx_ndlp; 8418 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); 8419 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); 8420 pmb->ctx_buf = NULL; 8421 pmb->ctx_ndlp = NULL; 8422 8423 if (mb->mbxStatus) { 8424 mempool_free(pmb, phba->mbox_mem_pool); 8425 return; 8426 } 8427 8428 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 8429 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8430 lpfc_max_els_tries, ndlp, 8431 ndlp->nlp_DID, ELS_CMD_ACC); 8432 8433 /* Decrement the ndlp reference count from previous mbox command */ 8434 lpfc_nlp_put(ndlp); 8435 8436 if (!elsiocb) { 8437 mempool_free(pmb, phba->mbox_mem_pool); 8438 return; 8439 } 8440 8441 ulp_context = get_job_ulpcontext(phba, elsiocb); 8442 if (phba->sli_rev == LPFC_SLI_REV4) { 8443 wqe = &elsiocb->wqe; 8444 /* Xri / rx_id */ 8445 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid); 8446 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid); 8447 } else { 8448 icmd = &elsiocb->iocb; 8449 icmd->ulpContext = rxid; 8450 icmd->unsli3.rcvsli3.ox_id = oxid; 8451 } 8452 8453 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8454 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8455 pcmd += sizeof(uint32_t); /* Skip past command */ 8456 rls_rsp = (struct RLS_RSP *)pcmd; 8457 8458 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 8459 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 8460 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 8461 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 8462 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 8463 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 8464 mempool_free(pmb, phba->mbox_mem_pool); 8465 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 8466 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8467 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 8468 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 8469 elsiocb->iotag, ulp_context, 8470 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8471 ndlp->nlp_rpi); 8472 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8473 phba->fc_stat.elsXmitACC++; 8474 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8475 if (!elsiocb->ndlp) { 8476 lpfc_els_free_iocb(phba, elsiocb); 8477 return; 8478 } 8479 8480 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8481 if (rc == IOCB_ERROR) { 8482 lpfc_els_free_iocb(phba, elsiocb); 8483 lpfc_nlp_put(ndlp); 8484 } 8485 return; 8486} 8487 8488/** 8489 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 8490 * @vport: pointer to a host virtual N_Port data structure. 8491 * @cmdiocb: pointer to lpfc command iocb data structure. 8492 * @ndlp: pointer to a node-list data structure. 8493 * 8494 * This routine processes Read Link Status (RLS) IOCB received as an 8495 * ELS unsolicited event. 
It first checks the remote port state. If the
8496 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8497 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8498 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
8499 * for reading the HBA link statistics. The callback function,
8500 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
8501 * then actually sends out the RLS Accept (ACC) response.
8502 *
8503 * Return codes
8504 * 0 - Successfully processed rls iocb (currently always return 0)
8505 **/
8506static int
8507lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8508 struct lpfc_nodelist *ndlp)
8509{
8510 struct lpfc_hba *phba = vport->phba;
8511 LPFC_MBOXQ_t *mbox;
8512 struct ls_rjt stat;
8513 u32 ctx = get_job_ulpcontext(phba, cmdiocb);
8514 u32 ox_id = get_job_rcvoxid(phba, cmdiocb);
8515
8516 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8517 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8518 /* reject the unsolicited RLS request and done with it */
8519 goto reject_out;
8520
8521 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
8522 if (mbox) {
8523 lpfc_read_lnk_stat(phba, mbox);
8524 mbox->ctx_buf = (void *)((unsigned long)
8525 (ox_id << 16 | ctx));
8526 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
8527 if (!mbox->ctx_ndlp)
8528 goto node_err;
8529 mbox->vport = vport;
8530 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
8531 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
8532 != MBX_NOT_FINISHED)
8533 /* Mbox completion will send ELS Response */
8534 return 0;
8535 /* Decrement reference count used for the failed mbox
8536 * command.
8537 */
8538 lpfc_nlp_put(ndlp);
8539node_err:
8540 mempool_free(mbox, phba->mbox_mem_pool);
8541 }
8542reject_out:
8543 /* issue rejection response */
8544 stat.un.b.lsRjtRsvd0 = 0;
8545 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8546 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8547 stat.un.b.vendorUnique = 0;
8548 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8549 return 0;
8550}
8551
8552/**
8553 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
8554 * @vport: pointer to a host virtual N_Port data structure.
8555 * @cmdiocb: pointer to lpfc command iocb data structure.
8556 * @ndlp: pointer to a node-list data structure.
8557 *
8558 * This routine processes a Read Timeout Value (RTV) IOCB received as an
8559 * ELS unsolicited event. It first checks the remote port state. If the
8560 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8561 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8562 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout
8563 * Value (RTV) unsolicited IOCB event.
8564 *
8565 * Note that the ndlp reference count will be incremented by 1 for holding the
8566 * ndlp and the reference to ndlp will be stored into the ndlp field of
8567 * the IOCB for the completion callback function to the RTV Accept Response
8568 * ELS IOCB command.
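 *
 * The R_A_TOV value placed in the RTV ACC payload is reported in
 * milliseconds (phba->fc_ratov is kept in seconds), and the E_D_TOV
 * resolution bit in the qtov word mirrors phba->fc_edtovResol.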
8569 *
8570 * Return codes
8571 * 0 - Successfully processed rtv iocb (currently always return 0)
8572 **/
8573static int
8574lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8575 struct lpfc_nodelist *ndlp)
8576{
8577 int rc = 0;
8578 IOCB_t *icmd;
8579 union lpfc_wqe128 *wqe;
8580 struct lpfc_hba *phba = vport->phba;
8581 struct ls_rjt stat;
8582 struct RTV_RSP *rtv_rsp;
8583 uint8_t *pcmd;
8584 struct lpfc_iocbq *elsiocb;
8585 uint32_t cmdsize;
8586 u32 ulp_context;
8587
8588 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8589 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8590 /* reject the unsolicited RTV request and done with it */
8591 goto reject_out;
8592
8593 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
8594 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
8595 lpfc_max_els_tries, ndlp,
8596 ndlp->nlp_DID, ELS_CMD_ACC);
8597
8598 if (!elsiocb)
8599 return 1;
8600
8601 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
8602 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
8603 pcmd += sizeof(uint32_t); /* Skip past command */
8604
8605 ulp_context = get_job_ulpcontext(phba, elsiocb);
8606 /* use the command's xri in the response */
8607 if (phba->sli_rev == LPFC_SLI_REV4) {
8608 wqe = &elsiocb->wqe;
8609 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
8610 get_job_ulpcontext(phba, cmdiocb));
8611 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
8612 get_job_rcvoxid(phba, cmdiocb));
8613 } else {
8614 icmd = &elsiocb->iocb;
8615 icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb);
8616 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb);
8617 }
8618
8619 rtv_rsp = (struct RTV_RSP *)pcmd;
8620
8621 /* populate RTV payload */
8622 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
8623 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
8624 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
8625 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
8626 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
8627
8628 /* Xmit ELS RTV ACC response tag <ulpIoTag> */
8629 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
8630 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
8631 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
8632 "Data: x%x x%x x%x\n",
8633 elsiocb->iotag, ulp_context,
8634 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
8635 ndlp->nlp_rpi,
8636 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
8637 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
8638 phba->fc_stat.elsXmitACC++;
8639 elsiocb->ndlp = lpfc_nlp_get(ndlp);
8640 if (!elsiocb->ndlp) {
8641 lpfc_els_free_iocb(phba, elsiocb);
8642 return 0;
8643 }
8644
8645 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
8646 if (rc == IOCB_ERROR) {
8647 lpfc_els_free_iocb(phba, elsiocb);
8648 lpfc_nlp_put(ndlp);
8649 }
8650 return 0;
8651
8652reject_out:
8653 /* issue rejection response */
8654 stat.un.b.lsRjtRsvd0 = 0;
8655 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8656 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8657 stat.un.b.vendorUnique = 0;
8658 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8659 return 0;
8660}
8661
8662/* lpfc_issue_els_rrq - Issue an els rrq command
8663 * @vport: pointer to a host virtual N_Port data structure.
8664 * @ndlp: pointer to a node-list data structure.
8665 * @did: DID of the target.
8666 * @rrq: Pointer to the rrq struct.
8667 *
8668 * Build an ELS RRQ command and send it to the target. If the issue_iocb is
8669 * successful, the completion handler will clear the RRQ.
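 * The RRQ payload carries this port's N_Port_ID (vport->fc_myDID) along with
 * the OX_ID/RX_ID of the exchange whose recovery qualifier is being
 * reinstated, taken from the rrq node structure.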
8670 * 8671 * Return codes 8672 * 0 - Successfully sent rrq els iocb. 8673 * 1 - Failed to send rrq els iocb. 8674 **/ 8675static int 8676lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 8677 uint32_t did, struct lpfc_node_rrq *rrq) 8678{ 8679 struct lpfc_hba *phba = vport->phba; 8680 struct RRQ *els_rrq; 8681 struct lpfc_iocbq *elsiocb; 8682 uint8_t *pcmd; 8683 uint16_t cmdsize; 8684 int ret; 8685 8686 if (!ndlp) 8687 return 1; 8688 8689 /* If ndlp is not NULL, we will bump the reference count on it */ 8690 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 8691 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 8692 ELS_CMD_RRQ); 8693 if (!elsiocb) 8694 return 1; 8695 8696 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8697 8698 /* For RRQ request, remainder of payload is Exchange IDs */ 8699 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 8700 pcmd += sizeof(uint32_t); 8701 els_rrq = (struct RRQ *) pcmd; 8702 8703 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 8704 bf_set(rrq_rxid, els_rrq, rrq->rxid); 8705 bf_set(rrq_did, els_rrq, vport->fc_myDID); 8706 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 8707 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 8708 8709 8710 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 8711 "Issue RRQ: did:x%x", 8712 did, rrq->xritag, rrq->rxid); 8713 elsiocb->context_un.rrq = rrq; 8714 elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq; 8715 8716 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8717 if (!elsiocb->ndlp) 8718 goto io_err; 8719 8720 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8721 if (ret == IOCB_ERROR) { 8722 lpfc_nlp_put(ndlp); 8723 goto io_err; 8724 } 8725 return 0; 8726 8727 io_err: 8728 lpfc_els_free_iocb(phba, elsiocb); 8729 return 1; 8730} 8731 8732/** 8733 * lpfc_send_rrq - Sends ELS RRQ if needed. 8734 * @phba: pointer to lpfc hba data structure. 8735 * @rrq: pointer to the active rrq. 8736 * 8737 * This routine will call the lpfc_issue_els_rrq if the rrq is 8738 * still active for the xri. If this function returns a failure then 8739 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 8740 * 8741 * Returns 0 Success. 8742 * 1 Failure. 8743 **/ 8744int 8745lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 8746{ 8747 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 8748 rrq->nlp_DID); 8749 if (!ndlp) 8750 return 1; 8751 8752 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 8753 return lpfc_issue_els_rrq(rrq->vport, ndlp, 8754 rrq->nlp_DID, rrq); 8755 else 8756 return 1; 8757} 8758 8759/** 8760 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 8761 * @vport: pointer to a host virtual N_Port data structure. 8762 * @cmdsize: size of the ELS command. 8763 * @oldiocb: pointer to the original lpfc command iocb data structure. 8764 * @ndlp: pointer to a node-list data structure. 8765 * 8766 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 8767 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 8768 * 8769 * Note that the ndlp reference count will be incremented by 1 for holding the 8770 * ndlp and the reference to ndlp will be stored into the ndlp field of 8771 * the IOCB for the completion callback function to the RPL Accept Response 8772 * ELS command. 
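 *
 * The ACC payload reports a single-entry port list whose one port number
 * block carries this vport's N_Port_ID and WWPN.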
8773 * 8774 * Return code 8775 * 0 - Successfully issued ACC RPL ELS command 8776 * 1 - Failed to issue ACC RPL ELS command 8777 **/ 8778static int 8779lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 8780 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 8781{ 8782 int rc = 0; 8783 struct lpfc_hba *phba = vport->phba; 8784 IOCB_t *icmd; 8785 union lpfc_wqe128 *wqe; 8786 RPL_RSP rpl_rsp; 8787 struct lpfc_iocbq *elsiocb; 8788 uint8_t *pcmd; 8789 u32 ulp_context; 8790 8791 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 8792 ndlp->nlp_DID, ELS_CMD_ACC); 8793 8794 if (!elsiocb) 8795 return 1; 8796 8797 ulp_context = get_job_ulpcontext(phba, elsiocb); 8798 if (phba->sli_rev == LPFC_SLI_REV4) { 8799 wqe = &elsiocb->wqe; 8800 /* Xri / rx_id */ 8801 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 8802 get_job_ulpcontext(phba, oldiocb)); 8803 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 8804 get_job_rcvoxid(phba, oldiocb)); 8805 } else { 8806 icmd = &elsiocb->iocb; 8807 icmd->ulpContext = get_job_ulpcontext(phba, oldiocb); 8808 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb); 8809 } 8810 8811 pcmd = elsiocb->cmd_dmabuf->virt; 8812 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8813 pcmd += sizeof(uint16_t); 8814 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 8815 pcmd += sizeof(uint16_t); 8816 8817 /* Setup the RPL ACC payload */ 8818 rpl_rsp.listLen = be32_to_cpu(1); 8819 rpl_rsp.index = 0; 8820 rpl_rsp.port_num_blk.portNum = 0; 8821 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 8822 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 8823 sizeof(struct lpfc_name)); 8824 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 8825 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 8826 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8827 "0120 Xmit ELS RPL ACC response tag x%x " 8828 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 8829 "rpi x%x\n", 8830 elsiocb->iotag, ulp_context, 8831 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8832 ndlp->nlp_rpi); 8833 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8834 phba->fc_stat.elsXmitACC++; 8835 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8836 if (!elsiocb->ndlp) { 8837 lpfc_els_free_iocb(phba, elsiocb); 8838 return 1; 8839 } 8840 8841 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8842 if (rc == IOCB_ERROR) { 8843 lpfc_els_free_iocb(phba, elsiocb); 8844 lpfc_nlp_put(ndlp); 8845 return 1; 8846 } 8847 8848 return 0; 8849} 8850 8851/** 8852 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 8853 * @vport: pointer to a host virtual N_Port data structure. 8854 * @cmdiocb: pointer to lpfc command iocb data structure. 8855 * @ndlp: pointer to a node-list data structure. 8856 * 8857 * This routine processes Read Port List (RPL) IOCB received as an ELS 8858 * unsolicited event. It first checks the remote port state. If the remote 8859 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 8860 * invokes the lpfc_els_rsp_reject() routine to send reject response. 8861 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 8862 * to accept the RPL. 
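 * The size of the ACC payload honors the maxsize from the RPL request: a
 * full RPL_RSP is returned when the request starts at index 0 and maxsize
 * is either zero or large enough to hold it; otherwise the response is
 * truncated to maxsize words.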
8863 * 8864 * Return code 8865 * 0 - Successfully processed rpl iocb (currently always return 0) 8866 **/ 8867static int 8868lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8869 struct lpfc_nodelist *ndlp) 8870{ 8871 struct lpfc_dmabuf *pcmd; 8872 uint32_t *lp; 8873 uint32_t maxsize; 8874 uint16_t cmdsize; 8875 RPL *rpl; 8876 struct ls_rjt stat; 8877 8878 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8879 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 8880 /* issue rejection response */ 8881 stat.un.b.lsRjtRsvd0 = 0; 8882 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8883 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8884 stat.un.b.vendorUnique = 0; 8885 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8886 NULL); 8887 /* rejected the unsolicited RPL request and done with it */ 8888 return 0; 8889 } 8890 8891 pcmd = cmdiocb->cmd_dmabuf; 8892 lp = (uint32_t *) pcmd->virt; 8893 rpl = (RPL *) (lp + 1); 8894 maxsize = be32_to_cpu(rpl->maxsize); 8895 8896 /* We support only one port */ 8897 if ((rpl->index == 0) && 8898 ((maxsize == 0) || 8899 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 8900 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 8901 } else { 8902 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 8903 } 8904 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 8905 8906 return 0; 8907} 8908 8909/** 8910 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 8911 * @vport: pointer to a virtual N_Port data structure. 8912 * @cmdiocb: pointer to lpfc command iocb data structure. 8913 * @ndlp: pointer to a node-list data structure. 8914 * 8915 * This routine processes Fibre Channel Address Resolution Protocol 8916 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 8917 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 8918 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 8919 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 8920 * remote PortName is compared against the FC PortName stored in the @vport 8921 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 8922 * compared against the FC NodeName stored in the @vport data structure. 8923 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 8924 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 8925 * invoked to send out FARP Response to the remote node. Before sending the 8926 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 8927 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 8928 * routine is invoked to log into the remote port first. 
8929 * 8930 * Return code 8931 * 0 - Either the FARP Match Mode not supported or successfully processed 8932 **/ 8933static int 8934lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8935 struct lpfc_nodelist *ndlp) 8936{ 8937 struct lpfc_dmabuf *pcmd; 8938 uint32_t *lp; 8939 FARP *fp; 8940 uint32_t cnt, did; 8941 8942 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 8943 pcmd = cmdiocb->cmd_dmabuf; 8944 lp = (uint32_t *) pcmd->virt; 8945 8946 lp++; 8947 fp = (FARP *) lp; 8948 /* FARP-REQ received from DID <did> */ 8949 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8950 "0601 FARP-REQ received from DID x%x\n", did); 8951 /* We will only support match on WWPN or WWNN */ 8952 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 8953 return 0; 8954 } 8955 8956 cnt = 0; 8957 /* If this FARP command is searching for my portname */ 8958 if (fp->Mflags & FARP_MATCH_PORT) { 8959 if (memcmp(&fp->RportName, &vport->fc_portname, 8960 sizeof(struct lpfc_name)) == 0) 8961 cnt = 1; 8962 } 8963 8964 /* If this FARP command is searching for my nodename */ 8965 if (fp->Mflags & FARP_MATCH_NODE) { 8966 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 8967 sizeof(struct lpfc_name)) == 0) 8968 cnt = 1; 8969 } 8970 8971 if (cnt) { 8972 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 8973 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 8974 /* Log back into the node before sending the FARP. */ 8975 if (fp->Rflags & FARP_REQUEST_PLOGI) { 8976 ndlp->nlp_prev_state = ndlp->nlp_state; 8977 lpfc_nlp_set_state(vport, ndlp, 8978 NLP_STE_PLOGI_ISSUE); 8979 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 8980 } 8981 8982 /* Send a FARP response to that node */ 8983 if (fp->Rflags & FARP_REQUEST_FARPR) 8984 lpfc_issue_els_farpr(vport, did, 0); 8985 } 8986 } 8987 return 0; 8988} 8989 8990/** 8991 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 8992 * @vport: pointer to a host virtual N_Port data structure. 8993 * @cmdiocb: pointer to lpfc command iocb data structure. 8994 * @ndlp: pointer to a node-list data structure. 8995 * 8996 * This routine processes Fibre Channel Address Resolution Protocol 8997 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 8998 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 8999 * the FARP response request. 9000 * 9001 * Return code 9002 * 0 - Successfully processed FARPR IOCB (currently always return 0) 9003 **/ 9004static int 9005lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9006 struct lpfc_nodelist *ndlp) 9007{ 9008 struct lpfc_dmabuf *pcmd; 9009 uint32_t *lp; 9010 uint32_t did; 9011 9012 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 9013 pcmd = cmdiocb->cmd_dmabuf; 9014 lp = (uint32_t *)pcmd->virt; 9015 9016 lp++; 9017 /* FARP-RSP received from DID <did> */ 9018 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9019 "0600 FARP-RSP received from DID x%x\n", did); 9020 /* ACCEPT the Farp resp request */ 9021 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 9022 9023 return 0; 9024} 9025 9026/** 9027 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 9028 * @vport: pointer to a host virtual N_Port data structure. 9029 * @cmdiocb: pointer to lpfc command iocb data structure. 9030 * @fan_ndlp: pointer to a node-list data structure. 9031 * 9032 * This routine processes a Fabric Address Notification (FAN) IOCB 9033 * command received as an ELS unsolicited event. 
The FAN ELS command will
9034 * only be processed on a physical port (i.e., the @vport represents the
9035 * physical port). The fabric NodeName and PortName from the FAN IOCB are
9036 * compared against those in the phba data structure. If any of those is
9037 * different, the lpfc_initial_flogi() routine is invoked to initialize
9038 * Fabric Login (FLOGI) to the fabric to start the discovery over. Otherwise,
9039 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
9040 * is invoked to register login to the fabric.
9041 *
9042 * Return code
9043 * 0 - Successfully processed fan iocb (currently always return 0).
9044 **/
9045static int
9046lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9047 struct lpfc_nodelist *fan_ndlp)
9048{
9049 struct lpfc_hba *phba = vport->phba;
9050 uint32_t *lp;
9051 FAN *fp;
9052
9053 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
9054 lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
9055 fp = (FAN *) ++lp;
9056 /* FAN received; Fan does not have a reply sequence */
9057 if ((vport == phba->pport) &&
9058 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
9059 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
9060 sizeof(struct lpfc_name))) ||
9061 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
9062 sizeof(struct lpfc_name)))) {
9063 /* This port has switched fabrics. FLOGI is required */
9064 lpfc_issue_init_vfi(vport);
9065 } else {
9066 /* FAN verified - skip FLOGI */
9067 vport->fc_myDID = vport->fc_prevDID;
9068 if (phba->sli_rev < LPFC_SLI_REV4)
9069 lpfc_issue_fabric_reglogin(vport);
9070 else {
9071 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9072 "3138 Need register VFI: (x%x/%x)\n",
9073 vport->fc_prevDID, vport->fc_myDID);
9074 lpfc_issue_reg_vfi(vport);
9075 }
9076 }
9077 }
9078 return 0;
9079}
9080
9081/**
9082 * lpfc_els_rcv_edc - Process an unsolicited EDC iocb
9083 * @vport: pointer to a host virtual N_Port data structure.
9084 * @cmdiocb: pointer to lpfc command iocb data structure.
9085 * @ndlp: pointer to a node-list data structure.
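 *
 * This routine processes an Exchange Diagnostic Capabilities (EDC) IOCB
 * received as an ELS unsolicited event. It walks the EDC diagnostic
 * descriptor TLVs looking for a congestion signaling capability descriptor,
 * records the congestion signal and FPIN registration settings to be used,
 * and always returns an EDC ACC by calling lpfc_issue_els_edc_rsp().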
9086 *
9087 * Return code
9088 * 0 - Successfully processed edc iocb (currently always return 0)
9089 **/
9090static int
9091lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9092 struct lpfc_nodelist *ndlp)
9093{
9094 struct lpfc_hba *phba = vport->phba;
9095 struct fc_els_edc *edc_req;
9096 struct fc_tlv_desc *tlv;
9097 uint8_t *payload;
9098 uint32_t *ptr, dtag;
9099 const char *dtag_nm;
9100 int desc_cnt = 0, bytes_remain;
9101 bool rcv_cap_desc = false;
9102
9103 payload = cmdiocb->cmd_dmabuf->virt;
9104
9105 edc_req = (struct fc_els_edc *)payload;
9106 bytes_remain = be32_to_cpu(edc_req->desc_len);
9107
9108 ptr = (uint32_t *)payload;
9109 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
9110 "3319 Rcv EDC payload len %d: x%x x%x x%x\n",
9111 bytes_remain, be32_to_cpu(*ptr),
9112 be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2)));
9113
9114 /* No signal support unless there is a congestion descriptor */
9115 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
9116 phba->cgn_sig_freq = 0;
9117 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
9118
9119 if (bytes_remain <= 0)
9120 goto out;
9121
9122 tlv = edc_req->desc;
9123
9124 /*
9125 * cycle through EDC diagnostic descriptors to find the
9126 * congestion signaling capability descriptor
9127 */
9128 while (bytes_remain && !rcv_cap_desc) {
9129 if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
9130 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
9131 "6464 Truncated TLV hdr on "
9132 "Diagnostic descriptor[%d]\n",
9133 desc_cnt);
9134 goto out;
9135 }
9136
9137 dtag = be32_to_cpu(tlv->desc_tag);
9138 switch (dtag) {
9139 case ELS_DTAG_LNK_FAULT_CAP:
9140 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
9141 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
9142 sizeof(struct fc_diag_lnkflt_desc)) {
9143 lpfc_printf_log(
9144 phba, KERN_WARNING, LOG_CGN_MGMT,
9145 "6465 Truncated Link Fault Diagnostic "
9146 "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
9147 desc_cnt, bytes_remain,
9148 FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
9149 sizeof(struct fc_diag_lnkflt_desc));
9150 goto out;
9151 }
9152 /* No action for Link Fault descriptor for now */
9153 break;
9154 case ELS_DTAG_CG_SIGNAL_CAP:
9155 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
9156 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
9157 sizeof(struct fc_diag_cg_sig_desc)) {
9158 lpfc_printf_log(
9159 phba, KERN_WARNING, LOG_CGN_MGMT,
9160 "6466 Truncated cgn signal Diagnostic "
9161 "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
9162 desc_cnt, bytes_remain,
9163 FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
9164 sizeof(struct fc_diag_cg_sig_desc));
9165 goto out;
9166 }
9167
9168 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
9169 phba->cgn_reg_signal = phba->cgn_init_reg_signal;
9170
9171 /* We start negotiation with lpfc_fabric_cgn_frequency.
9172 * When we process the EDC, we will settle on the
9173 * higher frequency.
9174 */
9175 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
9176
9177 lpfc_least_capable_settings(
9178 phba, (struct fc_diag_cg_sig_desc *)tlv);
9179 rcv_cap_desc = true;
9180 break;
9181 default:
9182 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
9183 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
9184 "6467 unknown Diagnostic "
9185 "Descriptor[%d]: tag x%x (%s)\n",
9186 desc_cnt, dtag, dtag_nm);
9187 }
9188 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
9189 tlv = fc_tlv_next_desc(tlv);
9190 desc_cnt++;
9191 }
9192out:
9193 /* Need to send back an ACC */
9194 lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp);
9195
9196 lpfc_config_cgn_signal(phba);
9197 return 0;
9198}
9199
9200/**
9201 * lpfc_els_timeout - Handler function for the els timer
9202 * @t: timer context used to obtain the vport.
9203 *
9204 * This routine is invoked by the ELS timer after timeout. It posts the ELS
9205 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port
9206 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
9207 * up the worker thread. The worker thread then invokes the routine
9208 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
9209 **/
9210void
9211lpfc_els_timeout(struct timer_list *t)
9212{
9213 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
9214 struct lpfc_hba *phba = vport->phba;
9215 uint32_t tmo_posted;
9216 unsigned long iflag;
9217
9218 spin_lock_irqsave(&vport->work_port_lock, iflag);
9219 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
9220 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
9221 vport->work_port_events |= WORKER_ELS_TMO;
9222 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
9223
9224 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
9225 lpfc_worker_wake_up(phba);
9226 return;
9227}
9228
9229
9230/**
9231 * lpfc_els_timeout_handler - Process an els timeout event
9232 * @vport: pointer to a virtual N_Port data structure.
9233 *
9234 * This routine is the actual handler function that processes an ELS timeout
9235 * event. It walks the ELS ring to find and abort all the IOCBs (except the
9236 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
9237 * invoking the lpfc_sli_issue_abort_iotag() routine.
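 * IOCBs whose drvrTimeout has not yet expired are not aborted; their
 * drvrTimeout is decremented by the 2 * R_A_TOV interval instead. The ELS
 * timer is re-armed for another interval as long as the transmit completion
 * queue is not empty and the port is not unloading.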
9238 **/ 9239void 9240lpfc_els_timeout_handler(struct lpfc_vport *vport) 9241{ 9242 struct lpfc_hba *phba = vport->phba; 9243 struct lpfc_sli_ring *pring; 9244 struct lpfc_iocbq *tmp_iocb, *piocb; 9245 IOCB_t *cmd = NULL; 9246 struct lpfc_dmabuf *pcmd; 9247 uint32_t els_command = 0; 9248 uint32_t timeout; 9249 uint32_t remote_ID = 0xffffffff; 9250 LIST_HEAD(abort_list); 9251 u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0; 9252 9253 9254 timeout = (uint32_t)(phba->fc_ratov << 1); 9255 9256 pring = lpfc_phba_elsring(phba); 9257 if (unlikely(!pring)) 9258 return; 9259 9260 if (phba->pport->load_flag & FC_UNLOADING) 9261 return; 9262 9263 spin_lock_irq(&phba->hbalock); 9264 if (phba->sli_rev == LPFC_SLI_REV4) 9265 spin_lock(&pring->ring_lock); 9266 9267 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9268 ulp_command = get_job_cmnd(phba, piocb); 9269 ulp_context = get_job_ulpcontext(phba, piocb); 9270 did = get_job_els_rsp64_did(phba, piocb); 9271 9272 if (phba->sli_rev == LPFC_SLI_REV4) { 9273 iotag = get_wqe_reqtag(piocb); 9274 } else { 9275 cmd = &piocb->iocb; 9276 iotag = cmd->ulpIoTag; 9277 } 9278 9279 if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 || 9280 ulp_command == CMD_ABORT_XRI_CX || 9281 ulp_command == CMD_ABORT_XRI_CN || 9282 ulp_command == CMD_CLOSE_XRI_CN) 9283 continue; 9284 9285 if (piocb->vport != vport) 9286 continue; 9287 9288 pcmd = piocb->cmd_dmabuf; 9289 if (pcmd) 9290 els_command = *(uint32_t *) (pcmd->virt); 9291 9292 if (els_command == ELS_CMD_FARP || 9293 els_command == ELS_CMD_FARPR || 9294 els_command == ELS_CMD_FDISC) 9295 continue; 9296 9297 if (piocb->drvrTimeout > 0) { 9298 if (piocb->drvrTimeout >= timeout) 9299 piocb->drvrTimeout -= timeout; 9300 else 9301 piocb->drvrTimeout = 0; 9302 continue; 9303 } 9304 9305 remote_ID = 0xffffffff; 9306 if (ulp_command != CMD_GEN_REQUEST64_CR) { 9307 remote_ID = did; 9308 } else { 9309 struct lpfc_nodelist *ndlp; 9310 ndlp = __lpfc_findnode_rpi(vport, ulp_context); 9311 if (ndlp) 9312 remote_ID = ndlp->nlp_DID; 9313 } 9314 list_add_tail(&piocb->dlist, &abort_list); 9315 } 9316 if (phba->sli_rev == LPFC_SLI_REV4) 9317 spin_unlock(&pring->ring_lock); 9318 spin_unlock_irq(&phba->hbalock); 9319 9320 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9321 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9322 "0127 ELS timeout Data: x%x x%x x%x " 9323 "x%x\n", els_command, 9324 remote_ID, ulp_command, iotag); 9325 9326 spin_lock_irq(&phba->hbalock); 9327 list_del_init(&piocb->dlist); 9328 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9329 spin_unlock_irq(&phba->hbalock); 9330 } 9331 9332 /* Make sure HBA is alive */ 9333 lpfc_issue_hb_tmo(phba); 9334 9335 if (!list_empty(&pring->txcmplq)) 9336 if (!(phba->pport->load_flag & FC_UNLOADING)) 9337 mod_timer(&vport->els_tmofunc, 9338 jiffies + msecs_to_jiffies(1000 * timeout)); 9339} 9340 9341/** 9342 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 9343 * @vport: pointer to a host virtual N_Port data structure. 9344 * 9345 * This routine is used to clean up all the outstanding ELS commands on a 9346 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 9347 * routine. After that, it walks the ELS transmit queue to remove all the 9348 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. 
For
9349 * the IOCBs with a non-NULL completion callback function, the callback
9350 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
9351 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
9352 * callback function, the IOCB will simply be released. Finally, it walks
9353 * the ELS transmit completion queue to issue an abort IOCB to any transmit
9354 * completion queue IOCB that is associated with the @vport and is not
9355 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
9356 * part of the discovery state machine) out to the HBA by invoking the
9357 * lpfc_sli_issue_abort_iotag() routine. Note that although this function
9358 * issues an abort IOCB for each transmit completion queued IOCB, it does not
9359 * guarantee that the IOCBs have been aborted by the time it returns.
9360 **/
9361void
9362lpfc_els_flush_cmd(struct lpfc_vport *vport)
9363{
9364 LIST_HEAD(abort_list);
9365 struct lpfc_hba *phba = vport->phba;
9366 struct lpfc_sli_ring *pring;
9367 struct lpfc_iocbq *tmp_iocb, *piocb;
9368 u32 ulp_command;
9369 unsigned long iflags = 0;
9370
9371 lpfc_fabric_abort_vport(vport);
9372
9373 /*
9374 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate
9375 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag
9376 * ultimately grabs the ring_lock, the driver must splice the list into
9377 * a working list and release the locks before calling the abort.
9378 */
9379 spin_lock_irqsave(&phba->hbalock, iflags);
9380 pring = lpfc_phba_elsring(phba);
9381
9382 /* Bail out if we've no ELS wq, like in PCI error recovery case. */
9383 if (unlikely(!pring)) {
9384 spin_unlock_irqrestore(&phba->hbalock, iflags);
9385 return;
9386 }
9387
9388 if (phba->sli_rev == LPFC_SLI_REV4)
9389 spin_lock(&pring->ring_lock);
9390
9391 /* First we need to issue aborts to outstanding cmds on txcmpl */
9392 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
9393 if (piocb->cmd_flag & LPFC_IO_LIBDFC)
9394 continue;
9395
9396 if (piocb->vport != vport)
9397 continue;
9398
9399 if (piocb->cmd_flag & LPFC_DRIVER_ABORTED)
9400 continue;
9401
9402 /* On the ELS ring we can have ELS_REQUESTs or
9403 * GEN_REQUESTs waiting for a response.
9404 */
9405 ulp_command = get_job_cmnd(phba, piocb);
9406 if (ulp_command == CMD_ELS_REQUEST64_CR) {
9407 list_add_tail(&piocb->dlist, &abort_list);
9408
9409 /* If the link is down when flushing ELS commands
9410 * the firmware will not complete them till after
9411 * the link comes back up. This may confuse
9412 * discovery for the new link up, so we need to
9413 * change the compl routine to just clean up the iocb
9414 * and avoid any retry logic.
9415 */
9416 if (phba->link_state == LPFC_LINK_DOWN)
9417 piocb->cmd_cmpl = lpfc_cmpl_els_link_down;
9418 }
9419 if (ulp_command == CMD_GEN_REQUEST64_CR)
9420 list_add_tail(&piocb->dlist, &abort_list);
9421 }
9422
9423 if (phba->sli_rev == LPFC_SLI_REV4)
9424 spin_unlock(&pring->ring_lock);
9425 spin_unlock_irqrestore(&phba->hbalock, iflags);
9426
9427 /* Abort each txcmpl iocb on aborted list and remove the dlist links.
*/ 9428 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9429 spin_lock_irqsave(&phba->hbalock, iflags); 9430 list_del_init(&piocb->dlist); 9431 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9432 spin_unlock_irqrestore(&phba->hbalock, iflags); 9433 } 9434 /* Make sure HBA is alive */ 9435 lpfc_issue_hb_tmo(phba); 9436 9437 if (!list_empty(&abort_list)) 9438 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9439 "3387 abort list for txq not empty\n"); 9440 INIT_LIST_HEAD(&abort_list); 9441 9442 spin_lock_irqsave(&phba->hbalock, iflags); 9443 if (phba->sli_rev == LPFC_SLI_REV4) 9444 spin_lock(&pring->ring_lock); 9445 9446 /* No need to abort the txq list, 9447 * just queue them up for lpfc_sli_cancel_iocbs 9448 */ 9449 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 9450 ulp_command = get_job_cmnd(phba, piocb); 9451 9452 if (piocb->cmd_flag & LPFC_IO_LIBDFC) 9453 continue; 9454 9455 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 9456 if (ulp_command == CMD_QUE_RING_BUF_CN || 9457 ulp_command == CMD_QUE_RING_BUF64_CN || 9458 ulp_command == CMD_CLOSE_XRI_CN || 9459 ulp_command == CMD_ABORT_XRI_CN || 9460 ulp_command == CMD_ABORT_XRI_CX) 9461 continue; 9462 9463 if (piocb->vport != vport) 9464 continue; 9465 9466 list_del_init(&piocb->list); 9467 list_add_tail(&piocb->list, &abort_list); 9468 } 9469 9470 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ 9471 if (vport == phba->pport) { 9472 list_for_each_entry_safe(piocb, tmp_iocb, 9473 &phba->fabric_iocb_list, list) { 9474 list_del_init(&piocb->list); 9475 list_add_tail(&piocb->list, &abort_list); 9476 } 9477 } 9478 9479 if (phba->sli_rev == LPFC_SLI_REV4) 9480 spin_unlock(&pring->ring_lock); 9481 spin_unlock_irqrestore(&phba->hbalock, iflags); 9482 9483 /* Cancel all the IOCBs from the completions list */ 9484 lpfc_sli_cancel_iocbs(phba, &abort_list, 9485 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 9486 9487 return; 9488} 9489 9490/** 9491 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 9492 * @phba: pointer to lpfc hba data structure. 9493 * 9494 * This routine is used to clean up all the outstanding ELS commands on a 9495 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 9496 * routine. After that, it walks the ELS transmit queue to remove all the 9497 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 9498 * the IOCBs with the completion callback function associated, the callback 9499 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9500 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 9501 * callback function associated, the IOCB will simply be released. Finally, 9502 * it walks the ELS transmit completion queue to issue an abort IOCB to any 9503 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 9504 * management plane IOCBs that are not part of the discovery state machine) 9505 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 9506 **/ 9507void 9508lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 9509{ 9510 struct lpfc_vport *vport; 9511 9512 spin_lock_irq(&phba->port_list_lock); 9513 list_for_each_entry(vport, &phba->port_list, listentry) 9514 lpfc_els_flush_cmd(vport); 9515 spin_unlock_irq(&phba->port_list_lock); 9516 9517 return; 9518} 9519 9520/** 9521 * lpfc_send_els_failure_event - Posts an ELS command failure event 9522 * @phba: Pointer to hba context object. 
9523 * @cmdiocbp: Pointer to command iocb which reported error. 9524 * @rspiocbp: Pointer to response iocb which reported error. 9525 * 9526 * This function sends an event when there is an ELS command 9527 * failure. 9528 **/ 9529void 9530lpfc_send_els_failure_event(struct lpfc_hba *phba, 9531 struct lpfc_iocbq *cmdiocbp, 9532 struct lpfc_iocbq *rspiocbp) 9533{ 9534 struct lpfc_vport *vport = cmdiocbp->vport; 9535 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9536 struct lpfc_lsrjt_event lsrjt_event; 9537 struct lpfc_fabric_event_header fabric_event; 9538 struct ls_rjt stat; 9539 struct lpfc_nodelist *ndlp; 9540 uint32_t *pcmd; 9541 u32 ulp_status, ulp_word4; 9542 9543 ndlp = cmdiocbp->ndlp; 9544 if (!ndlp) 9545 return; 9546 9547 ulp_status = get_job_ulpstatus(phba, rspiocbp); 9548 ulp_word4 = get_job_word4(phba, rspiocbp); 9549 9550 if (ulp_status == IOSTAT_LS_RJT) { 9551 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 9552 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 9553 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 9554 sizeof(struct lpfc_name)); 9555 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 9556 sizeof(struct lpfc_name)); 9557 pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt; 9558 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 9559 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 9560 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 9561 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 9562 fc_host_post_vendor_event(shost, 9563 fc_get_event_number(), 9564 sizeof(lsrjt_event), 9565 (char *)&lsrjt_event, 9566 LPFC_NL_VENDOR_ID); 9567 return; 9568 } 9569 if (ulp_status == IOSTAT_NPORT_BSY || 9570 ulp_status == IOSTAT_FABRIC_BSY) { 9571 fabric_event.event_type = FC_REG_FABRIC_EVENT; 9572 if (ulp_status == IOSTAT_NPORT_BSY) 9573 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 9574 else 9575 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 9576 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 9577 sizeof(struct lpfc_name)); 9578 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 9579 sizeof(struct lpfc_name)); 9580 fc_host_post_vendor_event(shost, 9581 fc_get_event_number(), 9582 sizeof(fabric_event), 9583 (char *)&fabric_event, 9584 LPFC_NL_VENDOR_ID); 9585 return; 9586 } 9587 9588} 9589 9590/** 9591 * lpfc_send_els_event - Posts unsolicited els event 9592 * @vport: Pointer to vport object. 9593 * @ndlp: Pointer FC node object. 9594 * @payload: ELS command code type. 9595 * 9596 * This function posts an event when there is an incoming 9597 * unsolicited ELS command. 
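 * LOGO events are posted using the larger lpfc_logo_event structure so the
 * WWPN carried in the LOGO payload can be reported along with the node's
 * WWPN and WWNN; all other supported commands use the common
 * lpfc_els_event_header.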
9598 **/ 9599static void 9600lpfc_send_els_event(struct lpfc_vport *vport, 9601 struct lpfc_nodelist *ndlp, 9602 uint32_t *payload) 9603{ 9604 struct lpfc_els_event_header *els_data = NULL; 9605 struct lpfc_logo_event *logo_data = NULL; 9606 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9607 9608 if (*payload == ELS_CMD_LOGO) { 9609 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 9610 if (!logo_data) { 9611 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9612 "0148 Failed to allocate memory " 9613 "for LOGO event\n"); 9614 return; 9615 } 9616 els_data = &logo_data->header; 9617 } else { 9618 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 9619 GFP_KERNEL); 9620 if (!els_data) { 9621 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9622 "0149 Failed to allocate memory " 9623 "for ELS event\n"); 9624 return; 9625 } 9626 } 9627 els_data->event_type = FC_REG_ELS_EVENT; 9628 switch (*payload) { 9629 case ELS_CMD_PLOGI: 9630 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 9631 break; 9632 case ELS_CMD_PRLO: 9633 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 9634 break; 9635 case ELS_CMD_ADISC: 9636 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 9637 break; 9638 case ELS_CMD_LOGO: 9639 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 9640 /* Copy the WWPN in the LOGO payload */ 9641 memcpy(logo_data->logo_wwpn, &payload[2], 9642 sizeof(struct lpfc_name)); 9643 break; 9644 default: 9645 kfree(els_data); 9646 return; 9647 } 9648 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 9649 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 9650 if (*payload == ELS_CMD_LOGO) { 9651 fc_host_post_vendor_event(shost, 9652 fc_get_event_number(), 9653 sizeof(struct lpfc_logo_event), 9654 (char *)logo_data, 9655 LPFC_NL_VENDOR_ID); 9656 kfree(logo_data); 9657 } else { 9658 fc_host_post_vendor_event(shost, 9659 fc_get_event_number(), 9660 sizeof(struct lpfc_els_event_header), 9661 (char *)els_data, 9662 LPFC_NL_VENDOR_ID); 9663 kfree(els_data); 9664 } 9665 9666 return; 9667} 9668 9669 9670DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, 9671 FC_FPIN_LI_EVT_TYPES_INIT); 9672 9673DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types, 9674 FC_FPIN_DELI_EVT_TYPES_INIT); 9675 9676DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types, 9677 FC_FPIN_CONGN_EVT_TYPES_INIT); 9678 9679DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm, 9680 fc_fpin_congn_severity_types, 9681 FC_FPIN_CONGN_SEVERITY_INIT); 9682 9683 9684/** 9685 * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port 9686 * @phba: Pointer to phba object. 9687 * @wwnlist: Pointer to list of WWPNs in FPIN payload 9688 * @cnt: count of WWPNs in FPIN payload 9689 * 9690 * This routine is called by LI and PC descriptors. 
9691 * Limit the number of WWPNs displayed to 6 log messages, 6 per log message 9692 */ 9693static void 9694lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt) 9695{ 9696 char buf[LPFC_FPIN_WWPN_LINE_SZ]; 9697 __be64 wwn; 9698 u64 wwpn; 9699 int i, len; 9700 int line = 0; 9701 int wcnt = 0; 9702 bool endit = false; 9703 9704 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:"); 9705 for (i = 0; i < cnt; i++) { 9706 /* Are we on the last WWPN */ 9707 if (i == (cnt - 1)) 9708 endit = true; 9709 9710 /* Extract the next WWPN from the payload */ 9711 wwn = *wwnlist++; 9712 wwpn = be64_to_cpu(wwn); 9713 len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len, 9714 " %016llx", wwpn); 9715 9716 /* Log a message if we are on the last WWPN 9717 * or if we hit the max allowed per message. 9718 */ 9719 wcnt++; 9720 if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) { 9721 buf[len] = 0; 9722 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9723 "4686 %s\n", buf); 9724 9725 /* Check if we reached the last WWPN */ 9726 if (endit) 9727 return; 9728 9729 /* Limit the number of log message displayed per FPIN */ 9730 line++; 9731 if (line == LPFC_FPIN_WWPN_NUM_LINE) { 9732 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9733 "4687 %d WWPNs Truncated\n", 9734 cnt - i - 1); 9735 return; 9736 } 9737 9738 /* Start over with next log message */ 9739 wcnt = 0; 9740 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, 9741 "Additional WWPNs:"); 9742 } 9743 } 9744} 9745 9746/** 9747 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. 9748 * @phba: Pointer to phba object. 9749 * @tlv: Pointer to the Link Integrity Notification Descriptor. 9750 * 9751 * This function processes a Link Integrity FPIN event by logging a message. 9752 **/ 9753static void 9754lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9755{ 9756 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; 9757 const char *li_evt_str; 9758 u32 li_evt, cnt; 9759 9760 li_evt = be16_to_cpu(li->event_type); 9761 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); 9762 cnt = be32_to_cpu(li->pname_count); 9763 9764 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9765 "4680 FPIN Link Integrity %s (x%x) " 9766 "Detecting PN x%016llx Attached PN x%016llx " 9767 "Duration %d mSecs Count %d Port Cnt %d\n", 9768 li_evt_str, li_evt, 9769 be64_to_cpu(li->detecting_wwpn), 9770 be64_to_cpu(li->attached_wwpn), 9771 be32_to_cpu(li->event_threshold), 9772 be32_to_cpu(li->event_count), cnt); 9773 9774 lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt); 9775} 9776 9777/** 9778 * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event. 9779 * @phba: Pointer to hba object. 9780 * @tlv: Pointer to the Delivery Notification Descriptor TLV 9781 * 9782 * This function processes a Delivery FPIN event by logging a message. 
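 * The six DiscHdr words logged are taken from the words that immediately
 * follow the fixed part of the Delivery Notification descriptor.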
9783 **/ 9784static void 9785lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9786{ 9787 struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv; 9788 const char *del_rsn_str; 9789 u32 del_rsn; 9790 __be32 *frame; 9791 9792 del_rsn = be16_to_cpu(del->deli_reason_code); 9793 del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn); 9794 9795 /* Skip over desc_tag/desc_len header to payload */ 9796 frame = (__be32 *)(del + 1); 9797 9798 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9799 "4681 FPIN Delivery %s (x%x) " 9800 "Detecting PN x%016llx Attached PN x%016llx " 9801 "DiscHdr0 x%08x " 9802 "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x " 9803 "DiscHdr4 x%08x DiscHdr5 x%08x\n", 9804 del_rsn_str, del_rsn, 9805 be64_to_cpu(del->detecting_wwpn), 9806 be64_to_cpu(del->attached_wwpn), 9807 be32_to_cpu(frame[0]), 9808 be32_to_cpu(frame[1]), 9809 be32_to_cpu(frame[2]), 9810 be32_to_cpu(frame[3]), 9811 be32_to_cpu(frame[4]), 9812 be32_to_cpu(frame[5])); 9813} 9814 9815/** 9816 * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event. 9817 * @phba: Pointer to hba object. 9818 * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV 9819 * 9820 * This function processes a Peer Congestion FPIN event by logging a message. 9821 **/ 9822static void 9823lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9824{ 9825 struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv; 9826 const char *pc_evt_str; 9827 u32 pc_evt, cnt; 9828 9829 pc_evt = be16_to_cpu(pc->event_type); 9830 pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt); 9831 cnt = be32_to_cpu(pc->pname_count); 9832 9833 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS, 9834 "4684 FPIN Peer Congestion %s (x%x) " 9835 "Duration %d mSecs " 9836 "Detecting PN x%016llx Attached PN x%016llx " 9837 "Impacted Port Cnt %d\n", 9838 pc_evt_str, pc_evt, 9839 be32_to_cpu(pc->event_period), 9840 be64_to_cpu(pc->detecting_wwpn), 9841 be64_to_cpu(pc->attached_wwpn), 9842 cnt); 9843 9844 lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt); 9845} 9846 9847/** 9848 * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification 9849 * @phba: Pointer to hba object. 9850 * @tlv: Pointer to the Congestion Notification Descriptor TLV 9851 * 9852 * This function processes an FPIN Congestion Notifiction. The notification 9853 * could be an Alarm or Warning. This routine feeds that data into driver's 9854 * running congestion algorithm. It also processes the FPIN by 9855 * logging a message. It returns 1 to indicate deliver this message 9856 * to the upper layer or 0 to indicate don't deliver it. 9857 **/ 9858static int 9859lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9860{ 9861 struct lpfc_cgn_info *cp; 9862 struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv; 9863 const char *cgn_evt_str; 9864 u32 cgn_evt; 9865 const char *cgn_sev_str; 9866 u32 cgn_sev; 9867 uint16_t value; 9868 u32 crc; 9869 bool nm_log = false; 9870 int rc = 1; 9871 9872 cgn_evt = be16_to_cpu(cgn->event_type); 9873 cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt); 9874 cgn_sev = cgn->severity; 9875 cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev); 9876 9877 /* The driver only takes action on a Credit Stall or Oversubscription 9878 * event type to engage the IO algorithm. The driver prints an 9879 * unmaskable message only for Lost Credit and Credit Stall. 
9880 * TODO: Still need to have definition of host action on clear, 9881 * lost credit and device specific event types. 9882 */ 9883 switch (cgn_evt) { 9884 case FPIN_CONGN_LOST_CREDIT: 9885 nm_log = true; 9886 break; 9887 case FPIN_CONGN_CREDIT_STALL: 9888 nm_log = true; 9889 fallthrough; 9890 case FPIN_CONGN_OVERSUBSCRIPTION: 9891 if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION) 9892 nm_log = false; 9893 switch (cgn_sev) { 9894 case FPIN_CONGN_SEVERITY_ERROR: 9895 /* Take action here for an Alarm event */ 9896 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 9897 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) { 9898 /* Track of alarm cnt for SYNC_WQE */ 9899 atomic_inc(&phba->cgn_sync_alarm_cnt); 9900 } 9901 /* Track alarm cnt for cgn_info regardless 9902 * of whether CMF is configured for Signals 9903 * or FPINs. 9904 */ 9905 atomic_inc(&phba->cgn_fabric_alarm_cnt); 9906 goto cleanup; 9907 } 9908 break; 9909 case FPIN_CONGN_SEVERITY_WARNING: 9910 /* Take action here for a Warning event */ 9911 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 9912 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) { 9913 /* Track of warning cnt for SYNC_WQE */ 9914 atomic_inc(&phba->cgn_sync_warn_cnt); 9915 } 9916 /* Track warning cnt and freq for cgn_info 9917 * regardless of whether CMF is configured for 9918 * Signals or FPINs. 9919 */ 9920 atomic_inc(&phba->cgn_fabric_warn_cnt); 9921cleanup: 9922 /* Save frequency in ms */ 9923 phba->cgn_fpin_frequency = 9924 be32_to_cpu(cgn->event_period); 9925 value = phba->cgn_fpin_frequency; 9926 if (phba->cgn_i) { 9927 cp = (struct lpfc_cgn_info *) 9928 phba->cgn_i->virt; 9929 cp->cgn_alarm_freq = 9930 cpu_to_le16(value); 9931 cp->cgn_warn_freq = 9932 cpu_to_le16(value); 9933 crc = lpfc_cgn_calc_crc32 9934 (cp, 9935 LPFC_CGN_INFO_SZ, 9936 LPFC_CGN_CRC32_SEED); 9937 cp->cgn_info_crc = cpu_to_le32(crc); 9938 } 9939 9940 /* Don't deliver to upper layer since 9941 * driver took action on this tlv. 9942 */ 9943 rc = 0; 9944 } 9945 break; 9946 } 9947 break; 9948 } 9949 9950 /* Change the log level to unmaskable for the following event types. */ 9951 lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO), 9952 LOG_CGN_MGMT | LOG_ELS, 9953 "4683 FPIN CONGESTION %s type %s (x%x) Event " 9954 "Duration %d mSecs\n", 9955 cgn_sev_str, cgn_evt_str, cgn_evt, 9956 be32_to_cpu(cgn->event_period)); 9957 return rc; 9958} 9959 9960void 9961lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length) 9962{ 9963 struct lpfc_hba *phba = vport->phba; 9964 struct fc_els_fpin *fpin = (struct fc_els_fpin *)p; 9965 struct fc_tlv_desc *tlv, *first_tlv, *current_tlv; 9966 const char *dtag_nm; 9967 int desc_cnt = 0, bytes_remain, cnt; 9968 u32 dtag, deliver = 0; 9969 int len; 9970 9971 /* FPINs handled only if we are in the right discovery state */ 9972 if (vport->port_state < LPFC_DISC_AUTH) 9973 return; 9974 9975 /* make sure there is the full fpin header */ 9976 if (fpin_length < sizeof(struct fc_els_fpin)) 9977 return; 9978 9979 /* Sanity check descriptor length. The desc_len value does not 9980 * include space for the ELS command and the desc_len fields. 
9981 */ 9982 len = be32_to_cpu(fpin->desc_len); 9983 if (fpin_length < len + sizeof(struct fc_els_fpin)) { 9984 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 9985 "4671 Bad ELS FPIN length %d: %d\n", 9986 len, fpin_length); 9987 return; 9988 } 9989 9990 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; 9991 first_tlv = tlv; 9992 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); 9993 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); 9994 9995 /* process each descriptor separately */ 9996 while (bytes_remain >= FC_TLV_DESC_HDR_SZ && 9997 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { 9998 dtag = be32_to_cpu(tlv->desc_tag); 9999 switch (dtag) { 10000 case ELS_DTAG_LNK_INTEGRITY: 10001 lpfc_els_rcv_fpin_li(phba, tlv); 10002 deliver = 1; 10003 break; 10004 case ELS_DTAG_DELIVERY: 10005 lpfc_els_rcv_fpin_del(phba, tlv); 10006 deliver = 1; 10007 break; 10008 case ELS_DTAG_PEER_CONGEST: 10009 lpfc_els_rcv_fpin_peer_cgn(phba, tlv); 10010 deliver = 1; 10011 break; 10012 case ELS_DTAG_CONGESTION: 10013 deliver = lpfc_els_rcv_fpin_cgn(phba, tlv); 10014 break; 10015 default: 10016 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10017 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10018 "4678 unknown FPIN descriptor[%d]: " 10019 "tag x%x (%s)\n", 10020 desc_cnt, dtag, dtag_nm); 10021 10022 /* If descriptor is bad, drop the rest of the data */ 10023 return; 10024 } 10025 lpfc_cgn_update_stat(phba, dtag); 10026 cnt = be32_to_cpu(tlv->desc_len); 10027 10028 /* Sanity check descriptor length. The desc_len value does not 10029 * include space for the desc_tag and the desc_len fields. 10030 */ 10031 len -= (cnt + sizeof(struct fc_tlv_desc)); 10032 if (len < 0) { 10033 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10034 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10035 "4672 Bad FPIN descriptor TLV length " 10036 "%d: %d %d %s\n", 10037 cnt, len, fpin_length, dtag_nm); 10038 return; 10039 } 10040 10041 current_tlv = tlv; 10042 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 10043 tlv = fc_tlv_next_desc(tlv); 10044 10045 /* Format payload such that the FPIN delivered to the 10046 * upper layer is a single descriptor FPIN. 10047 */ 10048 if (desc_cnt) 10049 memcpy(first_tlv, current_tlv, 10050 (cnt + sizeof(struct fc_els_fpin))); 10051 10052 /* Adjust the length so that it only reflects a 10053 * single descriptor FPIN. 10054 */ 10055 fpin_length = cnt + sizeof(struct fc_els_fpin); 10056 fpin->desc_len = cpu_to_be32(fpin_length); 10057 fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */ 10058 10059 /* Send every descriptor individually to the upper layer */ 10060 if (deliver) 10061 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), 10062 fpin_length, (char *)fpin); 10063 desc_cnt++; 10064 } 10065} 10066 10067/** 10068 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 10069 * @phba: pointer to lpfc hba data structure. 10070 * @pring: pointer to a SLI ring. 10071 * @vport: pointer to a host virtual N_Port data structure. 10072 * @elsiocb: pointer to lpfc els command iocb data structure. 10073 * 10074 * This routine is used for processing the IOCB associated with a unsolicited 10075 * event. It first determines whether there is an existing ndlp that matches 10076 * the DID from the unsolicited IOCB. If not, it will create a new one with 10077 * the DID from the unsolicited IOCB. The ELS command from the unsolicited 10078 * IOCB is then used to invoke the proper routine and to set up proper state 10079 * of the discovery state machine. 
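 * Frames are dropped when the receive status is in error, a host link
 * attention is pending, the vport is unloading, or the matching ndlp is in
 * DEV_LOSS. While the port is still below LPFC_FABRIC_CFG_LINK, anything
 * other than FLOGI (or a PLOGI in pt2pt mode) is rejected with an LS_RJT.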
10080 **/ 10081static void 10082lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10083 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 10084{ 10085 struct lpfc_nodelist *ndlp; 10086 struct ls_rjt stat; 10087 u32 *payload, payload_len; 10088 u32 cmd = 0, did = 0, newnode, status = 0; 10089 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 10090 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10091 LPFC_MBOXQ_t *mbox; 10092 10093 if (!vport || !elsiocb->cmd_dmabuf) 10094 goto dropit; 10095 10096 newnode = 0; 10097 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10098 payload = elsiocb->cmd_dmabuf->virt; 10099 if (phba->sli_rev == LPFC_SLI_REV4) 10100 payload_len = wcqe_cmpl->total_data_placed; 10101 else 10102 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; 10103 status = get_job_ulpstatus(phba, elsiocb); 10104 cmd = *payload; 10105 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 10106 lpfc_sli3_post_buffer(phba, pring, 1); 10107 10108 did = get_job_els_rsp64_did(phba, elsiocb); 10109 if (status) { 10110 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10111 "RCV Unsol ELS: status:x%x/x%x did:x%x", 10112 status, get_job_word4(phba, elsiocb), did); 10113 goto dropit; 10114 } 10115 10116 /* Check to see if link went down during discovery */ 10117 if (lpfc_els_chk_latt(vport)) 10118 goto dropit; 10119 10120 /* Ignore traffic received during vport shutdown. */ 10121 if (vport->load_flag & FC_UNLOADING) 10122 goto dropit; 10123 10124 /* If NPort discovery is delayed drop incoming ELS */ 10125 if ((vport->fc_flag & FC_DISC_DELAYED) && 10126 (cmd != ELS_CMD_PLOGI)) 10127 goto dropit; 10128 10129 ndlp = lpfc_findnode_did(vport, did); 10130 if (!ndlp) { 10131 /* Cannot find existing Fabric ndlp, so allocate a new one */ 10132 ndlp = lpfc_nlp_init(vport, did); 10133 if (!ndlp) 10134 goto dropit; 10135 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10136 newnode = 1; 10137 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 10138 ndlp->nlp_type |= NLP_FABRIC; 10139 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 10140 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10141 newnode = 1; 10142 } 10143 10144 phba->fc_stat.elsRcvFrame++; 10145 10146 /* 10147 * Do not process any unsolicited ELS commands 10148 * if the ndlp is in DEV_LOSS 10149 */ 10150 spin_lock_irq(&ndlp->lock); 10151 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 10152 spin_unlock_irq(&ndlp->lock); 10153 if (newnode) 10154 lpfc_nlp_put(ndlp); 10155 goto dropit; 10156 } 10157 spin_unlock_irq(&ndlp->lock); 10158 10159 elsiocb->ndlp = lpfc_nlp_get(ndlp); 10160 if (!elsiocb->ndlp) 10161 goto dropit; 10162 elsiocb->vport = vport; 10163 10164 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 10165 cmd &= ELS_CMD_MASK; 10166 } 10167 /* ELS command <elsCmd> received from NPORT <did> */ 10168 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10169 "0112 ELS command x%x received from NPORT x%x " 10170 "refcnt %d Data: x%x x%x x%x x%x\n", 10171 cmd, did, kref_read(&ndlp->kref), vport->port_state, 10172 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); 10173 10174 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 10175 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 10176 (cmd != ELS_CMD_FLOGI) && 10177 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) { 10178 rjt_err = LSRJT_LOGICAL_BSY; 10179 rjt_exp = LSEXP_NOTHING_MORE; 10180 goto lsrjt; 10181 } 10182 10183 switch (cmd) { 10184 case ELS_CMD_PLOGI: 10185 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10186 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 
10187 did, vport->port_state, ndlp->nlp_flag); 10188 10189 phba->fc_stat.elsRcvPLOGI++; 10190 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 10191 if (phba->sli_rev == LPFC_SLI_REV4 && 10192 (phba->pport->fc_flag & FC_PT2PT)) { 10193 vport->fc_prevDID = vport->fc_myDID; 10194 /* Our DID needs to be updated before registering 10195 * the vfi. This is done in lpfc_rcv_plogi but 10196 * that is called after the reg_vfi. 10197 */ 10198 vport->fc_myDID = 10199 bf_get(els_rsp64_sid, 10200 &elsiocb->wqe.xmit_els_rsp); 10201 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10202 "3312 Remote port assigned DID x%x " 10203 "%x\n", vport->fc_myDID, 10204 vport->fc_prevDID); 10205 } 10206 10207 lpfc_send_els_event(vport, ndlp, payload); 10208 10209 /* If Nport discovery is delayed, reject PLOGIs */ 10210 if (vport->fc_flag & FC_DISC_DELAYED) { 10211 rjt_err = LSRJT_UNABLE_TPC; 10212 rjt_exp = LSEXP_NOTHING_MORE; 10213 break; 10214 } 10215 10216 if (vport->port_state < LPFC_DISC_AUTH) { 10217 if (!(phba->pport->fc_flag & FC_PT2PT) || 10218 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 10219 rjt_err = LSRJT_UNABLE_TPC; 10220 rjt_exp = LSEXP_NOTHING_MORE; 10221 break; 10222 } 10223 } 10224 10225 spin_lock_irq(&ndlp->lock); 10226 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 10227 spin_unlock_irq(&ndlp->lock); 10228 10229 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10230 NLP_EVT_RCV_PLOGI); 10231 10232 break; 10233 case ELS_CMD_FLOGI: 10234 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10235 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 10236 did, vport->port_state, ndlp->nlp_flag); 10237 10238 phba->fc_stat.elsRcvFLOGI++; 10239 10240 /* If the driver believes fabric discovery is done and is ready, 10241 * bounce the link. There is some descrepancy. 10242 */ 10243 if (vport->port_state >= LPFC_LOCAL_CFG_LINK && 10244 vport->fc_flag & FC_PT2PT && 10245 vport->rcv_flogi_cnt >= 1) { 10246 rjt_err = LSRJT_LOGICAL_BSY; 10247 rjt_exp = LSEXP_NOTHING_MORE; 10248 init_link++; 10249 goto lsrjt; 10250 } 10251 10252 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 10253 /* retain node if our response is deferred */ 10254 if (phba->defer_flogi_acc_flag) 10255 break; 10256 if (newnode) 10257 lpfc_disc_state_machine(vport, ndlp, NULL, 10258 NLP_EVT_DEVICE_RM); 10259 break; 10260 case ELS_CMD_LOGO: 10261 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10262 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 10263 did, vport->port_state, ndlp->nlp_flag); 10264 10265 phba->fc_stat.elsRcvLOGO++; 10266 lpfc_send_els_event(vport, ndlp, payload); 10267 if (vport->port_state < LPFC_DISC_AUTH) { 10268 rjt_err = LSRJT_UNABLE_TPC; 10269 rjt_exp = LSEXP_NOTHING_MORE; 10270 break; 10271 } 10272 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 10273 if (newnode) 10274 lpfc_disc_state_machine(vport, ndlp, NULL, 10275 NLP_EVT_DEVICE_RM); 10276 break; 10277 case ELS_CMD_PRLO: 10278 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10279 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 10280 did, vport->port_state, ndlp->nlp_flag); 10281 10282 phba->fc_stat.elsRcvPRLO++; 10283 lpfc_send_els_event(vport, ndlp, payload); 10284 if (vport->port_state < LPFC_DISC_AUTH) { 10285 rjt_err = LSRJT_UNABLE_TPC; 10286 rjt_exp = LSEXP_NOTHING_MORE; 10287 break; 10288 } 10289 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 10290 break; 10291 case ELS_CMD_LCB: 10292 phba->fc_stat.elsRcvLCB++; 10293 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 10294 break; 10295 case ELS_CMD_RDP: 10296 phba->fc_stat.elsRcvRDP++; 10297 lpfc_els_rcv_rdp(vport, elsiocb, 
ndlp); 10298 break; 10299 case ELS_CMD_RSCN: 10300 phba->fc_stat.elsRcvRSCN++; 10301 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 10302 if (newnode) 10303 lpfc_disc_state_machine(vport, ndlp, NULL, 10304 NLP_EVT_DEVICE_RM); 10305 break; 10306 case ELS_CMD_ADISC: 10307 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10308 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 10309 did, vport->port_state, ndlp->nlp_flag); 10310 10311 lpfc_send_els_event(vport, ndlp, payload); 10312 phba->fc_stat.elsRcvADISC++; 10313 if (vport->port_state < LPFC_DISC_AUTH) { 10314 rjt_err = LSRJT_UNABLE_TPC; 10315 rjt_exp = LSEXP_NOTHING_MORE; 10316 break; 10317 } 10318 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10319 NLP_EVT_RCV_ADISC); 10320 break; 10321 case ELS_CMD_PDISC: 10322 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10323 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 10324 did, vport->port_state, ndlp->nlp_flag); 10325 10326 phba->fc_stat.elsRcvPDISC++; 10327 if (vport->port_state < LPFC_DISC_AUTH) { 10328 rjt_err = LSRJT_UNABLE_TPC; 10329 rjt_exp = LSEXP_NOTHING_MORE; 10330 break; 10331 } 10332 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10333 NLP_EVT_RCV_PDISC); 10334 break; 10335 case ELS_CMD_FARPR: 10336 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10337 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 10338 did, vport->port_state, ndlp->nlp_flag); 10339 10340 phba->fc_stat.elsRcvFARPR++; 10341 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 10342 break; 10343 case ELS_CMD_FARP: 10344 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10345 "RCV FARP: did:x%x/ste:x%x flg:x%x", 10346 did, vport->port_state, ndlp->nlp_flag); 10347 10348 phba->fc_stat.elsRcvFARP++; 10349 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 10350 break; 10351 case ELS_CMD_FAN: 10352 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10353 "RCV FAN: did:x%x/ste:x%x flg:x%x", 10354 did, vport->port_state, ndlp->nlp_flag); 10355 10356 phba->fc_stat.elsRcvFAN++; 10357 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 10358 break; 10359 case ELS_CMD_PRLI: 10360 case ELS_CMD_NVMEPRLI: 10361 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10362 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 10363 did, vport->port_state, ndlp->nlp_flag); 10364 10365 phba->fc_stat.elsRcvPRLI++; 10366 if ((vport->port_state < LPFC_DISC_AUTH) && 10367 (vport->fc_flag & FC_FABRIC)) { 10368 rjt_err = LSRJT_UNABLE_TPC; 10369 rjt_exp = LSEXP_NOTHING_MORE; 10370 break; 10371 } 10372 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 10373 break; 10374 case ELS_CMD_LIRR: 10375 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10376 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 10377 did, vport->port_state, ndlp->nlp_flag); 10378 10379 phba->fc_stat.elsRcvLIRR++; 10380 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 10381 if (newnode) 10382 lpfc_disc_state_machine(vport, ndlp, NULL, 10383 NLP_EVT_DEVICE_RM); 10384 break; 10385 case ELS_CMD_RLS: 10386 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10387 "RCV RLS: did:x%x/ste:x%x flg:x%x", 10388 did, vport->port_state, ndlp->nlp_flag); 10389 10390 phba->fc_stat.elsRcvRLS++; 10391 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 10392 if (newnode) 10393 lpfc_disc_state_machine(vport, ndlp, NULL, 10394 NLP_EVT_DEVICE_RM); 10395 break; 10396 case ELS_CMD_RPL: 10397 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10398 "RCV RPL: did:x%x/ste:x%x flg:x%x", 10399 did, vport->port_state, ndlp->nlp_flag); 10400 10401 phba->fc_stat.elsRcvRPL++; 10402 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 10403 if (newnode) 10404 
lpfc_disc_state_machine(vport, ndlp, NULL, 10405 NLP_EVT_DEVICE_RM); 10406 break; 10407 case ELS_CMD_RNID: 10408 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10409 "RCV RNID: did:x%x/ste:x%x flg:x%x", 10410 did, vport->port_state, ndlp->nlp_flag); 10411 10412 phba->fc_stat.elsRcvRNID++; 10413 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 10414 if (newnode) 10415 lpfc_disc_state_machine(vport, ndlp, NULL, 10416 NLP_EVT_DEVICE_RM); 10417 break; 10418 case ELS_CMD_RTV: 10419 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10420 "RCV RTV: did:x%x/ste:x%x flg:x%x", 10421 did, vport->port_state, ndlp->nlp_flag); 10422 phba->fc_stat.elsRcvRTV++; 10423 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 10424 if (newnode) 10425 lpfc_disc_state_machine(vport, ndlp, NULL, 10426 NLP_EVT_DEVICE_RM); 10427 break; 10428 case ELS_CMD_RRQ: 10429 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10430 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 10431 did, vport->port_state, ndlp->nlp_flag); 10432 10433 phba->fc_stat.elsRcvRRQ++; 10434 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 10435 if (newnode) 10436 lpfc_disc_state_machine(vport, ndlp, NULL, 10437 NLP_EVT_DEVICE_RM); 10438 break; 10439 case ELS_CMD_ECHO: 10440 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10441 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 10442 did, vport->port_state, ndlp->nlp_flag); 10443 10444 phba->fc_stat.elsRcvECHO++; 10445 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 10446 if (newnode) 10447 lpfc_disc_state_machine(vport, ndlp, NULL, 10448 NLP_EVT_DEVICE_RM); 10449 break; 10450 case ELS_CMD_REC: 10451 /* receive this due to exchange closed */ 10452 rjt_err = LSRJT_UNABLE_TPC; 10453 rjt_exp = LSEXP_INVALID_OX_RX; 10454 break; 10455 case ELS_CMD_FPIN: 10456 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10457 "RCV FPIN: did:x%x/ste:x%x flg:x%x", 10458 did, vport->port_state, ndlp->nlp_flag); 10459 10460 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, 10461 payload_len); 10462 10463 /* There are no replies, so no rjt codes */ 10464 break; 10465 case ELS_CMD_EDC: 10466 lpfc_els_rcv_edc(vport, elsiocb, ndlp); 10467 break; 10468 case ELS_CMD_RDF: 10469 phba->fc_stat.elsRcvRDF++; 10470 /* Accept RDF only from fabric controller */ 10471 if (did != Fabric_Cntl_DID) { 10472 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 10473 "1115 Received RDF from invalid DID " 10474 "x%x\n", did); 10475 rjt_err = LSRJT_PROTOCOL_ERR; 10476 rjt_exp = LSEXP_NOTHING_MORE; 10477 goto lsrjt; 10478 } 10479 10480 lpfc_els_rcv_rdf(vport, elsiocb, ndlp); 10481 break; 10482 default: 10483 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10484 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 10485 cmd, did, vport->port_state); 10486 10487 /* Unsupported ELS command, reject */ 10488 rjt_err = LSRJT_CMD_UNSUPPORTED; 10489 rjt_exp = LSEXP_NOTHING_MORE; 10490 10491 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 10492 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10493 "0115 Unknown ELS command x%x " 10494 "received from NPORT x%x\n", cmd, did); 10495 if (newnode) 10496 lpfc_disc_state_machine(vport, ndlp, NULL, 10497 NLP_EVT_DEVICE_RM); 10498 break; 10499 } 10500 10501lsrjt: 10502 /* check if need to LS_RJT received ELS cmd */ 10503 if (rjt_err) { 10504 memset(&stat, 0, sizeof(stat)); 10505 stat.un.b.lsRjtRsnCode = rjt_err; 10506 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 10507 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 10508 NULL); 10509 /* Remove the reference from above for new nodes. 
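 * Posting NLP_EVT_DEVICE_RM lets the discovery state machine drop the
 * node reference taken when the node was allocated earlier in this routine.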
*/
10510 if (newnode)
10511 lpfc_disc_state_machine(vport, ndlp, NULL,
10512 NLP_EVT_DEVICE_RM);
10513 }
10514
10515 /* Release the reference on this elsiocb, not the ndlp. */
10516 lpfc_nlp_put(elsiocb->ndlp);
10517 elsiocb->ndlp = NULL;
10518
10519 /* Special case. Driver received an unsolicited command that
10520 * is unsupportable given the driver's current state. Reset the
10521 * link and start over.
10522 */
10523 if (init_link) {
10524 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10525 if (!mbox)
10526 return;
10527 lpfc_linkdown(phba);
10528 lpfc_init_link(phba, mbox,
10529 phba->cfg_topology,
10530 phba->cfg_link_speed);
10531 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
10532 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10533 mbox->vport = vport;
10534 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
10535 MBX_NOT_FINISHED)
10536 mempool_free(mbox, phba->mbox_mem_pool);
10537 }
10538
10539 return;
10540
10541dropit:
10542 if (vport && !(vport->load_flag & FC_UNLOADING))
10543 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10544 "0111 Dropping received ELS cmd "
10545 "Data: x%x x%x x%x x%x\n",
10546 cmd, status, get_job_word4(phba, elsiocb), did);
10547
10548 phba->fc_stat.elsRcvDrop++;
10549}
10550
10551/**
10552 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
10553 * @phba: pointer to lpfc hba data structure.
10554 * @pring: pointer to a SLI ring.
10555 * @elsiocb: pointer to lpfc els iocb data structure.
10556 *
10557 * This routine is used to process an unsolicited event received from a SLI
10558 * (Service Level Interface) ring. The actual processing of the data buffer
10559 * associated with the unsolicited event is done by invoking the routine
10560 * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the
10561 * SLI ring on which the unsolicited event was received.
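 *
 * Buffer ownership follows a simple convention (summarized from the code
 * below): the received DMA buffers are attached to the iocb and handed to
 * lpfc_els_unsol_buffer(); any buffer the handler does not consume (i.e.
 * leaves non-NULL) is freed here afterwards, roughly:
 *
 *	lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
 *	if (elsiocb->cmd_dmabuf) {
 *		lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf);
 *		elsiocb->cmd_dmabuf = NULL;
 *	}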
10562 **/ 10563void 10564lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10565 struct lpfc_iocbq *elsiocb) 10566{ 10567 struct lpfc_vport *vport = elsiocb->vport; 10568 u32 ulp_command, status, parameter, bde_count = 0; 10569 IOCB_t *icmd; 10570 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10571 struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf; 10572 struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf; 10573 dma_addr_t paddr; 10574 10575 elsiocb->cmd_dmabuf = NULL; 10576 elsiocb->rsp_dmabuf = NULL; 10577 elsiocb->bpl_dmabuf = NULL; 10578 10579 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10580 ulp_command = get_job_cmnd(phba, elsiocb); 10581 status = get_job_ulpstatus(phba, elsiocb); 10582 parameter = get_job_word4(phba, elsiocb); 10583 if (phba->sli_rev == LPFC_SLI_REV4) 10584 bde_count = wcqe_cmpl->word3; 10585 else 10586 bde_count = elsiocb->iocb.ulpBdeCount; 10587 10588 if (status == IOSTAT_NEED_BUFFER) { 10589 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 10590 } else if (status == IOSTAT_LOCAL_REJECT && 10591 (parameter & IOERR_PARAM_MASK) == 10592 IOERR_RCV_BUFFER_WAITING) { 10593 phba->fc_stat.NoRcvBuf++; 10594 /* Not enough posted buffers; Try posting more buffers */ 10595 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 10596 lpfc_sli3_post_buffer(phba, pring, 0); 10597 return; 10598 } 10599 10600 if (phba->sli_rev == LPFC_SLI_REV3) { 10601 icmd = &elsiocb->iocb; 10602 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 10603 (ulp_command == CMD_IOCB_RCV_ELS64_CX || 10604 ulp_command == CMD_IOCB_RCV_SEQ64_CX)) { 10605 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 10606 vport = phba->pport; 10607 else 10608 vport = lpfc_find_vport_by_vpid(phba, 10609 icmd->unsli3.rcvsli3.vpi); 10610 } 10611 } 10612 10613 /* If there are no BDEs associated 10614 * with this IOCB, there is nothing to do. 10615 */ 10616 if (bde_count == 0) 10617 return; 10618 10619 /* Account for SLI2 or SLI3 and later unsolicited buffering */ 10620 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 10621 elsiocb->cmd_dmabuf = bdeBuf1; 10622 if (bde_count == 2) 10623 elsiocb->bpl_dmabuf = bdeBuf2; 10624 } else { 10625 icmd = &elsiocb->iocb; 10626 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 10627 icmd->un.cont64[0].addrLow); 10628 elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, 10629 paddr); 10630 if (bde_count == 2) { 10631 paddr = getPaddr(icmd->un.cont64[1].addrHigh, 10632 icmd->un.cont64[1].addrLow); 10633 elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba, 10634 pring, 10635 paddr); 10636 } 10637 } 10638 10639 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 10640 /* 10641 * The different unsolicited event handlers would tell us 10642 * if they are done with "mp" by setting cmd_dmabuf to NULL. 10643 */ 10644 if (elsiocb->cmd_dmabuf) { 10645 lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf); 10646 elsiocb->cmd_dmabuf = NULL; 10647 } 10648 10649 if (elsiocb->bpl_dmabuf) { 10650 lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf); 10651 elsiocb->bpl_dmabuf = NULL; 10652 } 10653 10654} 10655 10656static void 10657lpfc_start_fdmi(struct lpfc_vport *vport) 10658{ 10659 struct lpfc_nodelist *ndlp; 10660 10661 /* If this is the first time, allocate an ndlp and initialize 10662 * it. Otherwise, make sure the node is enabled and then do the 10663 * login. 
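 * The FDMI node uses the well-known management server address (FDMI_DID)
 * and is marked NLP_FABRIC before the PLOGI is issued.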
10664 */
10665 ndlp = lpfc_findnode_did(vport, FDMI_DID);
10666 if (!ndlp) {
10667 ndlp = lpfc_nlp_init(vport, FDMI_DID);
10668 if (ndlp) {
10669 ndlp->nlp_type |= NLP_FABRIC;
10670 } else {
10671 return;
10672 }
10673 }
10674
10675 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
10676 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
10677}
10678
10679/**
10680 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
10681 * @phba: pointer to lpfc hba data structure.
10682 * @vport: pointer to a virtual N_Port data structure.
10683 *
10684 * This routine issues a Port Login (PLOGI) to the Name Server with
10685 * State Change Request (SCR) for a @vport. This routine will create an
10686 * ndlp for the Name Server associated with the @vport if such a node does
10687 * not already exist. The PLOGI to Name Server is issued by invoking the
10688 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
10689 * (FDMI) is configured for the @vport, an FDMI node will be created and
10690 * the PLOGI to FDMI is issued by invoking the lpfc_issue_els_plogi() routine.
10691 **/
10692void
10693lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
10694{
10695 struct lpfc_nodelist *ndlp;
10696 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
10697
10698 /*
10699 * If lpfc_delay_discovery parameter is set and the clean address
10700 * bit is cleared and fc fabric parameters changed, delay FC NPort
10701 * discovery.
10702 */
10703 spin_lock_irq(shost->host_lock);
10704 if (vport->fc_flag & FC_DISC_DELAYED) {
10705 spin_unlock_irq(shost->host_lock);
10706 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10707 "3334 Delay fc port discovery for %d secs\n",
10708 phba->fc_ratov);
10709 mod_timer(&vport->delayed_disc_tmo,
10710 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
10711 return;
10712 }
10713 spin_unlock_irq(shost->host_lock);
10714
10715 ndlp = lpfc_findnode_did(vport, NameServer_DID);
10716 if (!ndlp) {
10717 ndlp = lpfc_nlp_init(vport, NameServer_DID);
10718 if (!ndlp) {
10719 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
10720 lpfc_disc_start(vport);
10721 return;
10722 }
10723 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
10724 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10725 "0251 NameServer login: no memory\n");
10726 return;
10727 }
10728 }
10729
10730 ndlp->nlp_type |= NLP_FABRIC;
10731
10732 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
10733
10734 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
10735 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
10736 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10737 "0252 Cannot issue NameServer login\n");
10738 return;
10739 }
10740
10741 if ((phba->cfg_enable_SmartSAN ||
10742 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) &&
10743 (vport->load_flag & FC_ALLOW_FDMI))
10744 lpfc_start_fdmi(vport);
10745}
10746
10747/**
10748 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
10749 * @phba: pointer to lpfc hba data structure.
10750 * @pmb: pointer to the driver internal queue element for mailbox command.
10751 *
10752 * This routine is the completion callback function for the register new vport
10753 * mailbox command.
If the new vport mailbox command completes successfully, 10754 * the fabric registration login shall be performed on physical port (the 10755 * new vport created is actually a physical port, with VPI 0) or the port 10756 * login to Name Server for State Change Request (SCR) will be performed 10757 * on virtual port (real virtual port, with VPI greater than 0). 10758 **/ 10759static void 10760lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 10761{ 10762 struct lpfc_vport *vport = pmb->vport; 10763 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10764 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 10765 MAILBOX_t *mb = &pmb->u.mb; 10766 int rc; 10767 10768 spin_lock_irq(shost->host_lock); 10769 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 10770 spin_unlock_irq(shost->host_lock); 10771 10772 if (mb->mbxStatus) { 10773 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10774 "0915 Register VPI failed : Status: x%x" 10775 " upd bit: x%x \n", mb->mbxStatus, 10776 mb->un.varRegVpi.upd); 10777 if (phba->sli_rev == LPFC_SLI_REV4 && 10778 mb->un.varRegVpi.upd) 10779 goto mbox_err_exit ; 10780 10781 switch (mb->mbxStatus) { 10782 case 0x11: /* unsupported feature */ 10783 case 0x9603: /* max_vpi exceeded */ 10784 case 0x9602: /* Link event since CLEAR_LA */ 10785 /* giving up on vport registration */ 10786 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10787 spin_lock_irq(shost->host_lock); 10788 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 10789 spin_unlock_irq(shost->host_lock); 10790 lpfc_can_disctmo(vport); 10791 break; 10792 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 10793 case 0x20: 10794 spin_lock_irq(shost->host_lock); 10795 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 10796 spin_unlock_irq(shost->host_lock); 10797 lpfc_init_vpi(phba, pmb, vport->vpi); 10798 pmb->vport = vport; 10799 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 10800 rc = lpfc_sli_issue_mbox(phba, pmb, 10801 MBX_NOWAIT); 10802 if (rc == MBX_NOT_FINISHED) { 10803 lpfc_printf_vlog(vport, KERN_ERR, 10804 LOG_TRACE_EVENT, 10805 "2732 Failed to issue INIT_VPI" 10806 " mailbox command\n"); 10807 } else { 10808 lpfc_nlp_put(ndlp); 10809 return; 10810 } 10811 fallthrough; 10812 default: 10813 /* Try to recover from this error */ 10814 if (phba->sli_rev == LPFC_SLI_REV4) 10815 lpfc_sli4_unreg_all_rpis(vport); 10816 lpfc_mbx_unreg_vpi(vport); 10817 spin_lock_irq(shost->host_lock); 10818 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 10819 spin_unlock_irq(shost->host_lock); 10820 if (mb->mbxStatus == MBX_NOT_FINISHED) 10821 break; 10822 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 10823 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { 10824 if (phba->sli_rev == LPFC_SLI_REV4) 10825 lpfc_issue_init_vfi(vport); 10826 else 10827 lpfc_initial_flogi(vport); 10828 } else { 10829 lpfc_initial_fdisc(vport); 10830 } 10831 break; 10832 } 10833 } else { 10834 spin_lock_irq(shost->host_lock); 10835 vport->vpi_state |= LPFC_VPI_REGISTERED; 10836 spin_unlock_irq(shost->host_lock); 10837 if (vport == phba->pport) { 10838 if (phba->sli_rev < LPFC_SLI_REV4) 10839 lpfc_issue_fabric_reglogin(vport); 10840 else { 10841 /* 10842 * If the physical port is instantiated using 10843 * FDISC, do not start vport discovery. 
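 * In that case lpfc_start_fdiscs() is skipped and only the Name Server
 * PLOGI below is issued.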
10844 */ 10845 if (vport->port_state != LPFC_FDISC) 10846 lpfc_start_fdiscs(phba); 10847 lpfc_do_scr_ns_plogi(phba, vport); 10848 } 10849 } else { 10850 lpfc_do_scr_ns_plogi(phba, vport); 10851 } 10852 } 10853mbox_err_exit: 10854 /* Now, we decrement the ndlp reference count held for this 10855 * callback function 10856 */ 10857 lpfc_nlp_put(ndlp); 10858 10859 mempool_free(pmb, phba->mbox_mem_pool); 10860 return; 10861} 10862 10863/** 10864 * lpfc_register_new_vport - Register a new vport with a HBA 10865 * @phba: pointer to lpfc hba data structure. 10866 * @vport: pointer to a host virtual N_Port data structure. 10867 * @ndlp: pointer to a node-list data structure. 10868 * 10869 * This routine registers the @vport as a new virtual port with a HBA. 10870 * It is done through a registering vpi mailbox command. 10871 **/ 10872void 10873lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 10874 struct lpfc_nodelist *ndlp) 10875{ 10876 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10877 LPFC_MBOXQ_t *mbox; 10878 10879 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10880 if (mbox) { 10881 lpfc_reg_vpi(vport, mbox); 10882 mbox->vport = vport; 10883 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 10884 if (!mbox->ctx_ndlp) { 10885 mempool_free(mbox, phba->mbox_mem_pool); 10886 goto mbox_err_exit; 10887 } 10888 10889 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 10890 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 10891 == MBX_NOT_FINISHED) { 10892 /* mailbox command not success, decrement ndlp 10893 * reference count for this command 10894 */ 10895 lpfc_nlp_put(ndlp); 10896 mempool_free(mbox, phba->mbox_mem_pool); 10897 10898 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10899 "0253 Register VPI: Can't send mbox\n"); 10900 goto mbox_err_exit; 10901 } 10902 } else { 10903 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10904 "0254 Register VPI: no memory\n"); 10905 goto mbox_err_exit; 10906 } 10907 return; 10908 10909mbox_err_exit: 10910 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10911 spin_lock_irq(shost->host_lock); 10912 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 10913 spin_unlock_irq(shost->host_lock); 10914 return; 10915} 10916 10917/** 10918 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 10919 * @phba: pointer to lpfc hba data structure. 10920 * 10921 * This routine cancels the retry delay timers to all the vports. 10922 **/ 10923void 10924lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 10925{ 10926 struct lpfc_vport **vports; 10927 struct lpfc_nodelist *ndlp; 10928 uint32_t link_state; 10929 int i; 10930 10931 /* Treat this failure as linkdown for all vports */ 10932 link_state = phba->link_state; 10933 lpfc_linkdown(phba); 10934 phba->link_state = link_state; 10935 10936 vports = lpfc_create_vport_work_array(phba); 10937 10938 if (vports) { 10939 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 10940 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 10941 if (ndlp) 10942 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 10943 lpfc_els_flush_cmd(vports[i]); 10944 } 10945 lpfc_destroy_vport_work_array(phba, vports); 10946 } 10947} 10948 10949/** 10950 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 10951 * @phba: pointer to lpfc hba data structure. 10952 * 10953 * This routine abort all pending discovery commands and 10954 * start a timer to retry FLOGI for the physical port 10955 * discovery. 
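 *
 * The retry itself is driven by the fabric node's delay timer; in outline
 * (see the function body below):
 *
 *	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
 *	ndlp->nlp_flag |= NLP_DELAY_TMO;	// set under ndlp->lock
 *	ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;	// command to reissue on expiry
 *	phba->pport->port_state = LPFC_FLOGI;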
10956 **/
10957void
10958lpfc_retry_pport_discovery(struct lpfc_hba *phba)
10959{
10960 struct lpfc_nodelist *ndlp;
10961
10962 /* Cancel all the vports' retry delay timers */
10963 lpfc_cancel_all_vport_retry_delay_timer(phba);
10964
10965 /* If the fabric requires FLOGI, then re-instantiate the physical login */
10966 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
10967 if (!ndlp)
10968 return;
10969
10970 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
10971 spin_lock_irq(&ndlp->lock);
10972 ndlp->nlp_flag |= NLP_DELAY_TMO;
10973 spin_unlock_irq(&ndlp->lock);
10974 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
10975 phba->pport->port_state = LPFC_FLOGI;
10976 return;
10977}
10978
10979/**
10980 * lpfc_fabric_login_reqd - Check if FLOGI required.
10981 * @phba: pointer to lpfc hba data structure.
10982 * @cmdiocb: pointer to FDISC command iocb.
10983 * @rspiocb: pointer to FDISC response iocb.
10984 *
10985 * This routine checks if a FLOGI is required for FDISC
10986 * to succeed.
10987 **/
10988static int
10989lpfc_fabric_login_reqd(struct lpfc_hba *phba,
10990 struct lpfc_iocbq *cmdiocb,
10991 struct lpfc_iocbq *rspiocb)
10992{
10993 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
10994 u32 ulp_word4 = get_job_word4(phba, rspiocb);
10995
10996 if (ulp_status != IOSTAT_FABRIC_RJT ||
10997 ulp_word4 != RJT_LOGIN_REQUIRED)
10998 return 0;
10999 else
11000 return 1;
11001}
11002
11003/**
11004 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
11005 * @phba: pointer to lpfc hba data structure.
11006 * @cmdiocb: pointer to lpfc command iocb data structure.
11007 * @rspiocb: pointer to lpfc response iocb data structure.
11008 *
11009 * This routine is the completion callback function to a Fabric Discover
11010 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
11011 * single threaded, each FDISC completion callback function will reset
11012 * the discovery timer for all vports such that the timers will not get
11013 * an unnecessary timeout. The function checks the FDISC IOCB status. If an
11014 * error is detected, the vport will be set to the FC_VPORT_FAILED state.
11015 * Otherwise, the vport will be set to the FC_VPORT_ACTIVE state. It then
11016 * checks whether the DID assigned to the vport has been changed with the
11017 * completion of the FDISC command. If so, both RPI (Remote Port Index) and
11018 * VPI (Virtual Port Index) are unregistered from the HBA, and then the
11019 * lpfc_register_new_vport() routine is invoked to register the new vport
11020 * with the HBA. Otherwise, the lpfc_do_scr_ns_plogi() routine is invoked to
11021 * issue a PLOGI to the Name Server for State Change Request (SCR).
11022 **/
11023static void
11024lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11025 struct lpfc_iocbq *rspiocb)
11026{
11027 struct lpfc_vport *vport = cmdiocb->vport;
11028 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11029 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
11030 struct lpfc_nodelist *np;
11031 struct lpfc_nodelist *next_np;
11032 struct lpfc_iocbq *piocb;
11033 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
11034 struct serv_parm *sp;
11035 uint8_t fabric_param_changed;
11036 u32 ulp_status, ulp_word4;
11037
11038 ulp_status = get_job_ulpstatus(phba, rspiocb);
11039 ulp_word4 = get_job_word4(phba, rspiocb);
11040
11041 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
11042 "0123 FDISC completes. 
x%x/x%x prevDID: x%x\n", 11043 ulp_status, ulp_word4, 11044 vport->fc_prevDID); 11045 /* Since all FDISCs are being single threaded, we 11046 * must reset the discovery timer for ALL vports 11047 * waiting to send FDISC when one completes. 11048 */ 11049 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 11050 lpfc_set_disctmo(piocb->vport); 11051 } 11052 11053 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11054 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 11055 ulp_status, ulp_word4, vport->fc_prevDID); 11056 11057 if (ulp_status) { 11058 11059 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 11060 lpfc_retry_pport_discovery(phba); 11061 goto out; 11062 } 11063 11064 /* Check for retry */ 11065 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 11066 goto out; 11067 /* FDISC failed */ 11068 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11069 "0126 FDISC failed. (x%x/x%x)\n", 11070 ulp_status, ulp_word4); 11071 goto fdisc_failed; 11072 } 11073 11074 lpfc_check_nlp_post_devloss(vport, ndlp); 11075 11076 spin_lock_irq(shost->host_lock); 11077 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 11078 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 11079 vport->fc_flag |= FC_FABRIC; 11080 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 11081 vport->fc_flag |= FC_PUBLIC_LOOP; 11082 spin_unlock_irq(shost->host_lock); 11083 11084 vport->fc_myDID = ulp_word4 & Mask_DID; 11085 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 11086 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 11087 if (!prsp) 11088 goto out; 11089 sp = prsp->virt + sizeof(uint32_t); 11090 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 11091 memcpy(&vport->fabric_portname, &sp->portName, 11092 sizeof(struct lpfc_name)); 11093 memcpy(&vport->fabric_nodename, &sp->nodeName, 11094 sizeof(struct lpfc_name)); 11095 if (fabric_param_changed && 11096 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 11097 /* If our NportID changed, we need to ensure all 11098 * remaining NPORTs get unreg_login'ed so we can 11099 * issue unreg_vpi. 11100 */ 11101 list_for_each_entry_safe(np, next_np, 11102 &vport->fc_nodes, nlp_listp) { 11103 if ((np->nlp_state != NLP_STE_NPR_NODE) || 11104 !(np->nlp_flag & NLP_NPR_ADISC)) 11105 continue; 11106 spin_lock_irq(&ndlp->lock); 11107 np->nlp_flag &= ~NLP_NPR_ADISC; 11108 spin_unlock_irq(&ndlp->lock); 11109 lpfc_unreg_rpi(vport, np); 11110 } 11111 lpfc_cleanup_pending_mbox(vport); 11112 11113 if (phba->sli_rev == LPFC_SLI_REV4) 11114 lpfc_sli4_unreg_all_rpis(vport); 11115 11116 lpfc_mbx_unreg_vpi(vport); 11117 spin_lock_irq(shost->host_lock); 11118 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 11119 if (phba->sli_rev == LPFC_SLI_REV4) 11120 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 11121 else 11122 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 11123 spin_unlock_irq(shost->host_lock); 11124 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 11125 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 11126 /* 11127 * Driver needs to re-reg VPI in order for f/w 11128 * to update the MAC address. 11129 */ 11130 lpfc_register_new_vport(phba, vport, ndlp); 11131 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11132 goto out; 11133 } 11134 11135 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 11136 lpfc_issue_init_vpi(vport); 11137 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 11138 lpfc_register_new_vport(phba, vport, ndlp); 11139 else 11140 lpfc_do_scr_ns_plogi(phba, vport); 11141 11142 /* The FDISC completed successfully. Move the fabric ndlp to 11143 * UNMAPPED state and register with the transport. 
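 * The command IOCB and the ndlp reference taken for it are released in
 * the common completion path below.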
11144 */ 11145 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11146 goto out; 11147 11148fdisc_failed: 11149 if (vport->fc_vport && 11150 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 11151 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11152 /* Cancel discovery timer */ 11153 lpfc_can_disctmo(vport); 11154out: 11155 lpfc_els_free_iocb(phba, cmdiocb); 11156 lpfc_nlp_put(ndlp); 11157} 11158 11159/** 11160 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 11161 * @vport: pointer to a virtual N_Port data structure. 11162 * @ndlp: pointer to a node-list data structure. 11163 * @retry: number of retries to the command IOCB. 11164 * 11165 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 11166 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 11167 * routine to issue the IOCB, which makes sure only one outstanding fabric 11168 * IOCB will be sent off HBA at any given time. 11169 * 11170 * Note that the ndlp reference count will be incremented by 1 for holding the 11171 * ndlp and the reference to ndlp will be stored into the ndlp field of 11172 * the IOCB for the completion callback function to the FDISC ELS command. 11173 * 11174 * Return code 11175 * 0 - Successfully issued fdisc iocb command 11176 * 1 - Failed to issue fdisc iocb command 11177 **/ 11178static int 11179lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 11180 uint8_t retry) 11181{ 11182 struct lpfc_hba *phba = vport->phba; 11183 IOCB_t *icmd; 11184 union lpfc_wqe128 *wqe = NULL; 11185 struct lpfc_iocbq *elsiocb; 11186 struct serv_parm *sp; 11187 uint8_t *pcmd; 11188 uint16_t cmdsize; 11189 int did = ndlp->nlp_DID; 11190 int rc; 11191 11192 vport->port_state = LPFC_FDISC; 11193 vport->fc_myDID = 0; 11194 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 11195 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 11196 ELS_CMD_FDISC); 11197 if (!elsiocb) { 11198 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11199 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11200 "0255 Issue FDISC: no IOCB\n"); 11201 return 1; 11202 } 11203 11204 if (phba->sli_rev == LPFC_SLI_REV4) { 11205 wqe = &elsiocb->wqe; 11206 bf_set(els_req64_sid, &wqe->els_req, 0); 11207 bf_set(els_req64_sp, &wqe->els_req, 1); 11208 } else { 11209 icmd = &elsiocb->iocb; 11210 icmd->un.elsreq64.myID = 0; 11211 icmd->un.elsreq64.fl = 1; 11212 icmd->ulpCt_h = 1; 11213 icmd->ulpCt_l = 0; 11214 } 11215 11216 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11217 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 11218 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 11219 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 11220 sp = (struct serv_parm *) pcmd; 11221 /* Setup CSPs accordingly for Fabric */ 11222 sp->cmn.e_d_tov = 0; 11223 sp->cmn.w2.r_a_tov = 0; 11224 sp->cmn.virtual_fabric_support = 0; 11225 sp->cls1.classValid = 0; 11226 sp->cls2.seqDelivery = 1; 11227 sp->cls3.seqDelivery = 1; 11228 11229 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 11230 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 11231 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 11232 pcmd += sizeof(uint32_t); /* Port Name */ 11233 memcpy(pcmd, &vport->fc_portname, 8); 11234 pcmd += sizeof(uint32_t); /* Node Name */ 11235 pcmd += sizeof(uint32_t); /* Node Name */ 11236 memcpy(pcmd, &vport->fc_nodename, 8); 11237 sp->cmn.valid_vendor_ver_level = 0; 11238 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 11239 lpfc_set_disctmo(vport); 11240 11241 phba->fc_stat.elsXmitFDISC++; 11242 
elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc;
11243
11244 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11245 "Issue FDISC: did:x%x",
11246 did, 0, 0);
11247
11248 elsiocb->ndlp = lpfc_nlp_get(ndlp);
11249 if (!elsiocb->ndlp)
11250 goto err_out;
11251
11252 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
11253 if (rc == IOCB_ERROR) {
11254 lpfc_nlp_put(ndlp);
11255 goto err_out;
11256 }
11257
11258 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
11259 return 0;
11260
11261 err_out:
11262 lpfc_els_free_iocb(phba, elsiocb);
11263 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11264 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11265 "0256 Issue FDISC: Cannot send IOCB\n");
11266 return 1;
11267}
11268
11269/**
11270 * lpfc_cmpl_els_npiv_logo - Completion function for a vport logo
11271 * @phba: pointer to lpfc hba data structure.
11272 * @cmdiocb: pointer to lpfc command iocb data structure.
11273 * @rspiocb: pointer to lpfc response iocb data structure.
11274 *
11275 * This routine is the completion callback function to the issuing of a LOGO
11276 * ELS command off a vport. It frees the command IOCB and then decrements the
11277 * reference count held on the ndlp for this completion function, indicating
11278 * that the reference to the ndlp is no longer needed. Note that the
11279 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
11280 * callback function and an additional explicit ndlp reference decrement
11281 * will trigger the actual release of the ndlp.
11282 **/
11283static void
11284lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11285 struct lpfc_iocbq *rspiocb)
11286{
11287 struct lpfc_vport *vport = cmdiocb->vport;
11288 IOCB_t *irsp;
11289 struct lpfc_nodelist *ndlp;
11290 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11291 u32 ulp_status, ulp_word4, did, tmo;
11292
11293 ndlp = cmdiocb->ndlp;
11294
11295 ulp_status = get_job_ulpstatus(phba, rspiocb);
11296 ulp_word4 = get_job_word4(phba, rspiocb);
11297
11298 if (phba->sli_rev == LPFC_SLI_REV4) {
11299 did = get_job_els_rsp64_did(phba, cmdiocb);
11300 tmo = get_wqe_tmo(cmdiocb);
11301 } else {
11302 irsp = &rspiocb->iocb;
11303 did = get_job_els_rsp64_did(phba, rspiocb);
11304 tmo = irsp->ulpTimeout;
11305 }
11306
11307 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11308 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
11309 ulp_status, ulp_word4, did);
11310
11311 /* NPIV LOGO completes to NPort <nlp_DID> */
11312 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
11313 "2928 NPIV LOGO completes to NPort x%x "
11314 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
11315 ndlp->nlp_DID, ulp_status, ulp_word4,
11316 tmo, vport->num_disc_nodes,
11317 kref_read(&ndlp->kref), ndlp->nlp_flag,
11318 ndlp->fc4_xpt_flags);
11319
11320 if (ulp_status == IOSTAT_SUCCESS) {
11321 spin_lock_irq(shost->host_lock);
11322 vport->fc_flag &= ~FC_NDISC_ACTIVE;
11323 vport->fc_flag &= ~FC_FABRIC;
11324 spin_unlock_irq(shost->host_lock);
11325 lpfc_can_disctmo(vport);
11326 }
11327
11328 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
11329 /* Wake up lpfc_vport_delete if waiting...*/
11330 if (ndlp->logo_waitq)
11331 wake_up(ndlp->logo_waitq);
11332 spin_lock_irq(&ndlp->lock);
11333 ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND);
11334 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
11335 spin_unlock_irq(&ndlp->lock);
11336 }
11337
11338 /* Safe to release resources now. 
*/ 11339 lpfc_els_free_iocb(phba, cmdiocb); 11340 lpfc_nlp_put(ndlp); 11341} 11342 11343/** 11344 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 11345 * @vport: pointer to a virtual N_Port data structure. 11346 * @ndlp: pointer to a node-list data structure. 11347 * 11348 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 11349 * 11350 * Note that the ndlp reference count will be incremented by 1 for holding the 11351 * ndlp and the reference to ndlp will be stored into the ndlp field of 11352 * the IOCB for the completion callback function to the LOGO ELS command. 11353 * 11354 * Return codes 11355 * 0 - Successfully issued logo off the @vport 11356 * 1 - Failed to issue logo off the @vport 11357 **/ 11358int 11359lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 11360{ 11361 int rc = 0; 11362 struct lpfc_hba *phba = vport->phba; 11363 struct lpfc_iocbq *elsiocb; 11364 uint8_t *pcmd; 11365 uint16_t cmdsize; 11366 11367 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 11368 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 11369 ELS_CMD_LOGO); 11370 if (!elsiocb) 11371 return 1; 11372 11373 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11374 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 11375 pcmd += sizeof(uint32_t); 11376 11377 /* Fill in LOGO payload */ 11378 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 11379 pcmd += sizeof(uint32_t); 11380 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 11381 11382 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11383 "Issue LOGO npiv did:x%x flg:x%x", 11384 ndlp->nlp_DID, ndlp->nlp_flag, 0); 11385 11386 elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo; 11387 spin_lock_irq(&ndlp->lock); 11388 ndlp->nlp_flag |= NLP_LOGO_SND; 11389 spin_unlock_irq(&ndlp->lock); 11390 elsiocb->ndlp = lpfc_nlp_get(ndlp); 11391 if (!elsiocb->ndlp) { 11392 lpfc_els_free_iocb(phba, elsiocb); 11393 goto err; 11394 } 11395 11396 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 11397 if (rc == IOCB_ERROR) { 11398 lpfc_els_free_iocb(phba, elsiocb); 11399 lpfc_nlp_put(ndlp); 11400 goto err; 11401 } 11402 return 0; 11403 11404err: 11405 spin_lock_irq(&ndlp->lock); 11406 ndlp->nlp_flag &= ~NLP_LOGO_SND; 11407 spin_unlock_irq(&ndlp->lock); 11408 return 1; 11409} 11410 11411/** 11412 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 11413 * @t: timer context used to obtain the lpfc hba. 11414 * 11415 * This routine is invoked by the fabric iocb block timer after 11416 * timeout. It posts the fabric iocb block timeout event by setting the 11417 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 11418 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 11419 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 11420 * posted event WORKER_FABRIC_BLOCK_TMO. 
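 *
 * The block itself is armed by lpfc_block_fabric_iocbs(), which in outline
 * does:
 *
 *	if (!test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags))
 *		mod_timer(&phba->fabric_block_timer,
 *			  jiffies + msecs_to_jiffies(100));
 *
 * so fabric traffic is paused for roughly 100ms and then resumed from the
 * worker thread via lpfc_unblock_fabric_iocbs().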
11421 **/ 11422void 11423lpfc_fabric_block_timeout(struct timer_list *t) 11424{ 11425 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 11426 unsigned long iflags; 11427 uint32_t tmo_posted; 11428 11429 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 11430 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 11431 if (!tmo_posted) 11432 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 11433 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 11434 11435 if (!tmo_posted) 11436 lpfc_worker_wake_up(phba); 11437 return; 11438} 11439 11440/** 11441 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 11442 * @phba: pointer to lpfc hba data structure. 11443 * 11444 * This routine issues one fabric iocb from the driver internal list to 11445 * the HBA. It first checks whether it's ready to issue one fabric iocb to 11446 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 11447 * remove one pending fabric iocb from the driver internal list and invokes 11448 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 11449 **/ 11450static void 11451lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 11452{ 11453 struct lpfc_iocbq *iocb; 11454 unsigned long iflags; 11455 int ret; 11456 11457repeat: 11458 iocb = NULL; 11459 spin_lock_irqsave(&phba->hbalock, iflags); 11460 /* Post any pending iocb to the SLI layer */ 11461 if (atomic_read(&phba->fabric_iocb_count) == 0) { 11462 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 11463 list); 11464 if (iocb) 11465 /* Increment fabric iocb count to hold the position */ 11466 atomic_inc(&phba->fabric_iocb_count); 11467 } 11468 spin_unlock_irqrestore(&phba->hbalock, iflags); 11469 if (iocb) { 11470 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11471 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11472 iocb->cmd_flag |= LPFC_IO_FABRIC; 11473 11474 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11475 "Fabric sched1: ste:x%x", 11476 iocb->vport->port_state, 0, 0); 11477 11478 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11479 11480 if (ret == IOCB_ERROR) { 11481 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11482 iocb->fabric_cmd_cmpl = NULL; 11483 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11484 set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT); 11485 iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED; 11486 iocb->cmd_cmpl(phba, iocb, iocb); 11487 11488 atomic_dec(&phba->fabric_iocb_count); 11489 goto repeat; 11490 } 11491 } 11492} 11493 11494/** 11495 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 11496 * @phba: pointer to lpfc hba data structure. 11497 * 11498 * This routine unblocks the issuing fabric iocb command. The function 11499 * will clear the fabric iocb block bit and then invoke the routine 11500 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 11501 * from the driver internal fabric iocb list. 11502 **/ 11503void 11504lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 11505{ 11506 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11507 11508 lpfc_resume_fabric_iocbs(phba); 11509 return; 11510} 11511 11512/** 11513 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 11514 * @phba: pointer to lpfc hba data structure. 11515 * 11516 * This routine blocks the issuing fabric iocb for a specified amount of 11517 * time (currently 100 ms). This is done by set the fabric iocb block bit 11518 * and set up a timeout timer for 100ms. 
When the block bit is set, no more 11519 * fabric iocb will be issued out of the HBA. 11520 **/ 11521static void 11522lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 11523{ 11524 int blocked; 11525 11526 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11527 /* Start a timer to unblock fabric iocbs after 100ms */ 11528 if (!blocked) 11529 mod_timer(&phba->fabric_block_timer, 11530 jiffies + msecs_to_jiffies(100)); 11531 11532 return; 11533} 11534 11535/** 11536 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 11537 * @phba: pointer to lpfc hba data structure. 11538 * @cmdiocb: pointer to lpfc command iocb data structure. 11539 * @rspiocb: pointer to lpfc response iocb data structure. 11540 * 11541 * This routine is the callback function that is put to the fabric iocb's 11542 * callback function pointer (iocb->cmd_cmpl). The original iocb's callback 11543 * function pointer has been stored in iocb->fabric_cmd_cmpl. This callback 11544 * function first restores and invokes the original iocb's callback function 11545 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next 11546 * fabric bound iocb from the driver internal fabric iocb list onto the wire. 11547 **/ 11548static void 11549lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11550 struct lpfc_iocbq *rspiocb) 11551{ 11552 struct ls_rjt stat; 11553 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 11554 u32 ulp_word4 = get_job_word4(phba, rspiocb); 11555 11556 WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC); 11557 11558 switch (ulp_status) { 11559 case IOSTAT_NPORT_RJT: 11560 case IOSTAT_FABRIC_RJT: 11561 if (ulp_word4 & RJT_UNAVAIL_TEMP) 11562 lpfc_block_fabric_iocbs(phba); 11563 break; 11564 11565 case IOSTAT_NPORT_BSY: 11566 case IOSTAT_FABRIC_BSY: 11567 lpfc_block_fabric_iocbs(phba); 11568 break; 11569 11570 case IOSTAT_LS_RJT: 11571 stat.un.ls_rjt_error_be = 11572 cpu_to_be32(ulp_word4); 11573 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) || 11574 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY)) 11575 lpfc_block_fabric_iocbs(phba); 11576 break; 11577 } 11578 11579 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0); 11580 11581 cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl; 11582 cmdiocb->fabric_cmd_cmpl = NULL; 11583 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; 11584 cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb); 11585 11586 atomic_dec(&phba->fabric_iocb_count); 11587 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { 11588 /* Post any pending iocbs to HBA */ 11589 lpfc_resume_fabric_iocbs(phba); 11590 } 11591} 11592 11593/** 11594 * lpfc_issue_fabric_iocb - Issue a fabric iocb command 11595 * @phba: pointer to lpfc hba data structure. 11596 * @iocb: pointer to lpfc command iocb data structure. 11597 * 11598 * This routine is used as the top-level API for issuing a fabric iocb command 11599 * such as FLOGI and FDISC. To accommodate certain switch fabric, this driver 11600 * function makes sure that only one fabric bound iocb will be outstanding at 11601 * any given time. As such, this function will first check to see whether there 11602 * is already an outstanding fabric iocb on the wire. If so, it will put the 11603 * newly issued iocb onto the driver internal fabric iocb list, waiting to be 11604 * issued later. Otherwise, it will issue the iocb on the wire and update the 11605 * fabric iocb count it indicate that there is one fabric iocb on the wire. 
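 *
 * In simplified form the gating logic is (see the function body for the
 * exact locking):
 *
 *	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
 *		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
 *	if (ready) {
 *		atomic_inc(&phba->fabric_iocb_count);	// claim the single slot
 *		// redirect completion to lpfc_cmpl_fabric_iocb and issue now
 *	} else {
 *		list_add_tail(&iocb->list, &phba->fabric_iocb_list);
 *	}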
11606 *
11607 * Note, this implementation has the potential of sending fabric IOCBs out of
11608 * order. The problem is that the construction of the "ready" boolean does
11609 * not include the condition that the internal fabric IOCB list is empty. As
11610 * such, it is possible that a fabric IOCB issued by this routine might "jump"
11611 * ahead of the fabric IOCBs in the internal list.
11612 *
11613 * Return code
11614 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
11615 * IOCB_ERROR - failed to issue fabric iocb
11616 **/
11617static int
11618lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
11619{
11620 unsigned long iflags;
11621 int ready;
11622 int ret;
11623
11624 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1);
11625
11626 spin_lock_irqsave(&phba->hbalock, iflags);
11627 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
11628 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
11629
11630 if (ready)
11631 /* Increment fabric iocb count to hold the position */
11632 atomic_inc(&phba->fabric_iocb_count);
11633 spin_unlock_irqrestore(&phba->hbalock, iflags);
11634 if (ready) {
11635 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
11636 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
11637 iocb->cmd_flag |= LPFC_IO_FABRIC;
11638
11639 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
11640 "Fabric sched2: ste:x%x",
11641 iocb->vport->port_state, 0, 0);
11642
11643 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
11644
11645 if (ret == IOCB_ERROR) {
11646 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
11647 iocb->fabric_cmd_cmpl = NULL;
11648 iocb->cmd_flag &= ~LPFC_IO_FABRIC;
11649 atomic_dec(&phba->fabric_iocb_count);
11650 }
11651 } else {
11652 spin_lock_irqsave(&phba->hbalock, iflags);
11653 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
11654 spin_unlock_irqrestore(&phba->hbalock, iflags);
11655 ret = IOCB_SUCCESS;
11656 }
11657 return ret;
11658}
11659
11660/**
11661 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
11662 * @vport: pointer to a virtual N_Port data structure.
11663 *
11664 * This routine aborts all the IOCBs associated with a @vport from the
11665 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
11666 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
11667 * list, removes each IOCB associated with the @vport off the list, sets the
11668 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
11669 * associated with the IOCB.
11670 **/
11671static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
11672{
11673 LIST_HEAD(completions);
11674 struct lpfc_hba *phba = vport->phba;
11675 struct lpfc_iocbq *tmp_iocb, *piocb;
11676
11677 spin_lock_irq(&phba->hbalock);
11678 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
11679 list) {
11680
11681 if (piocb->vport != vport)
11682 continue;
11683
11684 list_move_tail(&piocb->list, &completions);
11685 }
11686 spin_unlock_irq(&phba->hbalock);
11687
11688 /* Cancel all the IOCBs from the completions list */
11689 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11690 IOERR_SLI_ABORTED);
11691}
11692
11693/**
11694 * lpfc_fabric_abort_nport - Abort an ndlp's iocbs from driver fabric iocb list
11695 * @ndlp: pointer to a node-list data structure.
11696 *
11697 * This routine aborts all the IOCBs associated with an @ndlp from the
11698 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
11699 * issued to the ELS IOCB ring. 
This abort function walks the fabric IOCB 11700 * list, removes each IOCB associated with the @ndlp off the list, set the 11701 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11702 * associated with the IOCB. 11703 **/ 11704void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 11705{ 11706 LIST_HEAD(completions); 11707 struct lpfc_hba *phba = ndlp->phba; 11708 struct lpfc_iocbq *tmp_iocb, *piocb; 11709 struct lpfc_sli_ring *pring; 11710 11711 pring = lpfc_phba_elsring(phba); 11712 11713 if (unlikely(!pring)) 11714 return; 11715 11716 spin_lock_irq(&phba->hbalock); 11717 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11718 list) { 11719 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 11720 11721 list_move_tail(&piocb->list, &completions); 11722 } 11723 } 11724 spin_unlock_irq(&phba->hbalock); 11725 11726 /* Cancel all the IOCBs from the completions list */ 11727 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11728 IOERR_SLI_ABORTED); 11729} 11730 11731/** 11732 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list 11733 * @phba: pointer to lpfc hba data structure. 11734 * 11735 * This routine aborts all the IOCBs currently on the driver internal 11736 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS 11737 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB 11738 * list, removes IOCBs off the list, set the status field to 11739 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with 11740 * the IOCB. 11741 **/ 11742void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 11743{ 11744 LIST_HEAD(completions); 11745 11746 spin_lock_irq(&phba->hbalock); 11747 list_splice_init(&phba->fabric_iocb_list, &completions); 11748 spin_unlock_irq(&phba->hbalock); 11749 11750 /* Cancel all the IOCBs from the completions list */ 11751 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11752 IOERR_SLI_ABORTED); 11753} 11754 11755/** 11756 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport 11757 * @vport: pointer to lpfc vport data structure. 11758 * 11759 * This routine is invoked by the vport cleanup for deletions and the cleanup 11760 * for an ndlp on removal. 11761 **/ 11762void 11763lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 11764{ 11765 struct lpfc_hba *phba = vport->phba; 11766 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 11767 struct lpfc_nodelist *ndlp = NULL; 11768 unsigned long iflag = 0; 11769 11770 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 11771 list_for_each_entry_safe(sglq_entry, sglq_next, 11772 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 11773 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) { 11774 lpfc_nlp_put(sglq_entry->ndlp); 11775 ndlp = sglq_entry->ndlp; 11776 sglq_entry->ndlp = NULL; 11777 11778 /* If the xri on the abts_els_sgl list is for the Fport 11779 * node and the vport is unloading, the xri aborted wcqe 11780 * likely isn't coming back. Just release the sgl. 
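 * The sglq is then moved back to the lpfc_els_sgl_list and marked
 * SGL_FREED.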
11781 */ 11782 if ((vport->load_flag & FC_UNLOADING) && 11783 ndlp->nlp_DID == Fabric_DID) { 11784 list_del(&sglq_entry->list); 11785 sglq_entry->state = SGL_FREED; 11786 list_add_tail(&sglq_entry->list, 11787 &phba->sli4_hba.lpfc_els_sgl_list); 11788 } 11789 } 11790 } 11791 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 11792 return; 11793} 11794 11795/** 11796 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 11797 * @phba: pointer to lpfc hba data structure. 11798 * @axri: pointer to the els xri abort wcqe structure. 11799 * 11800 * This routine is invoked by the worker thread to process a SLI4 slow-path 11801 * ELS aborted xri. 11802 **/ 11803void 11804lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, 11805 struct sli4_wcqe_xri_aborted *axri) 11806{ 11807 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 11808 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 11809 uint16_t lxri = 0; 11810 11811 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 11812 unsigned long iflag = 0; 11813 struct lpfc_nodelist *ndlp; 11814 struct lpfc_sli_ring *pring; 11815 11816 pring = lpfc_phba_elsring(phba); 11817 11818 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 11819 list_for_each_entry_safe(sglq_entry, sglq_next, 11820 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 11821 if (sglq_entry->sli4_xritag == xri) { 11822 list_del(&sglq_entry->list); 11823 ndlp = sglq_entry->ndlp; 11824 sglq_entry->ndlp = NULL; 11825 list_add_tail(&sglq_entry->list, 11826 &phba->sli4_hba.lpfc_els_sgl_list); 11827 sglq_entry->state = SGL_FREED; 11828 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, 11829 iflag); 11830 11831 if (ndlp) { 11832 lpfc_set_rrq_active(phba, ndlp, 11833 sglq_entry->sli4_lxritag, 11834 rxid, 1); 11835 lpfc_nlp_put(ndlp); 11836 } 11837 11838 /* Check if TXQ queue needs to be serviced */ 11839 if (pring && !list_empty(&pring->txq)) 11840 lpfc_worker_wake_up(phba); 11841 return; 11842 } 11843 } 11844 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 11845 lxri = lpfc_sli4_xri_inrange(phba, xri); 11846 if (lxri == NO_XRI) 11847 return; 11848 11849 spin_lock_irqsave(&phba->hbalock, iflag); 11850 sglq_entry = __lpfc_get_active_sglq(phba, lxri); 11851 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 11852 spin_unlock_irqrestore(&phba->hbalock, iflag); 11853 return; 11854 } 11855 sglq_entry->state = SGL_XRI_ABORTED; 11856 spin_unlock_irqrestore(&phba->hbalock, iflag); 11857 return; 11858} 11859 11860/* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req. 11861 * @vport: pointer to virtual port object. 11862 * @ndlp: nodelist pointer for the impacted node. 11863 * 11864 * The driver calls this routine in response to an SLI4 XRI ABORT CQE 11865 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, 11866 * the driver is required to send a LOGO to the remote node before it 11867 * attempts to recover its login to the remote node. 11868 */ 11869void 11870lpfc_sli_abts_recover_port(struct lpfc_vport *vport, 11871 struct lpfc_nodelist *ndlp) 11872{ 11873 struct Scsi_Host *shost; 11874 struct lpfc_hba *phba; 11875 unsigned long flags = 0; 11876 11877 shost = lpfc_shost_from_vport(vport); 11878 phba = vport->phba; 11879 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { 11880 lpfc_printf_log(phba, KERN_INFO, 11881 LOG_SLI, "3093 No rport recovery needed. 
" 11882 "rport in state 0x%x\n", ndlp->nlp_state); 11883 return; 11884 } 11885 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11886 "3094 Start rport recovery on shost id 0x%x " 11887 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 11888 "flags 0x%x\n", 11889 shost->host_no, ndlp->nlp_DID, 11890 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, 11891 ndlp->nlp_flag); 11892 /* 11893 * The rport is not responding. Remove the FCP-2 flag to prevent 11894 * an ADISC in the follow-up recovery code. 11895 */ 11896 spin_lock_irqsave(&ndlp->lock, flags); 11897 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 11898 ndlp->nlp_flag |= NLP_ISSUE_LOGO; 11899 spin_unlock_irqrestore(&ndlp->lock, flags); 11900 lpfc_unreg_rpi(vport, ndlp); 11901} 11902 11903static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport) 11904{ 11905 bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE); 11906} 11907 11908static void 11909lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max) 11910{ 11911 u32 i; 11912 11913 if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE)) 11914 return; 11915 11916 for (i = min; i <= max; i++) 11917 set_bit(i, vport->vmid_priority_range); 11918} 11919 11920static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid) 11921{ 11922 set_bit(ctcl_vmid, vport->vmid_priority_range); 11923} 11924 11925u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport) 11926{ 11927 u32 i; 11928 11929 i = find_first_bit(vport->vmid_priority_range, 11930 LPFC_VMID_MAX_PRIORITY_RANGE); 11931 11932 if (i == LPFC_VMID_MAX_PRIORITY_RANGE) 11933 return 0; 11934 11935 clear_bit(i, vport->vmid_priority_range); 11936 return i; 11937} 11938 11939#define MAX_PRIORITY_DESC 255 11940 11941static void 11942lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11943 struct lpfc_iocbq *rspiocb) 11944{ 11945 struct lpfc_vport *vport = cmdiocb->vport; 11946 struct priority_range_desc *desc; 11947 struct lpfc_dmabuf *prsp = NULL; 11948 struct lpfc_vmid_priority_range *vmid_range = NULL; 11949 u32 *data; 11950 struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf; 11951 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 11952 u32 ulp_word4 = get_job_word4(phba, rspiocb); 11953 u8 *pcmd, max_desc; 11954 u32 len, i; 11955 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 11956 11957 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); 11958 if (!prsp) 11959 goto out; 11960 11961 pcmd = prsp->virt; 11962 data = (u32 *)pcmd; 11963 if (data[0] == ELS_CMD_LS_RJT) { 11964 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 11965 "3277 QFPA LS_RJT x%x x%x\n", 11966 data[0], data[1]); 11967 goto out; 11968 } 11969 if (ulp_status) { 11970 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 11971 "6529 QFPA failed with status x%x x%x\n", 11972 ulp_status, ulp_word4); 11973 goto out; 11974 } 11975 11976 if (!vport->qfpa_res) { 11977 max_desc = FCELSSIZE / sizeof(*vport->qfpa_res); 11978 vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res), 11979 GFP_KERNEL); 11980 if (!vport->qfpa_res) 11981 goto out; 11982 } 11983 11984 len = *((u32 *)(pcmd + 4)); 11985 len = be32_to_cpu(len); 11986 memcpy(vport->qfpa_res, pcmd, len + 8); 11987 len = len / LPFC_PRIORITY_RANGE_DESC_SIZE; 11988 11989 desc = (struct priority_range_desc *)(pcmd + 8); 11990 vmid_range = vport->vmid_priority.vmid_range; 11991 if (!vmid_range) { 11992 vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range), 11993 GFP_KERNEL); 11994 if (!vmid_range) { 11995 kfree(vport->qfpa_res); 11996 goto out; 11997 } 11998 vport->vmid_priority.vmid_range 
	}
	vport->vmid_priority.num_descriptors = len;

	for (i = 0; i < len; i++, vmid_range++, desc++) {
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
				 "6539 vmid values low=%d, high=%d, qos=%d, "
				 "local ve id=%d\n", desc->lo_range,
				 desc->hi_range, desc->qos_priority,
				 desc->local_ve_id);

		vmid_range->low = desc->lo_range << 1;
		if (desc->local_ve_id == QFPA_ODD_ONLY)
			vmid_range->low++;
		if (desc->qos_priority)
			vport->vmid_flag |= LPFC_VMID_QOS_ENABLED;
		vmid_range->qos = desc->qos_priority;

		vmid_range->high = desc->hi_range << 1;
		if ((desc->local_ve_id == QFPA_ODD_ONLY) ||
		    (desc->local_ve_id == QFPA_EVEN_ODD))
			vmid_range->high++;
	}
	lpfc_init_cs_ctl_bitmap(vport);
	for (i = 0; i < vport->vmid_priority.num_descriptors; i++) {
		lpfc_vmid_set_cs_ctl_range(vport,
				vport->vmid_priority.vmid_range[i].low,
				vport->vmid_priority.vmid_range[i].high);
	}

	vport->vmid_flag |= LPFC_VMID_QFPA_CMPL;
 out:
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

int lpfc_issue_els_qfpa(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *elsiocb;
	u8 *pcmd;
	int ret;

	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		return -ENXIO;

	elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp,
				     ndlp->nlp_DID, ELS_CMD_QFPA);
	if (!elsiocb)
		return -ENOMEM;

	pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;

	*((u32 *)(pcmd)) = ELS_CMD_QFPA;
	pcmd += 4;

	elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa;

	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		return -ENXIO;
	}

	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2);
	if (ret != IOCB_SUCCESS) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return -EIO;
	}
	vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED;
	return 0;
}

int
lpfc_vmid_uvem(struct lpfc_vport *vport,
	       struct lpfc_vmid *vmid, bool instantiated)
{
	struct lpfc_vem_id_desc *vem_id_desc;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *elsiocb;
	struct instantiated_ve_desc *inst_desc;
	struct lpfc_vmid_context *vmid_context;
	u8 *pcmd;
	u32 *len;
	int ret = 0;

	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		return -ENXIO;

	vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL);
	if (!vmid_context)
		return -ENOMEM;
	elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2,
				     ndlp, Fabric_DID, ELS_CMD_UVEM);
	if (!elsiocb)
		goto out;

	lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
			 "3427 Host vmid %s %d\n",
			 vmid->host_vmid, instantiated);
	vmid_context->vmp = vmid;
	vmid_context->nlp = ndlp;
	vmid_context->instantiated = instantiated;
	elsiocb->vmid_tag.vmid_context = vmid_context;
	pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;

	if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid))
		memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
		       LPFC_COMPRESS_VMID_SIZE);

	*((u32 *)(pcmd)) = ELS_CMD_UVEM;
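	/* The UVEM payload built below consists of the ELS command word, a
	 * big-endian payload length at offset 4, the VEM Identification
	 * descriptor at offset 8, and the Instantiated VE descriptor at
	 * offset 32.
	 */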
	len = (u32 *)(pcmd + 4);
	*len = cpu_to_be32(LPFC_UVEM_SIZE - 8);

	vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8);
	vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG);
	vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE);
	memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid,
	       LPFC_COMPRESS_VMID_SIZE);

	inst_desc = (struct instantiated_ve_desc *)(pcmd + 32);
	inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
	inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE);
	memcpy(inst_desc->global_vem_id, vmid->host_vmid,
	       LPFC_COMPRESS_VMID_SIZE);

	bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID);
	bf_set(lpfc_instantiated_local_id, inst_desc,
	       vmid->un.cs_ctl_vmid);
	if (instantiated) {
		inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
	} else {
		inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG);
		lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid);
	}
	inst_desc->word6 = cpu_to_be32(inst_desc->word6);

	elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem;

	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		goto out;
	}

	ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0);
	if (ret != IOCB_SUCCESS) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		lpfc_nlp_put(ndlp);
		goto out;
	}

	return 0;
 out:
	kfree(vmid_context);
	return -EIO;
}

static void
lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = icmdiocb->vport;
	struct lpfc_dmabuf *prsp = NULL;
	struct lpfc_vmid_context *vmid_context =
	    icmdiocb->vmid_tag.vmid_context;
	struct lpfc_nodelist *ndlp = icmdiocb->ndlp;
	u8 *pcmd;
	u32 *data;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
	struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf;
	struct lpfc_vmid *vmid;

	vmid = vmid_context->vmp;
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		ndlp = NULL;

	prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	pcmd = prsp->virt;
	data = (u32 *)pcmd;
	if (data[0] == ELS_CMD_LS_RJT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]);
		goto out;
	}
	if (ulp_status) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "4533 UVEM error status %x: %x\n",
				 ulp_status, ulp_word4);
		goto out;
	}
	spin_lock(&phba->hbalock);
	/* Set IN USE flag */
	vport->vmid_flag |= LPFC_VMID_IN_USE;
	phba->pport->vmid_flag |= LPFC_VMID_IN_USE;
	spin_unlock(&phba->hbalock);

	if (vmid_context->instantiated) {
		write_lock(&vport->vmid_lock);
		vmid->flag |= LPFC_VMID_REGISTERED;
		vmid->flag &= ~LPFC_VMID_REQ_REGISTER;
		write_unlock(&vport->vmid_lock);
	}

 out:
	kfree(vmid_context);
	lpfc_els_free_iocb(phba, icmdiocb);
	lpfc_nlp_put(ndlp);
}
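
/*
 * Illustrative sketch only, not part of the driver: a minimal example of how
 * the VMID helpers above are intended to fit together once QFPA has completed
 * and populated the CS_CTL priority bitmap. The helper name
 * lpfc_vmid_example_register() is hypothetical and the block is compiled out.
 */
#if 0
static int lpfc_vmid_example_register(struct lpfc_vport *vport,
				      struct lpfc_vmid *vmid)
{
	u32 cs_ctl;

	/* Reserve an unused CS_CTL value from the QFPA-derived bitmap;
	 * lpfc_vmid_get_cs_ctl() returns 0 when the range is exhausted.
	 */
	cs_ctl = lpfc_vmid_get_cs_ctl(vport);
	if (!cs_ctl)
		return -ENOSPC;

	vmid->un.cs_ctl_vmid = cs_ctl;

	/* Register the VEM/VE pair with the fabric; hand the CS_CTL value
	 * back to the bitmap if the UVEM could not be issued.
	 */
	if (lpfc_vmid_uvem(vport, vmid, true)) {
		lpfc_vmid_put_cs_ctl(vport, cs_ctl);
		return -EIO;
	}
	return 0;
}
#endif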