qla_gs.c (123092B)
1// SPDX-License-Identifier: GPL-2.0-only 2/* 3 * QLogic Fibre Channel HBA Driver 4 * Copyright (c) 2003-2014 QLogic Corporation 5 */ 6#include "qla_def.h" 7#include "qla_target.h" 8#include <linux/utsname.h> 9 10static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *); 11static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *); 12static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *); 13static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *); 14static int qla2x00_sns_rft_id(scsi_qla_host_t *); 15static int qla2x00_sns_rnn_id(scsi_qla_host_t *); 16static int qla_async_rftid(scsi_qla_host_t *, port_id_t *); 17static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8); 18static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*); 19static int qla_async_rsnn_nn(scsi_qla_host_t *); 20 21 22 23/** 24 * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query. 25 * @vha: HA context 26 * @arg: CT arguments 27 * 28 * Returns a pointer to the @vha's ms_iocb. 29 */ 30void * 31qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg) 32{ 33 struct qla_hw_data *ha = vha->hw; 34 ms_iocb_entry_t *ms_pkt; 35 36 ms_pkt = (ms_iocb_entry_t *)arg->iocb; 37 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t)); 38 39 ms_pkt->entry_type = MS_IOCB_TYPE; 40 ms_pkt->entry_count = 1; 41 SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER); 42 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG); 43 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 44 ms_pkt->cmd_dsd_count = cpu_to_le16(1); 45 ms_pkt->total_dsd_count = cpu_to_le16(2); 46 ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size); 47 ms_pkt->req_bytecount = cpu_to_le32(arg->req_size); 48 49 put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address); 50 ms_pkt->req_dsd.length = ms_pkt->req_bytecount; 51 52 put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address); 53 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount; 54 55 vha->qla_stats.control_requests++; 56 57 return (ms_pkt); 58} 59 60/** 61 * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query. 62 * @vha: HA context 63 * @arg: CT arguments 64 * 65 * Returns a pointer to the @ha's ms_iocb. 66 */ 67void * 68qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg) 69{ 70 struct qla_hw_data *ha = vha->hw; 71 struct ct_entry_24xx *ct_pkt; 72 73 ct_pkt = (struct ct_entry_24xx *)arg->iocb; 74 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx)); 75 76 ct_pkt->entry_type = CT_IOCB_TYPE; 77 ct_pkt->entry_count = 1; 78 ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle); 79 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 80 ct_pkt->cmd_dsd_count = cpu_to_le16(1); 81 ct_pkt->rsp_dsd_count = cpu_to_le16(1); 82 ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size); 83 ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size); 84 85 put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address); 86 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count; 87 88 put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address); 89 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count; 90 ct_pkt->vp_index = vha->vp_idx; 91 92 vha->qla_stats.control_requests++; 93 94 return (ct_pkt); 95} 96 97/** 98 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query. 99 * @p: CT request buffer 100 * @cmd: GS command 101 * @rsp_size: response size in bytes 102 * 103 * Returns a pointer to the intitialized @ct_req. 104 */ 105static inline struct ct_sns_req * 106qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size) 107{ 108 memset(p, 0, sizeof(struct ct_sns_pkt)); 109 110 p->p.req.header.revision = 0x01; 111 p->p.req.header.gs_type = 0xFC; 112 p->p.req.header.gs_subtype = 0x02; 113 p->p.req.command = cpu_to_be16(cmd); 114 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4); 115 116 return &p->p.req; 117} 118 119int 120qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, 121 struct ct_sns_rsp *ct_rsp, const char *routine) 122{ 123 int rval; 124 uint16_t comp_status; 125 struct qla_hw_data *ha = vha->hw; 126 bool lid_is_sns = false; 127 128 rval = QLA_FUNCTION_FAILED; 129 if (ms_pkt->entry_status != 0) { 130 ql_dbg(ql_dbg_disc, vha, 0x2031, 131 "%s failed, error status (%x) on port_id: %02x%02x%02x.\n", 132 routine, ms_pkt->entry_status, vha->d_id.b.domain, 133 vha->d_id.b.area, vha->d_id.b.al_pa); 134 } else { 135 if (IS_FWI2_CAPABLE(ha)) 136 comp_status = le16_to_cpu( 137 ((struct ct_entry_24xx *)ms_pkt)->comp_status); 138 else 139 comp_status = le16_to_cpu(ms_pkt->status); 140 switch (comp_status) { 141 case CS_COMPLETE: 142 case CS_DATA_UNDERRUN: 143 case CS_DATA_OVERRUN: /* Overrun? */ 144 if (ct_rsp->header.response != 145 cpu_to_be16(CT_ACCEPT_RESPONSE)) { 146 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077, 147 "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n", 148 routine, vha->d_id.b.domain, 149 vha->d_id.b.area, vha->d_id.b.al_pa, 150 comp_status, ct_rsp->header.response); 151 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 152 0x2078, ct_rsp, 153 offsetof(typeof(*ct_rsp), rsp)); 154 rval = QLA_INVALID_COMMAND; 155 } else 156 rval = QLA_SUCCESS; 157 break; 158 case CS_PORT_LOGGED_OUT: 159 if (IS_FWI2_CAPABLE(ha)) { 160 if (le16_to_cpu(ms_pkt->loop_id.extended) == 161 NPH_SNS) 162 lid_is_sns = true; 163 } else { 164 if (le16_to_cpu(ms_pkt->loop_id.extended) == 165 SIMPLE_NAME_SERVER) 166 lid_is_sns = true; 167 } 168 if (lid_is_sns) { 169 ql_dbg(ql_dbg_async, vha, 0x502b, 170 "%s failed, Name server has logged out", 171 routine); 172 rval = QLA_NOT_LOGGED_IN; 173 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 174 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 175 } 176 break; 177 case CS_TIMEOUT: 178 rval = QLA_FUNCTION_TIMEOUT; 179 fallthrough; 180 default: 181 ql_dbg(ql_dbg_disc, vha, 0x2033, 182 "%s failed, completion status (%x) on port_id: " 183 "%02x%02x%02x.\n", routine, comp_status, 184 vha->d_id.b.domain, vha->d_id.b.area, 185 vha->d_id.b.al_pa); 186 break; 187 } 188 } 189 return rval; 190} 191 192/** 193 * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command. 194 * @vha: HA context 195 * @fcport: fcport entry to updated 196 * 197 * Returns 0 on success. 198 */ 199int 200qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) 201{ 202 int rval; 203 204 ms_iocb_entry_t *ms_pkt; 205 struct ct_sns_req *ct_req; 206 struct ct_sns_rsp *ct_rsp; 207 struct qla_hw_data *ha = vha->hw; 208 struct ct_arg arg; 209 210 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 211 return qla2x00_sns_ga_nxt(vha, fcport); 212 213 arg.iocb = ha->ms_iocb; 214 arg.req_dma = ha->ct_sns_dma; 215 arg.rsp_dma = ha->ct_sns_dma; 216 arg.req_size = GA_NXT_REQ_SIZE; 217 arg.rsp_size = GA_NXT_RSP_SIZE; 218 arg.nport_handle = NPH_SNS; 219 220 /* Issue GA_NXT */ 221 /* Prepare common MS IOCB */ 222 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 223 224 /* Prepare CT request */ 225 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD, 226 GA_NXT_RSP_SIZE); 227 ct_rsp = &ha->ct_sns->p.rsp; 228 229 /* Prepare CT arguments -- port_id */ 230 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id); 231 232 /* Execute MS IOCB */ 233 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 234 sizeof(ms_iocb_entry_t)); 235 if (rval != QLA_SUCCESS) { 236 /*EMPTY*/ 237 ql_dbg(ql_dbg_disc, vha, 0x2062, 238 "GA_NXT issue IOCB failed (%d).\n", rval); 239 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") != 240 QLA_SUCCESS) { 241 rval = QLA_FUNCTION_FAILED; 242 } else { 243 /* Populate fc_port_t entry. */ 244 fcport->d_id = be_to_port_id(ct_rsp->rsp.ga_nxt.port_id); 245 246 memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name, 247 WWN_SIZE); 248 memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name, 249 WWN_SIZE); 250 251 fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ? 252 FS_FC4TYPE_FCP : FC4_TYPE_OTHER; 253 254 if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE && 255 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE) 256 fcport->d_id.b.domain = 0xf0; 257 258 ql_dbg(ql_dbg_disc, vha, 0x2063, 259 "GA_NXT entry - nn %8phN pn %8phN " 260 "port_id=%02x%02x%02x.\n", 261 fcport->node_name, fcport->port_name, 262 fcport->d_id.b.domain, fcport->d_id.b.area, 263 fcport->d_id.b.al_pa); 264 } 265 266 return (rval); 267} 268 269static inline int 270qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha) 271{ 272 return vha->hw->max_fibre_devices * 4 + 16; 273} 274 275/** 276 * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command. 277 * @vha: HA context 278 * @list: switch info entries to populate 279 * 280 * NOTE: Non-Nx_Ports are not requested. 281 * 282 * Returns 0 on success. 283 */ 284int 285qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) 286{ 287 int rval; 288 uint16_t i; 289 290 ms_iocb_entry_t *ms_pkt; 291 struct ct_sns_req *ct_req; 292 struct ct_sns_rsp *ct_rsp; 293 294 struct ct_sns_gid_pt_data *gid_data; 295 struct qla_hw_data *ha = vha->hw; 296 uint16_t gid_pt_rsp_size; 297 struct ct_arg arg; 298 299 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 300 return qla2x00_sns_gid_pt(vha, list); 301 302 gid_data = NULL; 303 gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha); 304 305 arg.iocb = ha->ms_iocb; 306 arg.req_dma = ha->ct_sns_dma; 307 arg.rsp_dma = ha->ct_sns_dma; 308 arg.req_size = GID_PT_REQ_SIZE; 309 arg.rsp_size = gid_pt_rsp_size; 310 arg.nport_handle = NPH_SNS; 311 312 /* Issue GID_PT */ 313 /* Prepare common MS IOCB */ 314 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 315 316 /* Prepare CT request */ 317 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size); 318 ct_rsp = &ha->ct_sns->p.rsp; 319 320 /* Prepare CT arguments -- port_type */ 321 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE; 322 323 /* Execute MS IOCB */ 324 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 325 sizeof(ms_iocb_entry_t)); 326 if (rval != QLA_SUCCESS) { 327 /*EMPTY*/ 328 ql_dbg(ql_dbg_disc, vha, 0x2055, 329 "GID_PT issue IOCB failed (%d).\n", rval); 330 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") != 331 QLA_SUCCESS) { 332 rval = QLA_FUNCTION_FAILED; 333 } else { 334 /* Set port IDs in switch info list. */ 335 for (i = 0; i < ha->max_fibre_devices; i++) { 336 gid_data = &ct_rsp->rsp.gid_pt.entries[i]; 337 list[i].d_id = be_to_port_id(gid_data->port_id); 338 memset(list[i].fabric_port_name, 0, WWN_SIZE); 339 list[i].fp_speed = PORT_SPEED_UNKNOWN; 340 341 /* Last one exit. */ 342 if (gid_data->control_byte & BIT_7) { 343 list[i].d_id.b.rsvd_1 = gid_data->control_byte; 344 break; 345 } 346 } 347 348 /* 349 * If we've used all available slots, then the switch is 350 * reporting back more devices than we can handle with this 351 * single call. Return a failed status, and let GA_NXT handle 352 * the overload. 353 */ 354 if (i == ha->max_fibre_devices) 355 rval = QLA_FUNCTION_FAILED; 356 } 357 358 return (rval); 359} 360 361/** 362 * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query. 363 * @vha: HA context 364 * @list: switch info entries to populate 365 * 366 * Returns 0 on success. 367 */ 368int 369qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) 370{ 371 int rval = QLA_SUCCESS; 372 uint16_t i; 373 374 ms_iocb_entry_t *ms_pkt; 375 struct ct_sns_req *ct_req; 376 struct ct_sns_rsp *ct_rsp; 377 struct qla_hw_data *ha = vha->hw; 378 struct ct_arg arg; 379 380 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 381 return qla2x00_sns_gpn_id(vha, list); 382 383 arg.iocb = ha->ms_iocb; 384 arg.req_dma = ha->ct_sns_dma; 385 arg.rsp_dma = ha->ct_sns_dma; 386 arg.req_size = GPN_ID_REQ_SIZE; 387 arg.rsp_size = GPN_ID_RSP_SIZE; 388 arg.nport_handle = NPH_SNS; 389 390 for (i = 0; i < ha->max_fibre_devices; i++) { 391 /* Issue GPN_ID */ 392 /* Prepare common MS IOCB */ 393 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 394 395 /* Prepare CT request */ 396 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD, 397 GPN_ID_RSP_SIZE); 398 ct_rsp = &ha->ct_sns->p.rsp; 399 400 /* Prepare CT arguments -- port_id */ 401 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); 402 403 /* Execute MS IOCB */ 404 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 405 sizeof(ms_iocb_entry_t)); 406 if (rval != QLA_SUCCESS) { 407 /*EMPTY*/ 408 ql_dbg(ql_dbg_disc, vha, 0x2056, 409 "GPN_ID issue IOCB failed (%d).\n", rval); 410 break; 411 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 412 "GPN_ID") != QLA_SUCCESS) { 413 rval = QLA_FUNCTION_FAILED; 414 break; 415 } else { 416 /* Save portname */ 417 memcpy(list[i].port_name, 418 ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); 419 } 420 421 /* Last device exit. */ 422 if (list[i].d_id.b.rsvd_1 != 0) 423 break; 424 } 425 426 return (rval); 427} 428 429/** 430 * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query. 431 * @vha: HA context 432 * @list: switch info entries to populate 433 * 434 * Returns 0 on success. 435 */ 436int 437qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) 438{ 439 int rval = QLA_SUCCESS; 440 uint16_t i; 441 struct qla_hw_data *ha = vha->hw; 442 ms_iocb_entry_t *ms_pkt; 443 struct ct_sns_req *ct_req; 444 struct ct_sns_rsp *ct_rsp; 445 struct ct_arg arg; 446 447 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 448 return qla2x00_sns_gnn_id(vha, list); 449 450 arg.iocb = ha->ms_iocb; 451 arg.req_dma = ha->ct_sns_dma; 452 arg.rsp_dma = ha->ct_sns_dma; 453 arg.req_size = GNN_ID_REQ_SIZE; 454 arg.rsp_size = GNN_ID_RSP_SIZE; 455 arg.nport_handle = NPH_SNS; 456 457 for (i = 0; i < ha->max_fibre_devices; i++) { 458 /* Issue GNN_ID */ 459 /* Prepare common MS IOCB */ 460 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 461 462 /* Prepare CT request */ 463 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD, 464 GNN_ID_RSP_SIZE); 465 ct_rsp = &ha->ct_sns->p.rsp; 466 467 /* Prepare CT arguments -- port_id */ 468 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); 469 470 /* Execute MS IOCB */ 471 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 472 sizeof(ms_iocb_entry_t)); 473 if (rval != QLA_SUCCESS) { 474 /*EMPTY*/ 475 ql_dbg(ql_dbg_disc, vha, 0x2057, 476 "GNN_ID issue IOCB failed (%d).\n", rval); 477 break; 478 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 479 "GNN_ID") != QLA_SUCCESS) { 480 rval = QLA_FUNCTION_FAILED; 481 break; 482 } else { 483 /* Save nodename */ 484 memcpy(list[i].node_name, 485 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE); 486 487 ql_dbg(ql_dbg_disc, vha, 0x2058, 488 "GID_PT entry - nn %8phN pn %8phN " 489 "portid=%02x%02x%02x.\n", 490 list[i].node_name, list[i].port_name, 491 list[i].d_id.b.domain, list[i].d_id.b.area, 492 list[i].d_id.b.al_pa); 493 } 494 495 /* Last device exit. */ 496 if (list[i].d_id.b.rsvd_1 != 0) 497 break; 498 } 499 500 return (rval); 501} 502 503static void qla2x00_async_sns_sp_done(srb_t *sp, int rc) 504{ 505 struct scsi_qla_host *vha = sp->vha; 506 struct ct_sns_pkt *ct_sns; 507 struct qla_work_evt *e; 508 509 sp->rc = rc; 510 if (rc == QLA_SUCCESS) { 511 ql_dbg(ql_dbg_disc, vha, 0x204f, 512 "Async done-%s exiting normally.\n", 513 sp->name); 514 } else if (rc == QLA_FUNCTION_TIMEOUT) { 515 ql_dbg(ql_dbg_disc, vha, 0x204f, 516 "Async done-%s timeout\n", sp->name); 517 } else { 518 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; 519 memset(ct_sns, 0, sizeof(*ct_sns)); 520 sp->retry_count++; 521 if (sp->retry_count > 3) 522 goto err; 523 524 ql_dbg(ql_dbg_disc, vha, 0x204f, 525 "Async done-%s fail rc %x. Retry count %d\n", 526 sp->name, rc, sp->retry_count); 527 528 e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY); 529 if (!e) 530 goto err2; 531 532 e->u.iosb.sp = sp; 533 qla2x00_post_work(vha, e); 534 return; 535 } 536 537err: 538 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); 539err2: 540 if (!e) { 541 /* please ignore kernel warning. otherwise, we have mem leak. */ 542 if (sp->u.iocb_cmd.u.ctarg.req) { 543 dma_free_coherent(&vha->hw->pdev->dev, 544 sp->u.iocb_cmd.u.ctarg.req_allocated_size, 545 sp->u.iocb_cmd.u.ctarg.req, 546 sp->u.iocb_cmd.u.ctarg.req_dma); 547 sp->u.iocb_cmd.u.ctarg.req = NULL; 548 } 549 550 if (sp->u.iocb_cmd.u.ctarg.rsp) { 551 dma_free_coherent(&vha->hw->pdev->dev, 552 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, 553 sp->u.iocb_cmd.u.ctarg.rsp, 554 sp->u.iocb_cmd.u.ctarg.rsp_dma); 555 sp->u.iocb_cmd.u.ctarg.rsp = NULL; 556 } 557 558 /* ref: INIT */ 559 kref_put(&sp->cmd_kref, qla2x00_sp_release); 560 return; 561 } 562 563 e->u.iosb.sp = sp; 564 qla2x00_post_work(vha, e); 565} 566 567/** 568 * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA. 569 * @vha: HA context 570 * 571 * Returns 0 on success. 572 */ 573int 574qla2x00_rft_id(scsi_qla_host_t *vha) 575{ 576 struct qla_hw_data *ha = vha->hw; 577 578 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 579 return qla2x00_sns_rft_id(vha); 580 581 return qla_async_rftid(vha, &vha->d_id); 582} 583 584static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id) 585{ 586 int rval = QLA_MEMORY_ALLOC_FAILED; 587 struct ct_sns_req *ct_req; 588 srb_t *sp; 589 struct ct_sns_pkt *ct_sns; 590 591 if (!vha->flags.online) 592 goto done; 593 594 /* ref: INIT */ 595 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 596 if (!sp) 597 goto done; 598 599 sp->type = SRB_CT_PTHRU_CMD; 600 sp->name = "rft_id"; 601 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 602 qla2x00_async_sns_sp_done); 603 604 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, 605 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, 606 GFP_KERNEL); 607 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 608 if (!sp->u.iocb_cmd.u.ctarg.req) { 609 ql_log(ql_log_warn, vha, 0xd041, 610 "%s: Failed to allocate ct_sns request.\n", 611 __func__); 612 goto done_free_sp; 613 } 614 615 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, 616 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, 617 GFP_KERNEL); 618 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 619 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 620 ql_log(ql_log_warn, vha, 0xd042, 621 "%s: Failed to allocate ct_sns request.\n", 622 __func__); 623 goto done_free_sp; 624 } 625 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; 626 memset(ct_sns, 0, sizeof(*ct_sns)); 627 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 628 629 /* Prepare CT request */ 630 ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE); 631 632 /* Prepare CT arguments -- port_id, FC-4 types */ 633 ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id); 634 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */ 635 636 if (vha->flags.nvme_enabled && qla_ini_mode_enabled(vha)) 637 ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */ 638 639 sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE; 640 sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE; 641 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 642 643 ql_dbg(ql_dbg_disc, vha, 0xffff, 644 "Async-%s - hdl=%x portid %06x.\n", 645 sp->name, sp->handle, d_id->b24); 646 647 rval = qla2x00_start_sp(sp); 648 if (rval != QLA_SUCCESS) { 649 ql_dbg(ql_dbg_disc, vha, 0x2043, 650 "RFT_ID issue IOCB failed (%d).\n", rval); 651 goto done_free_sp; 652 } 653 return rval; 654done_free_sp: 655 /* ref: INIT */ 656 kref_put(&sp->cmd_kref, qla2x00_sp_release); 657done: 658 return rval; 659} 660 661/** 662 * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA. 663 * @vha: HA context 664 * @type: not used 665 * 666 * Returns 0 on success. 667 */ 668int 669qla2x00_rff_id(scsi_qla_host_t *vha, u8 type) 670{ 671 struct qla_hw_data *ha = vha->hw; 672 673 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 674 ql_dbg(ql_dbg_disc, vha, 0x2046, 675 "RFF_ID call not supported on ISP2100/ISP2200.\n"); 676 return (QLA_SUCCESS); 677 } 678 679 return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), type); 680} 681 682static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, 683 u8 fc4feature, u8 fc4type) 684{ 685 int rval = QLA_MEMORY_ALLOC_FAILED; 686 struct ct_sns_req *ct_req; 687 srb_t *sp; 688 struct ct_sns_pkt *ct_sns; 689 690 /* ref: INIT */ 691 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 692 if (!sp) 693 goto done; 694 695 sp->type = SRB_CT_PTHRU_CMD; 696 sp->name = "rff_id"; 697 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 698 qla2x00_async_sns_sp_done); 699 700 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, 701 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, 702 GFP_KERNEL); 703 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 704 if (!sp->u.iocb_cmd.u.ctarg.req) { 705 ql_log(ql_log_warn, vha, 0xd041, 706 "%s: Failed to allocate ct_sns request.\n", 707 __func__); 708 goto done_free_sp; 709 } 710 711 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, 712 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, 713 GFP_KERNEL); 714 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 715 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 716 ql_log(ql_log_warn, vha, 0xd042, 717 "%s: Failed to allocate ct_sns request.\n", 718 __func__); 719 goto done_free_sp; 720 } 721 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; 722 memset(ct_sns, 0, sizeof(*ct_sns)); 723 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 724 725 /* Prepare CT request */ 726 ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE); 727 728 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */ 729 ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id); 730 ct_req->req.rff_id.fc4_feature = fc4feature; 731 ct_req->req.rff_id.fc4_type = fc4type; /* SCSI-FCP or FC-NVMe */ 732 733 sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE; 734 sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE; 735 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 736 737 ql_dbg(ql_dbg_disc, vha, 0xffff, 738 "Async-%s - hdl=%x portid %06x feature %x type %x.\n", 739 sp->name, sp->handle, d_id->b24, fc4feature, fc4type); 740 741 rval = qla2x00_start_sp(sp); 742 if (rval != QLA_SUCCESS) { 743 ql_dbg(ql_dbg_disc, vha, 0x2047, 744 "RFF_ID issue IOCB failed (%d).\n", rval); 745 goto done_free_sp; 746 } 747 748 return rval; 749 750done_free_sp: 751 /* ref: INIT */ 752 kref_put(&sp->cmd_kref, qla2x00_sp_release); 753done: 754 return rval; 755} 756 757/** 758 * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA. 759 * @vha: HA context 760 * 761 * Returns 0 on success. 762 */ 763int 764qla2x00_rnn_id(scsi_qla_host_t *vha) 765{ 766 struct qla_hw_data *ha = vha->hw; 767 768 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 769 return qla2x00_sns_rnn_id(vha); 770 771 return qla_async_rnnid(vha, &vha->d_id, vha->node_name); 772} 773 774static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id, 775 u8 *node_name) 776{ 777 int rval = QLA_MEMORY_ALLOC_FAILED; 778 struct ct_sns_req *ct_req; 779 srb_t *sp; 780 struct ct_sns_pkt *ct_sns; 781 782 /* ref: INIT */ 783 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 784 if (!sp) 785 goto done; 786 787 sp->type = SRB_CT_PTHRU_CMD; 788 sp->name = "rnid"; 789 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 790 qla2x00_async_sns_sp_done); 791 792 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, 793 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, 794 GFP_KERNEL); 795 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 796 if (!sp->u.iocb_cmd.u.ctarg.req) { 797 ql_log(ql_log_warn, vha, 0xd041, 798 "%s: Failed to allocate ct_sns request.\n", 799 __func__); 800 goto done_free_sp; 801 } 802 803 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, 804 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, 805 GFP_KERNEL); 806 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 807 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 808 ql_log(ql_log_warn, vha, 0xd042, 809 "%s: Failed to allocate ct_sns request.\n", 810 __func__); 811 goto done_free_sp; 812 } 813 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; 814 memset(ct_sns, 0, sizeof(*ct_sns)); 815 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 816 817 /* Prepare CT request */ 818 ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE); 819 820 /* Prepare CT arguments -- port_id, node_name */ 821 ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id); 822 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE); 823 824 sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE; 825 sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE; 826 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 827 828 ql_dbg(ql_dbg_disc, vha, 0xffff, 829 "Async-%s - hdl=%x portid %06x\n", 830 sp->name, sp->handle, d_id->b24); 831 832 rval = qla2x00_start_sp(sp); 833 if (rval != QLA_SUCCESS) { 834 ql_dbg(ql_dbg_disc, vha, 0x204d, 835 "RNN_ID issue IOCB failed (%d).\n", rval); 836 goto done_free_sp; 837 } 838 839 return rval; 840 841done_free_sp: 842 /* ref: INIT */ 843 kref_put(&sp->cmd_kref, qla2x00_sp_release); 844done: 845 return rval; 846} 847 848size_t 849qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size) 850{ 851 struct qla_hw_data *ha = vha->hw; 852 853 if (IS_QLAFX00(ha)) 854 return scnprintf(snn, size, "%s FW:v%s DVR:v%s", 855 ha->model_number, ha->mr.fw_version, qla2x00_version_str); 856 857 return scnprintf(snn, size, "%s FW:v%d.%02d.%02d DVR:v%s", 858 ha->model_number, ha->fw_major_version, ha->fw_minor_version, 859 ha->fw_subminor_version, qla2x00_version_str); 860} 861 862/** 863 * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA. 864 * @vha: HA context 865 * 866 * Returns 0 on success. 867 */ 868int 869qla2x00_rsnn_nn(scsi_qla_host_t *vha) 870{ 871 struct qla_hw_data *ha = vha->hw; 872 873 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 874 ql_dbg(ql_dbg_disc, vha, 0x2050, 875 "RSNN_ID call unsupported on ISP2100/ISP2200.\n"); 876 return (QLA_SUCCESS); 877 } 878 879 return qla_async_rsnn_nn(vha); 880} 881 882static int qla_async_rsnn_nn(scsi_qla_host_t *vha) 883{ 884 int rval = QLA_MEMORY_ALLOC_FAILED; 885 struct ct_sns_req *ct_req; 886 srb_t *sp; 887 struct ct_sns_pkt *ct_sns; 888 889 /* ref: INIT */ 890 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 891 if (!sp) 892 goto done; 893 894 sp->type = SRB_CT_PTHRU_CMD; 895 sp->name = "rsnn_nn"; 896 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 897 qla2x00_async_sns_sp_done); 898 899 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, 900 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, 901 GFP_KERNEL); 902 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 903 if (!sp->u.iocb_cmd.u.ctarg.req) { 904 ql_log(ql_log_warn, vha, 0xd041, 905 "%s: Failed to allocate ct_sns request.\n", 906 __func__); 907 goto done_free_sp; 908 } 909 910 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, 911 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, 912 GFP_KERNEL); 913 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 914 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 915 ql_log(ql_log_warn, vha, 0xd042, 916 "%s: Failed to allocate ct_sns request.\n", 917 __func__); 918 goto done_free_sp; 919 } 920 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; 921 memset(ct_sns, 0, sizeof(*ct_sns)); 922 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 923 924 /* Prepare CT request */ 925 ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE); 926 927 /* Prepare CT arguments -- node_name, symbolic node_name, size */ 928 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE); 929 930 /* Prepare the Symbolic Node Name */ 931 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name, 932 sizeof(ct_req->req.rsnn_nn.sym_node_name)); 933 ct_req->req.rsnn_nn.name_len = 934 (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name); 935 936 937 sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len; 938 sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE; 939 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 940 941 ql_dbg(ql_dbg_disc, vha, 0xffff, 942 "Async-%s - hdl=%x.\n", 943 sp->name, sp->handle); 944 945 rval = qla2x00_start_sp(sp); 946 if (rval != QLA_SUCCESS) { 947 ql_dbg(ql_dbg_disc, vha, 0x2043, 948 "RFT_ID issue IOCB failed (%d).\n", rval); 949 goto done_free_sp; 950 } 951 952 return rval; 953 954done_free_sp: 955 /* ref: INIT */ 956 kref_put(&sp->cmd_kref, qla2x00_sp_release); 957done: 958 return rval; 959} 960 961/** 962 * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query. 963 * @vha: HA context 964 * @cmd: GS command 965 * @scmd_len: Subcommand length 966 * @data_size: response size in bytes 967 * 968 * Returns a pointer to the @ha's sns_cmd. 969 */ 970static inline struct sns_cmd_pkt * 971qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len, 972 uint16_t data_size) 973{ 974 uint16_t wc; 975 struct sns_cmd_pkt *sns_cmd; 976 struct qla_hw_data *ha = vha->hw; 977 978 sns_cmd = ha->sns_cmd; 979 memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt)); 980 wc = data_size / 2; /* Size in 16bit words. */ 981 sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc); 982 put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address); 983 sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len); 984 sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd); 985 wc = (data_size - 16) / 4; /* Size in 32bit words. */ 986 sns_cmd->p.cmd.size = cpu_to_le16(wc); 987 988 vha->qla_stats.control_requests++; 989 990 return (sns_cmd); 991} 992 993/** 994 * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command. 995 * @vha: HA context 996 * @fcport: fcport entry to updated 997 * 998 * This command uses the old Exectute SNS Command mailbox routine. 999 * 1000 * Returns 0 on success. 1001 */ 1002static int 1003qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) 1004{ 1005 int rval = QLA_SUCCESS; 1006 struct qla_hw_data *ha = vha->hw; 1007 struct sns_cmd_pkt *sns_cmd; 1008 1009 /* Issue GA_NXT. */ 1010 /* Prepare SNS command request. */ 1011 sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN, 1012 GA_NXT_SNS_DATA_SIZE); 1013 1014 /* Prepare SNS command arguments -- port_id. */ 1015 sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa; 1016 sns_cmd->p.cmd.param[1] = fcport->d_id.b.area; 1017 sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain; 1018 1019 /* Execute SNS command. */ 1020 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2, 1021 sizeof(struct sns_cmd_pkt)); 1022 if (rval != QLA_SUCCESS) { 1023 /*EMPTY*/ 1024 ql_dbg(ql_dbg_disc, vha, 0x205f, 1025 "GA_NXT Send SNS failed (%d).\n", rval); 1026 } else if (sns_cmd->p.gan_data[8] != 0x80 || 1027 sns_cmd->p.gan_data[9] != 0x02) { 1028 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084, 1029 "GA_NXT failed, rejected request ga_nxt_rsp:\n"); 1030 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074, 1031 sns_cmd->p.gan_data, 16); 1032 rval = QLA_FUNCTION_FAILED; 1033 } else { 1034 /* Populate fc_port_t entry. */ 1035 fcport->d_id.b.domain = sns_cmd->p.gan_data[17]; 1036 fcport->d_id.b.area = sns_cmd->p.gan_data[18]; 1037 fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19]; 1038 1039 memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE); 1040 memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE); 1041 1042 if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE && 1043 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE) 1044 fcport->d_id.b.domain = 0xf0; 1045 1046 ql_dbg(ql_dbg_disc, vha, 0x2061, 1047 "GA_NXT entry - nn %8phN pn %8phN " 1048 "port_id=%02x%02x%02x.\n", 1049 fcport->node_name, fcport->port_name, 1050 fcport->d_id.b.domain, fcport->d_id.b.area, 1051 fcport->d_id.b.al_pa); 1052 } 1053 1054 return (rval); 1055} 1056 1057/** 1058 * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command. 1059 * @vha: HA context 1060 * @list: switch info entries to populate 1061 * 1062 * This command uses the old Exectute SNS Command mailbox routine. 1063 * 1064 * NOTE: Non-Nx_Ports are not requested. 1065 * 1066 * Returns 0 on success. 1067 */ 1068static int 1069qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) 1070{ 1071 int rval; 1072 struct qla_hw_data *ha = vha->hw; 1073 uint16_t i; 1074 uint8_t *entry; 1075 struct sns_cmd_pkt *sns_cmd; 1076 uint16_t gid_pt_sns_data_size; 1077 1078 gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha); 1079 1080 /* Issue GID_PT. */ 1081 /* Prepare SNS command request. */ 1082 sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN, 1083 gid_pt_sns_data_size); 1084 1085 /* Prepare SNS command arguments -- port_type. */ 1086 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE; 1087 1088 /* Execute SNS command. */ 1089 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2, 1090 sizeof(struct sns_cmd_pkt)); 1091 if (rval != QLA_SUCCESS) { 1092 /*EMPTY*/ 1093 ql_dbg(ql_dbg_disc, vha, 0x206d, 1094 "GID_PT Send SNS failed (%d).\n", rval); 1095 } else if (sns_cmd->p.gid_data[8] != 0x80 || 1096 sns_cmd->p.gid_data[9] != 0x02) { 1097 ql_dbg(ql_dbg_disc, vha, 0x202f, 1098 "GID_PT failed, rejected request, gid_rsp:\n"); 1099 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081, 1100 sns_cmd->p.gid_data, 16); 1101 rval = QLA_FUNCTION_FAILED; 1102 } else { 1103 /* Set port IDs in switch info list. */ 1104 for (i = 0; i < ha->max_fibre_devices; i++) { 1105 entry = &sns_cmd->p.gid_data[(i * 4) + 16]; 1106 list[i].d_id.b.domain = entry[1]; 1107 list[i].d_id.b.area = entry[2]; 1108 list[i].d_id.b.al_pa = entry[3]; 1109 1110 /* Last one exit. */ 1111 if (entry[0] & BIT_7) { 1112 list[i].d_id.b.rsvd_1 = entry[0]; 1113 break; 1114 } 1115 } 1116 1117 /* 1118 * If we've used all available slots, then the switch is 1119 * reporting back more devices that we can handle with this 1120 * single call. Return a failed status, and let GA_NXT handle 1121 * the overload. 1122 */ 1123 if (i == ha->max_fibre_devices) 1124 rval = QLA_FUNCTION_FAILED; 1125 } 1126 1127 return (rval); 1128} 1129 1130/** 1131 * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query. 1132 * @vha: HA context 1133 * @list: switch info entries to populate 1134 * 1135 * This command uses the old Exectute SNS Command mailbox routine. 1136 * 1137 * Returns 0 on success. 1138 */ 1139static int 1140qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) 1141{ 1142 int rval = QLA_SUCCESS; 1143 struct qla_hw_data *ha = vha->hw; 1144 uint16_t i; 1145 struct sns_cmd_pkt *sns_cmd; 1146 1147 for (i = 0; i < ha->max_fibre_devices; i++) { 1148 /* Issue GPN_ID */ 1149 /* Prepare SNS command request. */ 1150 sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD, 1151 GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE); 1152 1153 /* Prepare SNS command arguments -- port_id. */ 1154 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa; 1155 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area; 1156 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain; 1157 1158 /* Execute SNS command. */ 1159 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, 1160 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); 1161 if (rval != QLA_SUCCESS) { 1162 /*EMPTY*/ 1163 ql_dbg(ql_dbg_disc, vha, 0x2032, 1164 "GPN_ID Send SNS failed (%d).\n", rval); 1165 } else if (sns_cmd->p.gpn_data[8] != 0x80 || 1166 sns_cmd->p.gpn_data[9] != 0x02) { 1167 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e, 1168 "GPN_ID failed, rejected request, gpn_rsp:\n"); 1169 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f, 1170 sns_cmd->p.gpn_data, 16); 1171 rval = QLA_FUNCTION_FAILED; 1172 } else { 1173 /* Save portname */ 1174 memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16], 1175 WWN_SIZE); 1176 } 1177 1178 /* Last device exit. */ 1179 if (list[i].d_id.b.rsvd_1 != 0) 1180 break; 1181 } 1182 1183 return (rval); 1184} 1185 1186/** 1187 * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query. 1188 * @vha: HA context 1189 * @list: switch info entries to populate 1190 * 1191 * This command uses the old Exectute SNS Command mailbox routine. 1192 * 1193 * Returns 0 on success. 1194 */ 1195static int 1196qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) 1197{ 1198 int rval = QLA_SUCCESS; 1199 struct qla_hw_data *ha = vha->hw; 1200 uint16_t i; 1201 struct sns_cmd_pkt *sns_cmd; 1202 1203 for (i = 0; i < ha->max_fibre_devices; i++) { 1204 /* Issue GNN_ID */ 1205 /* Prepare SNS command request. */ 1206 sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD, 1207 GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE); 1208 1209 /* Prepare SNS command arguments -- port_id. */ 1210 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa; 1211 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area; 1212 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain; 1213 1214 /* Execute SNS command. */ 1215 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, 1216 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); 1217 if (rval != QLA_SUCCESS) { 1218 /*EMPTY*/ 1219 ql_dbg(ql_dbg_disc, vha, 0x203f, 1220 "GNN_ID Send SNS failed (%d).\n", rval); 1221 } else if (sns_cmd->p.gnn_data[8] != 0x80 || 1222 sns_cmd->p.gnn_data[9] != 0x02) { 1223 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082, 1224 "GNN_ID failed, rejected request, gnn_rsp:\n"); 1225 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a, 1226 sns_cmd->p.gnn_data, 16); 1227 rval = QLA_FUNCTION_FAILED; 1228 } else { 1229 /* Save nodename */ 1230 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16], 1231 WWN_SIZE); 1232 1233 ql_dbg(ql_dbg_disc, vha, 0x206e, 1234 "GID_PT entry - nn %8phN pn %8phN " 1235 "port_id=%02x%02x%02x.\n", 1236 list[i].node_name, list[i].port_name, 1237 list[i].d_id.b.domain, list[i].d_id.b.area, 1238 list[i].d_id.b.al_pa); 1239 } 1240 1241 /* Last device exit. */ 1242 if (list[i].d_id.b.rsvd_1 != 0) 1243 break; 1244 } 1245 1246 return (rval); 1247} 1248 1249/** 1250 * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA. 1251 * @vha: HA context 1252 * 1253 * This command uses the old Exectute SNS Command mailbox routine. 1254 * 1255 * Returns 0 on success. 1256 */ 1257static int 1258qla2x00_sns_rft_id(scsi_qla_host_t *vha) 1259{ 1260 int rval; 1261 struct qla_hw_data *ha = vha->hw; 1262 struct sns_cmd_pkt *sns_cmd; 1263 1264 /* Issue RFT_ID. */ 1265 /* Prepare SNS command request. */ 1266 sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN, 1267 RFT_ID_SNS_DATA_SIZE); 1268 1269 /* Prepare SNS command arguments -- port_id, FC-4 types */ 1270 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa; 1271 sns_cmd->p.cmd.param[1] = vha->d_id.b.area; 1272 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain; 1273 1274 sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */ 1275 1276 /* Execute SNS command. */ 1277 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2, 1278 sizeof(struct sns_cmd_pkt)); 1279 if (rval != QLA_SUCCESS) { 1280 /*EMPTY*/ 1281 ql_dbg(ql_dbg_disc, vha, 0x2060, 1282 "RFT_ID Send SNS failed (%d).\n", rval); 1283 } else if (sns_cmd->p.rft_data[8] != 0x80 || 1284 sns_cmd->p.rft_data[9] != 0x02) { 1285 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083, 1286 "RFT_ID failed, rejected request rft_rsp:\n"); 1287 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080, 1288 sns_cmd->p.rft_data, 16); 1289 rval = QLA_FUNCTION_FAILED; 1290 } else { 1291 ql_dbg(ql_dbg_disc, vha, 0x2073, 1292 "RFT_ID exiting normally.\n"); 1293 } 1294 1295 return (rval); 1296} 1297 1298/** 1299 * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA. 1300 * @vha: HA context 1301 * 1302 * This command uses the old Exectute SNS Command mailbox routine. 1303 * 1304 * Returns 0 on success. 1305 */ 1306static int 1307qla2x00_sns_rnn_id(scsi_qla_host_t *vha) 1308{ 1309 int rval; 1310 struct qla_hw_data *ha = vha->hw; 1311 struct sns_cmd_pkt *sns_cmd; 1312 1313 /* Issue RNN_ID. */ 1314 /* Prepare SNS command request. */ 1315 sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN, 1316 RNN_ID_SNS_DATA_SIZE); 1317 1318 /* Prepare SNS command arguments -- port_id, nodename. */ 1319 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa; 1320 sns_cmd->p.cmd.param[1] = vha->d_id.b.area; 1321 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain; 1322 1323 sns_cmd->p.cmd.param[4] = vha->node_name[7]; 1324 sns_cmd->p.cmd.param[5] = vha->node_name[6]; 1325 sns_cmd->p.cmd.param[6] = vha->node_name[5]; 1326 sns_cmd->p.cmd.param[7] = vha->node_name[4]; 1327 sns_cmd->p.cmd.param[8] = vha->node_name[3]; 1328 sns_cmd->p.cmd.param[9] = vha->node_name[2]; 1329 sns_cmd->p.cmd.param[10] = vha->node_name[1]; 1330 sns_cmd->p.cmd.param[11] = vha->node_name[0]; 1331 1332 /* Execute SNS command. */ 1333 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2, 1334 sizeof(struct sns_cmd_pkt)); 1335 if (rval != QLA_SUCCESS) { 1336 /*EMPTY*/ 1337 ql_dbg(ql_dbg_disc, vha, 0x204a, 1338 "RNN_ID Send SNS failed (%d).\n", rval); 1339 } else if (sns_cmd->p.rnn_data[8] != 0x80 || 1340 sns_cmd->p.rnn_data[9] != 0x02) { 1341 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b, 1342 "RNN_ID failed, rejected request, rnn_rsp:\n"); 1343 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c, 1344 sns_cmd->p.rnn_data, 16); 1345 rval = QLA_FUNCTION_FAILED; 1346 } else { 1347 ql_dbg(ql_dbg_disc, vha, 0x204c, 1348 "RNN_ID exiting normally.\n"); 1349 } 1350 1351 return (rval); 1352} 1353 1354/** 1355 * qla2x00_mgmt_svr_login() - Login to fabric Management Service. 1356 * @vha: HA context 1357 * 1358 * Returns 0 on success. 1359 */ 1360int 1361qla2x00_mgmt_svr_login(scsi_qla_host_t *vha) 1362{ 1363 int ret, rval; 1364 uint16_t mb[MAILBOX_REGISTER_COUNT]; 1365 struct qla_hw_data *ha = vha->hw; 1366 1367 ret = QLA_SUCCESS; 1368 if (vha->flags.management_server_logged_in) 1369 return ret; 1370 1371 rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 1372 0xfa, mb, BIT_1); 1373 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) { 1374 if (rval == QLA_MEMORY_ALLOC_FAILED) 1375 ql_dbg(ql_dbg_disc, vha, 0x2085, 1376 "Failed management_server login: loopid=%x " 1377 "rval=%d\n", vha->mgmt_svr_loop_id, rval); 1378 else 1379 ql_dbg(ql_dbg_disc, vha, 0x2024, 1380 "Failed management_server login: loopid=%x " 1381 "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n", 1382 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], 1383 mb[7]); 1384 ret = QLA_FUNCTION_FAILED; 1385 } else 1386 vha->flags.management_server_logged_in = 1; 1387 1388 return ret; 1389} 1390 1391/** 1392 * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query. 1393 * @vha: HA context 1394 * @req_size: request size in bytes 1395 * @rsp_size: response size in bytes 1396 * 1397 * Returns a pointer to the @ha's ms_iocb. 1398 */ 1399void * 1400qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size, 1401 uint32_t rsp_size) 1402{ 1403 ms_iocb_entry_t *ms_pkt; 1404 struct qla_hw_data *ha = vha->hw; 1405 1406 ms_pkt = ha->ms_iocb; 1407 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t)); 1408 1409 ms_pkt->entry_type = MS_IOCB_TYPE; 1410 ms_pkt->entry_count = 1; 1411 SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id); 1412 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG); 1413 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 1414 ms_pkt->cmd_dsd_count = cpu_to_le16(1); 1415 ms_pkt->total_dsd_count = cpu_to_le16(2); 1416 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); 1417 ms_pkt->req_bytecount = cpu_to_le32(req_size); 1418 1419 put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address); 1420 ms_pkt->req_dsd.length = ms_pkt->req_bytecount; 1421 1422 put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address); 1423 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount; 1424 1425 return ms_pkt; 1426} 1427 1428/** 1429 * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query. 1430 * @vha: HA context 1431 * @req_size: request size in bytes 1432 * @rsp_size: response size in bytes 1433 * 1434 * Returns a pointer to the @ha's ms_iocb. 1435 */ 1436void * 1437qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size, 1438 uint32_t rsp_size) 1439{ 1440 struct ct_entry_24xx *ct_pkt; 1441 struct qla_hw_data *ha = vha->hw; 1442 1443 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; 1444 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx)); 1445 1446 ct_pkt->entry_type = CT_IOCB_TYPE; 1447 ct_pkt->entry_count = 1; 1448 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id); 1449 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 1450 ct_pkt->cmd_dsd_count = cpu_to_le16(1); 1451 ct_pkt->rsp_dsd_count = cpu_to_le16(1); 1452 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); 1453 ct_pkt->cmd_byte_count = cpu_to_le32(req_size); 1454 1455 put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address); 1456 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count; 1457 1458 put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address); 1459 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count; 1460 ct_pkt->vp_index = vha->vp_idx; 1461 1462 return ct_pkt; 1463} 1464 1465static void 1466qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size) 1467{ 1468 struct qla_hw_data *ha = vha->hw; 1469 ms_iocb_entry_t *ms_pkt = ha->ms_iocb; 1470 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; 1471 1472 if (IS_FWI2_CAPABLE(ha)) { 1473 ct_pkt->cmd_byte_count = cpu_to_le32(req_size); 1474 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count; 1475 } else { 1476 ms_pkt->req_bytecount = cpu_to_le32(req_size); 1477 ms_pkt->req_dsd.length = ms_pkt->req_bytecount; 1478 } 1479} 1480 1481/** 1482 * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for SNS query. 1483 * @p: CT request buffer 1484 * @cmd: GS command 1485 * @rsp_size: response size in bytes 1486 * 1487 * Returns a pointer to the intitialized @ct_req. 1488 */ 1489static inline struct ct_sns_req * 1490qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd, 1491 uint16_t rsp_size) 1492{ 1493 memset(p, 0, sizeof(struct ct_sns_pkt)); 1494 1495 p->p.req.header.revision = 0x01; 1496 p->p.req.header.gs_type = 0xFA; 1497 p->p.req.header.gs_subtype = 0x10; 1498 p->p.req.command = cpu_to_be16(cmd); 1499 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4); 1500 1501 return &p->p.req; 1502} 1503 1504uint 1505qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha) 1506{ 1507 uint speeds = 0; 1508 1509 if (IS_CNA_CAPABLE(ha)) 1510 return FDMI_PORT_SPEED_10GB; 1511 if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { 1512 if (ha->max_supported_speed == 2) { 1513 if (ha->min_supported_speed <= 6) 1514 speeds |= FDMI_PORT_SPEED_64GB; 1515 } 1516 if (ha->max_supported_speed == 2 || 1517 ha->max_supported_speed == 1) { 1518 if (ha->min_supported_speed <= 5) 1519 speeds |= FDMI_PORT_SPEED_32GB; 1520 } 1521 if (ha->max_supported_speed == 2 || 1522 ha->max_supported_speed == 1 || 1523 ha->max_supported_speed == 0) { 1524 if (ha->min_supported_speed <= 4) 1525 speeds |= FDMI_PORT_SPEED_16GB; 1526 } 1527 if (ha->max_supported_speed == 1 || 1528 ha->max_supported_speed == 0) { 1529 if (ha->min_supported_speed <= 3) 1530 speeds |= FDMI_PORT_SPEED_8GB; 1531 } 1532 if (ha->max_supported_speed == 0) { 1533 if (ha->min_supported_speed <= 2) 1534 speeds |= FDMI_PORT_SPEED_4GB; 1535 } 1536 return speeds; 1537 } 1538 if (IS_QLA2031(ha)) { 1539 if ((ha->pdev->subsystem_vendor == 0x103C) && 1540 ((ha->pdev->subsystem_device == 0x8002) || 1541 (ha->pdev->subsystem_device == 0x8086))) { 1542 speeds = FDMI_PORT_SPEED_16GB; 1543 } else { 1544 speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB| 1545 FDMI_PORT_SPEED_4GB; 1546 } 1547 return speeds; 1548 } 1549 if (IS_QLA25XX(ha) || IS_QLAFX00(ha)) 1550 return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB| 1551 FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB; 1552 if (IS_QLA24XX_TYPE(ha)) 1553 return FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_2GB| 1554 FDMI_PORT_SPEED_1GB; 1555 if (IS_QLA23XX(ha)) 1556 return FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB; 1557 return FDMI_PORT_SPEED_1GB; 1558} 1559 1560uint 1561qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha) 1562{ 1563 switch (ha->link_data_rate) { 1564 case PORT_SPEED_1GB: 1565 return FDMI_PORT_SPEED_1GB; 1566 case PORT_SPEED_2GB: 1567 return FDMI_PORT_SPEED_2GB; 1568 case PORT_SPEED_4GB: 1569 return FDMI_PORT_SPEED_4GB; 1570 case PORT_SPEED_8GB: 1571 return FDMI_PORT_SPEED_8GB; 1572 case PORT_SPEED_10GB: 1573 return FDMI_PORT_SPEED_10GB; 1574 case PORT_SPEED_16GB: 1575 return FDMI_PORT_SPEED_16GB; 1576 case PORT_SPEED_32GB: 1577 return FDMI_PORT_SPEED_32GB; 1578 case PORT_SPEED_64GB: 1579 return FDMI_PORT_SPEED_64GB; 1580 default: 1581 return FDMI_PORT_SPEED_UNKNOWN; 1582 } 1583} 1584 1585/** 1586 * qla2x00_hba_attributes() - perform HBA attributes registration 1587 * @vha: HA context 1588 * @entries: number of entries to use 1589 * @callopt: Option to issue extended or standard FDMI 1590 * command parameter 1591 * 1592 * Returns 0 on success. 1593 */ 1594static unsigned long 1595qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries, 1596 unsigned int callopt) 1597{ 1598 struct qla_hw_data *ha = vha->hw; 1599 struct init_cb_24xx *icb24 = (void *)ha->init_cb; 1600 struct new_utsname *p_sysid = utsname(); 1601 struct ct_fdmi_hba_attr *eiter; 1602 uint16_t alen; 1603 unsigned long size = 0; 1604 1605 /* Nodename. */ 1606 eiter = entries + size; 1607 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME); 1608 memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name)); 1609 alen = sizeof(eiter->a.node_name); 1610 alen += FDMI_ATTR_TYPELEN(eiter); 1611 eiter->len = cpu_to_be16(alen); 1612 size += alen; 1613 ql_dbg(ql_dbg_disc, vha, 0x20a0, 1614 "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name)); 1615 /* Manufacturer. */ 1616 eiter = entries + size; 1617 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER); 1618 alen = scnprintf( 1619 eiter->a.manufacturer, sizeof(eiter->a.manufacturer), 1620 "%s", "QLogic Corporation"); 1621 alen += FDMI_ATTR_ALIGNMENT(alen); 1622 alen += FDMI_ATTR_TYPELEN(eiter); 1623 eiter->len = cpu_to_be16(alen); 1624 size += alen; 1625 ql_dbg(ql_dbg_disc, vha, 0x20a1, 1626 "MANUFACTURER = %s.\n", eiter->a.manufacturer); 1627 /* Serial number. */ 1628 eiter = entries + size; 1629 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER); 1630 alen = 0; 1631 if (IS_FWI2_CAPABLE(ha)) { 1632 alen = qla2xxx_get_vpd_field(vha, "SN", 1633 eiter->a.serial_num, sizeof(eiter->a.serial_num)); 1634 } 1635 if (!alen) { 1636 uint32_t sn = ((ha->serial0 & 0x1f) << 16) | 1637 (ha->serial2 << 8) | ha->serial1; 1638 alen = scnprintf( 1639 eiter->a.serial_num, sizeof(eiter->a.serial_num), 1640 "%c%05d", 'A' + sn / 100000, sn % 100000); 1641 } 1642 alen += FDMI_ATTR_ALIGNMENT(alen); 1643 alen += FDMI_ATTR_TYPELEN(eiter); 1644 eiter->len = cpu_to_be16(alen); 1645 size += alen; 1646 ql_dbg(ql_dbg_disc, vha, 0x20a2, 1647 "SERIAL NUMBER = %s.\n", eiter->a.serial_num); 1648 /* Model name. */ 1649 eiter = entries + size; 1650 eiter->type = cpu_to_be16(FDMI_HBA_MODEL); 1651 alen = scnprintf( 1652 eiter->a.model, sizeof(eiter->a.model), 1653 "%s", ha->model_number); 1654 alen += FDMI_ATTR_ALIGNMENT(alen); 1655 alen += FDMI_ATTR_TYPELEN(eiter); 1656 eiter->len = cpu_to_be16(alen); 1657 size += alen; 1658 ql_dbg(ql_dbg_disc, vha, 0x20a3, 1659 "MODEL NAME = %s.\n", eiter->a.model); 1660 /* Model description. */ 1661 eiter = entries + size; 1662 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION); 1663 alen = scnprintf( 1664 eiter->a.model_desc, sizeof(eiter->a.model_desc), 1665 "%s", ha->model_desc); 1666 alen += FDMI_ATTR_ALIGNMENT(alen); 1667 alen += FDMI_ATTR_TYPELEN(eiter); 1668 eiter->len = cpu_to_be16(alen); 1669 size += alen; 1670 ql_dbg(ql_dbg_disc, vha, 0x20a4, 1671 "MODEL DESCRIPTION = %s.\n", eiter->a.model_desc); 1672 /* Hardware version. */ 1673 eiter = entries + size; 1674 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION); 1675 alen = 0; 1676 if (IS_FWI2_CAPABLE(ha)) { 1677 if (!alen) { 1678 alen = qla2xxx_get_vpd_field(vha, "MN", 1679 eiter->a.hw_version, sizeof(eiter->a.hw_version)); 1680 } 1681 if (!alen) { 1682 alen = qla2xxx_get_vpd_field(vha, "EC", 1683 eiter->a.hw_version, sizeof(eiter->a.hw_version)); 1684 } 1685 } 1686 if (!alen) { 1687 alen = scnprintf( 1688 eiter->a.hw_version, sizeof(eiter->a.hw_version), 1689 "HW:%s", ha->adapter_id); 1690 } 1691 alen += FDMI_ATTR_ALIGNMENT(alen); 1692 alen += FDMI_ATTR_TYPELEN(eiter); 1693 eiter->len = cpu_to_be16(alen); 1694 size += alen; 1695 ql_dbg(ql_dbg_disc, vha, 0x20a5, 1696 "HARDWARE VERSION = %s.\n", eiter->a.hw_version); 1697 /* Driver version. */ 1698 eiter = entries + size; 1699 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION); 1700 alen = scnprintf( 1701 eiter->a.driver_version, sizeof(eiter->a.driver_version), 1702 "%s", qla2x00_version_str); 1703 alen += FDMI_ATTR_ALIGNMENT(alen); 1704 alen += FDMI_ATTR_TYPELEN(eiter); 1705 eiter->len = cpu_to_be16(alen); 1706 size += alen; 1707 ql_dbg(ql_dbg_disc, vha, 0x20a6, 1708 "DRIVER VERSION = %s.\n", eiter->a.driver_version); 1709 /* Option ROM version. */ 1710 eiter = entries + size; 1711 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION); 1712 alen = scnprintf( 1713 eiter->a.orom_version, sizeof(eiter->a.orom_version), 1714 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]); 1715 alen += FDMI_ATTR_ALIGNMENT(alen); 1716 alen += FDMI_ATTR_TYPELEN(eiter); 1717 eiter->len = cpu_to_be16(alen); 1718 size += alen; 1719 1720 ql_dbg(ql_dbg_disc, vha, 0x20a7, 1721 "OPTROM VERSION = %d.%02d.\n", 1722 eiter->a.orom_version[1], eiter->a.orom_version[0]); 1723 /* Firmware version */ 1724 eiter = entries + size; 1725 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION); 1726 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version, 1727 sizeof(eiter->a.fw_version)); 1728 alen += FDMI_ATTR_ALIGNMENT(alen); 1729 alen += FDMI_ATTR_TYPELEN(eiter); 1730 eiter->len = cpu_to_be16(alen); 1731 size += alen; 1732 ql_dbg(ql_dbg_disc, vha, 0x20a8, 1733 "FIRMWARE VERSION = %s.\n", eiter->a.fw_version); 1734 /* OS Name and Version */ 1735 eiter = entries + size; 1736 eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION); 1737 alen = 0; 1738 if (p_sysid) { 1739 alen = scnprintf( 1740 eiter->a.os_version, sizeof(eiter->a.os_version), 1741 "%s %s %s", 1742 p_sysid->sysname, p_sysid->release, p_sysid->machine); 1743 } 1744 if (!alen) { 1745 alen = scnprintf( 1746 eiter->a.os_version, sizeof(eiter->a.os_version), 1747 "%s %s", 1748 "Linux", fc_host_system_hostname(vha->host)); 1749 } 1750 alen += FDMI_ATTR_ALIGNMENT(alen); 1751 alen += FDMI_ATTR_TYPELEN(eiter); 1752 eiter->len = cpu_to_be16(alen); 1753 size += alen; 1754 ql_dbg(ql_dbg_disc, vha, 0x20a9, 1755 "OS VERSION = %s.\n", eiter->a.os_version); 1756 if (callopt == CALLOPT_FDMI1) 1757 goto done; 1758 /* MAX CT Payload Length */ 1759 eiter = entries + size; 1760 eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH); 1761 eiter->a.max_ct_len = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ? 1762 icb24->frame_payload_size : ha->init_cb->frame_payload_size)); 1763 alen = sizeof(eiter->a.max_ct_len); 1764 alen += FDMI_ATTR_TYPELEN(eiter); 1765 eiter->len = cpu_to_be16(alen); 1766 size += alen; 1767 ql_dbg(ql_dbg_disc, vha, 0x20aa, 1768 "CT PAYLOAD LENGTH = 0x%x.\n", be32_to_cpu(eiter->a.max_ct_len)); 1769 /* Node Symbolic Name */ 1770 eiter = entries + size; 1771 eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME); 1772 alen = qla2x00_get_sym_node_name(vha, eiter->a.sym_name, 1773 sizeof(eiter->a.sym_name)); 1774 alen += FDMI_ATTR_ALIGNMENT(alen); 1775 alen += FDMI_ATTR_TYPELEN(eiter); 1776 eiter->len = cpu_to_be16(alen); 1777 size += alen; 1778 ql_dbg(ql_dbg_disc, vha, 0x20ab, 1779 "SYMBOLIC NAME = %s.\n", eiter->a.sym_name); 1780 /* Vendor Specific information */ 1781 eiter = entries + size; 1782 eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_SPECIFIC_INFO); 1783 eiter->a.vendor_specific_info = cpu_to_be32(PCI_VENDOR_ID_QLOGIC); 1784 alen = sizeof(eiter->a.vendor_specific_info); 1785 alen += FDMI_ATTR_TYPELEN(eiter); 1786 eiter->len = cpu_to_be16(alen); 1787 size += alen; 1788 ql_dbg(ql_dbg_disc, vha, 0x20ac, 1789 "VENDOR SPECIFIC INFO = 0x%x.\n", 1790 be32_to_cpu(eiter->a.vendor_specific_info)); 1791 /* Num Ports */ 1792 eiter = entries + size; 1793 eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS); 1794 eiter->a.num_ports = cpu_to_be32(1); 1795 alen = sizeof(eiter->a.num_ports); 1796 alen += FDMI_ATTR_TYPELEN(eiter); 1797 eiter->len = cpu_to_be16(alen); 1798 size += alen; 1799 ql_dbg(ql_dbg_disc, vha, 0x20ad, 1800 "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports)); 1801 /* Fabric Name */ 1802 eiter = entries + size; 1803 eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME); 1804 memcpy(eiter->a.fabric_name, vha->fabric_node_name, 1805 sizeof(eiter->a.fabric_name)); 1806 alen = sizeof(eiter->a.fabric_name); 1807 alen += FDMI_ATTR_TYPELEN(eiter); 1808 eiter->len = cpu_to_be16(alen); 1809 size += alen; 1810 ql_dbg(ql_dbg_disc, vha, 0x20ae, 1811 "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name)); 1812 /* BIOS Version */ 1813 eiter = entries + size; 1814 eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME); 1815 alen = scnprintf( 1816 eiter->a.bios_name, sizeof(eiter->a.bios_name), 1817 "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]); 1818 alen += FDMI_ATTR_ALIGNMENT(alen); 1819 alen += FDMI_ATTR_TYPELEN(eiter); 1820 eiter->len = cpu_to_be16(alen); 1821 size += alen; 1822 ql_dbg(ql_dbg_disc, vha, 0x20af, 1823 "BIOS NAME = %s\n", eiter->a.bios_name); 1824 /* Vendor Identifier */ 1825 eiter = entries + size; 1826 eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_IDENTIFIER); 1827 alen = scnprintf( 1828 eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier), 1829 "%s", "QLGC"); 1830 alen += FDMI_ATTR_ALIGNMENT(alen); 1831 alen += FDMI_ATTR_TYPELEN(eiter); 1832 eiter->len = cpu_to_be16(alen); 1833 size += alen; 1834 ql_dbg(ql_dbg_disc, vha, 0x20b0, 1835 "VENDOR IDENTIFIER = %s.\n", eiter->a.vendor_identifier); 1836done: 1837 return size; 1838} 1839 1840/** 1841 * qla2x00_port_attributes() - perform Port attributes registration 1842 * @vha: HA context 1843 * @entries: number of entries to use 1844 * @callopt: Option to issue extended or standard FDMI 1845 * command parameter 1846 * 1847 * Returns 0 on success. 1848 */ 1849static unsigned long 1850qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, 1851 unsigned int callopt) 1852{ 1853 struct qla_hw_data *ha = vha->hw; 1854 struct init_cb_24xx *icb24 = (void *)ha->init_cb; 1855 struct new_utsname *p_sysid = utsname(); 1856 char *hostname = p_sysid ? 1857 p_sysid->nodename : fc_host_system_hostname(vha->host); 1858 struct ct_fdmi_port_attr *eiter; 1859 uint16_t alen; 1860 unsigned long size = 0; 1861 1862 /* FC4 types. */ 1863 eiter = entries + size; 1864 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES); 1865 eiter->a.fc4_types[0] = 0x00; 1866 eiter->a.fc4_types[1] = 0x00; 1867 eiter->a.fc4_types[2] = 0x01; 1868 eiter->a.fc4_types[3] = 0x00; 1869 alen = sizeof(eiter->a.fc4_types); 1870 alen += FDMI_ATTR_TYPELEN(eiter); 1871 eiter->len = cpu_to_be16(alen); 1872 size += alen; 1873 ql_dbg(ql_dbg_disc, vha, 0x20c0, 1874 "FC4 TYPES = %016llx.\n", *(uint64_t *)eiter->a.fc4_types); 1875 if (vha->flags.nvme_enabled) { 1876 eiter->a.fc4_types[6] = 1; /* NVMe type 28h */ 1877 ql_dbg(ql_dbg_disc, vha, 0x211f, 1878 "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n", 1879 eiter->a.fc4_types[6]); 1880 } 1881 /* Supported speed. */ 1882 eiter = entries + size; 1883 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED); 1884 eiter->a.sup_speed = cpu_to_be32( 1885 qla25xx_fdmi_port_speed_capability(ha)); 1886 alen = sizeof(eiter->a.sup_speed); 1887 alen += FDMI_ATTR_TYPELEN(eiter); 1888 eiter->len = cpu_to_be16(alen); 1889 size += alen; 1890 ql_dbg(ql_dbg_disc, vha, 0x20c1, 1891 "SUPPORTED SPEED = %x.\n", be32_to_cpu(eiter->a.sup_speed)); 1892 /* Current speed. */ 1893 eiter = entries + size; 1894 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED); 1895 eiter->a.cur_speed = cpu_to_be32( 1896 qla25xx_fdmi_port_speed_currently(ha)); 1897 alen = sizeof(eiter->a.cur_speed); 1898 alen += FDMI_ATTR_TYPELEN(eiter); 1899 eiter->len = cpu_to_be16(alen); 1900 size += alen; 1901 ql_dbg(ql_dbg_disc, vha, 0x20c2, 1902 "CURRENT SPEED = %x.\n", be32_to_cpu(eiter->a.cur_speed)); 1903 /* Max frame size. */ 1904 eiter = entries + size; 1905 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE); 1906 eiter->a.max_frame_size = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ? 1907 icb24->frame_payload_size : ha->init_cb->frame_payload_size)); 1908 alen = sizeof(eiter->a.max_frame_size); 1909 alen += FDMI_ATTR_TYPELEN(eiter); 1910 eiter->len = cpu_to_be16(alen); 1911 size += alen; 1912 ql_dbg(ql_dbg_disc, vha, 0x20c3, 1913 "MAX FRAME SIZE = %x.\n", be32_to_cpu(eiter->a.max_frame_size)); 1914 /* OS device name. */ 1915 eiter = entries + size; 1916 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME); 1917 alen = scnprintf( 1918 eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name), 1919 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no); 1920 alen += FDMI_ATTR_ALIGNMENT(alen); 1921 alen += FDMI_ATTR_TYPELEN(eiter); 1922 eiter->len = cpu_to_be16(alen); 1923 size += alen; 1924 ql_dbg(ql_dbg_disc, vha, 0x20c4, 1925 "OS DEVICE NAME = %s.\n", eiter->a.os_dev_name); 1926 /* Hostname. */ 1927 eiter = entries + size; 1928 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME); 1929 if (!*hostname || !strncmp(hostname, "(none)", 6)) 1930 hostname = "Linux-default"; 1931 alen = scnprintf( 1932 eiter->a.host_name, sizeof(eiter->a.host_name), 1933 "%s", hostname); 1934 alen += FDMI_ATTR_ALIGNMENT(alen); 1935 alen += FDMI_ATTR_TYPELEN(eiter); 1936 eiter->len = cpu_to_be16(alen); 1937 size += alen; 1938 ql_dbg(ql_dbg_disc, vha, 0x20c5, 1939 "HOSTNAME = %s.\n", eiter->a.host_name); 1940 1941 if (callopt == CALLOPT_FDMI1) 1942 goto done; 1943 1944 /* Node Name */ 1945 eiter = entries + size; 1946 eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME); 1947 memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name)); 1948 alen = sizeof(eiter->a.node_name); 1949 alen += FDMI_ATTR_TYPELEN(eiter); 1950 eiter->len = cpu_to_be16(alen); 1951 size += alen; 1952 ql_dbg(ql_dbg_disc, vha, 0x20c6, 1953 "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name)); 1954 1955 /* Port Name */ 1956 eiter = entries + size; 1957 eiter->type = cpu_to_be16(FDMI_PORT_NAME); 1958 memcpy(eiter->a.port_name, vha->port_name, sizeof(eiter->a.port_name)); 1959 alen = sizeof(eiter->a.port_name); 1960 alen += FDMI_ATTR_TYPELEN(eiter); 1961 eiter->len = cpu_to_be16(alen); 1962 size += alen; 1963 ql_dbg(ql_dbg_disc, vha, 0x20c7, 1964 "PORTNAME = %016llx.\n", wwn_to_u64(eiter->a.port_name)); 1965 1966 /* Port Symbolic Name */ 1967 eiter = entries + size; 1968 eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME); 1969 alen = qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name, 1970 sizeof(eiter->a.port_sym_name)); 1971 alen += FDMI_ATTR_ALIGNMENT(alen); 1972 alen += FDMI_ATTR_TYPELEN(eiter); 1973 eiter->len = cpu_to_be16(alen); 1974 size += alen; 1975 ql_dbg(ql_dbg_disc, vha, 0x20c8, 1976 "PORT SYMBOLIC NAME = %s\n", eiter->a.port_sym_name); 1977 1978 /* Port Type */ 1979 eiter = entries + size; 1980 eiter->type = cpu_to_be16(FDMI_PORT_TYPE); 1981 eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE); 1982 alen = sizeof(eiter->a.port_type); 1983 alen += FDMI_ATTR_TYPELEN(eiter); 1984 eiter->len = cpu_to_be16(alen); 1985 size += alen; 1986 ql_dbg(ql_dbg_disc, vha, 0x20c9, 1987 "PORT TYPE = %x.\n", be32_to_cpu(eiter->a.port_type)); 1988 1989 /* Supported Class of Service */ 1990 eiter = entries + size; 1991 eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS); 1992 eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3); 1993 alen = sizeof(eiter->a.port_supported_cos); 1994 alen += FDMI_ATTR_TYPELEN(eiter); 1995 eiter->len = cpu_to_be16(alen); 1996 size += alen; 1997 ql_dbg(ql_dbg_disc, vha, 0x20ca, 1998 "SUPPORTED COS = %08x\n", be32_to_cpu(eiter->a.port_supported_cos)); 1999 2000 /* Port Fabric Name */ 2001 eiter = entries + size; 2002 eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME); 2003 memcpy(eiter->a.fabric_name, vha->fabric_node_name, 2004 sizeof(eiter->a.fabric_name)); 2005 alen = sizeof(eiter->a.fabric_name); 2006 alen += FDMI_ATTR_TYPELEN(eiter); 2007 eiter->len = cpu_to_be16(alen); 2008 size += alen; 2009 ql_dbg(ql_dbg_disc, vha, 0x20cb, 2010 "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name)); 2011 2012 /* FC4_type */ 2013 eiter = entries + size; 2014 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE); 2015 eiter->a.port_fc4_type[0] = 0x00; 2016 eiter->a.port_fc4_type[1] = 0x00; 2017 eiter->a.port_fc4_type[2] = 0x01; 2018 eiter->a.port_fc4_type[3] = 0x00; 2019 alen = sizeof(eiter->a.port_fc4_type); 2020 alen += FDMI_ATTR_TYPELEN(eiter); 2021 eiter->len = cpu_to_be16(alen); 2022 size += alen; 2023 ql_dbg(ql_dbg_disc, vha, 0x20cc, 2024 "PORT ACTIVE FC4 TYPE = %016llx.\n", 2025 *(uint64_t *)eiter->a.port_fc4_type); 2026 2027 /* Port State */ 2028 eiter = entries + size; 2029 eiter->type = cpu_to_be16(FDMI_PORT_STATE); 2030 eiter->a.port_state = cpu_to_be32(2); 2031 alen = sizeof(eiter->a.port_state); 2032 alen += FDMI_ATTR_TYPELEN(eiter); 2033 eiter->len = cpu_to_be16(alen); 2034 size += alen; 2035 ql_dbg(ql_dbg_disc, vha, 0x20cd, 2036 "PORT_STATE = %x.\n", be32_to_cpu(eiter->a.port_state)); 2037 2038 /* Number of Ports */ 2039 eiter = entries + size; 2040 eiter->type = cpu_to_be16(FDMI_PORT_COUNT); 2041 eiter->a.num_ports = cpu_to_be32(1); 2042 alen = sizeof(eiter->a.num_ports); 2043 alen += FDMI_ATTR_TYPELEN(eiter); 2044 eiter->len = cpu_to_be16(alen); 2045 size += alen; 2046 ql_dbg(ql_dbg_disc, vha, 0x20ce, 2047 "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports)); 2048 2049 /* Port Identifier */ 2050 eiter = entries + size; 2051 eiter->type = cpu_to_be16(FDMI_PORT_IDENTIFIER); 2052 eiter->a.port_id = cpu_to_be32(vha->d_id.b24); 2053 alen = sizeof(eiter->a.port_id); 2054 alen += FDMI_ATTR_TYPELEN(eiter); 2055 eiter->len = cpu_to_be16(alen); 2056 size += alen; 2057 ql_dbg(ql_dbg_disc, vha, 0x20cf, 2058 "PORT ID = %x.\n", be32_to_cpu(eiter->a.port_id)); 2059 2060 if (callopt == CALLOPT_FDMI2 || !ql2xsmartsan) 2061 goto done; 2062 2063 /* Smart SAN Service Category (Populate Smart SAN Initiator)*/ 2064 eiter = entries + size; 2065 eiter->type = cpu_to_be16(FDMI_SMARTSAN_SERVICE); 2066 alen = scnprintf( 2067 eiter->a.smartsan_service, sizeof(eiter->a.smartsan_service), 2068 "%s", "Smart SAN Initiator"); 2069 alen += FDMI_ATTR_ALIGNMENT(alen); 2070 alen += FDMI_ATTR_TYPELEN(eiter); 2071 eiter->len = cpu_to_be16(alen); 2072 size += alen; 2073 ql_dbg(ql_dbg_disc, vha, 0x20d0, 2074 "SMARTSAN SERVICE CATEGORY = %s.\n", eiter->a.smartsan_service); 2075 2076 /* Smart SAN GUID (NWWN+PWWN) */ 2077 eiter = entries + size; 2078 eiter->type = cpu_to_be16(FDMI_SMARTSAN_GUID); 2079 memcpy(eiter->a.smartsan_guid, vha->node_name, WWN_SIZE); 2080 memcpy(eiter->a.smartsan_guid + WWN_SIZE, vha->port_name, WWN_SIZE); 2081 alen = sizeof(eiter->a.smartsan_guid); 2082 alen += FDMI_ATTR_TYPELEN(eiter); 2083 eiter->len = cpu_to_be16(alen); 2084 size += alen; 2085 ql_dbg(ql_dbg_disc, vha, 0x20d1, 2086 "Smart SAN GUID = %016llx-%016llx\n", 2087 wwn_to_u64(eiter->a.smartsan_guid), 2088 wwn_to_u64(eiter->a.smartsan_guid + WWN_SIZE)); 2089 2090 /* Smart SAN Version (populate "Smart SAN Version 1.0") */ 2091 eiter = entries + size; 2092 eiter->type = cpu_to_be16(FDMI_SMARTSAN_VERSION); 2093 alen = scnprintf( 2094 eiter->a.smartsan_version, sizeof(eiter->a.smartsan_version), 2095 "%s", "Smart SAN Version 2.0"); 2096 alen += FDMI_ATTR_ALIGNMENT(alen); 2097 alen += FDMI_ATTR_TYPELEN(eiter); 2098 eiter->len = cpu_to_be16(alen); 2099 size += alen; 2100 ql_dbg(ql_dbg_disc, vha, 0x20d2, 2101 "SMARTSAN VERSION = %s\n", eiter->a.smartsan_version); 2102 2103 /* Smart SAN Product Name (Specify Adapter Model No) */ 2104 eiter = entries + size; 2105 eiter->type = cpu_to_be16(FDMI_SMARTSAN_PROD_NAME); 2106 alen = scnprintf(eiter->a.smartsan_prod_name, 2107 sizeof(eiter->a.smartsan_prod_name), 2108 "ISP%04x", ha->pdev->device); 2109 alen += FDMI_ATTR_ALIGNMENT(alen); 2110 alen += FDMI_ATTR_TYPELEN(eiter); 2111 eiter->len = cpu_to_be16(alen); 2112 size += alen; 2113 ql_dbg(ql_dbg_disc, vha, 0x20d3, 2114 "SMARTSAN PRODUCT NAME = %s\n", eiter->a.smartsan_prod_name); 2115 2116 /* Smart SAN Port Info (specify: 1=Physical, 2=NPIV, 3=SRIOV) */ 2117 eiter = entries + size; 2118 eiter->type = cpu_to_be16(FDMI_SMARTSAN_PORT_INFO); 2119 eiter->a.smartsan_port_info = cpu_to_be32(vha->vp_idx ? 2 : 1); 2120 alen = sizeof(eiter->a.smartsan_port_info); 2121 alen += FDMI_ATTR_TYPELEN(eiter); 2122 eiter->len = cpu_to_be16(alen); 2123 size += alen; 2124 ql_dbg(ql_dbg_disc, vha, 0x20d4, 2125 "SMARTSAN PORT INFO = %x\n", eiter->a.smartsan_port_info); 2126 2127 /* Smart SAN Security Support */ 2128 eiter = entries + size; 2129 eiter->type = cpu_to_be16(FDMI_SMARTSAN_SECURITY_SUPPORT); 2130 eiter->a.smartsan_security_support = cpu_to_be32(1); 2131 alen = sizeof(eiter->a.smartsan_security_support); 2132 alen += FDMI_ATTR_TYPELEN(eiter); 2133 eiter->len = cpu_to_be16(alen); 2134 size += alen; 2135 ql_dbg(ql_dbg_disc, vha, 0x20d6, 2136 "SMARTSAN SECURITY SUPPORT = %d\n", 2137 be32_to_cpu(eiter->a.smartsan_security_support)); 2138 2139done: 2140 return size; 2141} 2142 2143/** 2144 * qla2x00_fdmi_rhba() - perform RHBA FDMI registration 2145 * @vha: HA context 2146 * @callopt: Option to issue FDMI registration 2147 * 2148 * Returns 0 on success. 2149 */ 2150static int 2151qla2x00_fdmi_rhba(scsi_qla_host_t *vha, unsigned int callopt) 2152{ 2153 struct qla_hw_data *ha = vha->hw; 2154 unsigned long size = 0; 2155 unsigned int rval, count; 2156 ms_iocb_entry_t *ms_pkt; 2157 struct ct_sns_req *ct_req; 2158 struct ct_sns_rsp *ct_rsp; 2159 void *entries; 2160 2161 count = callopt != CALLOPT_FDMI1 ? 2162 FDMI2_HBA_ATTR_COUNT : FDMI1_HBA_ATTR_COUNT; 2163 2164 size = RHBA_RSP_SIZE; 2165 2166 ql_dbg(ql_dbg_disc, vha, 0x20e0, 2167 "RHBA (callopt=%x count=%u size=%lu).\n", callopt, count, size); 2168 2169 /* Request size adjusted after CT preparation */ 2170 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size); 2171 2172 /* Prepare CT request */ 2173 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, size); 2174 ct_rsp = &ha->ct_sns->p.rsp; 2175 2176 /* Prepare FDMI command entries */ 2177 memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, 2178 sizeof(ct_req->req.rhba.hba_identifier)); 2179 size += sizeof(ct_req->req.rhba.hba_identifier); 2180 2181 ct_req->req.rhba.entry_count = cpu_to_be32(1); 2182 size += sizeof(ct_req->req.rhba.entry_count); 2183 2184 memcpy(ct_req->req.rhba.port_name, vha->port_name, 2185 sizeof(ct_req->req.rhba.port_name)); 2186 size += sizeof(ct_req->req.rhba.port_name); 2187 2188 /* Attribute count */ 2189 ct_req->req.rhba.attrs.count = cpu_to_be32(count); 2190 size += sizeof(ct_req->req.rhba.attrs.count); 2191 2192 /* Attribute block */ 2193 entries = &ct_req->req.rhba.attrs.entry; 2194 2195 size += qla2x00_hba_attributes(vha, entries, callopt); 2196 2197 /* Update MS request size. */ 2198 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 2199 2200 ql_dbg(ql_dbg_disc, vha, 0x20e1, 2201 "RHBA %016llx %016llx.\n", 2202 wwn_to_u64(ct_req->req.rhba.hba_identifier), 2203 wwn_to_u64(ct_req->req.rhba.port_name)); 2204 2205 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20e2, 2206 entries, size); 2207 2208 /* Execute MS IOCB */ 2209 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2210 sizeof(*ha->ms_iocb)); 2211 if (rval) { 2212 ql_dbg(ql_dbg_disc, vha, 0x20e3, 2213 "RHBA iocb failed (%d).\n", rval); 2214 return rval; 2215 } 2216 2217 rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA"); 2218 if (rval) { 2219 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && 2220 ct_rsp->header.explanation_code == 2221 CT_EXPL_ALREADY_REGISTERED) { 2222 ql_dbg(ql_dbg_disc, vha, 0x20e4, 2223 "RHBA already registered.\n"); 2224 return QLA_ALREADY_REGISTERED; 2225 } 2226 2227 ql_dbg(ql_dbg_disc, vha, 0x20e5, 2228 "RHBA failed, CT Reason %#x, CT Explanation %#x\n", 2229 ct_rsp->header.reason_code, 2230 ct_rsp->header.explanation_code); 2231 return rval; 2232 } 2233 2234 ql_dbg(ql_dbg_disc, vha, 0x20e6, "RHBA exiting normally.\n"); 2235 return rval; 2236} 2237 2238 2239static int 2240qla2x00_fdmi_dhba(scsi_qla_host_t *vha) 2241{ 2242 int rval; 2243 struct qla_hw_data *ha = vha->hw; 2244 ms_iocb_entry_t *ms_pkt; 2245 struct ct_sns_req *ct_req; 2246 struct ct_sns_rsp *ct_rsp; 2247 /* Issue RPA */ 2248 /* Prepare common MS IOCB */ 2249 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE, 2250 DHBA_RSP_SIZE); 2251 /* Prepare CT request */ 2252 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE); 2253 ct_rsp = &ha->ct_sns->p.rsp; 2254 /* Prepare FDMI command arguments -- portname. */ 2255 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE); 2256 ql_dbg(ql_dbg_disc, vha, 0x2036, 2257 "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name); 2258 /* Execute MS IOCB */ 2259 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2260 sizeof(ms_iocb_entry_t)); 2261 if (rval != QLA_SUCCESS) { 2262 /*EMPTY*/ 2263 ql_dbg(ql_dbg_disc, vha, 0x2037, 2264 "DHBA issue IOCB failed (%d).\n", rval); 2265 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") != 2266 QLA_SUCCESS) { 2267 rval = QLA_FUNCTION_FAILED; 2268 } else { 2269 ql_dbg(ql_dbg_disc, vha, 0x2038, 2270 "DHBA exiting normally.\n"); 2271 } 2272 return rval; 2273} 2274 2275/** 2276 * qla2x00_fdmi_rprt() - perform RPRT registration 2277 * @vha: HA context 2278 * @callopt: Option to issue extended or standard FDMI 2279 * command parameter 2280 * 2281 * Returns 0 on success. 2282 */ 2283static int 2284qla2x00_fdmi_rprt(scsi_qla_host_t *vha, int callopt) 2285{ 2286 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev); 2287 struct qla_hw_data *ha = vha->hw; 2288 ulong size = 0; 2289 uint rval, count; 2290 ms_iocb_entry_t *ms_pkt; 2291 struct ct_sns_req *ct_req; 2292 struct ct_sns_rsp *ct_rsp; 2293 void *entries; 2294 count = callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ? 2295 FDMI2_SMARTSAN_PORT_ATTR_COUNT : 2296 callopt != CALLOPT_FDMI1 ? 2297 FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT; 2298 2299 size = RPRT_RSP_SIZE; 2300 ql_dbg(ql_dbg_disc, vha, 0x20e8, 2301 "RPRT (callopt=%x count=%u size=%lu).\n", callopt, count, size); 2302 /* Request size adjusted after CT preparation */ 2303 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size); 2304 /* Prepare CT request */ 2305 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPRT_CMD, size); 2306 ct_rsp = &ha->ct_sns->p.rsp; 2307 /* Prepare FDMI command entries */ 2308 memcpy(ct_req->req.rprt.hba_identifier, base_vha->port_name, 2309 sizeof(ct_req->req.rprt.hba_identifier)); 2310 size += sizeof(ct_req->req.rprt.hba_identifier); 2311 memcpy(ct_req->req.rprt.port_name, vha->port_name, 2312 sizeof(ct_req->req.rprt.port_name)); 2313 size += sizeof(ct_req->req.rprt.port_name); 2314 /* Attribute count */ 2315 ct_req->req.rprt.attrs.count = cpu_to_be32(count); 2316 size += sizeof(ct_req->req.rprt.attrs.count); 2317 /* Attribute block */ 2318 entries = ct_req->req.rprt.attrs.entry; 2319 size += qla2x00_port_attributes(vha, entries, callopt); 2320 /* Update MS request size. */ 2321 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 2322 ql_dbg(ql_dbg_disc, vha, 0x20e9, 2323 "RPRT %016llx %016llx.\n", 2324 wwn_to_u64(ct_req->req.rprt.port_name), 2325 wwn_to_u64(ct_req->req.rprt.port_name)); 2326 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ea, 2327 entries, size); 2328 /* Execute MS IOCB */ 2329 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2330 sizeof(*ha->ms_iocb)); 2331 if (rval) { 2332 ql_dbg(ql_dbg_disc, vha, 0x20eb, 2333 "RPRT iocb failed (%d).\n", rval); 2334 return rval; 2335 } 2336 rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPRT"); 2337 if (rval) { 2338 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && 2339 ct_rsp->header.explanation_code == 2340 CT_EXPL_ALREADY_REGISTERED) { 2341 ql_dbg(ql_dbg_disc, vha, 0x20ec, 2342 "RPRT already registered.\n"); 2343 return QLA_ALREADY_REGISTERED; 2344 } 2345 2346 ql_dbg(ql_dbg_disc, vha, 0x20ed, 2347 "RPRT failed, CT Reason code: %#x, CT Explanation %#x\n", 2348 ct_rsp->header.reason_code, 2349 ct_rsp->header.explanation_code); 2350 return rval; 2351 } 2352 ql_dbg(ql_dbg_disc, vha, 0x20ee, "RPRT exiting normally.\n"); 2353 return rval; 2354} 2355 2356/** 2357 * qla2x00_fdmi_rpa() - perform RPA registration 2358 * @vha: HA context 2359 * @callopt: Option to issue FDMI registration 2360 * 2361 * Returns 0 on success. 2362 */ 2363static int 2364qla2x00_fdmi_rpa(scsi_qla_host_t *vha, uint callopt) 2365{ 2366 struct qla_hw_data *ha = vha->hw; 2367 ulong size = 0; 2368 uint rval, count; 2369 ms_iocb_entry_t *ms_pkt; 2370 struct ct_sns_req *ct_req; 2371 struct ct_sns_rsp *ct_rsp; 2372 void *entries; 2373 2374 count = 2375 callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ? 2376 FDMI2_SMARTSAN_PORT_ATTR_COUNT : 2377 callopt != CALLOPT_FDMI1 ? 2378 FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT; 2379 2380 size = 2381 callopt != CALLOPT_FDMI1 ? 2382 SMARTSAN_RPA_RSP_SIZE : RPA_RSP_SIZE; 2383 2384 ql_dbg(ql_dbg_disc, vha, 0x20f0, 2385 "RPA (callopt=%x count=%u size=%lu).\n", callopt, count, size); 2386 2387 /* Request size adjusted after CT preparation */ 2388 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size); 2389 2390 /* Prepare CT request */ 2391 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, size); 2392 ct_rsp = &ha->ct_sns->p.rsp; 2393 2394 /* Prepare FDMI command entries. */ 2395 memcpy(ct_req->req.rpa.port_name, vha->port_name, 2396 sizeof(ct_req->req.rpa.port_name)); 2397 size += sizeof(ct_req->req.rpa.port_name); 2398 2399 /* Attribute count */ 2400 ct_req->req.rpa.attrs.count = cpu_to_be32(count); 2401 size += sizeof(ct_req->req.rpa.attrs.count); 2402 2403 /* Attribute block */ 2404 entries = ct_req->req.rpa.attrs.entry; 2405 2406 size += qla2x00_port_attributes(vha, entries, callopt); 2407 2408 /* Update MS request size. */ 2409 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 2410 2411 ql_dbg(ql_dbg_disc, vha, 0x20f1, 2412 "RPA %016llx.\n", wwn_to_u64(ct_req->req.rpa.port_name)); 2413 2414 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20f2, 2415 entries, size); 2416 2417 /* Execute MS IOCB */ 2418 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2419 sizeof(*ha->ms_iocb)); 2420 if (rval) { 2421 ql_dbg(ql_dbg_disc, vha, 0x20f3, 2422 "RPA iocb failed (%d).\n", rval); 2423 return rval; 2424 } 2425 2426 rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA"); 2427 if (rval) { 2428 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && 2429 ct_rsp->header.explanation_code == 2430 CT_EXPL_ALREADY_REGISTERED) { 2431 ql_dbg(ql_dbg_disc, vha, 0x20f4, 2432 "RPA already registered.\n"); 2433 return QLA_ALREADY_REGISTERED; 2434 } 2435 2436 ql_dbg(ql_dbg_disc, vha, 0x20f5, 2437 "RPA failed, CT Reason code: %#x, CT Explanation %#x\n", 2438 ct_rsp->header.reason_code, 2439 ct_rsp->header.explanation_code); 2440 return rval; 2441 } 2442 2443 ql_dbg(ql_dbg_disc, vha, 0x20f6, "RPA exiting normally.\n"); 2444 return rval; 2445} 2446 2447/** 2448 * qla2x00_fdmi_register() - 2449 * @vha: HA context 2450 * 2451 * Returns 0 on success. 2452 */ 2453int 2454qla2x00_fdmi_register(scsi_qla_host_t *vha) 2455{ 2456 int rval = QLA_SUCCESS; 2457 struct qla_hw_data *ha = vha->hw; 2458 2459 if (IS_QLA2100(ha) || IS_QLA2200(ha) || 2460 IS_QLAFX00(ha)) 2461 return rval; 2462 2463 rval = qla2x00_mgmt_svr_login(vha); 2464 if (rval) 2465 return rval; 2466 2467 /* For npiv/vport send rprt only */ 2468 if (vha->vp_idx) { 2469 if (ql2xsmartsan) 2470 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2_SMARTSAN); 2471 if (rval || !ql2xsmartsan) 2472 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2); 2473 if (rval) 2474 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI1); 2475 2476 return rval; 2477 } 2478 2479 /* Try fdmi2 first, if fails then try fdmi1 */ 2480 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2); 2481 if (rval) { 2482 if (rval != QLA_ALREADY_REGISTERED) 2483 goto try_fdmi; 2484 2485 rval = qla2x00_fdmi_dhba(vha); 2486 if (rval) 2487 goto try_fdmi; 2488 2489 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2); 2490 if (rval) 2491 goto try_fdmi; 2492 } 2493 2494 if (ql2xsmartsan) 2495 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2_SMARTSAN); 2496 if (rval || !ql2xsmartsan) 2497 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2); 2498 if (rval) 2499 goto try_fdmi; 2500 2501 return rval; 2502 2503try_fdmi: 2504 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1); 2505 if (rval) { 2506 if (rval != QLA_ALREADY_REGISTERED) 2507 return rval; 2508 2509 rval = qla2x00_fdmi_dhba(vha); 2510 if (rval) 2511 return rval; 2512 2513 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1); 2514 if (rval) 2515 return rval; 2516 } 2517 2518 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI1); 2519 2520 return rval; 2521} 2522 2523/** 2524 * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query. 2525 * @vha: HA context 2526 * @list: switch info entries to populate 2527 * 2528 * Returns 0 on success. 2529 */ 2530int 2531qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) 2532{ 2533 int rval = QLA_SUCCESS; 2534 uint16_t i; 2535 struct qla_hw_data *ha = vha->hw; 2536 ms_iocb_entry_t *ms_pkt; 2537 struct ct_sns_req *ct_req; 2538 struct ct_sns_rsp *ct_rsp; 2539 struct ct_arg arg; 2540 2541 if (!IS_IIDMA_CAPABLE(ha)) 2542 return QLA_FUNCTION_FAILED; 2543 2544 arg.iocb = ha->ms_iocb; 2545 arg.req_dma = ha->ct_sns_dma; 2546 arg.rsp_dma = ha->ct_sns_dma; 2547 arg.req_size = GFPN_ID_REQ_SIZE; 2548 arg.rsp_size = GFPN_ID_RSP_SIZE; 2549 arg.nport_handle = NPH_SNS; 2550 2551 for (i = 0; i < ha->max_fibre_devices; i++) { 2552 /* Issue GFPN_ID */ 2553 /* Prepare common MS IOCB */ 2554 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 2555 2556 /* Prepare CT request */ 2557 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD, 2558 GFPN_ID_RSP_SIZE); 2559 ct_rsp = &ha->ct_sns->p.rsp; 2560 2561 /* Prepare CT arguments -- port_id */ 2562 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); 2563 2564 /* Execute MS IOCB */ 2565 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2566 sizeof(ms_iocb_entry_t)); 2567 if (rval != QLA_SUCCESS) { 2568 /*EMPTY*/ 2569 ql_dbg(ql_dbg_disc, vha, 0x2023, 2570 "GFPN_ID issue IOCB failed (%d).\n", rval); 2571 break; 2572 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 2573 "GFPN_ID") != QLA_SUCCESS) { 2574 rval = QLA_FUNCTION_FAILED; 2575 break; 2576 } else { 2577 /* Save fabric portname */ 2578 memcpy(list[i].fabric_port_name, 2579 ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE); 2580 } 2581 2582 /* Last device exit. */ 2583 if (list[i].d_id.b.rsvd_1 != 0) 2584 break; 2585 } 2586 2587 return (rval); 2588} 2589 2590 2591static inline struct ct_sns_req * 2592qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd, 2593 uint16_t rsp_size) 2594{ 2595 memset(p, 0, sizeof(struct ct_sns_pkt)); 2596 2597 p->p.req.header.revision = 0x01; 2598 p->p.req.header.gs_type = 0xFA; 2599 p->p.req.header.gs_subtype = 0x01; 2600 p->p.req.command = cpu_to_be16(cmd); 2601 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4); 2602 2603 return &p->p.req; 2604} 2605 2606static uint16_t 2607qla2x00_port_speed_capability(uint16_t speed) 2608{ 2609 switch (speed) { 2610 case BIT_15: 2611 return PORT_SPEED_1GB; 2612 case BIT_14: 2613 return PORT_SPEED_2GB; 2614 case BIT_13: 2615 return PORT_SPEED_4GB; 2616 case BIT_12: 2617 return PORT_SPEED_10GB; 2618 case BIT_11: 2619 return PORT_SPEED_8GB; 2620 case BIT_10: 2621 return PORT_SPEED_16GB; 2622 case BIT_8: 2623 return PORT_SPEED_32GB; 2624 case BIT_7: 2625 return PORT_SPEED_64GB; 2626 default: 2627 return PORT_SPEED_UNKNOWN; 2628 } 2629} 2630 2631/** 2632 * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query. 2633 * @vha: HA context 2634 * @list: switch info entries to populate 2635 * 2636 * Returns 0 on success. 2637 */ 2638int 2639qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) 2640{ 2641 int rval; 2642 uint16_t i; 2643 struct qla_hw_data *ha = vha->hw; 2644 ms_iocb_entry_t *ms_pkt; 2645 struct ct_sns_req *ct_req; 2646 struct ct_sns_rsp *ct_rsp; 2647 struct ct_arg arg; 2648 2649 if (!IS_IIDMA_CAPABLE(ha)) 2650 return QLA_FUNCTION_FAILED; 2651 if (!ha->flags.gpsc_supported) 2652 return QLA_FUNCTION_FAILED; 2653 2654 rval = qla2x00_mgmt_svr_login(vha); 2655 if (rval) 2656 return rval; 2657 2658 arg.iocb = ha->ms_iocb; 2659 arg.req_dma = ha->ct_sns_dma; 2660 arg.rsp_dma = ha->ct_sns_dma; 2661 arg.req_size = GPSC_REQ_SIZE; 2662 arg.rsp_size = GPSC_RSP_SIZE; 2663 arg.nport_handle = vha->mgmt_svr_loop_id; 2664 2665 for (i = 0; i < ha->max_fibre_devices; i++) { 2666 /* Issue GFPN_ID */ 2667 /* Prepare common MS IOCB */ 2668 ms_pkt = qla24xx_prep_ms_iocb(vha, &arg); 2669 2670 /* Prepare CT request */ 2671 ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD, 2672 GPSC_RSP_SIZE); 2673 ct_rsp = &ha->ct_sns->p.rsp; 2674 2675 /* Prepare CT arguments -- port_name */ 2676 memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name, 2677 WWN_SIZE); 2678 2679 /* Execute MS IOCB */ 2680 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2681 sizeof(ms_iocb_entry_t)); 2682 if (rval != QLA_SUCCESS) { 2683 /*EMPTY*/ 2684 ql_dbg(ql_dbg_disc, vha, 0x2059, 2685 "GPSC issue IOCB failed (%d).\n", rval); 2686 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 2687 "GPSC")) != QLA_SUCCESS) { 2688 /* FM command unsupported? */ 2689 if (rval == QLA_INVALID_COMMAND && 2690 (ct_rsp->header.reason_code == 2691 CT_REASON_INVALID_COMMAND_CODE || 2692 ct_rsp->header.reason_code == 2693 CT_REASON_COMMAND_UNSUPPORTED)) { 2694 ql_dbg(ql_dbg_disc, vha, 0x205a, 2695 "GPSC command unsupported, disabling " 2696 "query.\n"); 2697 ha->flags.gpsc_supported = 0; 2698 rval = QLA_FUNCTION_FAILED; 2699 break; 2700 } 2701 rval = QLA_FUNCTION_FAILED; 2702 } else { 2703 list->fp_speed = qla2x00_port_speed_capability( 2704 be16_to_cpu(ct_rsp->rsp.gpsc.speed)); 2705 ql_dbg(ql_dbg_disc, vha, 0x205b, 2706 "GPSC ext entry - fpn " 2707 "%8phN speeds=%04x speed=%04x.\n", 2708 list[i].fabric_port_name, 2709 be16_to_cpu(ct_rsp->rsp.gpsc.speeds), 2710 be16_to_cpu(ct_rsp->rsp.gpsc.speed)); 2711 } 2712 2713 /* Last device exit. */ 2714 if (list[i].d_id.b.rsvd_1 != 0) 2715 break; 2716 } 2717 2718 return (rval); 2719} 2720 2721/** 2722 * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query. 2723 * 2724 * @vha: HA context 2725 * @list: switch info entries to populate 2726 * 2727 */ 2728void 2729qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) 2730{ 2731 int rval; 2732 uint16_t i; 2733 2734 ms_iocb_entry_t *ms_pkt; 2735 struct ct_sns_req *ct_req; 2736 struct ct_sns_rsp *ct_rsp; 2737 struct qla_hw_data *ha = vha->hw; 2738 uint8_t fcp_scsi_features = 0, nvme_features = 0; 2739 struct ct_arg arg; 2740 2741 for (i = 0; i < ha->max_fibre_devices; i++) { 2742 /* Set default FC4 Type as UNKNOWN so the default is to 2743 * Process this port */ 2744 list[i].fc4_type = 0; 2745 2746 /* Do not attempt GFF_ID if we are not FWI_2 capable */ 2747 if (!IS_FWI2_CAPABLE(ha)) 2748 continue; 2749 2750 arg.iocb = ha->ms_iocb; 2751 arg.req_dma = ha->ct_sns_dma; 2752 arg.rsp_dma = ha->ct_sns_dma; 2753 arg.req_size = GFF_ID_REQ_SIZE; 2754 arg.rsp_size = GFF_ID_RSP_SIZE; 2755 arg.nport_handle = NPH_SNS; 2756 2757 /* Prepare common MS IOCB */ 2758 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 2759 2760 /* Prepare CT request */ 2761 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD, 2762 GFF_ID_RSP_SIZE); 2763 ct_rsp = &ha->ct_sns->p.rsp; 2764 2765 /* Prepare CT arguments -- port_id */ 2766 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); 2767 2768 /* Execute MS IOCB */ 2769 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2770 sizeof(ms_iocb_entry_t)); 2771 2772 if (rval != QLA_SUCCESS) { 2773 ql_dbg(ql_dbg_disc, vha, 0x205c, 2774 "GFF_ID issue IOCB failed (%d).\n", rval); 2775 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 2776 "GFF_ID") != QLA_SUCCESS) { 2777 ql_dbg(ql_dbg_disc, vha, 0x205d, 2778 "GFF_ID IOCB status had a failure status code.\n"); 2779 } else { 2780 fcp_scsi_features = 2781 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; 2782 fcp_scsi_features &= 0x0f; 2783 2784 if (fcp_scsi_features) { 2785 list[i].fc4_type = FS_FC4TYPE_FCP; 2786 list[i].fc4_features = fcp_scsi_features; 2787 } 2788 2789 nvme_features = 2790 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; 2791 nvme_features &= 0xf; 2792 2793 if (nvme_features) { 2794 list[i].fc4_type |= FS_FC4TYPE_NVME; 2795 list[i].fc4_features = nvme_features; 2796 } 2797 } 2798 2799 /* Last device exit. */ 2800 if (list[i].d_id.b.rsvd_1 != 0) 2801 break; 2802 } 2803} 2804 2805int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport) 2806{ 2807 struct qla_work_evt *e; 2808 2809 e = qla2x00_alloc_work(vha, QLA_EVT_GPSC); 2810 if (!e) 2811 return QLA_FUNCTION_FAILED; 2812 2813 e->u.fcport.fcport = fcport; 2814 return qla2x00_post_work(vha, e); 2815} 2816 2817void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea) 2818{ 2819 struct fc_port *fcport = ea->fcport; 2820 2821 ql_dbg(ql_dbg_disc, vha, 0x20d8, 2822 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n", 2823 __func__, fcport->port_name, fcport->disc_state, 2824 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen, 2825 ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, fcport->loop_id); 2826 2827 if (fcport->disc_state == DSC_DELETE_PEND) 2828 return; 2829 2830 /* We will figure-out what happen after AUTH completes */ 2831 if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) 2832 return; 2833 2834 if (ea->sp->gen2 != fcport->login_gen) { 2835 /* target side must have changed it. */ 2836 ql_dbg(ql_dbg_disc, vha, 0x20d3, 2837 "%s %8phC generation changed\n", 2838 __func__, fcport->port_name); 2839 return; 2840 } else if (ea->sp->gen1 != fcport->rscn_gen) { 2841 return; 2842 } 2843 2844 qla_post_iidma_work(vha, fcport); 2845} 2846 2847static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res) 2848{ 2849 struct scsi_qla_host *vha = sp->vha; 2850 struct qla_hw_data *ha = vha->hw; 2851 fc_port_t *fcport = sp->fcport; 2852 struct ct_sns_rsp *ct_rsp; 2853 struct event_arg ea; 2854 2855 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp; 2856 2857 ql_dbg(ql_dbg_disc, vha, 0x2053, 2858 "Async done-%s res %x, WWPN %8phC \n", 2859 sp->name, res, fcport->port_name); 2860 2861 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 2862 2863 if (res == QLA_FUNCTION_TIMEOUT) 2864 goto done; 2865 2866 if (res == (DID_ERROR << 16)) { 2867 /* entry status error */ 2868 goto done; 2869 } else if (res) { 2870 if ((ct_rsp->header.reason_code == 2871 CT_REASON_INVALID_COMMAND_CODE) || 2872 (ct_rsp->header.reason_code == 2873 CT_REASON_COMMAND_UNSUPPORTED)) { 2874 ql_dbg(ql_dbg_disc, vha, 0x2019, 2875 "GPSC command unsupported, disabling query.\n"); 2876 ha->flags.gpsc_supported = 0; 2877 goto done; 2878 } 2879 } else { 2880 fcport->fp_speed = qla2x00_port_speed_capability( 2881 be16_to_cpu(ct_rsp->rsp.gpsc.speed)); 2882 2883 ql_dbg(ql_dbg_disc, vha, 0x2054, 2884 "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n", 2885 sp->name, fcport->fabric_port_name, 2886 be16_to_cpu(ct_rsp->rsp.gpsc.speeds), 2887 be16_to_cpu(ct_rsp->rsp.gpsc.speed)); 2888 } 2889 memset(&ea, 0, sizeof(ea)); 2890 ea.rc = res; 2891 ea.fcport = fcport; 2892 ea.sp = sp; 2893 qla24xx_handle_gpsc_event(vha, &ea); 2894 2895done: 2896 /* ref: INIT */ 2897 kref_put(&sp->cmd_kref, qla2x00_sp_release); 2898} 2899 2900int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) 2901{ 2902 int rval = QLA_FUNCTION_FAILED; 2903 struct ct_sns_req *ct_req; 2904 srb_t *sp; 2905 2906 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) 2907 return rval; 2908 2909 /* ref: INIT */ 2910 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 2911 if (!sp) 2912 goto done; 2913 2914 sp->type = SRB_CT_PTHRU_CMD; 2915 sp->name = "gpsc"; 2916 sp->gen1 = fcport->rscn_gen; 2917 sp->gen2 = fcport->login_gen; 2918 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 2919 qla24xx_async_gpsc_sp_done); 2920 2921 /* CT_IU preamble */ 2922 ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD, 2923 GPSC_RSP_SIZE); 2924 2925 /* GPSC req */ 2926 memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name, 2927 WWN_SIZE); 2928 2929 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; 2930 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; 2931 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; 2932 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; 2933 sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE; 2934 sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE; 2935 sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id; 2936 2937 ql_dbg(ql_dbg_disc, vha, 0x205e, 2938 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n", 2939 sp->name, fcport->port_name, sp->handle, 2940 fcport->loop_id, fcport->d_id.b.domain, 2941 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2942 2943 rval = qla2x00_start_sp(sp); 2944 if (rval != QLA_SUCCESS) 2945 goto done_free_sp; 2946 return rval; 2947 2948done_free_sp: 2949 /* ref: INIT */ 2950 kref_put(&sp->cmd_kref, qla2x00_sp_release); 2951done: 2952 return rval; 2953} 2954 2955int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id) 2956{ 2957 struct qla_work_evt *e; 2958 2959 if (test_bit(UNLOADING, &vha->dpc_flags) || 2960 (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags))) 2961 return 0; 2962 2963 e = qla2x00_alloc_work(vha, QLA_EVT_GPNID); 2964 if (!e) 2965 return QLA_FUNCTION_FAILED; 2966 2967 e->u.gpnid.id = *id; 2968 return qla2x00_post_work(vha, e); 2969} 2970 2971void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp) 2972{ 2973 struct srb_iocb *c = &sp->u.iocb_cmd; 2974 2975 switch (sp->type) { 2976 case SRB_ELS_DCMD: 2977 qla2x00_els_dcmd2_free(vha, &c->u.els_plogi); 2978 break; 2979 case SRB_CT_PTHRU_CMD: 2980 default: 2981 if (sp->u.iocb_cmd.u.ctarg.req) { 2982 dma_free_coherent(&vha->hw->pdev->dev, 2983 sp->u.iocb_cmd.u.ctarg.req_allocated_size, 2984 sp->u.iocb_cmd.u.ctarg.req, 2985 sp->u.iocb_cmd.u.ctarg.req_dma); 2986 sp->u.iocb_cmd.u.ctarg.req = NULL; 2987 } 2988 2989 if (sp->u.iocb_cmd.u.ctarg.rsp) { 2990 dma_free_coherent(&vha->hw->pdev->dev, 2991 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, 2992 sp->u.iocb_cmd.u.ctarg.rsp, 2993 sp->u.iocb_cmd.u.ctarg.rsp_dma); 2994 sp->u.iocb_cmd.u.ctarg.rsp = NULL; 2995 } 2996 break; 2997 } 2998 2999 /* ref: INIT */ 3000 kref_put(&sp->cmd_kref, qla2x00_sp_release); 3001} 3002 3003void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) 3004{ 3005 fc_port_t *fcport, *conflict, *t; 3006 u16 data[2]; 3007 3008 ql_dbg(ql_dbg_disc, vha, 0xffff, 3009 "%s %d port_id: %06x\n", 3010 __func__, __LINE__, ea->id.b24); 3011 3012 if (ea->rc) { 3013 /* cable is disconnected */ 3014 list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) { 3015 if (fcport->d_id.b24 == ea->id.b24) 3016 fcport->scan_state = QLA_FCPORT_SCAN; 3017 3018 qlt_schedule_sess_for_deletion(fcport); 3019 } 3020 } else { 3021 /* cable is connected */ 3022 fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1); 3023 if (fcport) { 3024 list_for_each_entry_safe(conflict, t, &vha->vp_fcports, 3025 list) { 3026 if ((conflict->d_id.b24 == ea->id.b24) && 3027 (fcport != conflict)) 3028 /* 3029 * 2 fcports with conflict Nport ID or 3030 * an existing fcport is having nport ID 3031 * conflict with new fcport. 3032 */ 3033 3034 conflict->scan_state = QLA_FCPORT_SCAN; 3035 3036 qlt_schedule_sess_for_deletion(conflict); 3037 } 3038 3039 fcport->scan_needed = 0; 3040 fcport->rscn_gen++; 3041 fcport->scan_state = QLA_FCPORT_FOUND; 3042 fcport->flags |= FCF_FABRIC_DEVICE; 3043 if (fcport->login_retry == 0) { 3044 fcport->login_retry = 3045 vha->hw->login_retry_count; 3046 ql_dbg(ql_dbg_disc, vha, 0xffff, 3047 "Port login retry %8phN, lid 0x%04x cnt=%d.\n", 3048 fcport->port_name, fcport->loop_id, 3049 fcport->login_retry); 3050 } 3051 switch (fcport->disc_state) { 3052 case DSC_LOGIN_COMPLETE: 3053 /* recheck session is still intact. */ 3054 ql_dbg(ql_dbg_disc, vha, 0x210d, 3055 "%s %d %8phC revalidate session with ADISC\n", 3056 __func__, __LINE__, fcport->port_name); 3057 data[0] = data[1] = 0; 3058 qla2x00_post_async_adisc_work(vha, fcport, 3059 data); 3060 break; 3061 case DSC_DELETED: 3062 ql_dbg(ql_dbg_disc, vha, 0x210d, 3063 "%s %d %8phC login\n", __func__, __LINE__, 3064 fcport->port_name); 3065 fcport->d_id = ea->id; 3066 qla24xx_fcport_handle_login(vha, fcport); 3067 break; 3068 case DSC_DELETE_PEND: 3069 fcport->d_id = ea->id; 3070 break; 3071 default: 3072 fcport->d_id = ea->id; 3073 break; 3074 } 3075 } else { 3076 list_for_each_entry_safe(conflict, t, &vha->vp_fcports, 3077 list) { 3078 if (conflict->d_id.b24 == ea->id.b24) { 3079 /* 2 fcports with conflict Nport ID or 3080 * an existing fcport is having nport ID 3081 * conflict with new fcport. 3082 */ 3083 ql_dbg(ql_dbg_disc, vha, 0xffff, 3084 "%s %d %8phC DS %d\n", 3085 __func__, __LINE__, 3086 conflict->port_name, 3087 conflict->disc_state); 3088 3089 conflict->scan_state = QLA_FCPORT_SCAN; 3090 qlt_schedule_sess_for_deletion(conflict); 3091 } 3092 } 3093 3094 /* create new fcport */ 3095 ql_dbg(ql_dbg_disc, vha, 0x2065, 3096 "%s %d %8phC post new sess\n", 3097 __func__, __LINE__, ea->port_name); 3098 qla24xx_post_newsess_work(vha, &ea->id, 3099 ea->port_name, NULL, NULL, 0); 3100 } 3101 } 3102} 3103 3104static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res) 3105{ 3106 struct scsi_qla_host *vha = sp->vha; 3107 struct ct_sns_req *ct_req = 3108 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; 3109 struct ct_sns_rsp *ct_rsp = 3110 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; 3111 struct event_arg ea; 3112 struct qla_work_evt *e; 3113 unsigned long flags; 3114 3115 if (res) 3116 ql_dbg(ql_dbg_disc, vha, 0x2066, 3117 "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n", 3118 sp->name, res, sp->gen1, &ct_req->req.port_id.port_id, 3119 ct_rsp->rsp.gpn_id.port_name); 3120 else 3121 ql_dbg(ql_dbg_disc, vha, 0x2066, 3122 "Async done-%s good rscn gen %d ID %3phC. %8phC\n", 3123 sp->name, sp->gen1, &ct_req->req.port_id.port_id, 3124 ct_rsp->rsp.gpn_id.port_name); 3125 3126 memset(&ea, 0, sizeof(ea)); 3127 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); 3128 ea.sp = sp; 3129 ea.id = be_to_port_id(ct_req->req.port_id.port_id); 3130 ea.rc = res; 3131 3132 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 3133 list_del(&sp->elem); 3134 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 3135 3136 if (res) { 3137 if (res == QLA_FUNCTION_TIMEOUT) { 3138 qla24xx_post_gpnid_work(sp->vha, &ea.id); 3139 /* ref: INIT */ 3140 kref_put(&sp->cmd_kref, qla2x00_sp_release); 3141 return; 3142 } 3143 } else if (sp->gen1) { 3144 /* There was another RSCN for this Nport ID */ 3145 qla24xx_post_gpnid_work(sp->vha, &ea.id); 3146 /* ref: INIT */ 3147 kref_put(&sp->cmd_kref, qla2x00_sp_release); 3148 return; 3149 } 3150 3151 qla24xx_handle_gpnid_event(vha, &ea); 3152 3153 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); 3154 if (!e) { 3155 /* please ignore kernel warning. otherwise, we have mem leak. */ 3156 dma_free_coherent(&vha->hw->pdev->dev, 3157 sp->u.iocb_cmd.u.ctarg.req_allocated_size, 3158 sp->u.iocb_cmd.u.ctarg.req, 3159 sp->u.iocb_cmd.u.ctarg.req_dma); 3160 sp->u.iocb_cmd.u.ctarg.req = NULL; 3161 3162 dma_free_coherent(&vha->hw->pdev->dev, 3163 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, 3164 sp->u.iocb_cmd.u.ctarg.rsp, 3165 sp->u.iocb_cmd.u.ctarg.rsp_dma); 3166 sp->u.iocb_cmd.u.ctarg.rsp = NULL; 3167 3168 /* ref: INIT */ 3169 kref_put(&sp->cmd_kref, qla2x00_sp_release); 3170 return; 3171 } 3172 3173 e->u.iosb.sp = sp; 3174 qla2x00_post_work(vha, e); 3175} 3176 3177/* Get WWPN with Nport ID. */ 3178int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) 3179{ 3180 int rval = QLA_FUNCTION_FAILED; 3181 struct ct_sns_req *ct_req; 3182 srb_t *sp, *tsp; 3183 struct ct_sns_pkt *ct_sns; 3184 unsigned long flags; 3185 3186 if (!vha->flags.online) 3187 goto done; 3188 3189 /* ref: INIT */ 3190 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 3191 if (!sp) 3192 goto done; 3193 3194 sp->type = SRB_CT_PTHRU_CMD; 3195 sp->name = "gpnid"; 3196 sp->u.iocb_cmd.u.ctarg.id = *id; 3197 sp->gen1 = 0; 3198 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 3199 qla2x00_async_gpnid_sp_done); 3200 3201 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 3202 list_for_each_entry(tsp, &vha->gpnid_list, elem) { 3203 if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) { 3204 tsp->gen1++; 3205 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 3206 /* ref: INIT */ 3207 kref_put(&sp->cmd_kref, qla2x00_sp_release); 3208 goto done; 3209 } 3210 } 3211 list_add_tail(&sp->elem, &vha->gpnid_list); 3212 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 3213 3214 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, 3215 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, 3216 GFP_KERNEL); 3217 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 3218 if (!sp->u.iocb_cmd.u.ctarg.req) { 3219 ql_log(ql_log_warn, vha, 0xd041, 3220 "Failed to allocate ct_sns request.\n"); 3221 goto done_free_sp; 3222 } 3223 3224 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, 3225 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, 3226 GFP_KERNEL); 3227 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 3228 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 3229 ql_log(ql_log_warn, vha, 0xd042, 3230 "Failed to allocate ct_sns request.\n"); 3231 goto done_free_sp; 3232 } 3233 3234 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; 3235 memset(ct_sns, 0, sizeof(*ct_sns)); 3236 3237 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 3238 /* CT_IU preamble */ 3239 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE); 3240 3241 /* GPN_ID req */ 3242 ct_req->req.port_id.port_id = port_id_to_be_id(*id); 3243 3244 sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE; 3245 sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE; 3246 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 3247 3248 ql_dbg(ql_dbg_disc, vha, 0x2067, 3249 "Async-%s hdl=%x ID %3phC.\n", sp->name, 3250 sp->handle, &ct_req->req.port_id.port_id); 3251 3252 rval = qla2x00_start_sp(sp); 3253 if (rval != QLA_SUCCESS) 3254 goto done_free_sp; 3255 3256 return rval; 3257 3258done_free_sp: 3259 spin_lock_irqsave(&vha->hw->vport_slock, flags); 3260 list_del(&sp->elem); 3261 spin_unlock_irqrestore(&vha->hw->vport_slock, flags); 3262 3263 if (sp->u.iocb_cmd.u.ctarg.req) { 3264 dma_free_coherent(&vha->hw->pdev->dev, 3265 sizeof(struct ct_sns_pkt), 3266 sp->u.iocb_cmd.u.ctarg.req, 3267 sp->u.iocb_cmd.u.ctarg.req_dma); 3268 sp->u.iocb_cmd.u.ctarg.req = NULL; 3269 } 3270 if (sp->u.iocb_cmd.u.ctarg.rsp) { 3271 dma_free_coherent(&vha->hw->pdev->dev, 3272 sizeof(struct ct_sns_pkt), 3273 sp->u.iocb_cmd.u.ctarg.rsp, 3274 sp->u.iocb_cmd.u.ctarg.rsp_dma); 3275 sp->u.iocb_cmd.u.ctarg.rsp = NULL; 3276 } 3277 /* ref: INIT */ 3278 kref_put(&sp->cmd_kref, qla2x00_sp_release); 3279done: 3280 return rval; 3281} 3282 3283void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea) 3284{ 3285 fc_port_t *fcport = ea->fcport; 3286 3287 qla24xx_post_gnl_work(vha, fcport); 3288} 3289 3290void qla24xx_async_gffid_sp_done(srb_t *sp, int res) 3291{ 3292 struct scsi_qla_host *vha = sp->vha; 3293 fc_port_t *fcport = sp->fcport; 3294 struct ct_sns_rsp *ct_rsp; 3295 struct event_arg ea; 3296 uint8_t fc4_scsi_feat; 3297 uint8_t fc4_nvme_feat; 3298 3299 ql_dbg(ql_dbg_disc, vha, 0x2133, 3300 "Async done-%s res %x ID %x. %8phC\n", 3301 sp->name, res, fcport->d_id.b24, fcport->port_name); 3302 3303 fcport->flags &= ~FCF_ASYNC_SENT; 3304 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp; 3305 fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; 3306 fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; 3307 3308 /* 3309 * FC-GS-7, 5.2.3.12 FC-4 Features - format 3310 * The format of the FC-4 Features object, as defined by the FC-4, 3311 * Shall be an array of 4-bit values, one for each type code value 3312 */ 3313 if (!res) { 3314 if (fc4_scsi_feat & 0xf) { 3315 /* w1 b00:03 */ 3316 fcport->fc4_type = FS_FC4TYPE_FCP; 3317 fcport->fc4_features = fc4_scsi_feat & 0xf; 3318 } 3319 3320 if (fc4_nvme_feat & 0xf) { 3321 /* w5 [00:03]/28h */ 3322 fcport->fc4_type |= FS_FC4TYPE_NVME; 3323 fcport->fc4_features = fc4_nvme_feat & 0xf; 3324 } 3325 } 3326 3327 memset(&ea, 0, sizeof(ea)); 3328 ea.sp = sp; 3329 ea.fcport = sp->fcport; 3330 ea.rc = res; 3331 3332 qla24xx_handle_gffid_event(vha, &ea); 3333 /* ref: INIT */ 3334 kref_put(&sp->cmd_kref, qla2x00_sp_release); 3335} 3336 3337/* Get FC4 Feature with Nport ID. */ 3338int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport) 3339{ 3340 int rval = QLA_FUNCTION_FAILED; 3341 struct ct_sns_req *ct_req; 3342 srb_t *sp; 3343 3344 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) 3345 return rval; 3346 3347 /* ref: INIT */ 3348 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 3349 if (!sp) 3350 return rval; 3351 3352 fcport->flags |= FCF_ASYNC_SENT; 3353 sp->type = SRB_CT_PTHRU_CMD; 3354 sp->name = "gffid"; 3355 sp->gen1 = fcport->rscn_gen; 3356 sp->gen2 = fcport->login_gen; 3357 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 3358 qla24xx_async_gffid_sp_done); 3359 3360 /* CT_IU preamble */ 3361 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD, 3362 GFF_ID_RSP_SIZE); 3363 3364 ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain; 3365 ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area; 3366 ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa; 3367 3368 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; 3369 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; 3370 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; 3371 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; 3372 sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE; 3373 sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE; 3374 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 3375 3376 ql_dbg(ql_dbg_disc, vha, 0x2132, 3377 "Async-%s hdl=%x %8phC.\n", sp->name, 3378 sp->handle, fcport->port_name); 3379 3380 rval = qla2x00_start_sp(sp); 3381 if (rval != QLA_SUCCESS) 3382 goto done_free_sp; 3383 3384 return rval; 3385done_free_sp: 3386 /* ref: INIT */ 3387 kref_put(&sp->cmd_kref, qla2x00_sp_release); 3388 fcport->flags &= ~FCF_ASYNC_SENT; 3389 return rval; 3390} 3391 3392/* GPN_FT + GNN_FT*/ 3393static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn) 3394{ 3395 struct qla_hw_data *ha = vha->hw; 3396 scsi_qla_host_t *vp; 3397 unsigned long flags; 3398 u64 twwn; 3399 int rc = 0; 3400 3401 if (!ha->num_vhosts) 3402 return 0; 3403 3404 spin_lock_irqsave(&ha->vport_slock, flags); 3405 list_for_each_entry(vp, &ha->vp_list, list) { 3406 twwn = wwn_to_u64(vp->port_name); 3407 if (wwn == twwn) { 3408 rc = 1; 3409 break; 3410 } 3411 } 3412 spin_unlock_irqrestore(&ha->vport_slock, flags); 3413 3414 return rc; 3415} 3416 3417void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) 3418{ 3419 fc_port_t *fcport; 3420 u32 i, rc; 3421 bool found; 3422 struct fab_scan_rp *rp, *trp; 3423 unsigned long flags; 3424 u8 recheck = 0; 3425 u16 dup = 0, dup_cnt = 0; 3426 3427 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, 3428 "%s enter\n", __func__); 3429 3430 if (sp->gen1 != vha->hw->base_qpair->chip_reset) { 3431 ql_dbg(ql_dbg_disc, vha, 0xffff, 3432 "%s scan stop due to chip reset %x/%x\n", 3433 sp->name, sp->gen1, vha->hw->base_qpair->chip_reset); 3434 goto out; 3435 } 3436 3437 rc = sp->rc; 3438 if (rc) { 3439 vha->scan.scan_retry++; 3440 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { 3441 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 3442 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3443 goto out; 3444 } else { 3445 ql_dbg(ql_dbg_disc, vha, 0xffff, 3446 "%s: Fabric scan failed for %d retries.\n", 3447 __func__, vha->scan.scan_retry); 3448 /* 3449 * Unable to scan any rports. logout loop below 3450 * will unregister all sessions. 3451 */ 3452 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3453 if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) { 3454 fcport->scan_state = QLA_FCPORT_SCAN; 3455 if (fcport->loop_id == FC_NO_LOOP_ID) 3456 fcport->logout_on_delete = 0; 3457 else 3458 fcport->logout_on_delete = 1; 3459 } 3460 } 3461 goto login_logout; 3462 } 3463 } 3464 vha->scan.scan_retry = 0; 3465 3466 list_for_each_entry(fcport, &vha->vp_fcports, list) 3467 fcport->scan_state = QLA_FCPORT_SCAN; 3468 3469 for (i = 0; i < vha->hw->max_fibre_devices; i++) { 3470 u64 wwn; 3471 int k; 3472 3473 rp = &vha->scan.l[i]; 3474 found = false; 3475 3476 wwn = wwn_to_u64(rp->port_name); 3477 if (wwn == 0) 3478 continue; 3479 3480 /* Remove duplicate NPORT ID entries from switch data base */ 3481 for (k = i + 1; k < vha->hw->max_fibre_devices; k++) { 3482 trp = &vha->scan.l[k]; 3483 if (rp->id.b24 == trp->id.b24) { 3484 dup = 1; 3485 dup_cnt++; 3486 ql_dbg(ql_dbg_disc + ql_dbg_verbose, 3487 vha, 0xffff, 3488 "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n", 3489 rp->id.b24, rp->port_name, trp->port_name); 3490 memset(trp, 0, sizeof(*trp)); 3491 } 3492 } 3493 3494 if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE)) 3495 continue; 3496 3497 /* Bypass reserved domain fields. */ 3498 if ((rp->id.b.domain & 0xf0) == 0xf0) 3499 continue; 3500 3501 /* Bypass virtual ports of the same host. */ 3502 if (qla2x00_is_a_vp(vha, wwn)) 3503 continue; 3504 3505 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3506 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE)) 3507 continue; 3508 fcport->scan_state = QLA_FCPORT_FOUND; 3509 fcport->last_rscn_gen = fcport->rscn_gen; 3510 fcport->fc4_type = rp->fc4type; 3511 found = true; 3512 3513 if (fcport->scan_needed) { 3514 if (NVME_PRIORITY(vha->hw, fcport)) 3515 fcport->do_prli_nvme = 1; 3516 else 3517 fcport->do_prli_nvme = 0; 3518 } 3519 3520 /* 3521 * If device was not a fabric device before. 3522 */ 3523 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { 3524 qla2x00_clear_loop_id(fcport); 3525 fcport->flags |= FCF_FABRIC_DEVICE; 3526 } else if (fcport->d_id.b24 != rp->id.b24 || 3527 (fcport->scan_needed && 3528 fcport->port_type != FCT_INITIATOR && 3529 fcport->port_type != FCT_NVME_INITIATOR)) { 3530 qlt_schedule_sess_for_deletion(fcport); 3531 } 3532 fcport->d_id.b24 = rp->id.b24; 3533 fcport->scan_needed = 0; 3534 break; 3535 } 3536 3537 if (!found) { 3538 ql_dbg(ql_dbg_disc, vha, 0xffff, 3539 "%s %d %8phC post new sess\n", 3540 __func__, __LINE__, rp->port_name); 3541 qla24xx_post_newsess_work(vha, &rp->id, rp->port_name, 3542 rp->node_name, NULL, rp->fc4type); 3543 } 3544 } 3545 3546 if (dup) { 3547 ql_log(ql_log_warn, vha, 0xffff, 3548 "Detected %d duplicate NPORT ID(s) from switch data base\n", 3549 dup_cnt); 3550 } 3551 3552login_logout: 3553 /* 3554 * Logout all previous fabric dev marked lost, except FCP2 devices. 3555 */ 3556 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3557 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { 3558 fcport->scan_needed = 0; 3559 continue; 3560 } 3561 3562 if (fcport->scan_state != QLA_FCPORT_FOUND) { 3563 bool do_delete = false; 3564 3565 if (fcport->scan_needed && 3566 fcport->disc_state == DSC_LOGIN_PEND) { 3567 /* Cable got disconnected after we sent 3568 * a login. Do delete to prevent timeout. 3569 */ 3570 fcport->logout_on_delete = 1; 3571 do_delete = true; 3572 } 3573 3574 fcport->scan_needed = 0; 3575 if (((qla_dual_mode_enabled(vha) || 3576 qla_ini_mode_enabled(vha)) && 3577 atomic_read(&fcport->state) == FCS_ONLINE) || 3578 do_delete) { 3579 if (fcport->loop_id != FC_NO_LOOP_ID) { 3580 if (fcport->flags & FCF_FCP2_DEVICE) 3581 fcport->logout_on_delete = 0; 3582 3583 ql_log(ql_log_warn, vha, 0x20f0, 3584 "%s %d %8phC post del sess\n", 3585 __func__, __LINE__, 3586 fcport->port_name); 3587 3588 fcport->tgt_link_down_time = 0; 3589 qlt_schedule_sess_for_deletion(fcport); 3590 continue; 3591 } 3592 } 3593 } else { 3594 if (fcport->scan_needed || 3595 fcport->disc_state != DSC_LOGIN_COMPLETE) { 3596 if (fcport->login_retry == 0) { 3597 fcport->login_retry = 3598 vha->hw->login_retry_count; 3599 ql_dbg(ql_dbg_disc, vha, 0x20a3, 3600 "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n", 3601 fcport->port_name, fcport->loop_id, 3602 fcport->login_retry); 3603 } 3604 fcport->scan_needed = 0; 3605 qla24xx_fcport_handle_login(vha, fcport); 3606 } 3607 } 3608 } 3609 3610 recheck = 1; 3611out: 3612 qla24xx_sp_unmap(vha, sp); 3613 spin_lock_irqsave(&vha->work_lock, flags); 3614 vha->scan.scan_flags &= ~SF_SCANNING; 3615 spin_unlock_irqrestore(&vha->work_lock, flags); 3616 3617 if (recheck) { 3618 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3619 if (fcport->scan_needed) { 3620 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 3621 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3622 break; 3623 } 3624 } 3625 } 3626} 3627 3628static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha, 3629 srb_t *sp, int cmd) 3630{ 3631 struct qla_work_evt *e; 3632 3633 if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE) 3634 return QLA_PARAMETER_ERROR; 3635 3636 e = qla2x00_alloc_work(vha, cmd); 3637 if (!e) 3638 return QLA_FUNCTION_FAILED; 3639 3640 e->u.iosb.sp = sp; 3641 3642 return qla2x00_post_work(vha, e); 3643} 3644 3645static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha, 3646 srb_t *sp, int cmd) 3647{ 3648 struct qla_work_evt *e; 3649 3650 if (cmd != QLA_EVT_GPNFT) 3651 return QLA_PARAMETER_ERROR; 3652 3653 e = qla2x00_alloc_work(vha, cmd); 3654 if (!e) 3655 return QLA_FUNCTION_FAILED; 3656 3657 e->u.gpnft.fc4_type = FC4_TYPE_NVME; 3658 e->u.gpnft.sp = sp; 3659 3660 return qla2x00_post_work(vha, e); 3661} 3662 3663static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha, 3664 struct srb *sp) 3665{ 3666 struct qla_hw_data *ha = vha->hw; 3667 int num_fibre_dev = ha->max_fibre_devices; 3668 struct ct_sns_req *ct_req = 3669 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; 3670 struct ct_sns_gpnft_rsp *ct_rsp = 3671 (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; 3672 struct ct_sns_gpn_ft_data *d; 3673 struct fab_scan_rp *rp; 3674 u16 cmd = be16_to_cpu(ct_req->command); 3675 u8 fc4_type = sp->gen2; 3676 int i, j, k; 3677 port_id_t id; 3678 u8 found; 3679 u64 wwn; 3680 3681 j = 0; 3682 for (i = 0; i < num_fibre_dev; i++) { 3683 d = &ct_rsp->entries[i]; 3684 3685 id.b.rsvd_1 = 0; 3686 id.b.domain = d->port_id[0]; 3687 id.b.area = d->port_id[1]; 3688 id.b.al_pa = d->port_id[2]; 3689 wwn = wwn_to_u64(d->port_name); 3690 3691 if (id.b24 == 0 || wwn == 0) 3692 continue; 3693 3694 if (fc4_type == FC4_TYPE_FCP_SCSI) { 3695 if (cmd == GPN_FT_CMD) { 3696 rp = &vha->scan.l[j]; 3697 rp->id = id; 3698 memcpy(rp->port_name, d->port_name, 8); 3699 j++; 3700 rp->fc4type = FS_FC4TYPE_FCP; 3701 } else { 3702 for (k = 0; k < num_fibre_dev; k++) { 3703 rp = &vha->scan.l[k]; 3704 if (id.b24 == rp->id.b24) { 3705 memcpy(rp->node_name, 3706 d->port_name, 8); 3707 break; 3708 } 3709 } 3710 } 3711 } else { 3712 /* Search if the fibre device supports FC4_TYPE_NVME */ 3713 if (cmd == GPN_FT_CMD) { 3714 found = 0; 3715 3716 for (k = 0; k < num_fibre_dev; k++) { 3717 rp = &vha->scan.l[k]; 3718 if (!memcmp(rp->port_name, 3719 d->port_name, 8)) { 3720 /* 3721 * Supports FC-NVMe & FCP 3722 */ 3723 rp->fc4type |= FS_FC4TYPE_NVME; 3724 found = 1; 3725 break; 3726 } 3727 } 3728 3729 /* We found new FC-NVMe only port */ 3730 if (!found) { 3731 for (k = 0; k < num_fibre_dev; k++) { 3732 rp = &vha->scan.l[k]; 3733 if (wwn_to_u64(rp->port_name)) { 3734 continue; 3735 } else { 3736 rp->id = id; 3737 memcpy(rp->port_name, 3738 d->port_name, 8); 3739 rp->fc4type = 3740 FS_FC4TYPE_NVME; 3741 break; 3742 } 3743 } 3744 } 3745 } else { 3746 for (k = 0; k < num_fibre_dev; k++) { 3747 rp = &vha->scan.l[k]; 3748 if (id.b24 == rp->id.b24) { 3749 memcpy(rp->node_name, 3750 d->port_name, 8); 3751 break; 3752 } 3753 } 3754 } 3755 } 3756 } 3757} 3758 3759static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res) 3760{ 3761 struct scsi_qla_host *vha = sp->vha; 3762 struct ct_sns_req *ct_req = 3763 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; 3764 u16 cmd = be16_to_cpu(ct_req->command); 3765 u8 fc4_type = sp->gen2; 3766 unsigned long flags; 3767 int rc; 3768 3769 /* gen2 field is holding the fc4type */ 3770 ql_dbg(ql_dbg_disc, vha, 0xffff, 3771 "Async done-%s res %x FC4Type %x\n", 3772 sp->name, res, sp->gen2); 3773 3774 sp->rc = res; 3775 if (res) { 3776 unsigned long flags; 3777 const char *name = sp->name; 3778 3779 if (res == QLA_OS_TIMER_EXPIRED) { 3780 /* switch is ignoring all commands. 3781 * This might be a zone disable behavior. 3782 * This means we hit 64s timeout. 3783 * 22s GPNFT + 44s Abort = 64s 3784 */ 3785 ql_dbg(ql_dbg_disc, vha, 0xffff, 3786 "%s: Switch Zone check please .\n", 3787 name); 3788 qla2x00_mark_all_devices_lost(vha); 3789 } 3790 3791 /* 3792 * We are in an Interrupt context, queue up this 3793 * sp for GNNFT_DONE work. This will allow all 3794 * the resource to get freed up. 3795 */ 3796 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, 3797 QLA_EVT_GNNFT_DONE); 3798 if (rc) { 3799 /* Cleanup here to prevent memory leak */ 3800 qla24xx_sp_unmap(vha, sp); 3801 3802 spin_lock_irqsave(&vha->work_lock, flags); 3803 vha->scan.scan_flags &= ~SF_SCANNING; 3804 vha->scan.scan_retry++; 3805 spin_unlock_irqrestore(&vha->work_lock, flags); 3806 3807 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { 3808 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 3809 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3810 qla2xxx_wake_dpc(vha); 3811 } else { 3812 ql_dbg(ql_dbg_disc, vha, 0xffff, 3813 "Async done-%s rescan failed on all retries.\n", 3814 name); 3815 } 3816 } 3817 return; 3818 } 3819 3820 qla2x00_find_free_fcp_nvme_slot(vha, sp); 3821 3822 if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled && 3823 cmd == GNN_FT_CMD) { 3824 spin_lock_irqsave(&vha->work_lock, flags); 3825 vha->scan.scan_flags &= ~SF_SCANNING; 3826 spin_unlock_irqrestore(&vha->work_lock, flags); 3827 3828 sp->rc = res; 3829 rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT); 3830 if (rc) { 3831 qla24xx_sp_unmap(vha, sp); 3832 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 3833 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3834 } 3835 return; 3836 } 3837 3838 if (cmd == GPN_FT_CMD) { 3839 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, 3840 QLA_EVT_GPNFT_DONE); 3841 } else { 3842 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, 3843 QLA_EVT_GNNFT_DONE); 3844 } 3845 3846 if (rc) { 3847 qla24xx_sp_unmap(vha, sp); 3848 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 3849 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3850 return; 3851 } 3852} 3853 3854/* 3855 * Get WWNN list for fc4_type 3856 * 3857 * It is assumed the same SRB is re-used from GPNFT to avoid 3858 * mem free & re-alloc 3859 */ 3860static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, 3861 u8 fc4_type) 3862{ 3863 int rval = QLA_FUNCTION_FAILED; 3864 struct ct_sns_req *ct_req; 3865 struct ct_sns_pkt *ct_sns; 3866 unsigned long flags; 3867 3868 if (!vha->flags.online) { 3869 spin_lock_irqsave(&vha->work_lock, flags); 3870 vha->scan.scan_flags &= ~SF_SCANNING; 3871 spin_unlock_irqrestore(&vha->work_lock, flags); 3872 goto done_free_sp; 3873 } 3874 3875 if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) { 3876 ql_log(ql_log_warn, vha, 0xffff, 3877 "%s: req %p rsp %p are not setup\n", 3878 __func__, sp->u.iocb_cmd.u.ctarg.req, 3879 sp->u.iocb_cmd.u.ctarg.rsp); 3880 spin_lock_irqsave(&vha->work_lock, flags); 3881 vha->scan.scan_flags &= ~SF_SCANNING; 3882 spin_unlock_irqrestore(&vha->work_lock, flags); 3883 WARN_ON(1); 3884 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 3885 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3886 goto done_free_sp; 3887 } 3888 3889 ql_dbg(ql_dbg_disc, vha, 0xfffff, 3890 "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n", 3891 __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size, 3892 sp->u.iocb_cmd.u.ctarg.req_size); 3893 3894 sp->type = SRB_CT_PTHRU_CMD; 3895 sp->name = "gnnft"; 3896 sp->gen1 = vha->hw->base_qpair->chip_reset; 3897 sp->gen2 = fc4_type; 3898 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 3899 qla2x00_async_gpnft_gnnft_sp_done); 3900 3901 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); 3902 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); 3903 3904 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 3905 /* CT_IU preamble */ 3906 ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD, 3907 sp->u.iocb_cmd.u.ctarg.rsp_size); 3908 3909 /* GPN_FT req */ 3910 ct_req->req.gpn_ft.port_type = fc4_type; 3911 3912 sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE; 3913 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 3914 3915 ql_dbg(ql_dbg_disc, vha, 0xffff, 3916 "Async-%s hdl=%x FC4Type %x.\n", sp->name, 3917 sp->handle, ct_req->req.gpn_ft.port_type); 3918 3919 rval = qla2x00_start_sp(sp); 3920 if (rval != QLA_SUCCESS) { 3921 goto done_free_sp; 3922 } 3923 3924 return rval; 3925 3926done_free_sp: 3927 if (sp->u.iocb_cmd.u.ctarg.req) { 3928 dma_free_coherent(&vha->hw->pdev->dev, 3929 sp->u.iocb_cmd.u.ctarg.req_allocated_size, 3930 sp->u.iocb_cmd.u.ctarg.req, 3931 sp->u.iocb_cmd.u.ctarg.req_dma); 3932 sp->u.iocb_cmd.u.ctarg.req = NULL; 3933 } 3934 if (sp->u.iocb_cmd.u.ctarg.rsp) { 3935 dma_free_coherent(&vha->hw->pdev->dev, 3936 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, 3937 sp->u.iocb_cmd.u.ctarg.rsp, 3938 sp->u.iocb_cmd.u.ctarg.rsp_dma); 3939 sp->u.iocb_cmd.u.ctarg.rsp = NULL; 3940 } 3941 /* ref: INIT */ 3942 kref_put(&sp->cmd_kref, qla2x00_sp_release); 3943 3944 spin_lock_irqsave(&vha->work_lock, flags); 3945 vha->scan.scan_flags &= ~SF_SCANNING; 3946 if (vha->scan.scan_flags == 0) { 3947 ql_dbg(ql_dbg_disc, vha, 0xffff, 3948 "%s: schedule\n", __func__); 3949 vha->scan.scan_flags |= SF_QUEUED; 3950 schedule_delayed_work(&vha->scan.scan_work, 5); 3951 } 3952 spin_unlock_irqrestore(&vha->work_lock, flags); 3953 3954 3955 return rval; 3956} /* GNNFT */ 3957 3958void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp) 3959{ 3960 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, 3961 "%s enter\n", __func__); 3962 qla24xx_async_gnnft(vha, sp, sp->gen2); 3963} 3964 3965/* Get WWPN list for certain fc4_type */ 3966int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) 3967{ 3968 int rval = QLA_FUNCTION_FAILED; 3969 struct ct_sns_req *ct_req; 3970 struct ct_sns_pkt *ct_sns; 3971 u32 rspsz; 3972 unsigned long flags; 3973 3974 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, 3975 "%s enter\n", __func__); 3976 3977 if (!vha->flags.online) 3978 return rval; 3979 3980 spin_lock_irqsave(&vha->work_lock, flags); 3981 if (vha->scan.scan_flags & SF_SCANNING) { 3982 spin_unlock_irqrestore(&vha->work_lock, flags); 3983 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, 3984 "%s: scan active\n", __func__); 3985 return rval; 3986 } 3987 vha->scan.scan_flags |= SF_SCANNING; 3988 spin_unlock_irqrestore(&vha->work_lock, flags); 3989 3990 if (fc4_type == FC4_TYPE_FCP_SCSI) { 3991 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, 3992 "%s: Performing FCP Scan\n", __func__); 3993 3994 if (sp) { 3995 /* ref: INIT */ 3996 kref_put(&sp->cmd_kref, qla2x00_sp_release); 3997 } 3998 3999 /* ref: INIT */ 4000 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 4001 if (!sp) { 4002 spin_lock_irqsave(&vha->work_lock, flags); 4003 vha->scan.scan_flags &= ~SF_SCANNING; 4004 spin_unlock_irqrestore(&vha->work_lock, flags); 4005 return rval; 4006 } 4007 4008 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, 4009 sizeof(struct ct_sns_pkt), 4010 &sp->u.iocb_cmd.u.ctarg.req_dma, 4011 GFP_KERNEL); 4012 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 4013 if (!sp->u.iocb_cmd.u.ctarg.req) { 4014 ql_log(ql_log_warn, vha, 0xffff, 4015 "Failed to allocate ct_sns request.\n"); 4016 spin_lock_irqsave(&vha->work_lock, flags); 4017 vha->scan.scan_flags &= ~SF_SCANNING; 4018 spin_unlock_irqrestore(&vha->work_lock, flags); 4019 qla2x00_rel_sp(sp); 4020 return rval; 4021 } 4022 sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE; 4023 4024 rspsz = sizeof(struct ct_sns_gpnft_rsp) + 4025 ((vha->hw->max_fibre_devices - 1) * 4026 sizeof(struct ct_sns_gpn_ft_data)); 4027 4028 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, 4029 rspsz, 4030 &sp->u.iocb_cmd.u.ctarg.rsp_dma, 4031 GFP_KERNEL); 4032 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz; 4033 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 4034 ql_log(ql_log_warn, vha, 0xffff, 4035 "Failed to allocate ct_sns request.\n"); 4036 spin_lock_irqsave(&vha->work_lock, flags); 4037 vha->scan.scan_flags &= ~SF_SCANNING; 4038 spin_unlock_irqrestore(&vha->work_lock, flags); 4039 dma_free_coherent(&vha->hw->pdev->dev, 4040 sp->u.iocb_cmd.u.ctarg.req_allocated_size, 4041 sp->u.iocb_cmd.u.ctarg.req, 4042 sp->u.iocb_cmd.u.ctarg.req_dma); 4043 sp->u.iocb_cmd.u.ctarg.req = NULL; 4044 /* ref: INIT */ 4045 qla2x00_rel_sp(sp); 4046 return rval; 4047 } 4048 sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz; 4049 4050 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, 4051 "%s scan list size %d\n", __func__, vha->scan.size); 4052 4053 memset(vha->scan.l, 0, vha->scan.size); 4054 } else if (!sp) { 4055 ql_dbg(ql_dbg_disc, vha, 0xffff, 4056 "NVME scan did not provide SP\n"); 4057 return rval; 4058 } 4059 4060 sp->type = SRB_CT_PTHRU_CMD; 4061 sp->name = "gpnft"; 4062 sp->gen1 = vha->hw->base_qpair->chip_reset; 4063 sp->gen2 = fc4_type; 4064 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 4065 qla2x00_async_gpnft_gnnft_sp_done); 4066 4067 rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size; 4068 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); 4069 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); 4070 4071 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 4072 /* CT_IU preamble */ 4073 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz); 4074 4075 /* GPN_FT req */ 4076 ct_req->req.gpn_ft.port_type = fc4_type; 4077 4078 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 4079 4080 ql_dbg(ql_dbg_disc, vha, 0xffff, 4081 "Async-%s hdl=%x FC4Type %x.\n", sp->name, 4082 sp->handle, ct_req->req.gpn_ft.port_type); 4083 4084 rval = qla2x00_start_sp(sp); 4085 if (rval != QLA_SUCCESS) { 4086 goto done_free_sp; 4087 } 4088 4089 return rval; 4090 4091done_free_sp: 4092 if (sp->u.iocb_cmd.u.ctarg.req) { 4093 dma_free_coherent(&vha->hw->pdev->dev, 4094 sp->u.iocb_cmd.u.ctarg.req_allocated_size, 4095 sp->u.iocb_cmd.u.ctarg.req, 4096 sp->u.iocb_cmd.u.ctarg.req_dma); 4097 sp->u.iocb_cmd.u.ctarg.req = NULL; 4098 } 4099 if (sp->u.iocb_cmd.u.ctarg.rsp) { 4100 dma_free_coherent(&vha->hw->pdev->dev, 4101 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, 4102 sp->u.iocb_cmd.u.ctarg.rsp, 4103 sp->u.iocb_cmd.u.ctarg.rsp_dma); 4104 sp->u.iocb_cmd.u.ctarg.rsp = NULL; 4105 } 4106 4107 /* ref: INIT */ 4108 kref_put(&sp->cmd_kref, qla2x00_sp_release); 4109 4110 spin_lock_irqsave(&vha->work_lock, flags); 4111 vha->scan.scan_flags &= ~SF_SCANNING; 4112 if (vha->scan.scan_flags == 0) { 4113 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, 4114 "%s: Scan scheduled.\n", __func__); 4115 vha->scan.scan_flags |= SF_QUEUED; 4116 schedule_delayed_work(&vha->scan.scan_work, 5); 4117 } 4118 spin_unlock_irqrestore(&vha->work_lock, flags); 4119 4120 4121 return rval; 4122} 4123 4124void qla_scan_work_fn(struct work_struct *work) 4125{ 4126 struct fab_scan *s = container_of(to_delayed_work(work), 4127 struct fab_scan, scan_work); 4128 struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host, 4129 scan); 4130 unsigned long flags; 4131 4132 ql_dbg(ql_dbg_disc, vha, 0xffff, 4133 "%s: schedule loop resync\n", __func__); 4134 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 4135 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 4136 qla2xxx_wake_dpc(vha); 4137 spin_lock_irqsave(&vha->work_lock, flags); 4138 vha->scan.scan_flags &= ~SF_QUEUED; 4139 spin_unlock_irqrestore(&vha->work_lock, flags); 4140} 4141 4142/* GNN_ID */ 4143void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea) 4144{ 4145 qla24xx_post_gnl_work(vha, ea->fcport); 4146} 4147 4148static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res) 4149{ 4150 struct scsi_qla_host *vha = sp->vha; 4151 fc_port_t *fcport = sp->fcport; 4152 u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name; 4153 struct event_arg ea; 4154 u64 wwnn; 4155 4156 fcport->flags &= ~FCF_ASYNC_SENT; 4157 wwnn = wwn_to_u64(node_name); 4158 if (wwnn) 4159 memcpy(fcport->node_name, node_name, WWN_SIZE); 4160 4161 memset(&ea, 0, sizeof(ea)); 4162 ea.fcport = fcport; 4163 ea.sp = sp; 4164 ea.rc = res; 4165 4166 ql_dbg(ql_dbg_disc, vha, 0x204f, 4167 "Async done-%s res %x, WWPN %8phC %8phC\n", 4168 sp->name, res, fcport->port_name, fcport->node_name); 4169 4170 qla24xx_handle_gnnid_event(vha, &ea); 4171 4172 /* ref: INIT */ 4173 kref_put(&sp->cmd_kref, qla2x00_sp_release); 4174} 4175 4176int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport) 4177{ 4178 int rval = QLA_FUNCTION_FAILED; 4179 struct ct_sns_req *ct_req; 4180 srb_t *sp; 4181 4182 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) 4183 return rval; 4184 4185 qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID); 4186 /* ref: INIT */ 4187 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); 4188 if (!sp) 4189 goto done; 4190 4191 fcport->flags |= FCF_ASYNC_SENT; 4192 sp->type = SRB_CT_PTHRU_CMD; 4193 sp->name = "gnnid"; 4194 sp->gen1 = fcport->rscn_gen; 4195 sp->gen2 = fcport->login_gen; 4196 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 4197 qla2x00_async_gnnid_sp_done); 4198 4199 /* CT_IU preamble */ 4200 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD, 4201 GNN_ID_RSP_SIZE); 4202 4203 /* GNN_ID req */ 4204 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id); 4205 4206 4207 /* req & rsp use the same buffer */ 4208 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; 4209 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; 4210 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; 4211 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; 4212 sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE; 4213 sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE; 4214 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 4215 4216 ql_dbg(ql_dbg_disc, vha, 0xffff, 4217 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n", 4218 sp->name, fcport->port_name, 4219 sp->handle, fcport->loop_id, fcport->d_id.b24); 4220 4221 rval = qla2x00_start_sp(sp); 4222 if (rval != QLA_SUCCESS) 4223 goto done_free_sp; 4224 return rval; 4225 4226done_free_sp: 4227 /* ref: INIT */ 4228 kref_put(&sp->cmd_kref, qla2x00_sp_release); 4229 fcport->flags &= ~FCF_ASYNC_SENT; 4230done: 4231 return rval; 4232} 4233 4234int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport) 4235{ 4236 struct qla_work_evt *e; 4237 int ls; 4238 4239 ls = atomic_read(&vha->loop_state); 4240 if (((ls != LOOP_READY) && (ls != LOOP_UP)) || 4241 test_bit(UNLOADING, &vha->dpc_flags)) 4242 return 0; 4243 4244 e = qla2x00_alloc_work(vha, QLA_EVT_GNNID); 4245 if (!e) 4246 return QLA_FUNCTION_FAILED; 4247 4248 e->u.fcport.fcport = fcport; 4249 return qla2x00_post_work(vha, e); 4250} 4251 4252/* GPFN_ID */ 4253void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) 4254{ 4255 fc_port_t *fcport = ea->fcport; 4256 4257 ql_dbg(ql_dbg_disc, vha, 0xffff, 4258 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n", 4259 __func__, fcport->port_name, fcport->disc_state, 4260 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2, 4261 fcport->rscn_gen, ea->sp->gen1, vha->fcport_count); 4262 4263 if (fcport->disc_state == DSC_DELETE_PEND) 4264 return; 4265 4266 if (ea->sp->gen2 != fcport->login_gen) { 4267 /* target side must have changed it. */ 4268 ql_dbg(ql_dbg_disc, vha, 0x20d3, 4269 "%s %8phC generation changed\n", 4270 __func__, fcport->port_name); 4271 return; 4272 } else if (ea->sp->gen1 != fcport->rscn_gen) { 4273 return; 4274 } 4275 4276 qla24xx_post_gpsc_work(vha, fcport); 4277} 4278 4279static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res) 4280{ 4281 struct scsi_qla_host *vha = sp->vha; 4282 fc_port_t *fcport = sp->fcport; 4283 u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name; 4284 struct event_arg ea; 4285 u64 wwn; 4286 4287 wwn = wwn_to_u64(fpn); 4288 if (wwn) 4289 memcpy(fcport->fabric_port_name, fpn, WWN_SIZE); 4290 4291 memset(&ea, 0, sizeof(ea)); 4292 ea.fcport = fcport; 4293 ea.sp = sp; 4294 ea.rc = res; 4295 4296 ql_dbg(ql_dbg_disc, vha, 0x204f, 4297 "Async done-%s res %x, WWPN %8phC %8phC\n", 4298 sp->name, res, fcport->port_name, fcport->fabric_port_name); 4299 4300 qla24xx_handle_gfpnid_event(vha, &ea); 4301 4302 /* ref: INIT */ 4303 kref_put(&sp->cmd_kref, qla2x00_sp_release); 4304} 4305 4306int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport) 4307{ 4308 int rval = QLA_FUNCTION_FAILED; 4309 struct ct_sns_req *ct_req; 4310 srb_t *sp; 4311 4312 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) 4313 return rval; 4314 4315 /* ref: INIT */ 4316 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); 4317 if (!sp) 4318 goto done; 4319 4320 sp->type = SRB_CT_PTHRU_CMD; 4321 sp->name = "gfpnid"; 4322 sp->gen1 = fcport->rscn_gen; 4323 sp->gen2 = fcport->login_gen; 4324 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 4325 qla2x00_async_gfpnid_sp_done); 4326 4327 /* CT_IU preamble */ 4328 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD, 4329 GFPN_ID_RSP_SIZE); 4330 4331 /* GFPN_ID req */ 4332 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id); 4333 4334 4335 /* req & rsp use the same buffer */ 4336 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; 4337 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; 4338 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; 4339 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; 4340 sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE; 4341 sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE; 4342 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 4343 4344 ql_dbg(ql_dbg_disc, vha, 0xffff, 4345 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n", 4346 sp->name, fcport->port_name, 4347 sp->handle, fcport->loop_id, fcport->d_id.b24); 4348 4349 rval = qla2x00_start_sp(sp); 4350 if (rval != QLA_SUCCESS) 4351 goto done_free_sp; 4352 4353 return rval; 4354 4355done_free_sp: 4356 /* ref: INIT */ 4357 kref_put(&sp->cmd_kref, qla2x00_sp_release); 4358done: 4359 return rval; 4360} 4361 4362int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport) 4363{ 4364 struct qla_work_evt *e; 4365 int ls; 4366 4367 ls = atomic_read(&vha->loop_state); 4368 if (((ls != LOOP_READY) && (ls != LOOP_UP)) || 4369 test_bit(UNLOADING, &vha->dpc_flags)) 4370 return 0; 4371 4372 e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID); 4373 if (!e) 4374 return QLA_FUNCTION_FAILED; 4375 4376 e->u.fcport.fcport = fcport; 4377 return qla2x00_post_work(vha, e); 4378}