cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qla_iocb.c (111464B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * QLogic Fibre Channel HBA Driver
      4 * Copyright (c)  2003-2014 QLogic Corporation
      5 */
      6#include "qla_def.h"
      7#include "qla_target.h"
      8
      9#include <linux/blkdev.h>
     10#include <linux/delay.h>
     11
     12#include <scsi/scsi_tcq.h>
     13
     14/**
     15 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
     16 * @sp: SCSI command
     17 *
     18 * Returns the proper CF_* direction based on CDB.
     19 */
     20static inline uint16_t
     21qla2x00_get_cmd_direction(srb_t *sp)
     22{
     23	uint16_t cflags;
     24	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
     25	struct scsi_qla_host *vha = sp->vha;
     26
     27	cflags = 0;
     28
     29	/* Set transfer direction */
     30	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
     31		cflags = CF_WRITE;
     32		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
     33		vha->qla_stats.output_requests++;
     34	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
     35		cflags = CF_READ;
     36		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
     37		vha->qla_stats.input_requests++;
     38	}
     39	return (cflags);
     40}
     41
     42/**
     43 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
     44 * Continuation Type 0 IOCBs to allocate.
     45 *
     46 * @dsds: number of data segment descriptors needed
     47 *
     48 * Returns the number of IOCB entries needed to store @dsds.
     49 */
     50uint16_t
     51qla2x00_calc_iocbs_32(uint16_t dsds)
     52{
     53	uint16_t iocbs;
     54
     55	iocbs = 1;
     56	if (dsds > 3) {
     57		iocbs += (dsds - 3) / 7;
     58		if ((dsds - 3) % 7)
     59			iocbs++;
     60	}
     61	return (iocbs);
     62}
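
/*
 * Worked example for the arithmetic above: the Command Type 2 IOCB holds
 * three DSDs and each Continuation Type 0 IOCB holds seven more, so
 * dsds = 10 needs 1 + (10 - 3) / 7 = 2 entries, while dsds = 11 needs a
 * third entry because of the non-zero remainder.
 */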
     63
     64/**
     65 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
     66 * Continuation Type 1 IOCBs to allocate.
     67 *
     68 * @dsds: number of data segment descriptors needed
     69 *
     70 * Returns the number of IOCB entries needed to store @dsds.
     71 */
     72uint16_t
     73qla2x00_calc_iocbs_64(uint16_t dsds)
     74{
     75	uint16_t iocbs;
     76
     77	iocbs = 1;
     78	if (dsds > 2) {
     79		iocbs += (dsds - 2) / 5;
     80		if ((dsds - 2) % 5)
     81			iocbs++;
     82	}
     83	return (iocbs);
     84}
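
/*
 * Same idea for the 64-bit path: the Command Type 3 IOCB carries two DSDs
 * and each Continuation Type 1 IOCB carries five, so dsds = 7 fits in
 * 1 + (7 - 2) / 5 = 2 entries and dsds = 8 takes three.
 */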
     85
     86/**
     87 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
     88 * @vha: HA context
     89 *
     90 * Returns a pointer to the Continuation Type 0 IOCB packet.
     91 */
     92static inline cont_entry_t *
     93qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
     94{
     95	cont_entry_t *cont_pkt;
     96	struct req_que *req = vha->req;
     97	/* Adjust ring index. */
     98	req->ring_index++;
     99	if (req->ring_index == req->length) {
    100		req->ring_index = 0;
    101		req->ring_ptr = req->ring;
    102	} else {
    103		req->ring_ptr++;
    104	}
    105
    106	cont_pkt = (cont_entry_t *)req->ring_ptr;
    107
    108	/* Load packet defaults. */
    109	put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);
    110
    111	return (cont_pkt);
    112}
    113
    114/**
    115 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
    116 * @vha: HA context
    117 * @req: request queue
    118 *
    119 * Returns a pointer to the continuation type 1 IOCB packet.
    120 */
    121cont_a64_entry_t *
    122qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
    123{
    124	cont_a64_entry_t *cont_pkt;
    125
    126	/* Adjust ring index. */
    127	req->ring_index++;
    128	if (req->ring_index == req->length) {
    129		req->ring_index = 0;
    130		req->ring_ptr = req->ring;
    131	} else {
    132		req->ring_ptr++;
    133	}
    134
    135	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
    136
    137	/* Load packet defaults. */
    138	put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
    139			   CONTINUE_A64_TYPE, &cont_pkt->entry_type);
    140
    141	return (cont_pkt);
    142}
    143
    144inline int
    145qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
    146{
    147	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    148
    149	/* We always use DIFF Bundling for best performance */
    150	*fw_prot_opts = 0;
    151
    152	/* Translate SCSI opcode to a protection opcode */
    153	switch (scsi_get_prot_op(cmd)) {
    154	case SCSI_PROT_READ_STRIP:
    155		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
    156		break;
    157	case SCSI_PROT_WRITE_INSERT:
    158		*fw_prot_opts |= PO_MODE_DIF_INSERT;
    159		break;
    160	case SCSI_PROT_READ_INSERT:
    161		*fw_prot_opts |= PO_MODE_DIF_INSERT;
    162		break;
    163	case SCSI_PROT_WRITE_STRIP:
    164		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
    165		break;
    166	case SCSI_PROT_READ_PASS:
    167	case SCSI_PROT_WRITE_PASS:
    168		if (cmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
    169			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
    170		else
    171			*fw_prot_opts |= PO_MODE_DIF_PASS;
    172		break;
    173	default:	/* Normal Request */
    174		*fw_prot_opts |= PO_MODE_DIF_PASS;
    175		break;
    176	}
    177
    178	if (!(cmd->prot_flags & SCSI_PROT_GUARD_CHECK))
    179		*fw_prot_opts |= PO_DISABLE_GUARD_CHECK;
    180
    181	return scsi_prot_sg_count(cmd);
    182}
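
/*
 * Note that the value returned above is simply scsi_prot_sg_count(cmd):
 * qla24xx_dif_start_scsi() treats a non-zero count as the cue to DMA-map
 * the protection scatter/gather list, while *fw_prot_opts only carries
 * the PO_MODE_DIF_* and guard-check bits derived from the protection op.
 */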
    183
     184/**
    185 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
    186 * capable IOCB types.
    187 *
    188 * @sp: SRB command to process
    189 * @cmd_pkt: Command type 2 IOCB
    190 * @tot_dsds: Total number of segments to transfer
    191 */
    192void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    193    uint16_t tot_dsds)
    194{
    195	uint16_t	avail_dsds;
    196	struct dsd32	*cur_dsd;
    197	scsi_qla_host_t	*vha;
    198	struct scsi_cmnd *cmd;
    199	struct scatterlist *sg;
    200	int i;
    201
    202	cmd = GET_CMD_SP(sp);
    203
    204	/* Update entry type to indicate Command Type 2 IOCB */
    205	put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);
    206
    207	/* No data transfer */
    208	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
    209		cmd_pkt->byte_count = cpu_to_le32(0);
    210		return;
    211	}
    212
    213	vha = sp->vha;
    214	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
    215
    216	/* Three DSDs are available in the Command Type 2 IOCB */
    217	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
    218	cur_dsd = cmd_pkt->dsd32;
    219
    220	/* Load data segments */
    221	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
    222		cont_entry_t *cont_pkt;
    223
    224		/* Allocate additional continuation packets? */
    225		if (avail_dsds == 0) {
    226			/*
    227			 * Seven DSDs are available in the Continuation
    228			 * Type 0 IOCB.
    229			 */
    230			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
    231			cur_dsd = cont_pkt->dsd;
    232			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
    233		}
    234
    235		append_dsd32(&cur_dsd, sg);
    236		avail_dsds--;
    237	}
    238}
    239
    240/**
    241 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
    242 * capable IOCB types.
    243 *
    244 * @sp: SRB command to process
    245 * @cmd_pkt: Command type 3 IOCB
    246 * @tot_dsds: Total number of segments to transfer
    247 */
    248void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    249    uint16_t tot_dsds)
    250{
    251	uint16_t	avail_dsds;
    252	struct dsd64	*cur_dsd;
    253	scsi_qla_host_t	*vha;
    254	struct scsi_cmnd *cmd;
    255	struct scatterlist *sg;
    256	int i;
    257
    258	cmd = GET_CMD_SP(sp);
    259
    260	/* Update entry type to indicate Command Type 3 IOCB */
    261	put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);
    262
    263	/* No data transfer */
    264	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
    265		cmd_pkt->byte_count = cpu_to_le32(0);
    266		return;
    267	}
    268
    269	vha = sp->vha;
    270	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
    271
    272	/* Two DSDs are available in the Command Type 3 IOCB */
    273	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
    274	cur_dsd = cmd_pkt->dsd64;
    275
    276	/* Load data segments */
    277	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
    278		cont_a64_entry_t *cont_pkt;
    279
    280		/* Allocate additional continuation packets? */
    281		if (avail_dsds == 0) {
    282			/*
    283			 * Five DSDs are available in the Continuation
    284			 * Type 1 IOCB.
    285			 */
    286			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
    287			cur_dsd = cont_pkt->dsd;
    288			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
    289		}
    290
    291		append_dsd64(&cur_dsd, sg);
    292		avail_dsds--;
    293	}
    294}
    295
    296/*
    297 * Find the first handle that is not in use, starting from
    298 * req->current_outstanding_cmd + 1. The caller must hold the lock that is
    299 * associated with @req.
    300 */
    301uint32_t qla2xxx_get_next_handle(struct req_que *req)
    302{
    303	uint32_t index, handle = req->current_outstanding_cmd;
    304
    305	for (index = 1; index < req->num_outstanding_cmds; index++) {
    306		handle++;
    307		if (handle == req->num_outstanding_cmds)
    308			handle = 1;
    309		if (!req->outstanding_cmds[handle])
    310			return handle;
    311	}
    312
    313	return 0;
    314}
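
/*
 * The loop above is a round-robin search of the outstanding-command
 * array: it starts just after the last handle handed out and wraps from
 * num_outstanding_cmds back to 1. Handle 0 is never returned as a valid
 * handle; it doubles as the "no free handle" indication that the
 * qla2x00_start_scsi()/qla24xx_start_scsi() callers check for.
 */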
    315
    316/**
    317 * qla2x00_start_scsi() - Send a SCSI command to the ISP
    318 * @sp: command to send to the ISP
    319 *
    320 * Returns non-zero if a failure occurred, else zero.
    321 */
    322int
    323qla2x00_start_scsi(srb_t *sp)
    324{
    325	int		nseg;
    326	unsigned long   flags;
    327	scsi_qla_host_t	*vha;
    328	struct scsi_cmnd *cmd;
    329	uint32_t	*clr_ptr;
    330	uint32_t	handle;
    331	cmd_entry_t	*cmd_pkt;
    332	uint16_t	cnt;
    333	uint16_t	req_cnt;
    334	uint16_t	tot_dsds;
    335	struct device_reg_2xxx __iomem *reg;
    336	struct qla_hw_data *ha;
    337	struct req_que *req;
    338	struct rsp_que *rsp;
    339
    340	/* Setup device pointers. */
    341	vha = sp->vha;
    342	ha = vha->hw;
    343	reg = &ha->iobase->isp;
    344	cmd = GET_CMD_SP(sp);
    345	req = ha->req_q_map[0];
    346	rsp = ha->rsp_q_map[0];
    347	/* So we know we haven't pci_map'ed anything yet */
    348	tot_dsds = 0;
    349
    350	/* Send marker if required */
    351	if (vha->marker_needed != 0) {
    352		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
    353		    QLA_SUCCESS) {
    354			return (QLA_FUNCTION_FAILED);
    355		}
    356		vha->marker_needed = 0;
    357	}
    358
    359	/* Acquire ring specific lock */
    360	spin_lock_irqsave(&ha->hardware_lock, flags);
    361
    362	handle = qla2xxx_get_next_handle(req);
    363	if (handle == 0)
    364		goto queuing_error;
    365
    366	/* Map the sg table so we have an accurate count of sg entries needed */
    367	if (scsi_sg_count(cmd)) {
    368		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
    369		    scsi_sg_count(cmd), cmd->sc_data_direction);
    370		if (unlikely(!nseg))
    371			goto queuing_error;
    372	} else
    373		nseg = 0;
    374
    375	tot_dsds = nseg;
    376
    377	/* Calculate the number of request entries needed. */
    378	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
    379	if (req->cnt < (req_cnt + 2)) {
    380		cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
    381		if (req->ring_index < cnt)
    382			req->cnt = cnt - req->ring_index;
    383		else
    384			req->cnt = req->length -
    385			    (req->ring_index - cnt);
    386		/* If still no head room then bail out */
    387		if (req->cnt < (req_cnt + 2))
    388			goto queuing_error;
    389	}
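	/*
	 * req->cnt caches the number of free request-ring entries. When it
	 * drops below req_cnt + 2, the ISP's request-queue out pointer is
	 * re-read and the free space recomputed from its distance to
	 * ring_index; if there is still not enough head room the command is
	 * failed rather than queued. The two spare entries presumably keep
	 * the ring from ever being driven completely full.
	 */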
    390
    391	/* Build command packet */
    392	req->current_outstanding_cmd = handle;
    393	req->outstanding_cmds[handle] = sp;
    394	sp->handle = handle;
    395	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    396	req->cnt -= req_cnt;
    397
    398	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
    399	cmd_pkt->handle = handle;
    400	/* Zero out remaining portion of packet. */
    401	clr_ptr = (uint32_t *)cmd_pkt + 2;
    402	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
    403	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
    404
    405	/* Set target ID and LUN number*/
    406	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
    407	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
    408	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
    409
    410	/* Load SCSI command packet. */
    411	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
    412	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
    413
    414	/* Build IOCB segments */
    415	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
    416
    417	/* Set total data segment count. */
    418	cmd_pkt->entry_count = (uint8_t)req_cnt;
    419	wmb();
    420
    421	/* Adjust ring index. */
    422	req->ring_index++;
    423	if (req->ring_index == req->length) {
    424		req->ring_index = 0;
    425		req->ring_ptr = req->ring;
    426	} else
    427		req->ring_ptr++;
    428
    429	sp->flags |= SRB_DMA_VALID;
    430
    431	/* Set chip new ring index. */
    432	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
    433	rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */
    434
    435	/* Manage unprocessed RIO/ZIO commands in response queue. */
    436	if (vha->flags.process_response_queue &&
    437	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
    438		qla2x00_process_response_queue(rsp);
    439
    440	spin_unlock_irqrestore(&ha->hardware_lock, flags);
    441	return (QLA_SUCCESS);
    442
    443queuing_error:
    444	if (tot_dsds)
    445		scsi_dma_unmap(cmd);
    446
    447	spin_unlock_irqrestore(&ha->hardware_lock, flags);
    448
    449	return (QLA_FUNCTION_FAILED);
    450}
    451
    452/**
    453 * qla2x00_start_iocbs() - Execute the IOCB command
    454 * @vha: HA context
    455 * @req: request queue
    456 */
    457void
    458qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
    459{
    460	struct qla_hw_data *ha = vha->hw;
    461	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
    462
    463	if (IS_P3P_TYPE(ha)) {
    464		qla82xx_start_iocbs(vha);
    465	} else {
    466		/* Adjust ring index. */
    467		req->ring_index++;
    468		if (req->ring_index == req->length) {
    469			req->ring_index = 0;
    470			req->ring_ptr = req->ring;
    471		} else
    472			req->ring_ptr++;
    473
    474		/* Set chip new ring index. */
    475		if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
    476			wrt_reg_dword(req->req_q_in, req->ring_index);
    477		} else if (IS_QLA83XX(ha)) {
    478			wrt_reg_dword(req->req_q_in, req->ring_index);
    479			rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
    480		} else if (IS_QLAFX00(ha)) {
    481			wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
    482			rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
    483			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
    484		} else if (IS_FWI2_CAPABLE(ha)) {
    485			wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
    486			rd_reg_dword_relaxed(&reg->isp24.req_q_in);
    487		} else {
    488			wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
    489				req->ring_index);
    490			rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
    491		}
    492	}
    493}
    494
    495/**
    496 * __qla2x00_marker() - Send a marker IOCB to the firmware.
    497 * @vha: HA context
    498 * @qpair: queue pair pointer
    499 * @loop_id: loop ID
    500 * @lun: LUN
    501 * @type: marker modifier
    502 *
    503 * Can be called from both normal and interrupt context.
    504 *
    505 * Returns non-zero if a failure occurred, else zero.
    506 */
    507static int
    508__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    509    uint16_t loop_id, uint64_t lun, uint8_t type)
    510{
    511	mrk_entry_t *mrk;
    512	struct mrk_entry_24xx *mrk24 = NULL;
    513	struct req_que *req = qpair->req;
    514	struct qla_hw_data *ha = vha->hw;
    515	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
    516
    517	mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
    518	if (mrk == NULL) {
    519		ql_log(ql_log_warn, base_vha, 0x3026,
    520		    "Failed to allocate Marker IOCB.\n");
    521
    522		return (QLA_FUNCTION_FAILED);
    523	}
    524
    525	mrk->entry_type = MARKER_TYPE;
    526	mrk->modifier = type;
    527	if (type != MK_SYNC_ALL) {
    528		if (IS_FWI2_CAPABLE(ha)) {
    529			mrk24 = (struct mrk_entry_24xx *) mrk;
    530			mrk24->nport_handle = cpu_to_le16(loop_id);
    531			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
    532			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
    533			mrk24->vp_index = vha->vp_idx;
    534			mrk24->handle = make_handle(req->id, mrk24->handle);
    535		} else {
    536			SET_TARGET_ID(ha, mrk->target, loop_id);
    537			mrk->lun = cpu_to_le16((uint16_t)lun);
    538		}
    539	}
    540	wmb();
    541
    542	qla2x00_start_iocbs(vha, req);
    543
    544	return (QLA_SUCCESS);
    545}
    546
    547int
    548qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    549    uint16_t loop_id, uint64_t lun, uint8_t type)
    550{
    551	int ret;
    552	unsigned long flags = 0;
    553
    554	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
    555	ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
    556	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
    557
    558	return (ret);
    559}
    560
    561/*
    562 * qla2x00_issue_marker
    563 *
    564 * Issue marker
    565 * Caller CAN have hardware lock held as specified by ha_locked parameter.
     566 * Might release it, then reacquire.
    567 */
    568int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
    569{
    570	if (ha_locked) {
    571		if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
    572					MK_SYNC_ALL) != QLA_SUCCESS)
    573			return QLA_FUNCTION_FAILED;
    574	} else {
    575		if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
    576					MK_SYNC_ALL) != QLA_SUCCESS)
    577			return QLA_FUNCTION_FAILED;
    578	}
    579	vha->marker_needed = 0;
    580
    581	return QLA_SUCCESS;
    582}
    583
    584static inline int
    585qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
    586	uint16_t tot_dsds)
    587{
    588	struct dsd64 *cur_dsd = NULL, *next_dsd;
    589	scsi_qla_host_t	*vha;
    590	struct qla_hw_data *ha;
    591	struct scsi_cmnd *cmd;
    592	struct	scatterlist *cur_seg;
    593	uint8_t avail_dsds;
    594	uint8_t first_iocb = 1;
    595	uint32_t dsd_list_len;
    596	struct dsd_dma *dsd_ptr;
    597	struct ct6_dsd *ctx;
    598	struct qla_qpair *qpair = sp->qpair;
    599
    600	cmd = GET_CMD_SP(sp);
    601
     602	/* Update entry type to indicate Command Type 6 IOCB */
    603	put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);
    604
    605	/* No data transfer */
    606	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
    607		cmd_pkt->byte_count = cpu_to_le32(0);
    608		return 0;
    609	}
    610
    611	vha = sp->vha;
    612	ha = vha->hw;
    613
    614	/* Set transfer direction */
    615	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
    616		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
    617		qpair->counters.output_bytes += scsi_bufflen(cmd);
    618		qpair->counters.output_requests++;
    619	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
    620		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
    621		qpair->counters.input_bytes += scsi_bufflen(cmd);
    622		qpair->counters.input_requests++;
    623	}
    624
    625	cur_seg = scsi_sglist(cmd);
    626	ctx = sp->u.scmd.ct6_ctx;
    627
    628	while (tot_dsds) {
    629		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
    630		    QLA_DSDS_PER_IOCB : tot_dsds;
    631		tot_dsds -= avail_dsds;
    632		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
    633
    634		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
    635		    struct dsd_dma, list);
    636		next_dsd = dsd_ptr->dsd_addr;
    637		list_del(&dsd_ptr->list);
    638		ha->gbl_dsd_avail--;
    639		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
    640		ctx->dsd_use_cnt++;
    641		ha->gbl_dsd_inuse++;
    642
    643		if (first_iocb) {
    644			first_iocb = 0;
    645			put_unaligned_le64(dsd_ptr->dsd_list_dma,
    646					   &cmd_pkt->fcp_dsd.address);
    647			cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
    648		} else {
    649			put_unaligned_le64(dsd_ptr->dsd_list_dma,
    650					   &cur_dsd->address);
    651			cur_dsd->length = cpu_to_le32(dsd_list_len);
    652			cur_dsd++;
    653		}
    654		cur_dsd = next_dsd;
    655		while (avail_dsds) {
    656			append_dsd64(&cur_dsd, cur_seg);
    657			cur_seg = sg_next(cur_seg);
    658			avail_dsds--;
    659		}
    660	}
    661
    662	/* Null termination */
    663	cur_dsd->address = 0;
    664	cur_dsd->length = 0;
    665	cur_dsd++;
    666	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
    667	return 0;
    668}
    669
    670/*
     671 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
    672 * for Command Type 6.
    673 *
    674 * @dsds: number of data segment descriptors needed
    675 *
     676 * Returns the number of DSD lists needed to store @dsds.
    677 */
    678static inline uint16_t
    679qla24xx_calc_dsd_lists(uint16_t dsds)
    680{
    681	uint16_t dsd_lists = 0;
    682
    683	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
    684	if (dsds % QLA_DSDS_PER_IOCB)
    685		dsd_lists++;
    686	return dsd_lists;
    687}
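
/*
 * This is just a ceiling division by QLA_DSDS_PER_IOCB: for example,
 * dsds equal to twice QLA_DSDS_PER_IOCB needs exactly two DSD lists, and
 * one more descriptor pushes the count to three.
 */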
    688
    689
    690/**
    691 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
    692 * IOCB types.
    693 *
    694 * @sp: SRB command to process
     695 * @cmd_pkt: Command type 7 IOCB
    696 * @tot_dsds: Total number of segments to transfer
    697 * @req: pointer to request queue
    698 */
    699inline void
    700qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    701	uint16_t tot_dsds, struct req_que *req)
    702{
    703	uint16_t	avail_dsds;
    704	struct dsd64	*cur_dsd;
    705	scsi_qla_host_t	*vha;
    706	struct scsi_cmnd *cmd;
    707	struct scatterlist *sg;
    708	int i;
    709	struct qla_qpair *qpair = sp->qpair;
    710
    711	cmd = GET_CMD_SP(sp);
    712
     713	/* Update entry type to indicate Command Type 7 IOCB */
    714	put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);
    715
    716	/* No data transfer */
    717	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
    718		cmd_pkt->byte_count = cpu_to_le32(0);
    719		return;
    720	}
    721
    722	vha = sp->vha;
    723
    724	/* Set transfer direction */
    725	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
    726		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
    727		qpair->counters.output_bytes += scsi_bufflen(cmd);
    728		qpair->counters.output_requests++;
    729	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
    730		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
    731		qpair->counters.input_bytes += scsi_bufflen(cmd);
    732		qpair->counters.input_requests++;
    733	}
    734
    735	/* One DSD is available in the Command Type 3 IOCB */
    736	avail_dsds = 1;
    737	cur_dsd = &cmd_pkt->dsd;
    738
    739	/* Load data segments */
    740
    741	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
    742		cont_a64_entry_t *cont_pkt;
    743
    744		/* Allocate additional continuation packets? */
    745		if (avail_dsds == 0) {
    746			/*
    747			 * Five DSDs are available in the Continuation
    748			 * Type 1 IOCB.
    749			 */
    750			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
    751			cur_dsd = cont_pkt->dsd;
    752			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
    753		}
    754
    755		append_dsd64(&cur_dsd, sg);
    756		avail_dsds--;
    757	}
    758}
    759
    760struct fw_dif_context {
    761	__le32	ref_tag;
    762	__le16	app_tag;
    763	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
    764	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
    765};
    766
    767/*
     768 * qla24xx_set_t10dif_tags - Extract Ref and App tags from SCSI command
    769 *
    770 */
    771static inline void
    772qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    773    unsigned int protcnt)
    774{
    775	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    776
    777	pkt->ref_tag = cpu_to_le32(scsi_prot_ref_tag(cmd));
    778
    779	if (cmd->prot_flags & SCSI_PROT_REF_CHECK &&
    780	    qla2x00_hba_err_chk_enabled(sp)) {
    781		pkt->ref_tag_mask[0] = 0xff;
    782		pkt->ref_tag_mask[1] = 0xff;
    783		pkt->ref_tag_mask[2] = 0xff;
    784		pkt->ref_tag_mask[3] = 0xff;
    785	}
    786
    787	pkt->app_tag = cpu_to_le16(0);
    788	pkt->app_tag_mask[0] = 0x0;
    789	pkt->app_tag_mask[1] = 0x0;
    790}
    791
    792int
    793qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
    794	uint32_t *partial)
    795{
    796	struct scatterlist *sg;
    797	uint32_t cumulative_partial, sg_len;
    798	dma_addr_t sg_dma_addr;
    799
    800	if (sgx->num_bytes == sgx->tot_bytes)
    801		return 0;
    802
    803	sg = sgx->cur_sg;
    804	cumulative_partial = sgx->tot_partial;
    805
    806	sg_dma_addr = sg_dma_address(sg);
    807	sg_len = sg_dma_len(sg);
    808
    809	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
    810
    811	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
    812		sgx->dma_len = (blk_sz - cumulative_partial);
    813		sgx->tot_partial = 0;
    814		sgx->num_bytes += blk_sz;
    815		*partial = 0;
    816	} else {
    817		sgx->dma_len = sg_len - sgx->bytes_consumed;
    818		sgx->tot_partial += sgx->dma_len;
    819		*partial = 1;
    820	}
    821
    822	sgx->bytes_consumed += sgx->dma_len;
    823
    824	if (sg_len == sgx->bytes_consumed) {
    825		sg = sg_next(sg);
    826		sgx->num_sg++;
    827		sgx->cur_sg = sg;
    828		sgx->bytes_consumed = 0;
    829	}
    830
    831	return 1;
    832}
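
/*
 * The helper above walks the data scatterlist one protection interval
 * (blk_sz bytes) at a time. It returns 1 with sgx->dma_addr/dma_len
 * describing the next chunk, sets *partial when the current scatterlist
 * element ends before completing an interval (so the caller holds off
 * consuming the matching 8-byte DIF tuple), and returns 0 once num_bytes
 * reaches tot_bytes.
 */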
    833
    834int
    835qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
    836	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
    837{
    838	void *next_dsd;
    839	uint8_t avail_dsds = 0;
    840	uint32_t dsd_list_len;
    841	struct dsd_dma *dsd_ptr;
    842	struct scatterlist *sg_prot;
    843	struct dsd64 *cur_dsd = dsd;
    844	uint16_t	used_dsds = tot_dsds;
    845	uint32_t	prot_int; /* protection interval */
    846	uint32_t	partial;
    847	struct qla2_sgx sgx;
    848	dma_addr_t	sle_dma;
    849	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
    850	struct scsi_cmnd *cmd;
    851
    852	memset(&sgx, 0, sizeof(struct qla2_sgx));
    853	if (sp) {
    854		cmd = GET_CMD_SP(sp);
    855		prot_int = scsi_prot_interval(cmd);
    856
    857		sgx.tot_bytes = scsi_bufflen(cmd);
    858		sgx.cur_sg = scsi_sglist(cmd);
    859		sgx.sp = sp;
    860
    861		sg_prot = scsi_prot_sglist(cmd);
    862	} else if (tc) {
    863		prot_int      = tc->blk_sz;
    864		sgx.tot_bytes = tc->bufflen;
    865		sgx.cur_sg    = tc->sg;
    866		sg_prot	      = tc->prot_sg;
    867	} else {
    868		BUG();
    869		return 1;
    870	}
    871
    872	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
    873
    874		sle_dma = sgx.dma_addr;
    875		sle_dma_len = sgx.dma_len;
    876alloc_and_fill:
    877		/* Allocate additional continuation packets? */
    878		if (avail_dsds == 0) {
    879			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
    880					QLA_DSDS_PER_IOCB : used_dsds;
    881			dsd_list_len = (avail_dsds + 1) * 12;
    882			used_dsds -= avail_dsds;
    883
    884			/* allocate tracking DS */
    885			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
    886			if (!dsd_ptr)
    887				return 1;
    888
    889			/* allocate new list */
    890			dsd_ptr->dsd_addr = next_dsd =
    891			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
    892				&dsd_ptr->dsd_list_dma);
    893
    894			if (!next_dsd) {
    895				/*
    896				 * Need to cleanup only this dsd_ptr, rest
    897				 * will be done by sp_free_dma()
    898				 */
    899				kfree(dsd_ptr);
    900				return 1;
    901			}
    902
    903			if (sp) {
    904				list_add_tail(&dsd_ptr->list,
    905					      &sp->u.scmd.crc_ctx->dsd_list);
    906
    907				sp->flags |= SRB_CRC_CTX_DSD_VALID;
    908			} else {
    909				list_add_tail(&dsd_ptr->list,
    910				    &(tc->ctx->dsd_list));
    911				*tc->ctx_dsd_alloced = 1;
    912			}
    913
    914
    915			/* add new list to cmd iocb or last list */
    916			put_unaligned_le64(dsd_ptr->dsd_list_dma,
    917					   &cur_dsd->address);
    918			cur_dsd->length = cpu_to_le32(dsd_list_len);
    919			cur_dsd = next_dsd;
    920		}
    921		put_unaligned_le64(sle_dma, &cur_dsd->address);
    922		cur_dsd->length = cpu_to_le32(sle_dma_len);
    923		cur_dsd++;
    924		avail_dsds--;
    925
    926		if (partial == 0) {
    927			/* Got a full protection interval */
    928			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
    929			sle_dma_len = 8;
    930
    931			tot_prot_dma_len += sle_dma_len;
    932			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
    933				tot_prot_dma_len = 0;
    934				sg_prot = sg_next(sg_prot);
    935			}
    936
    937			partial = 1; /* So as to not re-enter this block */
    938			goto alloc_and_fill;
    939		}
    940	}
    941	/* Null termination */
    942	cur_dsd->address = 0;
    943	cur_dsd->length = 0;
    944	cur_dsd++;
    945	return 0;
    946}
    947
    948int
    949qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
    950	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
    951{
    952	void *next_dsd;
    953	uint8_t avail_dsds = 0;
    954	uint32_t dsd_list_len;
    955	struct dsd_dma *dsd_ptr;
    956	struct scatterlist *sg, *sgl;
    957	struct dsd64 *cur_dsd = dsd;
    958	int	i;
    959	uint16_t	used_dsds = tot_dsds;
    960	struct scsi_cmnd *cmd;
    961
    962	if (sp) {
    963		cmd = GET_CMD_SP(sp);
    964		sgl = scsi_sglist(cmd);
    965	} else if (tc) {
    966		sgl = tc->sg;
    967	} else {
    968		BUG();
    969		return 1;
    970	}
    971
    972
    973	for_each_sg(sgl, sg, tot_dsds, i) {
    974		/* Allocate additional continuation packets? */
    975		if (avail_dsds == 0) {
    976			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
    977					QLA_DSDS_PER_IOCB : used_dsds;
    978			dsd_list_len = (avail_dsds + 1) * 12;
    979			used_dsds -= avail_dsds;
    980
    981			/* allocate tracking DS */
    982			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
    983			if (!dsd_ptr)
    984				return 1;
    985
    986			/* allocate new list */
    987			dsd_ptr->dsd_addr = next_dsd =
    988			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
    989				&dsd_ptr->dsd_list_dma);
    990
    991			if (!next_dsd) {
    992				/*
    993				 * Need to cleanup only this dsd_ptr, rest
    994				 * will be done by sp_free_dma()
    995				 */
    996				kfree(dsd_ptr);
    997				return 1;
    998			}
    999
   1000			if (sp) {
   1001				list_add_tail(&dsd_ptr->list,
   1002					      &sp->u.scmd.crc_ctx->dsd_list);
   1003
   1004				sp->flags |= SRB_CRC_CTX_DSD_VALID;
   1005			} else {
   1006				list_add_tail(&dsd_ptr->list,
   1007				    &(tc->ctx->dsd_list));
   1008				*tc->ctx_dsd_alloced = 1;
   1009			}
   1010
   1011			/* add new list to cmd iocb or last list */
   1012			put_unaligned_le64(dsd_ptr->dsd_list_dma,
   1013					   &cur_dsd->address);
   1014			cur_dsd->length = cpu_to_le32(dsd_list_len);
   1015			cur_dsd = next_dsd;
   1016		}
   1017		append_dsd64(&cur_dsd, sg);
   1018		avail_dsds--;
   1019
   1020	}
   1021	/* Null termination */
   1022	cur_dsd->address = 0;
   1023	cur_dsd->length = 0;
   1024	cur_dsd++;
   1025	return 0;
   1026}
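
/*
 * Both scatterlist walkers above chain their DSDs the same way: each
 * dma_pool allocation holds avail_dsds data descriptors plus one spare
 * slot, hence dsd_list_len = (avail_dsds + 1) * 12, with 12 bytes per
 * struct dsd64 (64-bit address plus 32-bit length). The spare slot is
 * what links to the next list in the chain, or holds the terminating
 * null descriptor when the final list is completely full.
 */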
   1027
   1028int
   1029qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
   1030	struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
   1031{
   1032	struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
   1033	struct scatterlist *sg, *sgl;
   1034	struct crc_context *difctx = NULL;
   1035	struct scsi_qla_host *vha;
   1036	uint dsd_list_len;
   1037	uint avail_dsds = 0;
   1038	uint used_dsds = tot_dsds;
   1039	bool dif_local_dma_alloc = false;
   1040	bool direction_to_device = false;
   1041	int i;
   1042
   1043	if (sp) {
   1044		struct scsi_cmnd *cmd = GET_CMD_SP(sp);
   1045
   1046		sgl = scsi_prot_sglist(cmd);
   1047		vha = sp->vha;
   1048		difctx = sp->u.scmd.crc_ctx;
   1049		direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
   1050		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
   1051		  "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
   1052			__func__, cmd, difctx, sp);
   1053	} else if (tc) {
   1054		vha = tc->vha;
   1055		sgl = tc->prot_sg;
   1056		difctx = tc->ctx;
   1057		direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
   1058	} else {
   1059		BUG();
   1060		return 1;
   1061	}
   1062
   1063	ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
   1064	    "%s: enter (write=%u)\n", __func__, direction_to_device);
   1065
   1066	/* if initiator doing write or target doing read */
   1067	if (direction_to_device) {
   1068		for_each_sg(sgl, sg, tot_dsds, i) {
   1069			u64 sle_phys = sg_phys(sg);
   1070
   1071			/* If SGE addr + len flips bits in upper 32-bits */
   1072			if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
   1073				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
   1074				    "%s: page boundary crossing (phys=%llx len=%x)\n",
   1075				    __func__, sle_phys, sg->length);
   1076
   1077				if (difctx) {
   1078					ha->dif_bundle_crossed_pages++;
   1079					dif_local_dma_alloc = true;
   1080				} else {
   1081					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
   1082					    vha, 0xe022,
   1083					    "%s: difctx pointer is NULL\n",
   1084					    __func__);
   1085				}
   1086				break;
   1087			}
   1088		}
   1089		ha->dif_bundle_writes++;
   1090	} else {
   1091		ha->dif_bundle_reads++;
   1092	}
   1093
   1094	if (ql2xdifbundlinginternalbuffers)
   1095		dif_local_dma_alloc = direction_to_device;
   1096
   1097	if (dif_local_dma_alloc) {
   1098		u32 track_difbundl_buf = 0;
   1099		u32 ldma_sg_len = 0;
   1100		u8 ldma_needed = 1;
   1101
   1102		difctx->no_dif_bundl = 0;
   1103		difctx->dif_bundl_len = 0;
   1104
   1105		/* Track DSD buffers */
   1106		INIT_LIST_HEAD(&difctx->ldif_dsd_list);
   1107		/* Track local DMA buffers */
   1108		INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);
   1109
   1110		for_each_sg(sgl, sg, tot_dsds, i) {
   1111			u32 sglen = sg_dma_len(sg);
   1112
   1113			ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
   1114			    "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
   1115			    __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
   1116			    difctx->dif_bundl_len, ldma_needed);
   1117
   1118			while (sglen) {
   1119				u32 xfrlen = 0;
   1120
   1121				if (ldma_needed) {
   1122					/*
   1123					 * Allocate list item to store
   1124					 * the DMA buffers
   1125					 */
   1126					dsd_ptr = kzalloc(sizeof(*dsd_ptr),
   1127					    GFP_ATOMIC);
   1128					if (!dsd_ptr) {
   1129						ql_dbg(ql_dbg_tgt, vha, 0xe024,
   1130						    "%s: failed alloc dsd_ptr\n",
   1131						    __func__);
   1132						return 1;
   1133					}
   1134					ha->dif_bundle_kallocs++;
   1135
   1136					/* allocate dma buffer */
   1137					dsd_ptr->dsd_addr = dma_pool_alloc
   1138						(ha->dif_bundl_pool, GFP_ATOMIC,
   1139						 &dsd_ptr->dsd_list_dma);
   1140					if (!dsd_ptr->dsd_addr) {
   1141						ql_dbg(ql_dbg_tgt, vha, 0xe024,
   1142						    "%s: failed alloc ->dsd_ptr\n",
   1143						    __func__);
   1144						/*
   1145						 * need to cleanup only this
   1146						 * dsd_ptr rest will be done
   1147						 * by sp_free_dma()
   1148						 */
   1149						kfree(dsd_ptr);
   1150						ha->dif_bundle_kallocs--;
   1151						return 1;
   1152					}
   1153					ha->dif_bundle_dma_allocs++;
   1154					ldma_needed = 0;
   1155					difctx->no_dif_bundl++;
   1156					list_add_tail(&dsd_ptr->list,
   1157					    &difctx->ldif_dma_hndl_list);
   1158				}
   1159
   1160				/* xfrlen is min of dma pool size and sglen */
   1161				xfrlen = (sglen >
   1162				   (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
   1163				    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
   1164				    sglen;
   1165
   1166				/* replace with local allocated dma buffer */
   1167				sg_pcopy_to_buffer(sgl, sg_nents(sgl),
   1168				    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
   1169				    difctx->dif_bundl_len);
   1170				difctx->dif_bundl_len += xfrlen;
   1171				sglen -= xfrlen;
   1172				ldma_sg_len += xfrlen;
   1173				if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
   1174				    sg_is_last(sg)) {
   1175					ldma_needed = 1;
   1176					ldma_sg_len = 0;
   1177				}
   1178			}
   1179		}
   1180
   1181		track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
   1182		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
   1183		    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
   1184		    difctx->dif_bundl_len, difctx->no_dif_bundl,
   1185		    track_difbundl_buf);
   1186
   1187		if (sp)
   1188			sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
   1189		else
   1190			tc->prot_flags = DIF_BUNDL_DMA_VALID;
   1191
   1192		list_for_each_entry_safe(dif_dsd, nxt_dsd,
   1193		    &difctx->ldif_dma_hndl_list, list) {
   1194			u32 sglen = (difctx->dif_bundl_len >
   1195			    DIF_BUNDLING_DMA_POOL_SIZE) ?
   1196			    DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;
   1197
   1198			BUG_ON(track_difbundl_buf == 0);
   1199
   1200			/* Allocate additional continuation packets? */
   1201			if (avail_dsds == 0) {
   1202				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
   1203				    0xe024,
   1204				    "%s: adding continuation iocb's\n",
   1205				    __func__);
   1206				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
   1207				    QLA_DSDS_PER_IOCB : used_dsds;
   1208				dsd_list_len = (avail_dsds + 1) * 12;
   1209				used_dsds -= avail_dsds;
   1210
   1211				/* allocate tracking DS */
   1212				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
   1213				if (!dsd_ptr) {
   1214					ql_dbg(ql_dbg_tgt, vha, 0xe026,
   1215					    "%s: failed alloc dsd_ptr\n",
   1216					    __func__);
   1217					return 1;
   1218				}
   1219				ha->dif_bundle_kallocs++;
   1220
   1221				difctx->no_ldif_dsd++;
   1222				/* allocate new list */
   1223				dsd_ptr->dsd_addr =
   1224				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
   1225					&dsd_ptr->dsd_list_dma);
   1226				if (!dsd_ptr->dsd_addr) {
   1227					ql_dbg(ql_dbg_tgt, vha, 0xe026,
   1228					    "%s: failed alloc ->dsd_addr\n",
   1229					    __func__);
   1230					/*
   1231					 * need to cleanup only this dsd_ptr
   1232					 *  rest will be done by sp_free_dma()
   1233					 */
   1234					kfree(dsd_ptr);
   1235					ha->dif_bundle_kallocs--;
   1236					return 1;
   1237				}
   1238				ha->dif_bundle_dma_allocs++;
   1239
   1240				if (sp) {
   1241					list_add_tail(&dsd_ptr->list,
   1242					    &difctx->ldif_dsd_list);
   1243					sp->flags |= SRB_CRC_CTX_DSD_VALID;
   1244				} else {
   1245					list_add_tail(&dsd_ptr->list,
   1246					    &difctx->ldif_dsd_list);
   1247					tc->ctx_dsd_alloced = 1;
   1248				}
   1249
   1250				/* add new list to cmd iocb or last list */
   1251				put_unaligned_le64(dsd_ptr->dsd_list_dma,
   1252						   &cur_dsd->address);
   1253				cur_dsd->length = cpu_to_le32(dsd_list_len);
   1254				cur_dsd = dsd_ptr->dsd_addr;
   1255			}
   1256			put_unaligned_le64(dif_dsd->dsd_list_dma,
   1257					   &cur_dsd->address);
   1258			cur_dsd->length = cpu_to_le32(sglen);
   1259			cur_dsd++;
   1260			avail_dsds--;
   1261			difctx->dif_bundl_len -= sglen;
   1262			track_difbundl_buf--;
   1263		}
   1264
   1265		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
   1266		    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
   1267			difctx->no_ldif_dsd, difctx->no_dif_bundl);
   1268	} else {
   1269		for_each_sg(sgl, sg, tot_dsds, i) {
   1270			/* Allocate additional continuation packets? */
   1271			if (avail_dsds == 0) {
   1272				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
   1273				    QLA_DSDS_PER_IOCB : used_dsds;
   1274				dsd_list_len = (avail_dsds + 1) * 12;
   1275				used_dsds -= avail_dsds;
   1276
   1277				/* allocate tracking DS */
   1278				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
   1279				if (!dsd_ptr) {
   1280					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
   1281					    vha, 0xe027,
   1282					    "%s: failed alloc dsd_dma...\n",
   1283					    __func__);
   1284					return 1;
   1285				}
   1286
   1287				/* allocate new list */
   1288				dsd_ptr->dsd_addr =
   1289				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
   1290					&dsd_ptr->dsd_list_dma);
   1291				if (!dsd_ptr->dsd_addr) {
   1292					/* need to cleanup only this dsd_ptr */
   1293					/* rest will be done by sp_free_dma() */
   1294					kfree(dsd_ptr);
   1295					return 1;
   1296				}
   1297
   1298				if (sp) {
   1299					list_add_tail(&dsd_ptr->list,
   1300					    &difctx->dsd_list);
   1301					sp->flags |= SRB_CRC_CTX_DSD_VALID;
   1302				} else {
   1303					list_add_tail(&dsd_ptr->list,
   1304					    &difctx->dsd_list);
   1305					tc->ctx_dsd_alloced = 1;
   1306				}
   1307
   1308				/* add new list to cmd iocb or last list */
   1309				put_unaligned_le64(dsd_ptr->dsd_list_dma,
   1310						   &cur_dsd->address);
   1311				cur_dsd->length = cpu_to_le32(dsd_list_len);
   1312				cur_dsd = dsd_ptr->dsd_addr;
   1313			}
   1314			append_dsd64(&cur_dsd, sg);
   1315			avail_dsds--;
   1316		}
   1317	}
   1318	/* Null termination */
   1319	cur_dsd->address = 0;
   1320	cur_dsd->length = 0;
   1321	cur_dsd++;
   1322	return 0;
   1323}
   1324
   1325/**
   1326 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
   1327 *							Type 6 IOCB types.
   1328 *
   1329 * @sp: SRB command to process
    1330 * @cmd_pkt: Command type CRC_2 IOCB
   1331 * @tot_dsds: Total number of segments to transfer
   1332 * @tot_prot_dsds: Total number of segments with protection information
   1333 * @fw_prot_opts: Protection options to be passed to firmware
   1334 */
   1335static inline int
   1336qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
   1337    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
   1338{
   1339	struct dsd64		*cur_dsd;
   1340	__be32			*fcp_dl;
   1341	scsi_qla_host_t		*vha;
   1342	struct scsi_cmnd	*cmd;
   1343	uint32_t		total_bytes = 0;
   1344	uint32_t		data_bytes;
   1345	uint32_t		dif_bytes;
   1346	uint8_t			bundling = 1;
   1347	uint16_t		blk_size;
   1348	struct crc_context	*crc_ctx_pkt = NULL;
   1349	struct qla_hw_data	*ha;
   1350	uint8_t			additional_fcpcdb_len;
   1351	uint16_t		fcp_cmnd_len;
   1352	struct fcp_cmnd		*fcp_cmnd;
   1353	dma_addr_t		crc_ctx_dma;
   1354
   1355	cmd = GET_CMD_SP(sp);
   1356
   1357	/* Update entry type to indicate Command Type CRC_2 IOCB */
   1358	put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
   1359
   1360	vha = sp->vha;
   1361	ha = vha->hw;
   1362
   1363	/* No data transfer */
   1364	data_bytes = scsi_bufflen(cmd);
   1365	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
   1366		cmd_pkt->byte_count = cpu_to_le32(0);
   1367		return QLA_SUCCESS;
   1368	}
   1369
   1370	cmd_pkt->vp_index = sp->vha->vp_idx;
   1371
   1372	/* Set transfer direction */
   1373	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
   1374		cmd_pkt->control_flags =
   1375		    cpu_to_le16(CF_WRITE_DATA);
   1376	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
   1377		cmd_pkt->control_flags =
   1378		    cpu_to_le16(CF_READ_DATA);
   1379	}
   1380
   1381	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
   1382	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
   1383	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
   1384	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
   1385		bundling = 0;
   1386
   1387	/* Allocate CRC context from global pool */
   1388	crc_ctx_pkt = sp->u.scmd.crc_ctx =
   1389	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
   1390
   1391	if (!crc_ctx_pkt)
   1392		goto crc_queuing_error;
   1393
   1394	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
   1395
   1396	sp->flags |= SRB_CRC_CTX_DMA_VALID;
   1397
   1398	/* Set handle */
   1399	crc_ctx_pkt->handle = cmd_pkt->handle;
   1400
   1401	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
   1402
   1403	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
   1404	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);
   1405
   1406	put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
   1407	cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
   1408
   1409	/* Determine SCSI command length -- align to 4 byte boundary */
   1410	if (cmd->cmd_len > 16) {
   1411		additional_fcpcdb_len = cmd->cmd_len - 16;
   1412		if ((cmd->cmd_len % 4) != 0) {
   1413			/* SCSI cmd > 16 bytes must be multiple of 4 */
   1414			goto crc_queuing_error;
   1415		}
   1416		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
   1417	} else {
   1418		additional_fcpcdb_len = 0;
   1419		fcp_cmnd_len = 12 + 16 + 4;
   1420	}
   1421
   1422	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
   1423
   1424	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
   1425	if (cmd->sc_data_direction == DMA_TO_DEVICE)
   1426		fcp_cmnd->additional_cdb_len |= 1;
   1427	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
   1428		fcp_cmnd->additional_cdb_len |= 2;
   1429
   1430	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
   1431	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
   1432	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
   1433	put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
   1434			   &cmd_pkt->fcp_cmnd_dseg_address);
   1435	fcp_cmnd->task_management = 0;
   1436	fcp_cmnd->task_attribute = TSK_SIMPLE;
   1437
   1438	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
   1439
    1440	/* Compute dif len and adjust data len to include protection */
   1441	dif_bytes = 0;
   1442	blk_size = cmd->device->sector_size;
   1443	dif_bytes = (data_bytes / blk_size) * 8;
   1444
   1445	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
   1446	case SCSI_PROT_READ_INSERT:
   1447	case SCSI_PROT_WRITE_STRIP:
   1448		total_bytes = data_bytes;
   1449		data_bytes += dif_bytes;
   1450		break;
   1451
   1452	case SCSI_PROT_READ_STRIP:
   1453	case SCSI_PROT_WRITE_INSERT:
   1454	case SCSI_PROT_READ_PASS:
   1455	case SCSI_PROT_WRITE_PASS:
   1456		total_bytes = data_bytes + dif_bytes;
   1457		break;
   1458	default:
   1459		BUG();
   1460	}
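	/*
	 * total_bytes is the length that actually moves on the wire and is
	 * what goes into cmd_pkt->byte_count and fcp_dl below. For the
	 * READ_INSERT/WRITE_STRIP cases the 8 bytes of DIF per block exist
	 * only on the host side, so dif_bytes is added to the CRC-context
	 * byte count (data_bytes) instead of to the wire length.
	 */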
   1461
   1462	if (!qla2x00_hba_err_chk_enabled(sp))
   1463		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
   1464	/* HBA error checking enabled */
   1465	else if (IS_PI_UNINIT_CAPABLE(ha)) {
   1466		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
   1467		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
   1468			SCSI_PROT_DIF_TYPE2))
   1469			fw_prot_opts |= BIT_10;
   1470		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
   1471		    SCSI_PROT_DIF_TYPE3)
   1472			fw_prot_opts |= BIT_11;
   1473	}
   1474
   1475	if (!bundling) {
   1476		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
   1477	} else {
   1478		/*
    1479		 * Configure Bundling if we need to fetch interleaving
   1480		 * protection PCI accesses
   1481		 */
   1482		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
   1483		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
   1484		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
   1485							tot_prot_dsds);
   1486		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
   1487	}
   1488
   1489	/* Finish the common fields of CRC pkt */
   1490	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
   1491	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
   1492	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
   1493	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
   1494	/* Fibre channel byte count */
   1495	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
   1496	fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
   1497	    additional_fcpcdb_len);
   1498	*fcp_dl = htonl(total_bytes);
   1499
   1500	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
   1501		cmd_pkt->byte_count = cpu_to_le32(0);
   1502		return QLA_SUCCESS;
   1503	}
   1504	/* Walks data segments */
   1505
   1506	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
   1507
   1508	if (!bundling && tot_prot_dsds) {
   1509		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
   1510			cur_dsd, tot_dsds, NULL))
   1511			goto crc_queuing_error;
   1512	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
   1513			(tot_dsds - tot_prot_dsds), NULL))
   1514		goto crc_queuing_error;
   1515
   1516	if (bundling && tot_prot_dsds) {
   1517		/* Walks dif segments */
   1518		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
   1519		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
   1520		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
   1521				tot_prot_dsds, NULL))
   1522			goto crc_queuing_error;
   1523	}
   1524	return QLA_SUCCESS;
   1525
   1526crc_queuing_error:
   1527	/* Cleanup will be performed by the caller */
   1528
   1529	return QLA_FUNCTION_FAILED;
   1530}
   1531
   1532/**
   1533 * qla24xx_start_scsi() - Send a SCSI command to the ISP
   1534 * @sp: command to send to the ISP
   1535 *
   1536 * Returns non-zero if a failure occurred, else zero.
   1537 */
   1538int
   1539qla24xx_start_scsi(srb_t *sp)
   1540{
   1541	int		nseg;
   1542	unsigned long   flags;
   1543	uint32_t	*clr_ptr;
   1544	uint32_t	handle;
   1545	struct cmd_type_7 *cmd_pkt;
   1546	uint16_t	cnt;
   1547	uint16_t	req_cnt;
   1548	uint16_t	tot_dsds;
   1549	struct req_que *req = NULL;
   1550	struct rsp_que *rsp;
   1551	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
   1552	struct scsi_qla_host *vha = sp->vha;
   1553	struct qla_hw_data *ha = vha->hw;
   1554
   1555	if (sp->fcport->edif.enable  && (sp->fcport->flags & FCF_FCSP_DEVICE))
   1556		return qla28xx_start_scsi_edif(sp);
   1557
   1558	/* Setup device pointers. */
   1559	req = vha->req;
   1560	rsp = req->rsp;
   1561
   1562	/* So we know we haven't pci_map'ed anything yet */
   1563	tot_dsds = 0;
   1564
   1565	/* Send marker if required */
   1566	if (vha->marker_needed != 0) {
   1567		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
   1568		    QLA_SUCCESS)
   1569			return QLA_FUNCTION_FAILED;
   1570		vha->marker_needed = 0;
   1571	}
   1572
   1573	/* Acquire ring specific lock */
   1574	spin_lock_irqsave(&ha->hardware_lock, flags);
   1575
   1576	handle = qla2xxx_get_next_handle(req);
   1577	if (handle == 0)
   1578		goto queuing_error;
   1579
   1580	/* Map the sg table so we have an accurate count of sg entries needed */
   1581	if (scsi_sg_count(cmd)) {
   1582		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
   1583		    scsi_sg_count(cmd), cmd->sc_data_direction);
   1584		if (unlikely(!nseg))
   1585			goto queuing_error;
   1586	} else
   1587		nseg = 0;
   1588
   1589	tot_dsds = nseg;
   1590	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
   1591
   1592	sp->iores.res_type = RESOURCE_INI;
   1593	sp->iores.iocb_cnt = req_cnt;
   1594	if (qla_get_iocbs(sp->qpair, &sp->iores))
   1595		goto queuing_error;
   1596
   1597	if (req->cnt < (req_cnt + 2)) {
   1598		if (IS_SHADOW_REG_CAPABLE(ha)) {
   1599			cnt = *req->out_ptr;
   1600		} else {
   1601			cnt = rd_reg_dword_relaxed(req->req_q_out);
   1602			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
   1603				goto queuing_error;
   1604		}
   1605
   1606		if (req->ring_index < cnt)
   1607			req->cnt = cnt - req->ring_index;
   1608		else
   1609			req->cnt = req->length -
   1610				(req->ring_index - cnt);
   1611		if (req->cnt < (req_cnt + 2))
   1612			goto queuing_error;
   1613	}
   1614
   1615	/* Build command packet. */
   1616	req->current_outstanding_cmd = handle;
   1617	req->outstanding_cmds[handle] = sp;
   1618	sp->handle = handle;
   1619	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
   1620	req->cnt -= req_cnt;
   1621
   1622	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
   1623	cmd_pkt->handle = make_handle(req->id, handle);
   1624
   1625	/* Zero out remaining portion of packet. */
   1626	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
   1627	clr_ptr = (uint32_t *)cmd_pkt + 2;
   1628	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
   1629	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
   1630
   1631	/* Set NPORT-ID and LUN number*/
   1632	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
   1633	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
   1634	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
   1635	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
   1636	cmd_pkt->vp_index = sp->vha->vp_idx;
   1637
   1638	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
   1639	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
   1640
   1641	cmd_pkt->task = TSK_SIMPLE;
   1642
   1643	/* Load SCSI command packet. */
   1644	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
   1645	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
   1646
   1647	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
   1648
   1649	/* Build IOCB segments */
   1650	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
   1651
   1652	/* Set total data segment count. */
   1653	cmd_pkt->entry_count = (uint8_t)req_cnt;
   1654	wmb();
   1655	/* Adjust ring index. */
   1656	req->ring_index++;
   1657	if (req->ring_index == req->length) {
   1658		req->ring_index = 0;
   1659		req->ring_ptr = req->ring;
   1660	} else
   1661		req->ring_ptr++;
   1662
   1663	sp->qpair->cmd_cnt++;
   1664	sp->flags |= SRB_DMA_VALID;
   1665
   1666	/* Set chip new ring index. */
   1667	wrt_reg_dword(req->req_q_in, req->ring_index);
   1668
   1669	/* Manage unprocessed RIO/ZIO commands in response queue. */
   1670	if (vha->flags.process_response_queue &&
   1671	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
   1672		qla24xx_process_response_queue(vha, rsp);
   1673
   1674	spin_unlock_irqrestore(&ha->hardware_lock, flags);
   1675	return QLA_SUCCESS;
   1676
   1677queuing_error:
   1678	if (tot_dsds)
   1679		scsi_dma_unmap(cmd);
   1680
   1681	qla_put_iocbs(sp->qpair, &sp->iores);
   1682	spin_unlock_irqrestore(&ha->hardware_lock, flags);
   1683
   1684	return QLA_FUNCTION_FAILED;
   1685}
   1686
   1687/**
   1688 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
   1689 * @sp: command to send to the ISP
   1690 *
   1691 * Returns non-zero if a failure occurred, else zero.
   1692 */
   1693int
   1694qla24xx_dif_start_scsi(srb_t *sp)
   1695{
   1696	int			nseg;
   1697	unsigned long		flags;
   1698	uint32_t		*clr_ptr;
   1699	uint32_t		handle;
   1700	uint16_t		cnt;
   1701	uint16_t		req_cnt = 0;
   1702	uint16_t		tot_dsds;
   1703	uint16_t		tot_prot_dsds;
   1704	uint16_t		fw_prot_opts = 0;
   1705	struct req_que		*req = NULL;
   1706	struct rsp_que		*rsp = NULL;
   1707	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
   1708	struct scsi_qla_host	*vha = sp->vha;
   1709	struct qla_hw_data	*ha = vha->hw;
   1710	struct cmd_type_crc_2	*cmd_pkt;
   1711	uint32_t		status = 0;
   1712
   1713#define QDSS_GOT_Q_SPACE	BIT_0
   1714
   1715	/* Only process protection or >16 cdb in this routine */
   1716	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
   1717		if (cmd->cmd_len <= 16)
   1718			return qla24xx_start_scsi(sp);
   1719	}
   1720
   1721	/* Setup device pointers. */
   1722	req = vha->req;
   1723	rsp = req->rsp;
   1724
   1725	/* So we know we haven't pci_map'ed anything yet */
   1726	tot_dsds = 0;
   1727
   1728	/* Send marker if required */
   1729	if (vha->marker_needed != 0) {
   1730		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
   1731		    QLA_SUCCESS)
   1732			return QLA_FUNCTION_FAILED;
   1733		vha->marker_needed = 0;
   1734	}
   1735
   1736	/* Acquire ring specific lock */
   1737	spin_lock_irqsave(&ha->hardware_lock, flags);
   1738
   1739	handle = qla2xxx_get_next_handle(req);
   1740	if (handle == 0)
   1741		goto queuing_error;
   1742
   1743	/* Compute number of required data segments */
   1744	/* Map the sg table so we have an accurate count of sg entries needed */
   1745	if (scsi_sg_count(cmd)) {
   1746		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
   1747		    scsi_sg_count(cmd), cmd->sc_data_direction);
   1748		if (unlikely(!nseg))
   1749			goto queuing_error;
   1750		else
   1751			sp->flags |= SRB_DMA_VALID;
   1752
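        		/*
        		 * For DIF insert/strip operations the transfer is re-counted
        		 * in whole logical blocks (qla24xx_get_one_block_sg() yields
        		 * one segment per block), presumably so each data segment
        		 * maps to complete blocks for HBA-side protection handling.
        		 */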
   1753		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
   1754		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
   1755			struct qla2_sgx sgx;
   1756			uint32_t	partial;
   1757
   1758			memset(&sgx, 0, sizeof(struct qla2_sgx));
   1759			sgx.tot_bytes = scsi_bufflen(cmd);
   1760			sgx.cur_sg = scsi_sglist(cmd);
   1761			sgx.sp = sp;
   1762
   1763			nseg = 0;
   1764			while (qla24xx_get_one_block_sg(
   1765			    cmd->device->sector_size, &sgx, &partial))
   1766				nseg++;
   1767		}
   1768	} else
   1769		nseg = 0;
   1770
   1771	/* number of required data segments */
   1772	tot_dsds = nseg;
   1773
   1774	/* Compute number of required protection segments */
   1775	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
   1776		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
   1777		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
   1778		if (unlikely(!nseg))
   1779			goto queuing_error;
   1780		else
   1781			sp->flags |= SRB_CRC_PROT_DMA_VALID;
   1782
   1783		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
   1784		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
   1785			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
   1786		}
   1787	} else {
   1788		nseg = 0;
   1789	}
   1790
   1791	req_cnt = 1;
   1792	/* Total Data and protection sg segment(s) */
   1793	tot_prot_dsds = nseg;
   1794	tot_dsds += nseg;
   1795
   1796	sp->iores.res_type = RESOURCE_INI;
   1797	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
   1798	if (qla_get_iocbs(sp->qpair, &sp->iores))
   1799		goto queuing_error;
   1800
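        	/*
        	 * Free-slot check on the circular request ring: fetch the
        	 * consumer index (from the shadow copy in host memory when the
        	 * ISP supports it, otherwise from the request-queue-out register)
        	 * and recompute the free count, keeping two entries in reserve.
        	 */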
   1801	if (req->cnt < (req_cnt + 2)) {
   1802		if (IS_SHADOW_REG_CAPABLE(ha)) {
   1803			cnt = *req->out_ptr;
   1804		} else {
   1805			cnt = rd_reg_dword_relaxed(req->req_q_out);
   1806			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
   1807				goto queuing_error;
   1808		}
   1809		if (req->ring_index < cnt)
   1810			req->cnt = cnt - req->ring_index;
   1811		else
   1812			req->cnt = req->length -
   1813				(req->ring_index - cnt);
   1814		if (req->cnt < (req_cnt + 2))
   1815			goto queuing_error;
   1816	}
   1817
   1818	status |= QDSS_GOT_Q_SPACE;
   1819
   1820	/* Build header part of command packet (excluding the OPCODE). */
   1821	req->current_outstanding_cmd = handle;
   1822	req->outstanding_cmds[handle] = sp;
   1823	sp->handle = handle;
   1824	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
   1825	req->cnt -= req_cnt;
   1826
   1827	/* Fill-in common area */
   1828	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
   1829	cmd_pkt->handle = make_handle(req->id, handle);
   1830
   1831	clr_ptr = (uint32_t *)cmd_pkt + 2;
   1832	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
   1833
    1834	/* Set NPORT-ID and LUN number */
   1835	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
   1836	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
   1837	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
   1838	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
   1839
   1840	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
   1841	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
   1842
   1843	/* Total Data and protection segment(s) */
   1844	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
   1845
   1846	/* Build IOCB segments and adjust for data protection segments */
   1847	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
   1848	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
   1849		QLA_SUCCESS)
   1850		goto queuing_error;
   1851
   1852	cmd_pkt->entry_count = (uint8_t)req_cnt;
   1853	/* Specify response queue number where completion should happen */
   1854	cmd_pkt->entry_status = (uint8_t) rsp->id;
   1855	cmd_pkt->timeout = cpu_to_le16(0);
   1856	wmb();
   1857
   1858	/* Adjust ring index. */
   1859	req->ring_index++;
   1860	if (req->ring_index == req->length) {
   1861		req->ring_index = 0;
   1862		req->ring_ptr = req->ring;
   1863	} else
   1864		req->ring_ptr++;
   1865
   1866	sp->qpair->cmd_cnt++;
   1867	/* Set chip new ring index. */
   1868	wrt_reg_dword(req->req_q_in, req->ring_index);
   1869
   1870	/* Manage unprocessed RIO/ZIO commands in response queue. */
   1871	if (vha->flags.process_response_queue &&
   1872	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
   1873		qla24xx_process_response_queue(vha, rsp);
   1874
   1875	spin_unlock_irqrestore(&ha->hardware_lock, flags);
   1876
   1877	return QLA_SUCCESS;
   1878
   1879queuing_error:
   1880	if (status & QDSS_GOT_Q_SPACE) {
   1881		req->outstanding_cmds[handle] = NULL;
   1882		req->cnt += req_cnt;
   1883	}
   1884	/* Cleanup will be performed by the caller (queuecommand) */
   1885
   1886	qla_put_iocbs(sp->qpair, &sp->iores);
   1887	spin_unlock_irqrestore(&ha->hardware_lock, flags);
   1888
   1889	return QLA_FUNCTION_FAILED;
   1890}
   1891
   1892/**
   1893 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
   1894 * @sp: command to send to the ISP
   1895 *
   1896 * Returns non-zero if a failure occurred, else zero.
   1897 */
   1898static int
   1899qla2xxx_start_scsi_mq(srb_t *sp)
   1900{
   1901	int		nseg;
   1902	unsigned long   flags;
   1903	uint32_t	*clr_ptr;
   1904	uint32_t	handle;
   1905	struct cmd_type_7 *cmd_pkt;
   1906	uint16_t	cnt;
   1907	uint16_t	req_cnt;
   1908	uint16_t	tot_dsds;
   1909	struct req_que *req = NULL;
   1910	struct rsp_que *rsp;
   1911	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
   1912	struct scsi_qla_host *vha = sp->fcport->vha;
   1913	struct qla_hw_data *ha = vha->hw;
   1914	struct qla_qpair *qpair = sp->qpair;
   1915
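        	/* FC-SP (EDIF) sessions are routed to the secure I/O path. */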
   1916	if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE))
   1917		return qla28xx_start_scsi_edif(sp);
   1918
   1919	/* Acquire qpair specific lock */
   1920	spin_lock_irqsave(&qpair->qp_lock, flags);
   1921
   1922	/* Setup qpair pointers */
   1923	req = qpair->req;
   1924	rsp = qpair->rsp;
   1925
   1926	/* So we know we haven't pci_map'ed anything yet */
   1927	tot_dsds = 0;
   1928
   1929	/* Send marker if required */
   1930	if (vha->marker_needed != 0) {
   1931		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
   1932		    QLA_SUCCESS) {
   1933			spin_unlock_irqrestore(&qpair->qp_lock, flags);
   1934			return QLA_FUNCTION_FAILED;
   1935		}
   1936		vha->marker_needed = 0;
   1937	}
   1938
   1939	handle = qla2xxx_get_next_handle(req);
   1940	if (handle == 0)
   1941		goto queuing_error;
   1942
   1943	/* Map the sg table so we have an accurate count of sg entries needed */
   1944	if (scsi_sg_count(cmd)) {
   1945		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
   1946		    scsi_sg_count(cmd), cmd->sc_data_direction);
   1947		if (unlikely(!nseg))
   1948			goto queuing_error;
   1949	} else
   1950		nseg = 0;
   1951
   1952	tot_dsds = nseg;
   1953	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
   1954
   1955	sp->iores.res_type = RESOURCE_INI;
   1956	sp->iores.iocb_cnt = req_cnt;
   1957	if (qla_get_iocbs(sp->qpair, &sp->iores))
   1958		goto queuing_error;
   1959
   1960	if (req->cnt < (req_cnt + 2)) {
   1961		if (IS_SHADOW_REG_CAPABLE(ha)) {
   1962			cnt = *req->out_ptr;
   1963		} else {
   1964			cnt = rd_reg_dword_relaxed(req->req_q_out);
   1965			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
   1966				goto queuing_error;
   1967		}
   1968
   1969		if (req->ring_index < cnt)
   1970			req->cnt = cnt - req->ring_index;
   1971		else
   1972			req->cnt = req->length -
   1973				(req->ring_index - cnt);
   1974		if (req->cnt < (req_cnt + 2))
   1975			goto queuing_error;
   1976	}
   1977
   1978	/* Build command packet. */
   1979	req->current_outstanding_cmd = handle;
   1980	req->outstanding_cmds[handle] = sp;
   1981	sp->handle = handle;
   1982	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
   1983	req->cnt -= req_cnt;
   1984
   1985	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
   1986	cmd_pkt->handle = make_handle(req->id, handle);
   1987
   1988	/* Zero out remaining portion of packet. */
   1989	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
   1990	clr_ptr = (uint32_t *)cmd_pkt + 2;
   1991	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
   1992	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
   1993
    1994	/* Set NPORT-ID and LUN number */
   1995	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
   1996	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
   1997	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
   1998	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
   1999	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
   2000
   2001	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
   2002	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
   2003
   2004	cmd_pkt->task = TSK_SIMPLE;
   2005
   2006	/* Load SCSI command packet. */
   2007	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
   2008	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
   2009
   2010	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
   2011
   2012	/* Build IOCB segments */
   2013	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
   2014
   2015	/* Set total data segment count. */
   2016	cmd_pkt->entry_count = (uint8_t)req_cnt;
   2017	wmb();
   2018	/* Adjust ring index. */
   2019	req->ring_index++;
   2020	if (req->ring_index == req->length) {
   2021		req->ring_index = 0;
   2022		req->ring_ptr = req->ring;
   2023	} else
   2024		req->ring_ptr++;
   2025
   2026	sp->qpair->cmd_cnt++;
   2027	sp->flags |= SRB_DMA_VALID;
   2028
   2029	/* Set chip new ring index. */
   2030	wrt_reg_dword(req->req_q_in, req->ring_index);
   2031
   2032	/* Manage unprocessed RIO/ZIO commands in response queue. */
   2033	if (vha->flags.process_response_queue &&
   2034	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
   2035		qla24xx_process_response_queue(vha, rsp);
   2036
   2037	spin_unlock_irqrestore(&qpair->qp_lock, flags);
   2038	return QLA_SUCCESS;
   2039
   2040queuing_error:
   2041	if (tot_dsds)
   2042		scsi_dma_unmap(cmd);
   2043
   2044	qla_put_iocbs(sp->qpair, &sp->iores);
   2045	spin_unlock_irqrestore(&qpair->qp_lock, flags);
   2046
   2047	return QLA_FUNCTION_FAILED;
   2048}
   2049
   2050
   2051/**
   2052 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
   2053 * @sp: command to send to the ISP
   2054 *
   2055 * Returns non-zero if a failure occurred, else zero.
   2056 */
   2057int
   2058qla2xxx_dif_start_scsi_mq(srb_t *sp)
   2059{
   2060	int			nseg;
   2061	unsigned long		flags;
   2062	uint32_t		*clr_ptr;
   2063	uint32_t		handle;
   2064	uint16_t		cnt;
   2065	uint16_t		req_cnt = 0;
   2066	uint16_t		tot_dsds;
   2067	uint16_t		tot_prot_dsds;
   2068	uint16_t		fw_prot_opts = 0;
   2069	struct req_que		*req = NULL;
   2070	struct rsp_que		*rsp = NULL;
   2071	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
   2072	struct scsi_qla_host	*vha = sp->fcport->vha;
   2073	struct qla_hw_data	*ha = vha->hw;
   2074	struct cmd_type_crc_2	*cmd_pkt;
   2075	uint32_t		status = 0;
   2076	struct qla_qpair	*qpair = sp->qpair;
   2077
   2078#define QDSS_GOT_Q_SPACE	BIT_0
   2079
   2080	/* Check for host side state */
   2081	if (!qpair->online) {
   2082		cmd->result = DID_NO_CONNECT << 16;
   2083		return QLA_INTERFACE_ERROR;
   2084	}
   2085
   2086	if (!qpair->difdix_supported &&
   2087		scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
   2088		cmd->result = DID_NO_CONNECT << 16;
   2089		return QLA_INTERFACE_ERROR;
   2090	}
   2091
    2092	/* Only process protection I/O or CDBs longer than 16 bytes here */
   2093	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
   2094		if (cmd->cmd_len <= 16)
   2095			return qla2xxx_start_scsi_mq(sp);
   2096	}
   2097
   2098	spin_lock_irqsave(&qpair->qp_lock, flags);
   2099
   2100	/* Setup qpair pointers */
   2101	rsp = qpair->rsp;
   2102	req = qpair->req;
   2103
   2104	/* So we know we haven't pci_map'ed anything yet */
   2105	tot_dsds = 0;
   2106
   2107	/* Send marker if required */
   2108	if (vha->marker_needed != 0) {
   2109		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
   2110		    QLA_SUCCESS) {
   2111			spin_unlock_irqrestore(&qpair->qp_lock, flags);
   2112			return QLA_FUNCTION_FAILED;
   2113		}
   2114		vha->marker_needed = 0;
   2115	}
   2116
   2117	handle = qla2xxx_get_next_handle(req);
   2118	if (handle == 0)
   2119		goto queuing_error;
   2120
   2121	/* Compute number of required data segments */
   2122	/* Map the sg table so we have an accurate count of sg entries needed */
   2123	if (scsi_sg_count(cmd)) {
   2124		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
   2125		    scsi_sg_count(cmd), cmd->sc_data_direction);
   2126		if (unlikely(!nseg))
   2127			goto queuing_error;
   2128		else
   2129			sp->flags |= SRB_DMA_VALID;
   2130
   2131		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
   2132		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
   2133			struct qla2_sgx sgx;
   2134			uint32_t	partial;
   2135
   2136			memset(&sgx, 0, sizeof(struct qla2_sgx));
   2137			sgx.tot_bytes = scsi_bufflen(cmd);
   2138			sgx.cur_sg = scsi_sglist(cmd);
   2139			sgx.sp = sp;
   2140
   2141			nseg = 0;
   2142			while (qla24xx_get_one_block_sg(
   2143			    cmd->device->sector_size, &sgx, &partial))
   2144				nseg++;
   2145		}
   2146	} else
   2147		nseg = 0;
   2148
   2149	/* number of required data segments */
   2150	tot_dsds = nseg;
   2151
   2152	/* Compute number of required protection segments */
   2153	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
   2154		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
   2155		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
   2156		if (unlikely(!nseg))
   2157			goto queuing_error;
   2158		else
   2159			sp->flags |= SRB_CRC_PROT_DMA_VALID;
   2160
   2161		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
   2162		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
   2163			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
   2164		}
   2165	} else {
   2166		nseg = 0;
   2167	}
   2168
   2169	req_cnt = 1;
   2170	/* Total Data and protection sg segment(s) */
   2171	tot_prot_dsds = nseg;
   2172	tot_dsds += nseg;
   2173
   2174	sp->iores.res_type = RESOURCE_INI;
   2175	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
   2176	if (qla_get_iocbs(sp->qpair, &sp->iores))
   2177		goto queuing_error;
   2178
   2179	if (req->cnt < (req_cnt + 2)) {
   2180		if (IS_SHADOW_REG_CAPABLE(ha)) {
   2181			cnt = *req->out_ptr;
   2182		} else {
   2183			cnt = rd_reg_dword_relaxed(req->req_q_out);
   2184			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
   2185				goto queuing_error;
   2186		}
   2187
   2188		if (req->ring_index < cnt)
   2189			req->cnt = cnt - req->ring_index;
   2190		else
   2191			req->cnt = req->length -
   2192				(req->ring_index - cnt);
   2193		if (req->cnt < (req_cnt + 2))
   2194			goto queuing_error;
   2195	}
   2196
   2197	status |= QDSS_GOT_Q_SPACE;
   2198
   2199	/* Build header part of command packet (excluding the OPCODE). */
   2200	req->current_outstanding_cmd = handle;
   2201	req->outstanding_cmds[handle] = sp;
   2202	sp->handle = handle;
   2203	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
   2204	req->cnt -= req_cnt;
   2205
   2206	/* Fill-in common area */
   2207	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
   2208	cmd_pkt->handle = make_handle(req->id, handle);
   2209
   2210	clr_ptr = (uint32_t *)cmd_pkt + 2;
   2211	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
   2212
    2213	/* Set NPORT-ID and LUN number */
   2214	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
   2215	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
   2216	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
   2217	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
   2218
   2219	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
   2220	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
   2221
   2222	/* Total Data and protection segment(s) */
   2223	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
   2224
   2225	/* Build IOCB segments and adjust for data protection segments */
   2226	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
   2227	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
   2228		QLA_SUCCESS)
   2229		goto queuing_error;
   2230
   2231	cmd_pkt->entry_count = (uint8_t)req_cnt;
   2232	cmd_pkt->timeout = cpu_to_le16(0);
   2233	wmb();
   2234
   2235	/* Adjust ring index. */
   2236	req->ring_index++;
   2237	if (req->ring_index == req->length) {
   2238		req->ring_index = 0;
   2239		req->ring_ptr = req->ring;
   2240	} else
   2241		req->ring_ptr++;
   2242
   2243	sp->qpair->cmd_cnt++;
   2244	/* Set chip new ring index. */
   2245	wrt_reg_dword(req->req_q_in, req->ring_index);
   2246
   2247	/* Manage unprocessed RIO/ZIO commands in response queue. */
   2248	if (vha->flags.process_response_queue &&
   2249	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
   2250		qla24xx_process_response_queue(vha, rsp);
   2251
   2252	spin_unlock_irqrestore(&qpair->qp_lock, flags);
   2253
   2254	return QLA_SUCCESS;
   2255
   2256queuing_error:
   2257	if (status & QDSS_GOT_Q_SPACE) {
   2258		req->outstanding_cmds[handle] = NULL;
   2259		req->cnt += req_cnt;
   2260	}
   2261	/* Cleanup will be performed by the caller (queuecommand) */
   2262
   2263	qla_put_iocbs(sp->qpair, &sp->iores);
   2264	spin_unlock_irqrestore(&qpair->qp_lock, flags);
   2265
   2266	return QLA_FUNCTION_FAILED;
   2267}
   2268
   2269/* Generic Control-SRB manipulation functions. */
   2270
   2271/* hardware_lock assumed to be held. */
   2272
   2273void *
   2274__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
   2275{
   2276	scsi_qla_host_t *vha = qpair->vha;
   2277	struct qla_hw_data *ha = vha->hw;
   2278	struct req_que *req = qpair->req;
   2279	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
   2280	uint32_t handle;
   2281	request_t *pkt;
   2282	uint16_t cnt, req_cnt;
   2283
   2284	pkt = NULL;
   2285	req_cnt = 1;
   2286	handle = 0;
   2287
   2288	if (sp && (sp->type != SRB_SCSI_CMD)) {
   2289		/* Adjust entry-counts as needed. */
   2290		req_cnt = sp->iocbs;
   2291	}
   2292
   2293	/* Check for room on request queue. */
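        	/*
        	 * The consumer index comes from the shadow copy in host memory
        	 * when shadow registers are in use; otherwise it is read from the
        	 * chip-family specific request-queue-out register before the free
        	 * count is recomputed.
        	 */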
   2294	if (req->cnt < req_cnt + 2) {
   2295		if (qpair->use_shadow_reg)
   2296			cnt = *req->out_ptr;
   2297		else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
   2298		    IS_QLA28XX(ha))
   2299			cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
   2300		else if (IS_P3P_TYPE(ha))
   2301			cnt = rd_reg_dword(reg->isp82.req_q_out);
   2302		else if (IS_FWI2_CAPABLE(ha))
   2303			cnt = rd_reg_dword(&reg->isp24.req_q_out);
   2304		else if (IS_QLAFX00(ha))
   2305			cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
   2306		else
   2307			cnt = qla2x00_debounce_register(
   2308			    ISP_REQ_Q_OUT(ha, &reg->isp));
   2309
   2310		if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) {
   2311			qla_schedule_eeh_work(vha);
   2312			return NULL;
   2313		}
   2314
   2315		if  (req->ring_index < cnt)
   2316			req->cnt = cnt - req->ring_index;
   2317		else
   2318			req->cnt = req->length -
   2319			    (req->ring_index - cnt);
   2320	}
   2321	if (req->cnt < req_cnt + 2)
   2322		goto queuing_error;
   2323
   2324	if (sp) {
   2325		handle = qla2xxx_get_next_handle(req);
   2326		if (handle == 0) {
   2327			ql_log(ql_log_warn, vha, 0x700b,
   2328			    "No room on outstanding cmd array.\n");
   2329			goto queuing_error;
   2330		}
   2331
   2332		/* Prep command array. */
   2333		req->current_outstanding_cmd = handle;
   2334		req->outstanding_cmds[handle] = sp;
   2335		sp->handle = handle;
   2336	}
   2337
   2338	/* Prep packet */
   2339	req->cnt -= req_cnt;
   2340	pkt = req->ring_ptr;
   2341	memset(pkt, 0, REQUEST_ENTRY_SIZE);
   2342	if (IS_QLAFX00(ha)) {
   2343		wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
   2344		wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
   2345	} else {
   2346		pkt->entry_count = req_cnt;
   2347		pkt->handle = handle;
   2348	}
   2349
   2350	return pkt;
   2351
   2352queuing_error:
   2353	qpair->tgt_counters.num_alloc_iocb_failed++;
   2354	return pkt;
   2355}
   2356
   2357void *
   2358qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
   2359{
   2360	scsi_qla_host_t *vha = qpair->vha;
   2361
   2362	if (qla2x00_reset_active(vha))
   2363		return NULL;
   2364
   2365	return __qla2x00_alloc_iocbs(qpair, sp);
   2366}
   2367
   2368void *
   2369qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
   2370{
   2371	return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
   2372}
   2373
   2374static void
   2375qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
   2376{
   2377	struct srb_iocb *lio = &sp->u.iocb_cmd;
   2378
   2379	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
   2380	logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
   2381	if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
   2382		logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
   2383		if (sp->vha->flags.nvme_first_burst)
   2384			logio->io_parameter[0] =
   2385				cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
   2386		if (sp->vha->flags.nvme2_enabled) {
   2387			/* Set service parameter BIT_7 for NVME CONF support */
   2388			logio->io_parameter[0] |=
   2389				cpu_to_le32(NVME_PRLI_SP_CONF);
   2390			/* Set service parameter BIT_8 for SLER support */
   2391			logio->io_parameter[0] |=
   2392				cpu_to_le32(NVME_PRLI_SP_SLER);
   2393			/* Set service parameter BIT_9 for PI control support */
   2394			logio->io_parameter[0] |=
   2395				cpu_to_le32(NVME_PRLI_SP_PI_CTRL);
   2396		}
   2397	}
   2398
   2399	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
   2400	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
   2401	logio->port_id[1] = sp->fcport->d_id.b.area;
   2402	logio->port_id[2] = sp->fcport->d_id.b.domain;
   2403	logio->vp_index = sp->vha->vp_idx;
   2404}
   2405
   2406static void
   2407qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
   2408{
   2409	struct srb_iocb *lio = &sp->u.iocb_cmd;
   2410
   2411	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
   2412	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
   2413
   2414	if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
   2415		logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
   2416	} else {
   2417		logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
   2418		if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
   2419			logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
   2420		if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
   2421			logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
   2422		if (lio->u.logio.flags & SRB_LOGIN_FCSP) {
   2423			logio->control_flags |=
   2424			    cpu_to_le16(LCF_COMMON_FEAT | LCF_SKIP_PRLI);
   2425			logio->io_parameter[0] =
   2426			    cpu_to_le32(LIO_COMM_FEAT_FCSP | LIO_COMM_FEAT_CIO);
   2427		}
   2428	}
   2429	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
   2430	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
   2431	logio->port_id[1] = sp->fcport->d_id.b.area;
   2432	logio->port_id[2] = sp->fcport->d_id.b.domain;
   2433	logio->vp_index = sp->vha->vp_idx;
   2434}
   2435
   2436static void
   2437qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
   2438{
   2439	struct qla_hw_data *ha = sp->vha->hw;
   2440	struct srb_iocb *lio = &sp->u.iocb_cmd;
   2441	uint16_t opts;
   2442
   2443	mbx->entry_type = MBX_IOCB_TYPE;
   2444	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
   2445	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
   2446	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
   2447	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
   2448	if (HAS_EXTENDED_IDS(ha)) {
   2449		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
   2450		mbx->mb10 = cpu_to_le16(opts);
   2451	} else {
   2452		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
   2453	}
   2454	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
   2455	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
   2456	    sp->fcport->d_id.b.al_pa);
   2457	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
   2458}
   2459
   2460static void
   2461qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
   2462{
   2463	u16 control_flags = LCF_COMMAND_LOGO;
   2464	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
   2465
   2466	if (sp->fcport->explicit_logout) {
   2467		control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
   2468	} else {
   2469		control_flags |= LCF_IMPL_LOGO;
   2470
   2471		if (!sp->fcport->keep_nport_handle)
   2472			control_flags |= LCF_FREE_NPORT;
   2473	}
   2474
   2475	logio->control_flags = cpu_to_le16(control_flags);
   2476	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
   2477	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
   2478	logio->port_id[1] = sp->fcport->d_id.b.area;
   2479	logio->port_id[2] = sp->fcport->d_id.b.domain;
   2480	logio->vp_index = sp->vha->vp_idx;
   2481}
   2482
   2483static void
   2484qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
   2485{
   2486	struct qla_hw_data *ha = sp->vha->hw;
   2487
   2488	mbx->entry_type = MBX_IOCB_TYPE;
   2489	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
   2490	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
   2491	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
   2492	    cpu_to_le16(sp->fcport->loop_id) :
   2493	    cpu_to_le16(sp->fcport->loop_id << 8);
   2494	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
   2495	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
   2496	    sp->fcport->d_id.b.al_pa);
   2497	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
    2498	/* Implicit: mbx->mb10 = 0. */
   2499}
   2500
   2501static void
   2502qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
   2503{
   2504	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
   2505	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
   2506	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
   2507	logio->vp_index = sp->vha->vp_idx;
   2508}
   2509
   2510static void
   2511qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
   2512{
   2513	struct qla_hw_data *ha = sp->vha->hw;
   2514
   2515	mbx->entry_type = MBX_IOCB_TYPE;
   2516	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
   2517	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
   2518	if (HAS_EXTENDED_IDS(ha)) {
   2519		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
   2520		mbx->mb10 = cpu_to_le16(BIT_0);
   2521	} else {
   2522		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
   2523	}
   2524	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
   2525	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
   2526	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
   2527	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
   2528	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
   2529}
   2530
   2531static void
   2532qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
   2533{
   2534	uint32_t flags;
   2535	uint64_t lun;
   2536	struct fc_port *fcport = sp->fcport;
   2537	scsi_qla_host_t *vha = fcport->vha;
   2538	struct qla_hw_data *ha = vha->hw;
   2539	struct srb_iocb *iocb = &sp->u.iocb_cmd;
   2540	struct req_que *req = vha->req;
   2541
   2542	flags = iocb->u.tmf.flags;
   2543	lun = iocb->u.tmf.lun;
   2544
   2545	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
   2546	tsk->entry_count = 1;
   2547	tsk->handle = make_handle(req->id, tsk->handle);
   2548	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
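        	/*
        	 * Firmware timeout: twice R_A_TOV (ha->r_a_tov appears to be kept
        	 * in 100 ms units, hence the divide by 10 to get seconds).
        	 */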
   2549	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
   2550	tsk->control_flags = cpu_to_le32(flags);
   2551	tsk->port_id[0] = fcport->d_id.b.al_pa;
   2552	tsk->port_id[1] = fcport->d_id.b.area;
   2553	tsk->port_id[2] = fcport->d_id.b.domain;
   2554	tsk->vp_index = fcport->vha->vp_idx;
   2555
   2556	if (flags == TCF_LUN_RESET) {
   2557		int_to_scsilun(lun, &tsk->lun);
   2558		host_to_fcp_swap((uint8_t *)&tsk->lun,
   2559			sizeof(tsk->lun));
   2560	}
   2561}
   2562
   2563static void
   2564qla2x00_async_done(struct srb *sp, int res)
   2565{
   2566	if (del_timer(&sp->u.iocb_cmd.timer)) {
   2567		/*
   2568		 * Successfully cancelled the timeout handler
   2569		 * ref: TMR
   2570		 */
   2571		if (kref_put(&sp->cmd_kref, qla2x00_sp_release))
   2572			return;
   2573	}
   2574	sp->async_done(sp, res);
   2575}
   2576
   2577void
   2578qla2x00_sp_release(struct kref *kref)
   2579{
   2580	struct srb *sp = container_of(kref, struct srb, cmd_kref);
   2581
   2582	sp->free(sp);
   2583}
   2584
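        /**
         * qla2x00_init_async_sp() - Initialize common fields of an async SRB.
         * @sp: SRB to initialize
         * @tmo: timeout in seconds
         * @done: completion callback invoked when the IOCB finishes
         */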
   2585void
   2586qla2x00_init_async_sp(srb_t *sp, unsigned long tmo,
   2587		     void (*done)(struct srb *sp, int res))
   2588{
   2589	timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
   2590	sp->done = qla2x00_async_done;
   2591	sp->async_done = done;
   2592	sp->free = qla2x00_sp_free;
   2593	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
   2594	sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
   2595	if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
   2596		init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
   2597	sp->start_timer = 1;
   2598}
   2599
   2600static void qla2x00_els_dcmd_sp_free(srb_t *sp)
   2601{
   2602	struct srb_iocb *elsio = &sp->u.iocb_cmd;
   2603
   2604	kfree(sp->fcport);
   2605
   2606	if (elsio->u.els_logo.els_logo_pyld)
   2607		dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
   2608		    elsio->u.els_logo.els_logo_pyld,
   2609		    elsio->u.els_logo.els_logo_pyld_dma);
   2610
   2611	del_timer(&elsio->timer);
   2612	qla2x00_rel_sp(sp);
   2613}
   2614
   2615static void
   2616qla2x00_els_dcmd_iocb_timeout(void *data)
   2617{
   2618	srb_t *sp = data;
   2619	fc_port_t *fcport = sp->fcport;
   2620	struct scsi_qla_host *vha = sp->vha;
   2621	struct srb_iocb *lio = &sp->u.iocb_cmd;
   2622	unsigned long flags = 0;
   2623	int res, h;
   2624
   2625	ql_dbg(ql_dbg_io, vha, 0x3069,
   2626	    "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
   2627	    sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
   2628	    fcport->d_id.b.al_pa);
   2629
   2630	/* Abort the exchange */
   2631	res = qla24xx_async_abort_cmd(sp, false);
   2632	if (res) {
   2633		ql_dbg(ql_dbg_io, vha, 0x3070,
   2634		    "mbx abort_command failed.\n");
   2635		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
   2636		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
   2637			if (sp->qpair->req->outstanding_cmds[h] == sp) {
   2638				sp->qpair->req->outstanding_cmds[h] = NULL;
   2639				break;
   2640			}
   2641		}
   2642		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
   2643		complete(&lio->u.els_logo.comp);
   2644	} else {
   2645		ql_dbg(ql_dbg_io, vha, 0x3071,
   2646		    "mbx abort_command success.\n");
   2647	}
   2648}
   2649
   2650static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
   2651{
   2652	fc_port_t *fcport = sp->fcport;
   2653	struct srb_iocb *lio = &sp->u.iocb_cmd;
   2654	struct scsi_qla_host *vha = sp->vha;
   2655
   2656	ql_dbg(ql_dbg_io, vha, 0x3072,
   2657	    "%s hdl=%x, portid=%02x%02x%02x done\n",
   2658	    sp->name, sp->handle, fcport->d_id.b.domain,
   2659	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
   2660
   2661	complete(&lio->u.els_logo.comp);
   2662}
   2663
   2664int
   2665qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
   2666    port_id_t remote_did)
   2667{
   2668	srb_t *sp;
   2669	fc_port_t *fcport = NULL;
   2670	struct srb_iocb *elsio = NULL;
   2671	struct qla_hw_data *ha = vha->hw;
   2672	struct els_logo_payload logo_pyld;
   2673	int rval = QLA_SUCCESS;
   2674
   2675	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
   2676	if (!fcport) {
   2677	       ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
   2678	       return -ENOMEM;
   2679	}
   2680
   2681	/* Alloc SRB structure
   2682	 * ref: INIT
   2683	 */
   2684	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
   2685	if (!sp) {
   2686		kfree(fcport);
   2687		ql_log(ql_log_info, vha, 0x70e6,
   2688		 "SRB allocation failed\n");
   2689		return -ENOMEM;
   2690	}
   2691
   2692	elsio = &sp->u.iocb_cmd;
   2693	fcport->loop_id = 0xFFFF;
   2694	fcport->d_id.b.domain = remote_did.b.domain;
   2695	fcport->d_id.b.area = remote_did.b.area;
   2696	fcport->d_id.b.al_pa = remote_did.b.al_pa;
   2697
   2698	ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
   2699	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
   2700
   2701	sp->type = SRB_ELS_DCMD;
   2702	sp->name = "ELS_DCMD";
   2703	sp->fcport = fcport;
   2704	qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT,
   2705			      qla2x00_els_dcmd_sp_done);
   2706	sp->free = qla2x00_els_dcmd_sp_free;
   2707	sp->u.iocb_cmd.timeout = qla2x00_els_dcmd_iocb_timeout;
   2708	init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
   2709
   2710	elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
   2711			    DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
   2712			    GFP_KERNEL);
   2713
   2714	if (!elsio->u.els_logo.els_logo_pyld) {
   2715		/* ref: INIT */
   2716		kref_put(&sp->cmd_kref, qla2x00_sp_release);
   2717		return QLA_FUNCTION_FAILED;
   2718	}
   2719
   2720	memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
   2721
   2722	elsio->u.els_logo.els_cmd = els_opcode;
   2723	logo_pyld.opcode = els_opcode;
   2724	logo_pyld.s_id[0] = vha->d_id.b.al_pa;
   2725	logo_pyld.s_id[1] = vha->d_id.b.area;
   2726	logo_pyld.s_id[2] = vha->d_id.b.domain;
   2727	host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
   2728	memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
   2729
   2730	memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
   2731	    sizeof(struct els_logo_payload));
   2732	ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
   2733	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
   2734		       elsio->u.els_logo.els_logo_pyld,
   2735		       sizeof(*elsio->u.els_logo.els_logo_pyld));
   2736
   2737	rval = qla2x00_start_sp(sp);
   2738	if (rval != QLA_SUCCESS) {
   2739		/* ref: INIT */
   2740		kref_put(&sp->cmd_kref, qla2x00_sp_release);
   2741		return QLA_FUNCTION_FAILED;
   2742	}
   2743
   2744	ql_dbg(ql_dbg_io, vha, 0x3074,
   2745	    "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
   2746	    sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
   2747	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
   2748
   2749	wait_for_completion(&elsio->u.els_logo.comp);
   2750
   2751	/* ref: INIT */
   2752	kref_put(&sp->cmd_kref, qla2x00_sp_release);
   2753	return rval;
   2754}
   2755
   2756static void
   2757qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
   2758{
   2759	scsi_qla_host_t *vha = sp->vha;
   2760	struct srb_iocb *elsio = &sp->u.iocb_cmd;
   2761
   2762	els_iocb->entry_type = ELS_IOCB_TYPE;
   2763	els_iocb->entry_count = 1;
   2764	els_iocb->sys_define = 0;
   2765	els_iocb->entry_status = 0;
   2766	els_iocb->handle = sp->handle;
   2767	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
   2768	els_iocb->tx_dsd_count = cpu_to_le16(1);
   2769	els_iocb->vp_index = vha->vp_idx;
   2770	els_iocb->sof_type = EST_SOFI3;
   2771	els_iocb->rx_dsd_count = 0;
   2772	els_iocb->opcode = elsio->u.els_logo.els_cmd;
   2773
   2774	els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
   2775	els_iocb->d_id[1] = sp->fcport->d_id.b.area;
   2776	els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
   2777	/* For SID the byte order is different than DID */
   2778	els_iocb->s_id[1] = vha->d_id.b.al_pa;
   2779	els_iocb->s_id[2] = vha->d_id.b.area;
   2780	els_iocb->s_id[0] = vha->d_id.b.domain;
   2781
   2782	if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
   2783		if (vha->hw->flags.edif_enabled)
   2784			els_iocb->control_flags = cpu_to_le16(ECF_SEC_LOGIN);
   2785		else
   2786			els_iocb->control_flags = 0;
   2787		els_iocb->tx_byte_count = els_iocb->tx_len =
   2788			cpu_to_le32(sizeof(struct els_plogi_payload));
   2789		put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
   2790				   &els_iocb->tx_address);
   2791		els_iocb->rx_dsd_count = cpu_to_le16(1);
   2792		els_iocb->rx_byte_count = els_iocb->rx_len =
   2793			cpu_to_le32(sizeof(struct els_plogi_payload));
   2794		put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
   2795				   &els_iocb->rx_address);
   2796
   2797		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
   2798		    "PLOGI ELS IOCB:\n");
   2799		ql_dump_buffer(ql_log_info, vha, 0x0109,
   2800		    (uint8_t *)els_iocb,
   2801		    sizeof(*els_iocb));
   2802	} else {
   2803		els_iocb->tx_byte_count =
   2804			cpu_to_le32(sizeof(struct els_logo_payload));
   2805		put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
   2806				   &els_iocb->tx_address);
   2807		els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
   2808
   2809		els_iocb->rx_byte_count = 0;
   2810		els_iocb->rx_address = 0;
   2811		els_iocb->rx_len = 0;
   2812		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
   2813		       "LOGO ELS IOCB:");
   2814		ql_dump_buffer(ql_log_info, vha, 0x010b,
   2815			       els_iocb,
   2816			       sizeof(*els_iocb));
   2817	}
   2818
   2819	sp->vha->qla_stats.control_requests++;
   2820}
   2821
   2822static void
   2823qla2x00_els_dcmd2_iocb_timeout(void *data)
   2824{
   2825	srb_t *sp = data;
   2826	fc_port_t *fcport = sp->fcport;
   2827	struct scsi_qla_host *vha = sp->vha;
   2828	unsigned long flags = 0;
   2829	int res, h;
   2830
   2831	ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
   2832	    "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
   2833	    sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
   2834
   2835	/* Abort the exchange */
   2836	res = qla24xx_async_abort_cmd(sp, false);
   2837	ql_dbg(ql_dbg_io, vha, 0x3070,
   2838	    "mbx abort_command %s\n",
   2839	    (res == QLA_SUCCESS) ? "successful" : "failed");
   2840	if (res) {
   2841		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
   2842		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
   2843			if (sp->qpair->req->outstanding_cmds[h] == sp) {
   2844				sp->qpair->req->outstanding_cmds[h] = NULL;
   2845				break;
   2846			}
   2847		}
   2848		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
   2849		sp->done(sp, QLA_FUNCTION_TIMEOUT);
   2850	}
   2851}
   2852
   2853void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
   2854{
   2855	if (els_plogi->els_plogi_pyld)
   2856		dma_free_coherent(&vha->hw->pdev->dev,
   2857				  els_plogi->tx_size,
   2858				  els_plogi->els_plogi_pyld,
   2859				  els_plogi->els_plogi_pyld_dma);
   2860
   2861	if (els_plogi->els_resp_pyld)
   2862		dma_free_coherent(&vha->hw->pdev->dev,
   2863				  els_plogi->rx_size,
   2864				  els_plogi->els_resp_pyld,
   2865				  els_plogi->els_resp_pyld_dma);
   2866}
   2867
   2868static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
   2869{
   2870	fc_port_t *fcport = sp->fcport;
   2871	struct srb_iocb *lio = &sp->u.iocb_cmd;
   2872	struct scsi_qla_host *vha = sp->vha;
   2873	struct event_arg ea;
   2874	struct qla_work_evt *e;
   2875	struct fc_port *conflict_fcport;
   2876	port_id_t cid;	/* conflict Nport id */
   2877	const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
   2878	u16 lid;
   2879
   2880	ql_dbg(ql_dbg_disc, vha, 0x3072,
   2881	    "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
   2882	    sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
   2883
   2884	fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
   2885
   2886	if (sp->flags & SRB_WAKEUP_ON_COMP)
   2887		complete(&lio->u.els_plogi.comp);
   2888	else {
   2889		switch (le32_to_cpu(fw_status[0])) {
   2890		case CS_DATA_UNDERRUN:
   2891		case CS_COMPLETE:
   2892			memset(&ea, 0, sizeof(ea));
   2893			ea.fcport = fcport;
   2894			ea.rc = res;
   2895			qla_handle_els_plogi_done(vha, &ea);
   2896			break;
   2897
   2898		case CS_IOCB_ERROR:
   2899			switch (le32_to_cpu(fw_status[1])) {
   2900			case LSC_SCODE_PORTID_USED:
   2901				lid = le32_to_cpu(fw_status[2]) & 0xffff;
   2902				qlt_find_sess_invalidate_other(vha,
   2903				    wwn_to_u64(fcport->port_name),
   2904				    fcport->d_id, lid, &conflict_fcport);
   2905				if (conflict_fcport) {
   2906					/*
   2907					 * Another fcport shares the same
   2908					 * loop_id & nport id; conflict
   2909					 * fcport needs to finish cleanup
   2910					 * before this fcport can proceed
   2911					 * to login.
   2912					 */
   2913					conflict_fcport->conflict = fcport;
   2914					fcport->login_pause = 1;
   2915					ql_dbg(ql_dbg_disc, vha, 0x20ed,
   2916					    "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
   2917					    __func__, __LINE__,
   2918					    fcport->port_name,
   2919					    fcport->d_id.b24, lid);
   2920				} else {
   2921					ql_dbg(ql_dbg_disc, vha, 0x20ed,
   2922					    "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
   2923					    __func__, __LINE__,
   2924					    fcport->port_name,
   2925					    fcport->d_id.b24, lid);
   2926					qla2x00_clear_loop_id(fcport);
   2927					set_bit(lid, vha->hw->loop_id_map);
   2928					fcport->loop_id = lid;
   2929					fcport->keep_nport_handle = 0;
   2930					qlt_schedule_sess_for_deletion(fcport);
   2931				}
   2932				break;
   2933
   2934			case LSC_SCODE_NPORT_USED:
   2935				cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
   2936					& 0xff;
   2937				cid.b.area   = (le32_to_cpu(fw_status[2]) >>  8)
   2938					& 0xff;
   2939				cid.b.al_pa  = le32_to_cpu(fw_status[2]) & 0xff;
   2940				cid.b.rsvd_1 = 0;
   2941
   2942				ql_dbg(ql_dbg_disc, vha, 0x20ec,
   2943				    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
   2944				    __func__, __LINE__, fcport->port_name,
   2945				    fcport->loop_id, cid.b24);
   2946				set_bit(fcport->loop_id,
   2947				    vha->hw->loop_id_map);
   2948				fcport->loop_id = FC_NO_LOOP_ID;
   2949				qla24xx_post_gnl_work(vha, fcport);
   2950				break;
   2951
   2952			case LSC_SCODE_NOXCB:
   2953				vha->hw->exch_starvation++;
   2954				if (vha->hw->exch_starvation > 5) {
   2955					ql_log(ql_log_warn, vha, 0xd046,
   2956					    "Exchange starvation. Resetting RISC\n");
   2957					vha->hw->exch_starvation = 0;
   2958					set_bit(ISP_ABORT_NEEDED,
   2959					    &vha->dpc_flags);
   2960					qla2xxx_wake_dpc(vha);
   2961					break;
   2962				}
   2963				fallthrough;
   2964			default:
   2965				ql_dbg(ql_dbg_disc, vha, 0x20eb,
   2966				    "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
   2967				    __func__, sp->fcport->port_name,
   2968				    fw_status[0], fw_status[1], fw_status[2]);
   2969
   2970				fcport->flags &= ~FCF_ASYNC_SENT;
   2971				qlt_schedule_sess_for_deletion(fcport);
   2972				break;
   2973			}
   2974			break;
   2975
   2976		default:
   2977			ql_dbg(ql_dbg_disc, vha, 0x20eb,
   2978			    "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
   2979			    __func__, sp->fcport->port_name,
   2980			    fw_status[0], fw_status[1], fw_status[2]);
   2981
   2982			sp->fcport->flags &= ~FCF_ASYNC_SENT;
   2983			qlt_schedule_sess_for_deletion(fcport);
   2984			break;
   2985		}
   2986
   2987		e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
   2988		if (!e) {
   2989			struct srb_iocb *elsio = &sp->u.iocb_cmd;
   2990
   2991			qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
   2992			/* ref: INIT */
   2993			kref_put(&sp->cmd_kref, qla2x00_sp_release);
   2994			return;
   2995		}
   2996		e->u.iosb.sp = sp;
   2997		qla2x00_post_work(vha, e);
   2998	}
   2999}
   3000
   3001int
   3002qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
   3003    fc_port_t *fcport, bool wait)
   3004{
   3005	srb_t *sp;
   3006	struct srb_iocb *elsio = NULL;
   3007	struct qla_hw_data *ha = vha->hw;
   3008	int rval = QLA_SUCCESS;
   3009	void	*ptr, *resp_ptr;
   3010
   3011	/* Alloc SRB structure
   3012	 * ref: INIT
   3013	 */
   3014	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
   3015	if (!sp) {
   3016		ql_log(ql_log_info, vha, 0x70e6,
   3017		 "SRB allocation failed\n");
   3018		fcport->flags &= ~FCF_ASYNC_ACTIVE;
   3019		return -ENOMEM;
   3020	}
   3021
   3022	fcport->flags |= FCF_ASYNC_SENT;
   3023	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
   3024	elsio = &sp->u.iocb_cmd;
   3025	ql_dbg(ql_dbg_io, vha, 0x3073,
   3026	       "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24);
   3027
   3028	if (wait)
   3029		sp->flags = SRB_WAKEUP_ON_COMP;
   3030
   3031	sp->type = SRB_ELS_DCMD;
   3032	sp->name = "ELS_DCMD";
   3033	sp->fcport = fcport;
   3034	qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT + 2,
   3035			     qla2x00_els_dcmd2_sp_done);
   3036	sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout;
   3037
   3038	elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
   3039
   3040	ptr = elsio->u.els_plogi.els_plogi_pyld =
   3041	    dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
   3042		&elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
   3043
   3044	if (!elsio->u.els_plogi.els_plogi_pyld) {
   3045		rval = QLA_FUNCTION_FAILED;
   3046		goto out;
   3047	}
   3048
   3049	resp_ptr = elsio->u.els_plogi.els_resp_pyld =
   3050	    dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
   3051		&elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
   3052
   3053	if (!elsio->u.els_plogi.els_resp_pyld) {
   3054		rval = QLA_FUNCTION_FAILED;
   3055		goto out;
   3056	}
   3057
   3058	ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
   3059
   3060	memset(ptr, 0, sizeof(struct els_plogi_payload));
   3061	memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
   3062	memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
   3063	    &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE);
   3064
   3065	elsio->u.els_plogi.els_cmd = els_opcode;
   3066	elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
   3067
   3068	if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) {
   3069		struct fc_els_flogi *p = ptr;
   3070
   3071		p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC);
   3072	}
   3073
   3074	ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
   3075	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
   3076	    (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
   3077	    sizeof(*elsio->u.els_plogi.els_plogi_pyld));
   3078
   3079	init_completion(&elsio->u.els_plogi.comp);
   3080	rval = qla2x00_start_sp(sp);
   3081	if (rval != QLA_SUCCESS) {
   3082		rval = QLA_FUNCTION_FAILED;
   3083	} else {
   3084		ql_dbg(ql_dbg_disc, vha, 0x3074,
   3085		    "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
   3086		    sp->name, sp->handle, fcport->loop_id,
   3087		    fcport->d_id.b24, vha->d_id.b24);
   3088	}
   3089
   3090	if (wait) {
   3091		wait_for_completion(&elsio->u.els_plogi.comp);
   3092
   3093		if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
   3094			rval = QLA_FUNCTION_FAILED;
   3095	} else {
   3096		goto done;
   3097	}
   3098
   3099out:
   3100	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
   3101	qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
   3102	/* ref: INIT */
   3103	kref_put(&sp->cmd_kref, qla2x00_sp_release);
   3104done:
   3105	return rval;
   3106}
   3107
    3108/* it is assumed the qpair lock is held */
   3109void qla_els_pt_iocb(struct scsi_qla_host *vha,
   3110	struct els_entry_24xx *els_iocb,
   3111	struct qla_els_pt_arg *a)
   3112{
   3113	els_iocb->entry_type = ELS_IOCB_TYPE;
   3114	els_iocb->entry_count = 1;
   3115	els_iocb->sys_define = 0;
   3116	els_iocb->entry_status = 0;
   3117	els_iocb->handle = QLA_SKIP_HANDLE;
   3118	els_iocb->nport_handle = a->nport_handle;
   3119	els_iocb->rx_xchg_address = a->rx_xchg_address;
   3120	els_iocb->tx_dsd_count = cpu_to_le16(1);
   3121	els_iocb->vp_index = a->vp_idx;
   3122	els_iocb->sof_type = EST_SOFI3;
   3123	els_iocb->rx_dsd_count = cpu_to_le16(0);
   3124	els_iocb->opcode = a->els_opcode;
   3125
   3126	els_iocb->d_id[0] = a->did.b.al_pa;
   3127	els_iocb->d_id[1] = a->did.b.area;
   3128	els_iocb->d_id[2] = a->did.b.domain;
   3129	/* For SID the byte order is different than DID */
   3130	els_iocb->s_id[1] = vha->d_id.b.al_pa;
   3131	els_iocb->s_id[2] = vha->d_id.b.area;
   3132	els_iocb->s_id[0] = vha->d_id.b.domain;
   3133
   3134	els_iocb->control_flags = cpu_to_le16(a->control_flags);
   3135
   3136	els_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);
   3137	els_iocb->tx_len = cpu_to_le32(a->tx_len);
   3138	put_unaligned_le64(a->tx_addr, &els_iocb->tx_address);
   3139
   3140	els_iocb->rx_byte_count = cpu_to_le32(a->rx_byte_count);
   3141	els_iocb->rx_len = cpu_to_le32(a->rx_len);
   3142	put_unaligned_le64(a->rx_addr, &els_iocb->rx_address);
   3143}
   3144
   3145static void
   3146qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
   3147{
   3148	struct bsg_job *bsg_job = sp->u.bsg_job;
   3149	struct fc_bsg_request *bsg_request = bsg_job->request;
   3150
   3151        els_iocb->entry_type = ELS_IOCB_TYPE;
   3152        els_iocb->entry_count = 1;
   3153        els_iocb->sys_define = 0;
   3154        els_iocb->entry_status = 0;
   3155        els_iocb->handle = sp->handle;
   3156	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
   3157	els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
   3158	els_iocb->vp_index = sp->vha->vp_idx;
   3159        els_iocb->sof_type = EST_SOFI3;
   3160	els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
   3161
   3162	els_iocb->opcode =
   3163	    sp->type == SRB_ELS_CMD_RPT ?
   3164	    bsg_request->rqst_data.r_els.els_code :
   3165	    bsg_request->rqst_data.h_els.command_code;
   3166	els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
   3167	els_iocb->d_id[1] = sp->fcport->d_id.b.area;
   3168	els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
   3169        els_iocb->control_flags = 0;
   3170        els_iocb->rx_byte_count =
   3171            cpu_to_le32(bsg_job->reply_payload.payload_len);
   3172        els_iocb->tx_byte_count =
   3173            cpu_to_le32(bsg_job->request_payload.payload_len);
   3174
   3175	put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
   3176			   &els_iocb->tx_address);
   3177        els_iocb->tx_len = cpu_to_le32(sg_dma_len
   3178            (bsg_job->request_payload.sg_list));
   3179
   3180	put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
   3181			   &els_iocb->rx_address);
   3182        els_iocb->rx_len = cpu_to_le32(sg_dma_len
   3183            (bsg_job->reply_payload.sg_list));
   3184
   3185	sp->vha->qla_stats.control_requests++;
   3186}
   3187
   3188static void
   3189qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
   3190{
   3191	uint16_t        avail_dsds;
   3192	struct dsd64	*cur_dsd;
   3193	struct scatterlist *sg;
   3194	int index;
   3195	uint16_t tot_dsds;
   3196	scsi_qla_host_t *vha = sp->vha;
   3197	struct qla_hw_data *ha = vha->hw;
   3198	struct bsg_job *bsg_job = sp->u.bsg_job;
   3199	int entry_count = 1;
   3200
   3201	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
   3202	ct_iocb->entry_type = CT_IOCB_TYPE;
   3203	ct_iocb->entry_status = 0;
   3204	ct_iocb->handle1 = sp->handle;
   3205	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
   3206	ct_iocb->status = cpu_to_le16(0);
   3207	ct_iocb->control_flags = cpu_to_le16(0);
   3208	ct_iocb->timeout = 0;
   3209	ct_iocb->cmd_dsd_count =
   3210	    cpu_to_le16(bsg_job->request_payload.sg_cnt);
   3211	ct_iocb->total_dsd_count =
   3212	    cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
   3213	ct_iocb->req_bytecount =
   3214	    cpu_to_le32(bsg_job->request_payload.payload_len);
   3215	ct_iocb->rsp_bytecount =
   3216	    cpu_to_le32(bsg_job->reply_payload.payload_len);
   3217
   3218	put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
   3219			   &ct_iocb->req_dsd.address);
   3220	ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
   3221
   3222	put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
   3223			   &ct_iocb->rsp_dsd.address);
   3224	ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
   3225
   3226	avail_dsds = 1;
   3227	cur_dsd = &ct_iocb->rsp_dsd;
   3228	index = 0;
   3229	tot_dsds = bsg_job->reply_payload.sg_cnt;
   3230
   3231	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
   3232		cont_a64_entry_t *cont_pkt;
   3233
   3234		/* Allocate additional continuation packets? */
   3235		if (avail_dsds == 0) {
    3236			/*
    3237			 * Five DSDs are available in the Cont.
    3238			 * Type 1 IOCB.
    3239			 */
   3240			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
   3241			    vha->hw->req_q_map[0]);
   3242			cur_dsd = cont_pkt->dsd;
   3243			avail_dsds = 5;
   3244			entry_count++;
   3245		}
   3246
   3247		append_dsd64(&cur_dsd, sg);
   3248		avail_dsds--;
   3249	}
   3250	ct_iocb->entry_count = entry_count;
   3251
   3252	sp->vha->qla_stats.control_requests++;
   3253}
   3254
   3255static void
   3256qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
   3257{
   3258	uint16_t        avail_dsds;
   3259	struct dsd64	*cur_dsd;
   3260	struct scatterlist *sg;
   3261	int index;
   3262	uint16_t cmd_dsds, rsp_dsds;
   3263	scsi_qla_host_t *vha = sp->vha;
   3264	struct qla_hw_data *ha = vha->hw;
   3265	struct bsg_job *bsg_job = sp->u.bsg_job;
   3266	int entry_count = 1;
   3267	cont_a64_entry_t *cont_pkt = NULL;
   3268
   3269	ct_iocb->entry_type = CT_IOCB_TYPE;
   3270        ct_iocb->entry_status = 0;
   3271        ct_iocb->sys_define = 0;
   3272        ct_iocb->handle = sp->handle;
   3273
   3274	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
   3275	ct_iocb->vp_index = sp->vha->vp_idx;
   3276	ct_iocb->comp_status = cpu_to_le16(0);
   3277
   3278	cmd_dsds = bsg_job->request_payload.sg_cnt;
   3279	rsp_dsds = bsg_job->reply_payload.sg_cnt;
   3280
   3281	ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
   3282        ct_iocb->timeout = 0;
   3283	ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
   3284        ct_iocb->cmd_byte_count =
   3285            cpu_to_le32(bsg_job->request_payload.payload_len);
   3286
   3287	avail_dsds = 2;
   3288	cur_dsd = ct_iocb->dsd;
   3289	index = 0;
   3290
   3291	for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
   3292		/* Allocate additional continuation packets? */
   3293		if (avail_dsds == 0) {
   3294			/*
   3295			 * Five DSDs are available in the Cont.
   3296			 * Type 1 IOCB.
   3297			 */
   3298			cont_pkt = qla2x00_prep_cont_type1_iocb(
   3299			    vha, ha->req_q_map[0]);
   3300			cur_dsd = cont_pkt->dsd;
   3301			avail_dsds = 5;
   3302			entry_count++;
   3303		}
   3304
   3305		append_dsd64(&cur_dsd, sg);
   3306		avail_dsds--;
   3307	}
   3308
   3309	index = 0;
   3310
   3311	for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
   3312		/* Allocate additional continuation packets? */
   3313		if (avail_dsds == 0) {
    3314			/*
    3315			 * Five DSDs are available in the Cont.
    3316			 * Type 1 IOCB.
    3317			 */
   3318			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
   3319			    ha->req_q_map[0]);
   3320			cur_dsd = cont_pkt->dsd;
   3321			avail_dsds = 5;
   3322			entry_count++;
   3323		}
   3324
   3325		append_dsd64(&cur_dsd, sg);
   3326		avail_dsds--;
   3327	}
   3328        ct_iocb->entry_count = entry_count;
   3329}
   3330
    3331/**
   3332 * qla82xx_start_scsi() - Send a SCSI command to the ISP
   3333 * @sp: command to send to the ISP
   3334 *
   3335 * Returns non-zero if a failure occurred, else zero.
   3336 */
   3337int
   3338qla82xx_start_scsi(srb_t *sp)
   3339{
   3340	int		nseg;
   3341	unsigned long   flags;
   3342	struct scsi_cmnd *cmd;
   3343	uint32_t	*clr_ptr;
   3344	uint32_t	handle;
   3345	uint16_t	cnt;
   3346	uint16_t	req_cnt;
   3347	uint16_t	tot_dsds;
   3348	struct device_reg_82xx __iomem *reg;
   3349	uint32_t dbval;
   3350	__be32 *fcp_dl;
   3351	uint8_t additional_cdb_len;
   3352	struct ct6_dsd *ctx;
   3353	struct scsi_qla_host *vha = sp->vha;
   3354	struct qla_hw_data *ha = vha->hw;
   3355	struct req_que *req = NULL;
   3356	struct rsp_que *rsp = NULL;
   3357
   3358	/* Setup device pointers. */
   3359	reg = &ha->iobase->isp82;
   3360	cmd = GET_CMD_SP(sp);
   3361	req = vha->req;
   3362	rsp = ha->rsp_q_map[0];
   3363
   3364	/* So we know we haven't pci_map'ed anything yet */
   3365	tot_dsds = 0;
   3366
   3367	dbval = 0x04 | (ha->portnum << 5);
   3368
   3369	/* Send marker if required */
   3370	if (vha->marker_needed != 0) {
   3371		if (qla2x00_marker(vha, ha->base_qpair,
   3372			0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
   3373			ql_log(ql_log_warn, vha, 0x300c,
   3374			    "qla2x00_marker failed for cmd=%p.\n", cmd);
   3375			return QLA_FUNCTION_FAILED;
   3376		}
   3377		vha->marker_needed = 0;
   3378	}
   3379
   3380	/* Acquire ring specific lock */
   3381	spin_lock_irqsave(&ha->hardware_lock, flags);
   3382
   3383	handle = qla2xxx_get_next_handle(req);
   3384	if (handle == 0)
   3385		goto queuing_error;
   3386
   3387	/* Map the sg table so we have an accurate count of sg entries needed */
   3388	if (scsi_sg_count(cmd)) {
   3389		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
   3390		    scsi_sg_count(cmd), cmd->sc_data_direction);
   3391		if (unlikely(!nseg))
   3392			goto queuing_error;
   3393	} else
   3394		nseg = 0;
   3395
   3396	tot_dsds = nseg;
   3397
   3398	if (tot_dsds > ql2xshiftctondsd) {
   3399		struct cmd_type_6 *cmd_pkt;
   3400		uint16_t more_dsd_lists = 0;
   3401		struct dsd_dma *dsd_ptr;
   3402		uint16_t i;
   3403
   3404		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
   3405		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
   3406			ql_dbg(ql_dbg_io, vha, 0x300d,
    3407			    "Num of DSD list %d is more than %d for cmd=%p.\n",
   3408			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
   3409			    cmd);
   3410			goto queuing_error;
   3411		}
   3412
   3413		if (more_dsd_lists <= ha->gbl_dsd_avail)
   3414			goto sufficient_dsds;
   3415		else
   3416			more_dsd_lists -= ha->gbl_dsd_avail;
   3417
   3418		for (i = 0; i < more_dsd_lists; i++) {
   3419			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
   3420			if (!dsd_ptr) {
   3421				ql_log(ql_log_fatal, vha, 0x300e,
   3422				    "Failed to allocate memory for dsd_dma "
   3423				    "for cmd=%p.\n", cmd);
   3424				goto queuing_error;
   3425			}
   3426
   3427			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
   3428				GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
   3429			if (!dsd_ptr->dsd_addr) {
   3430				kfree(dsd_ptr);
   3431				ql_log(ql_log_fatal, vha, 0x300f,
   3432				    "Failed to allocate memory for dsd_addr "
   3433				    "for cmd=%p.\n", cmd);
   3434				goto queuing_error;
   3435			}
   3436			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
   3437			ha->gbl_dsd_avail++;
   3438		}
   3439
   3440sufficient_dsds:
   3441		req_cnt = 1;
   3442
   3443		if (req->cnt < (req_cnt + 2)) {
   3444			cnt = (uint16_t)rd_reg_dword_relaxed(
   3445				&reg->req_q_out[0]);
   3446			if (req->ring_index < cnt)
   3447				req->cnt = cnt - req->ring_index;
   3448			else
   3449				req->cnt = req->length -
   3450					(req->ring_index - cnt);
   3451			if (req->cnt < (req_cnt + 2))
   3452				goto queuing_error;
   3453		}
   3454
   3455		ctx = sp->u.scmd.ct6_ctx =
   3456		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
   3457		if (!ctx) {
   3458			ql_log(ql_log_fatal, vha, 0x3010,
   3459			    "Failed to allocate ctx for cmd=%p.\n", cmd);
   3460			goto queuing_error;
   3461		}
   3462
   3463		memset(ctx, 0, sizeof(struct ct6_dsd));
   3464		ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
   3465			GFP_ATOMIC, &ctx->fcp_cmnd_dma);
   3466		if (!ctx->fcp_cmnd) {
   3467			ql_log(ql_log_fatal, vha, 0x3011,
   3468			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
   3469			goto queuing_error;
   3470		}
   3471
   3472		/* Initialize the DSD list and dma handle */
   3473		INIT_LIST_HEAD(&ctx->dsd_list);
   3474		ctx->dsd_use_cnt = 0;
   3475
   3476		if (cmd->cmd_len > 16) {
   3477			additional_cdb_len = cmd->cmd_len - 16;
   3478			if ((cmd->cmd_len % 4) != 0) {
    3479				/* A SCSI command longer than 16 bytes must
    3480				 * have a length that is a multiple of 4.
    3481				 */
   3482				ql_log(ql_log_warn, vha, 0x3012,
   3483				    "scsi cmd len %d not multiple of 4 "
   3484				    "for cmd=%p.\n", cmd->cmd_len, cmd);
   3485				goto queuing_error_fcp_cmnd;
   3486			}
   3487			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
   3488		} else {
   3489			additional_cdb_len = 0;
   3490			ctx->fcp_cmnd_len = 12 + 16 + 4;
   3491		}
   3492
   3493		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
   3494		cmd_pkt->handle = make_handle(req->id, handle);
   3495
   3496		/* Zero out remaining portion of packet. */
   3497		/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
   3498		clr_ptr = (uint32_t *)cmd_pkt + 2;
   3499		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
   3500		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
   3501
   3502		/* Set NPORT-ID and LUN number*/
   3503		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
   3504		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
   3505		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
   3506		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
   3507		cmd_pkt->vp_index = sp->vha->vp_idx;
   3508
   3509		/* Build IOCB segments */
   3510		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
   3511			goto queuing_error_fcp_cmnd;
   3512
   3513		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
   3514		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
   3515
   3516		/* build FCP_CMND IU */
   3517		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
   3518		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
   3519
   3520		if (cmd->sc_data_direction == DMA_TO_DEVICE)
   3521			ctx->fcp_cmnd->additional_cdb_len |= 1;
   3522		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
   3523			ctx->fcp_cmnd->additional_cdb_len |= 2;
   3524
   3525		/* Populate the FCP_PRIO. */
   3526		if (ha->flags.fcp_prio_enabled)
   3527			ctx->fcp_cmnd->task_attribute |=
   3528			    sp->fcport->fcp_prio << 3;
   3529
   3530		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
   3531
   3532		fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
   3533		    additional_cdb_len);
   3534		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
   3535
   3536		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
   3537		put_unaligned_le64(ctx->fcp_cmnd_dma,
   3538				   &cmd_pkt->fcp_cmnd_dseg_address);
   3539
   3540		sp->flags |= SRB_FCP_CMND_DMA_VALID;
   3541		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
   3542		/* Set total data segment count. */
   3543		cmd_pkt->entry_count = (uint8_t)req_cnt;
   3544		/* Specify response queue number where
   3545		 * completion should happen
   3546		 */
   3547		cmd_pkt->entry_status = (uint8_t) rsp->id;
   3548	} else {
   3549		struct cmd_type_7 *cmd_pkt;
   3550
   3551		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
   3552		if (req->cnt < (req_cnt + 2)) {
   3553			cnt = (uint16_t)rd_reg_dword_relaxed(
   3554			    &reg->req_q_out[0]);
   3555			if (req->ring_index < cnt)
   3556				req->cnt = cnt - req->ring_index;
   3557			else
   3558				req->cnt = req->length -
   3559					(req->ring_index - cnt);
   3560		}
   3561		if (req->cnt < (req_cnt + 2))
   3562			goto queuing_error;
   3563
   3564		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
   3565		cmd_pkt->handle = make_handle(req->id, handle);
   3566
   3567		/* Zero out remaining portion of packet. */
   3568		/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
   3569		clr_ptr = (uint32_t *)cmd_pkt + 2;
   3570		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
   3571		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
   3572
   3573		/* Set NPORT-ID and LUN number*/
   3574		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
   3575		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
   3576		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
   3577		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
   3578		cmd_pkt->vp_index = sp->vha->vp_idx;
   3579
   3580		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
   3581		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
   3582		    sizeof(cmd_pkt->lun));
   3583
   3584		/* Populate the FCP_PRIO. */
   3585		if (ha->flags.fcp_prio_enabled)
   3586			cmd_pkt->task |= sp->fcport->fcp_prio << 3;
   3587
   3588		/* Load SCSI command packet. */
   3589		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
   3590		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
   3591
   3592		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
   3593
   3594		/* Build IOCB segments */
   3595		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
   3596
   3597		/* Set total data segment count. */
   3598		cmd_pkt->entry_count = (uint8_t)req_cnt;
   3599		/* Specify response queue number where
   3600		 * completion should happen.
   3601		 */
   3602		cmd_pkt->entry_status = (uint8_t) rsp->id;
   3603
   3604	}
   3605	/* Build command packet. */
   3606	req->current_outstanding_cmd = handle;
   3607	req->outstanding_cmds[handle] = sp;
   3608	sp->handle = handle;
   3609	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
   3610	req->cnt -= req_cnt;
   3611	wmb();
   3612
   3613	/* Adjust ring index. */
   3614	req->ring_index++;
   3615	if (req->ring_index == req->length) {
   3616		req->ring_index = 0;
   3617		req->ring_ptr = req->ring;
   3618	} else
   3619		req->ring_ptr++;
   3620
   3621	sp->flags |= SRB_DMA_VALID;
   3622
   3623	/* Set chip new ring index. */
   3624	/* write, read and verify logic */
   3625	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
   3626	if (ql2xdbwr)
   3627		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
   3628	else {
   3629		wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
   3630		wmb();
   3631		while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
   3632			wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
   3633			wmb();
   3634		}
   3635	}
   3636
   3637	/* Manage unprocessed RIO/ZIO commands in response queue. */
   3638	if (vha->flags.process_response_queue &&
   3639	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
   3640		qla24xx_process_response_queue(vha, rsp);
   3641
   3642	spin_unlock_irqrestore(&ha->hardware_lock, flags);
   3643	return QLA_SUCCESS;
   3644
   3645queuing_error_fcp_cmnd:
   3646	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
   3647queuing_error:
   3648	if (tot_dsds)
   3649		scsi_dma_unmap(cmd);
   3650
   3651	if (sp->u.scmd.crc_ctx) {
   3652		mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
   3653		sp->u.scmd.crc_ctx = NULL;
   3654	}
   3655	spin_unlock_irqrestore(&ha->hardware_lock, flags);
   3656
   3657	return QLA_FUNCTION_FAILED;
   3658}
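/*
 * Two encodings used above, summarized for reference (derived from this
 * function; illustrative only):
 *
 * - The FCP_CMND length on the Command Type 6 path is 12 + cdb_len + 4: the
 *   12-byte fixed FCP_CMND header (8-byte LUN plus task/control bytes), the
 *   CDB length (a minimum of 16 bytes), and the 4-byte FCP_DL field written
 *   through fcp_dl.
 *
 * - The ISP82xx doorbell value is assembled from the shifts above as
 *
 *	dbval = 0x04 | (portnum << 5) | (req->id << 8) | (ring_index << 16)
 *
 *   e.g. portnum = 1, req->id = 0, ring_index = 0x12 yields 0x00120024.
 *   With ql2xdbwr set the value is posted via qla82xx_wr_32(); otherwise it
 *   is written to nxdb_wr_ptr and read back from nxdb_rd_ptr until the
 *   hardware reflects it (the "write, read and verify" loop).
 */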
   3659
   3660static void
   3661qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
   3662{
   3663	struct srb_iocb *aio = &sp->u.iocb_cmd;
   3664	scsi_qla_host_t *vha = sp->vha;
   3665	struct req_que *req = sp->qpair->req;
   3666	srb_t *orig_sp = sp->cmd_sp;
   3667
   3668	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
   3669	abt_iocb->entry_type = ABORT_IOCB_TYPE;
   3670	abt_iocb->entry_count = 1;
   3671	abt_iocb->handle = make_handle(req->id, sp->handle);
   3672	if (sp->fcport) {
   3673		abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
   3674		abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
   3675		abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
   3676		abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
   3677	}
   3678	abt_iocb->handle_to_abort =
   3679		make_handle(le16_to_cpu(aio->u.abt.req_que_no),
   3680			    aio->u.abt.cmd_hndl);
   3681	abt_iocb->vp_index = vha->vp_idx;
   3682	abt_iocb->req_que_no = aio->u.abt.req_que_no;
   3683
   3684	/* need to pass original sp */
   3685	if (orig_sp)
   3686		qla_nvme_abort_set_option(abt_iocb, orig_sp);
   3687
   3688	/* Send the command to the firmware */
   3689	wmb();
   3690}
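/*
 * Handle encoding used by the abort IOCB above (and by the start_scsi
 * paths): make_handle() packs the request-queue id into the upper 16 bits
 * and the per-queue command handle into the lower 16 bits, so e.g.
 * req->id = 1 and sp->handle = 0x2a produce 0x0001002a.  handle_to_abort is
 * built the same way from the queue number and handle of the command being
 * aborted.
 */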
   3691
   3692static void
   3693qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
   3694{
   3695	int i, sz;
   3696
   3697	mbx->entry_type = MBX_IOCB_TYPE;
   3698	mbx->handle = sp->handle;
   3699	sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
   3700
   3701	for (i = 0; i < sz; i++)
   3702		mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
   3703}
   3704
   3705static void
   3706qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
   3707{
   3708	sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
   3709	qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
   3710	ct_pkt->handle = sp->handle;
   3711}
   3712
   3713static void qla2x00_send_notify_ack_iocb(srb_t *sp,
   3714	struct nack_to_isp *nack)
   3715{
   3716	struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
   3717
   3718	nack->entry_type = NOTIFY_ACK_TYPE;
   3719	nack->entry_count = 1;
   3720	nack->ox_id = ntfy->ox_id;
   3721
   3722	nack->u.isp24.handle = sp->handle;
   3723	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
   3724	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
   3725		nack->u.isp24.flags = ntfy->u.isp24.flags &
   3726			cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
   3727	}
   3728	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
   3729	nack->u.isp24.status = ntfy->u.isp24.status;
   3730	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
   3731	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
   3732	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
   3733	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
   3734	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
   3735	nack->u.isp24.srr_flags = 0;
   3736	nack->u.isp24.srr_reject_code = 0;
   3737	nack->u.isp24.srr_reject_code_expl = 0;
   3738	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
   3739
   3740	if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
   3741	    (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP) &&
   3742	    sp->vha->hw->flags.edif_enabled) {
   3743		ql_dbg(ql_dbg_disc, sp->vha, 0x3074,
   3744		    "%s PLOGI NACK sent with FC SECURITY bit, hdl=%x, loopid=%x, to pid %06x\n",
   3745		    sp->name, sp->handle, sp->fcport->loop_id,
   3746		    sp->fcport->d_id.b24);
   3747		nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
   3748	}
   3749}
   3750
   3751/*
   3752 * Build NVME LS request
   3753 */
   3754static void
   3755qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
   3756{
   3757	struct srb_iocb *nvme;
   3758
   3759	nvme = &sp->u.iocb_cmd;
   3760	cmd_pkt->entry_type = PT_LS4_REQUEST;
   3761	cmd_pkt->entry_count = 1;
   3762	cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
   3763
   3764	cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
   3765	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
   3766	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
   3767
   3768	cmd_pkt->tx_dseg_count = cpu_to_le16(1);
   3769	cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
   3770	cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
   3771	put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
   3772
   3773	cmd_pkt->rx_dseg_count = cpu_to_le16(1);
   3774	cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
   3775	cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
   3776	put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
   3777}
   3778
   3779static void
   3780qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
   3781{
   3782	int map, pos;
   3783
   3784	vce->entry_type = VP_CTRL_IOCB_TYPE;
   3785	vce->handle = sp->handle;
   3786	vce->entry_count = 1;
   3787	vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
   3788	vce->vp_count = cpu_to_le16(1);
   3789
    3790	/*
    3791	 * The index map in firmware starts at 1, so decrement the index;
    3792	 * this is safe because index 0 is never used.
    3793	 */
   3794	map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
   3795	pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
   3796	vce->vp_idx_map[map] |= 1 << pos;
   3797}
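/*
 * Worked example of the vp_idx_map math above: for vp_index = 9,
 * map = (9 - 1) / 8 = 1 and pos = (9 - 1) & 7 = 0, so the IOCB sets
 * vp_idx_map[1] |= 0x01.  The decrement compensates for the firmware's
 * 1-based VP index numbering.
 */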
   3798
   3799static void
   3800qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
   3801{
   3802	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
   3803	logio->control_flags =
   3804	    cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
   3805
   3806	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
   3807	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
   3808	logio->port_id[1] = sp->fcport->d_id.b.area;
   3809	logio->port_id[2] = sp->fcport->d_id.b.domain;
   3810	logio->vp_index = sp->fcport->vha->vp_idx;
   3811}
   3812
   3813int
   3814qla2x00_start_sp(srb_t *sp)
   3815{
   3816	int rval = QLA_SUCCESS;
   3817	scsi_qla_host_t *vha = sp->vha;
   3818	struct qla_hw_data *ha = vha->hw;
   3819	struct qla_qpair *qp = sp->qpair;
   3820	void *pkt;
   3821	unsigned long flags;
   3822
   3823	if (vha->hw->flags.eeh_busy)
   3824		return -EIO;
   3825
   3826	spin_lock_irqsave(qp->qp_lock_ptr, flags);
   3827	pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
   3828	if (!pkt) {
   3829		rval = EAGAIN;
   3830		ql_log(ql_log_warn, vha, 0x700c,
   3831		    "qla2x00_alloc_iocbs failed.\n");
   3832		goto done;
   3833	}
   3834
   3835	switch (sp->type) {
   3836	case SRB_LOGIN_CMD:
   3837		IS_FWI2_CAPABLE(ha) ?
   3838		    qla24xx_login_iocb(sp, pkt) :
   3839		    qla2x00_login_iocb(sp, pkt);
   3840		break;
   3841	case SRB_PRLI_CMD:
   3842		qla24xx_prli_iocb(sp, pkt);
   3843		break;
   3844	case SRB_LOGOUT_CMD:
   3845		IS_FWI2_CAPABLE(ha) ?
   3846		    qla24xx_logout_iocb(sp, pkt) :
   3847		    qla2x00_logout_iocb(sp, pkt);
   3848		break;
   3849	case SRB_ELS_CMD_RPT:
   3850	case SRB_ELS_CMD_HST:
   3851		qla24xx_els_iocb(sp, pkt);
   3852		break;
   3853	case SRB_ELS_CMD_HST_NOLOGIN:
   3854		qla_els_pt_iocb(sp->vha, pkt,  &sp->u.bsg_cmd.u.els_arg);
   3855		((struct els_entry_24xx *)pkt)->handle = sp->handle;
   3856		break;
   3857	case SRB_CT_CMD:
   3858		IS_FWI2_CAPABLE(ha) ?
   3859		    qla24xx_ct_iocb(sp, pkt) :
   3860		    qla2x00_ct_iocb(sp, pkt);
   3861		break;
   3862	case SRB_ADISC_CMD:
   3863		IS_FWI2_CAPABLE(ha) ?
   3864		    qla24xx_adisc_iocb(sp, pkt) :
   3865		    qla2x00_adisc_iocb(sp, pkt);
   3866		break;
   3867	case SRB_TM_CMD:
   3868		IS_QLAFX00(ha) ?
   3869		    qlafx00_tm_iocb(sp, pkt) :
   3870		    qla24xx_tm_iocb(sp, pkt);
   3871		break;
   3872	case SRB_FXIOCB_DCMD:
   3873	case SRB_FXIOCB_BCMD:
   3874		qlafx00_fxdisc_iocb(sp, pkt);
   3875		break;
   3876	case SRB_NVME_LS:
   3877		qla_nvme_ls(sp, pkt);
   3878		break;
   3879	case SRB_ABT_CMD:
   3880		IS_QLAFX00(ha) ?
   3881			qlafx00_abort_iocb(sp, pkt) :
   3882			qla24xx_abort_iocb(sp, pkt);
   3883		break;
   3884	case SRB_ELS_DCMD:
   3885		qla24xx_els_logo_iocb(sp, pkt);
   3886		break;
   3887	case SRB_CT_PTHRU_CMD:
   3888		qla2x00_ctpthru_cmd_iocb(sp, pkt);
   3889		break;
   3890	case SRB_MB_IOCB:
   3891		qla2x00_mb_iocb(sp, pkt);
   3892		break;
   3893	case SRB_NACK_PLOGI:
   3894	case SRB_NACK_PRLI:
   3895	case SRB_NACK_LOGO:
   3896		qla2x00_send_notify_ack_iocb(sp, pkt);
   3897		break;
   3898	case SRB_CTRL_VP:
   3899		qla25xx_ctrlvp_iocb(sp, pkt);
   3900		break;
   3901	case SRB_PRLO_CMD:
   3902		qla24xx_prlo_iocb(sp, pkt);
   3903		break;
   3904	case SRB_SA_UPDATE:
   3905		qla24xx_sa_update_iocb(sp, pkt);
   3906		break;
   3907	case SRB_SA_REPLACE:
   3908		qla24xx_sa_replace_iocb(sp, pkt);
   3909		break;
   3910	default:
   3911		break;
   3912	}
   3913
   3914	if (sp->start_timer) {
    3915		/* ref: TMR timer ref
    3916		 * This code must sit just before the start_iocbs call so
    3917		 * that the caller never has to do a kref_put, even on
    3918		 * failure.
    3919		 */
   3920		kref_get(&sp->cmd_kref);
   3921		add_timer(&sp->u.iocb_cmd.timer);
   3922	}
   3923
   3924	wmb();
   3925	qla2x00_start_iocbs(vha, qp->req);
   3926done:
   3927	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
   3928	return rval;
   3929}
   3930
   3931static void
   3932qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
   3933				struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
   3934{
   3935	uint16_t avail_dsds;
   3936	struct dsd64 *cur_dsd;
   3937	uint32_t req_data_len = 0;
   3938	uint32_t rsp_data_len = 0;
   3939	struct scatterlist *sg;
   3940	int index;
   3941	int entry_count = 1;
   3942	struct bsg_job *bsg_job = sp->u.bsg_job;
   3943
    3944	/* Update entry type to indicate a bidirectional command. */
   3945	put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
   3946
    3947	/* Set the transfer direction; for a bidirectional command both the
    3948	 * read and write flags are set.  Also set the BD_WRAP_BACK flag so
    3949	 * the firmware takes care of assigning DID=SID for outgoing pkts.
    3950	 */
   3951	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
   3952	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
   3953	cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
   3954							BD_WRAP_BACK);
   3955
   3956	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
   3957	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
   3958	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
   3959	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
   3960
   3961	vha->bidi_stats.transfer_bytes += req_data_len;
   3962	vha->bidi_stats.io_count++;
   3963
   3964	vha->qla_stats.output_bytes += req_data_len;
   3965	vha->qla_stats.output_requests++;
   3966
    3967	/* Only one DSD is available in the bidirectional IOCB; the remaining
    3968	 * DSDs are bundled in continuation IOCBs.
    3969	 */
   3970	avail_dsds = 1;
   3971	cur_dsd = &cmd_pkt->fcp_dsd;
   3972
   3973	index = 0;
   3974
   3975	for_each_sg(bsg_job->request_payload.sg_list, sg,
   3976				bsg_job->request_payload.sg_cnt, index) {
   3977		cont_a64_entry_t *cont_pkt;
   3978
   3979		/* Allocate additional continuation packets */
   3980		if (avail_dsds == 0) {
    3981			/* Continuation Type 1 IOCB can accommodate
    3982			 * 5 DSDs
    3983			 */
   3984			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
   3985			cur_dsd = cont_pkt->dsd;
   3986			avail_dsds = 5;
   3987			entry_count++;
   3988		}
   3989		append_dsd64(&cur_dsd, sg);
   3990		avail_dsds--;
   3991	}
    3992	/* For the read request, the DSDs always go into continuation IOCBs,
    3993	 * following the write DSDs.  If there is room in the current IOCB the
    3994	 * DSD is added there; otherwise a new continuation IOCB is
    3995	 * allocated.
    3996	 */
   3997	for_each_sg(bsg_job->reply_payload.sg_list, sg,
   3998				bsg_job->reply_payload.sg_cnt, index) {
   3999		cont_a64_entry_t *cont_pkt;
   4000
   4001		/* Allocate additional continuation packets */
   4002		if (avail_dsds == 0) {
    4003			/* Continuation Type 1 IOCB can accommodate
    4004			 * 5 DSDs
    4005			 */
   4006			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
   4007			cur_dsd = cont_pkt->dsd;
   4008			avail_dsds = 5;
   4009			entry_count++;
   4010		}
   4011		append_dsd64(&cur_dsd, sg);
   4012		avail_dsds--;
   4013	}
    4014	/* This should equal the number of IOCBs required for this cmd. */
   4015	cmd_pkt->entry_count = entry_count;
   4016}
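/*
 * Resulting IOCB count for the bidirectional command above (a sketch based
 * on the loops in this function): one DSD sits inline in the command IOCB
 * (fcp_dsd) and each Continuation Type 1 IOCB holds five more, with
 * avail_dsds shared between the write and read loops, i.e.
 *
 *	entry_count = 1 + DIV_ROUND_UP(max(wr_dsds + rd_dsds - 1, 0), 5)
 *
 * e.g. 3 write segments and 4 read segments give 1 + ceil(6 / 5) = 3
 * entries, which should match what qla24xx_calc_iocbs() reserved for the
 * same tot_dsds in qla2x00_start_bidir().
 */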
   4017
   4018int
   4019qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
   4020{
   4021
   4022	struct qla_hw_data *ha = vha->hw;
   4023	unsigned long flags;
   4024	uint32_t handle;
   4025	uint16_t req_cnt;
   4026	uint16_t cnt;
   4027	uint32_t *clr_ptr;
   4028	struct cmd_bidir *cmd_pkt = NULL;
   4029	struct rsp_que *rsp;
   4030	struct req_que *req;
   4031	int rval = EXT_STATUS_OK;
   4032
   4033	rval = QLA_SUCCESS;
   4034
   4035	rsp = ha->rsp_q_map[0];
   4036	req = vha->req;
   4037
   4038	/* Send marker if required */
   4039	if (vha->marker_needed != 0) {
   4040		if (qla2x00_marker(vha, ha->base_qpair,
   4041			0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
   4042			return EXT_STATUS_MAILBOX;
   4043		vha->marker_needed = 0;
   4044	}
   4045
   4046	/* Acquire ring specific lock */
   4047	spin_lock_irqsave(&ha->hardware_lock, flags);
   4048
   4049	handle = qla2xxx_get_next_handle(req);
   4050	if (handle == 0) {
   4051		rval = EXT_STATUS_BUSY;
   4052		goto queuing_error;
   4053	}
   4054
   4055	/* Calculate number of IOCB required */
   4056	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
   4057
   4058	/* Check for room on request queue. */
   4059	if (req->cnt < req_cnt + 2) {
   4060		if (IS_SHADOW_REG_CAPABLE(ha)) {
   4061			cnt = *req->out_ptr;
   4062		} else {
   4063			cnt = rd_reg_dword_relaxed(req->req_q_out);
   4064			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
   4065				goto queuing_error;
   4066		}
   4067
   4068		if  (req->ring_index < cnt)
   4069			req->cnt = cnt - req->ring_index;
   4070		else
   4071			req->cnt = req->length -
   4072				(req->ring_index - cnt);
   4073	}
   4074	if (req->cnt < req_cnt + 2) {
   4075		rval = EXT_STATUS_BUSY;
   4076		goto queuing_error;
   4077	}
   4078
   4079	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
   4080	cmd_pkt->handle = make_handle(req->id, handle);
   4081
   4082	/* Zero out remaining portion of packet. */
   4083	/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
   4084	clr_ptr = (uint32_t *)cmd_pkt + 2;
   4085	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
   4086
   4087	/* Set NPORT-ID  (of vha)*/
   4088	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
   4089	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
   4090	cmd_pkt->port_id[1] = vha->d_id.b.area;
   4091	cmd_pkt->port_id[2] = vha->d_id.b.domain;
   4092
   4093	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
   4094	cmd_pkt->entry_status = (uint8_t) rsp->id;
   4095	/* Build command packet. */
   4096	req->current_outstanding_cmd = handle;
   4097	req->outstanding_cmds[handle] = sp;
   4098	sp->handle = handle;
   4099	req->cnt -= req_cnt;
   4100
   4101	/* Send the command to the firmware */
   4102	wmb();
   4103	qla2x00_start_iocbs(vha, req);
   4104queuing_error:
   4105	spin_unlock_irqrestore(&ha->hardware_lock, flags);
   4106
   4107	return rval;
   4108}
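/*
 * Free-slot accounting used in the "Check for room on request queue" block
 * above (and in the start_scsi paths): req->cnt is recomputed from the
 * distance between the driver's ring_index and the hardware out pointer,
 *
 *	req->cnt = (ring_index < cnt) ? cnt - ring_index
 *				      : length - (ring_index - cnt);
 *
 * e.g. length = 2048, ring_index = 100 and out pointer cnt = 40 give
 * 2048 - 60 = 1988 free entries.  The command is queued only if at least
 * req_cnt + 2 entries remain, leaving a small headroom between producer
 * and consumer.
 */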