cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sli4.c (133819B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
      4 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
      5 */
      6
      7/**
      8 * All common (i.e. transport-independent) SLI-4 functions are implemented
      9 * in this file.
     10 */
     11#include "sli4.h"
     12
     13static struct sli4_asic_entry_t sli4_asic_table[] = {
     14	{ SLI4_ASIC_REV_B0, SLI4_ASIC_GEN_5},
     15	{ SLI4_ASIC_REV_D0, SLI4_ASIC_GEN_5},
     16	{ SLI4_ASIC_REV_A3, SLI4_ASIC_GEN_6},
     17	{ SLI4_ASIC_REV_A0, SLI4_ASIC_GEN_6},
     18	{ SLI4_ASIC_REV_A1, SLI4_ASIC_GEN_6},
     19	{ SLI4_ASIC_REV_A3, SLI4_ASIC_GEN_6},
     20	{ SLI4_ASIC_REV_A1, SLI4_ASIC_GEN_7},
     21	{ SLI4_ASIC_REV_A0, SLI4_ASIC_GEN_7},
     22};
     23
     24/* Convert queue type enum (SLI_QTYPE_*) into a string */
     25static char *SLI4_QNAME[] = {
     26	"Event Queue",
     27	"Completion Queue",
     28	"Mailbox Queue",
     29	"Work Queue",
     30	"Receive Queue",
     31	"Undefined"
     32};
     33
     34/**
     35 * sli_config_cmd_init() - Write a SLI_CONFIG command to the provided buffer.
     36 *
     37 * @sli4: SLI context pointer.
     38 * @buf: Destination buffer for the command.
     39 * @length: Length in bytes of attached command.
     40 * @dma: DMA buffer for non-embedded commands.
      41 * Return: Pointer to the command payload buffer, or NULL on error.
     42 */
     43static void *
     44sli_config_cmd_init(struct sli4 *sli4, void *buf, u32 length,
     45		    struct efc_dma *dma)
     46{
     47	struct sli4_cmd_sli_config *config;
     48	u32 flags;
     49
     50	if (length > sizeof(config->payload.embed) && !dma) {
     51		efc_log_err(sli4, "Too big for an embedded cmd with len(%d)\n",
     52			    length);
     53		return NULL;
     54	}
     55
     56	memset(buf, 0, SLI4_BMBX_SIZE);
     57
     58	config = buf;
     59
     60	config->hdr.command = SLI4_MBX_CMD_SLI_CONFIG;
     61	if (!dma) {
     62		flags = SLI4_SLICONF_EMB;
     63		config->dw1_flags = cpu_to_le32(flags);
     64		config->payload_len = cpu_to_le32(length);
     65		return config->payload.embed;
     66	}
     67
     68	flags = SLI4_SLICONF_PMDCMD_VAL_1;
     69	flags &= ~SLI4_SLICONF_EMB;
     70	config->dw1_flags = cpu_to_le32(flags);
     71
     72	config->payload.mem.addr.low = cpu_to_le32(lower_32_bits(dma->phys));
     73	config->payload.mem.addr.high =	cpu_to_le32(upper_32_bits(dma->phys));
     74	config->payload.mem.length =
     75				cpu_to_le32(dma->size & SLI4_SLICONF_PMD_LEN);
     76	config->payload_len = cpu_to_le32(dma->size);
     77	/* save pointer to DMA for BMBX dumping purposes */
     78	sli4->bmbx_non_emb_pmd = dma;
     79	return dma->virt;
     80}
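/*
 * A minimal usage sketch (hypothetical, not part of the driver), showing
 * the two forms sli_config_cmd_init() supports: a request that fits in
 * the embedded payload passes dma == NULL and is written into the
 * mailbox itself; anything larger supplies a DMA buffer, and only its
 * address and size travel in the mailbox command.
 */
#if 0
static int example_sli_config_usage(struct sli4 *sli4, struct efc_dma *dma)
{
	struct sli4_rqst_cmn_destroy_q *req;

	/* embedded form: payload lives inside the SLI_CONFIG command */
	req = sli_config_cmd_init(sli4, sli4->bmbx.virt,
				  SLI4_CFG_PYLD_LENGTH(cmn_destroy_q), NULL);
	if (!req)
		return -EIO;

	/* non-embedded form: the mailbox only carries dma->phys and size */
	req = sli_config_cmd_init(sli4, sli4->bmbx.virt, dma->size, dma);
	return req ? 0 : -EIO;
}
#endif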
     81
     82/**
     83 * sli_cmd_common_create_cq() - Write a COMMON_CREATE_CQ V2 command.
     84 *
     85 * @sli4: SLI context pointer.
     86 * @buf: Destination buffer for the command.
     87 * @qmem: DMA memory for queue.
      88 * @eq_id: EQ id associated with this CQ.
      89 * Return: 0 on success, -EIO on failure.
     90 */
     91static int
     92sli_cmd_common_create_cq(struct sli4 *sli4, void *buf, struct efc_dma *qmem,
     93			 u16 eq_id)
     94{
     95	struct sli4_rqst_cmn_create_cq_v2 *cqv2 = NULL;
     96	u32 p;
     97	uintptr_t addr;
     98	u32 num_pages = 0;
     99	size_t cmd_size = 0;
    100	u32 page_size = 0;
    101	u32 n_cqe = 0;
    102	u32 dw5_flags = 0;
    103	u16 dw6w1_arm = 0;
    104	__le32 len;
    105
    106	/* First calculate number of pages and the mailbox cmd length */
    107	n_cqe = qmem->size / SLI4_CQE_BYTES;
    108	switch (n_cqe) {
    109	case 256:
    110	case 512:
    111	case 1024:
    112	case 2048:
    113		page_size = SZ_4K;
    114		break;
    115	case 4096:
    116		page_size = SZ_8K;
    117		break;
    118	default:
    119		return -EIO;
    120	}
    121	num_pages = sli_page_count(qmem->size, page_size);
    122
    123	cmd_size = SLI4_RQST_CMDSZ(cmn_create_cq_v2)
    124		   + SZ_DMAADDR * num_pages;
    125
    126	cqv2 = sli_config_cmd_init(sli4, buf, cmd_size, NULL);
    127	if (!cqv2)
    128		return -EIO;
    129
    130	len = SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_v2, SZ_DMAADDR * num_pages);
    131	sli_cmd_fill_hdr(&cqv2->hdr, SLI4_CMN_CREATE_CQ, SLI4_SUBSYSTEM_COMMON,
    132			 CMD_V2, len);
    133	cqv2->page_size = page_size / SLI_PAGE_SIZE;
    134
    135	/* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.3) */
    136	cqv2->num_pages = cpu_to_le16(num_pages);
    137	if (!num_pages || num_pages > SLI4_CREATE_CQV2_MAX_PAGES)
    138		return -EIO;
    139
    140	switch (num_pages) {
    141	case 1:
    142		dw5_flags |= SLI4_CQ_CNT_VAL(256);
    143		break;
    144	case 2:
    145		dw5_flags |= SLI4_CQ_CNT_VAL(512);
    146		break;
    147	case 4:
    148		dw5_flags |= SLI4_CQ_CNT_VAL(1024);
    149		break;
    150	case 8:
    151		dw5_flags |= SLI4_CQ_CNT_VAL(LARGE);
    152		cqv2->cqe_count = cpu_to_le16(n_cqe);
    153		break;
    154	default:
    155		efc_log_err(sli4, "num_pages %d not valid\n", num_pages);
    156		return -EIO;
    157	}
    158
    159	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
    160		dw5_flags |= SLI4_CREATE_CQV2_AUTOVALID;
    161
    162	dw5_flags |= SLI4_CREATE_CQV2_EVT;
    163	dw5_flags |= SLI4_CREATE_CQV2_VALID;
    164
    165	cqv2->dw5_flags = cpu_to_le32(dw5_flags);
    166	cqv2->dw6w1_arm = cpu_to_le16(dw6w1_arm);
    167	cqv2->eq_id = cpu_to_le16(eq_id);
    168
    169	for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += page_size) {
    170		cqv2->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
    171		cqv2->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
    172	}
    173
    174	return 0;
    175}
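/*
 * Worked example of the sizing math above, assuming SLI4_CQE_BYTES == 16
 * and SLI_PAGE_SIZE == 4096 as defined in sli4.h: a 64 KiB qmem gives
 * n_cqe = 65536 / 16 = 4096, so page_size = SZ_8K, cqv2->page_size =
 * 8192 / 4096 = 2, num_pages = 65536 / 8192 = 8, and the count is
 * encoded as SLI4_CQ_CNT_VAL(LARGE) with an explicit cqe_count of 4096.
 */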
    176
    177static int
    178sli_cmd_common_create_eq(struct sli4 *sli4, void *buf, struct efc_dma *qmem)
    179{
    180	struct sli4_rqst_cmn_create_eq *eq;
    181	u32 p;
    182	uintptr_t addr;
    183	u16 num_pages;
    184	u32 dw5_flags = 0;
    185	u32 dw6_flags = 0, ver;
    186
    187	eq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_create_eq),
    188				 NULL);
    189	if (!eq)
    190		return -EIO;
    191
    192	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
    193		ver = CMD_V2;
    194	else
    195		ver = CMD_V0;
    196
    197	sli_cmd_fill_hdr(&eq->hdr, SLI4_CMN_CREATE_EQ, SLI4_SUBSYSTEM_COMMON,
    198			 ver, SLI4_RQST_PYLD_LEN(cmn_create_eq));
    199
    200	/* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */
    201	num_pages = qmem->size / SLI_PAGE_SIZE;
    202	eq->num_pages = cpu_to_le16(num_pages);
    203
    204	switch (num_pages) {
    205	case 1:
    206		dw5_flags |= SLI4_EQE_SIZE_4;
    207		dw6_flags |= SLI4_EQ_CNT_VAL(1024);
    208		break;
    209	case 2:
    210		dw5_flags |= SLI4_EQE_SIZE_4;
    211		dw6_flags |= SLI4_EQ_CNT_VAL(2048);
    212		break;
    213	case 4:
    214		dw5_flags |= SLI4_EQE_SIZE_4;
    215		dw6_flags |= SLI4_EQ_CNT_VAL(4096);
    216		break;
    217	default:
    218		efc_log_err(sli4, "num_pages %d not valid\n", num_pages);
    219		return -EIO;
    220	}
    221
    222	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
    223		dw5_flags |= SLI4_CREATE_EQ_AUTOVALID;
    224
    225	dw5_flags |= SLI4_CREATE_EQ_VALID;
    226	dw6_flags &= (~SLI4_CREATE_EQ_ARM);
    227	eq->dw5_flags = cpu_to_le32(dw5_flags);
    228	eq->dw6_flags = cpu_to_le32(dw6_flags);
    229	eq->dw7_delaymulti = cpu_to_le32(SLI4_CREATE_EQ_DELAYMULTI);
    230
    231	for (p = 0, addr = qmem->phys; p < num_pages;
    232	     p++, addr += SLI_PAGE_SIZE) {
    233		eq->page_address[p].low = cpu_to_le32(lower_32_bits(addr));
    234		eq->page_address[p].high = cpu_to_le32(upper_32_bits(addr));
    235	}
    236
    237	return 0;
    238}
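/*
 * With 4-byte EQEs (SLI4_EQE_SIZE_4) one 4 KiB page holds 4096 / 4 =
 * 1024 entries, which is why the switch above maps one page to
 * SLI4_EQ_CNT_VAL(1024), two pages to 2048 and four pages to 4096.
 */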
    239
    240static int
    241sli_cmd_common_create_mq_ext(struct sli4 *sli4, void *buf, struct efc_dma *qmem,
    242			     u16 cq_id)
    243{
    244	struct sli4_rqst_cmn_create_mq_ext *mq;
    245	u32 p;
    246	uintptr_t addr;
    247	u32 num_pages;
    248	u16 dw6w1_flags = 0;
    249
    250	mq = sli_config_cmd_init(sli4, buf,
    251				 SLI4_CFG_PYLD_LENGTH(cmn_create_mq_ext), NULL);
    252	if (!mq)
    253		return -EIO;
    254
    255	sli_cmd_fill_hdr(&mq->hdr, SLI4_CMN_CREATE_MQ_EXT,
    256			 SLI4_SUBSYSTEM_COMMON, CMD_V0,
    257			 SLI4_RQST_PYLD_LEN(cmn_create_mq_ext));
    258
    259	/* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.12) */
    260	num_pages = qmem->size / SLI_PAGE_SIZE;
    261	mq->num_pages = cpu_to_le16(num_pages);
    262	switch (num_pages) {
    263	case 1:
    264		dw6w1_flags |= SLI4_MQE_SIZE_16;
    265		break;
    266	case 2:
    267		dw6w1_flags |= SLI4_MQE_SIZE_32;
    268		break;
    269	case 4:
    270		dw6w1_flags |= SLI4_MQE_SIZE_64;
    271		break;
    272	case 8:
    273		dw6w1_flags |= SLI4_MQE_SIZE_128;
    274		break;
    275	default:
    276		efc_log_info(sli4, "num_pages %d not valid\n", num_pages);
    277		return -EIO;
    278	}
    279
    280	mq->async_event_bitmap = cpu_to_le32(SLI4_ASYNC_EVT_FC_ALL);
    281
    282	if (sli4->params.mq_create_version) {
    283		mq->cq_id_v1 = cpu_to_le16(cq_id);
    284		mq->hdr.dw3_version = cpu_to_le32(CMD_V1);
    285	} else {
    286		dw6w1_flags |= (cq_id << SLI4_CREATE_MQEXT_CQID_SHIFT);
    287	}
    288	mq->dw7_val = cpu_to_le32(SLI4_CREATE_MQEXT_VAL);
    289
    290	mq->dw6w1_flags = cpu_to_le16(dw6w1_flags);
    291	for (p = 0, addr = qmem->phys; p < num_pages;
    292	     p++, addr += SLI_PAGE_SIZE) {
    293		mq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
    294		mq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
    295	}
    296
    297	return 0;
    298}
    299
    300int
    301sli_cmd_wq_create(struct sli4 *sli4, void *buf, struct efc_dma *qmem, u16 cq_id)
    302{
    303	struct sli4_rqst_wq_create *wq;
    304	u32 p;
    305	uintptr_t addr;
    306	u32 page_size = 0;
    307	u32 n_wqe = 0;
    308	u16 num_pages;
    309
    310	wq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(wq_create),
    311				 NULL);
    312	if (!wq)
    313		return -EIO;
    314
    315	sli_cmd_fill_hdr(&wq->hdr, SLI4_OPC_WQ_CREATE, SLI4_SUBSYSTEM_FC,
    316			 CMD_V1, SLI4_RQST_PYLD_LEN(wq_create));
    317	n_wqe = qmem->size / sli4->wqe_size;
    318
    319	switch (qmem->size) {
    320	case 4096:
    321	case 8192:
    322	case 16384:
    323	case 32768:
    324		page_size = SZ_4K;
    325		break;
    326	case 65536:
    327		page_size = SZ_8K;
    328		break;
    329	case 131072:
    330		page_size = SZ_16K;
    331		break;
    332	case 262144:
    333		page_size = SZ_32K;
    334		break;
    335	case 524288:
    336		page_size = SZ_64K;
    337		break;
    338	default:
    339		return -EIO;
    340	}
    341
    342	/* valid values for number of pages(num_pages): 1-8 */
    343	num_pages = sli_page_count(qmem->size, page_size);
    344	wq->num_pages = cpu_to_le16(num_pages);
    345	if (!num_pages || num_pages > SLI4_WQ_CREATE_MAX_PAGES)
    346		return -EIO;
    347
    348	wq->cq_id = cpu_to_le16(cq_id);
    349
    350	wq->page_size = page_size / SLI_PAGE_SIZE;
    351
    352	if (sli4->wqe_size == SLI4_WQE_EXT_BYTES)
    353		wq->wqe_size_byte |= SLI4_WQE_EXT_SIZE;
    354	else
    355		wq->wqe_size_byte |= SLI4_WQE_SIZE;
    356
    357	wq->wqe_count = cpu_to_le16(n_wqe);
    358
    359	for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += page_size) {
    360		wq->page_phys_addr[p].low  = cpu_to_le32(lower_32_bits(addr));
    361		wq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
    362	}
    363
    364	return 0;
    365}
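/*
 * The qmem->size switch above picks the smallest page size that keeps
 * the queue within the eight-page limit; e.g. a 128 KiB queue (131072
 * bytes) uses 16 KiB pages, so num_pages = 131072 / 16384 = 8 and,
 * assuming the regular 64-byte WQE size, n_wqe = 131072 / 64 = 2048.
 */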
    366
    367static int
    368sli_cmd_rq_create_v1(struct sli4 *sli4, void *buf, struct efc_dma *qmem,
    369		     u16 cq_id, u16 buffer_size)
    370{
    371	struct sli4_rqst_rq_create_v1 *rq;
    372	u32 p;
    373	uintptr_t addr;
    374	u32 num_pages;
    375
    376	rq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(rq_create_v1),
    377				 NULL);
    378	if (!rq)
    379		return -EIO;
    380
    381	sli_cmd_fill_hdr(&rq->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC,
    382			 CMD_V1, SLI4_RQST_PYLD_LEN(rq_create_v1));
    383	/* Disable "no buffer warnings" to avoid Lancer bug */
    384	rq->dim_dfd_dnb |= SLI4_RQ_CREATE_V1_DNB;
    385
    386	/* valid values for number of pages: 1-8 (sec 4.5.6) */
    387	num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
    388	rq->num_pages = cpu_to_le16(num_pages);
    389	if (!num_pages ||
    390	    num_pages > SLI4_RQ_CREATE_V1_MAX_PAGES) {
    391		efc_log_info(sli4, "num_pages %d not valid, max %d\n",
    392			     num_pages, SLI4_RQ_CREATE_V1_MAX_PAGES);
    393		return -EIO;
    394	}
    395
    396	/*
    397	 * RQE count is the total number of entries (note not lg2(# entries))
    398	 */
    399	rq->rqe_count = cpu_to_le16(qmem->size / SLI4_RQE_SIZE);
    400
    401	rq->rqe_size_byte |= SLI4_RQE_SIZE_8;
    402
    403	rq->page_size = SLI4_RQ_PAGE_SIZE_4096;
    404
    405	if (buffer_size < sli4->rq_min_buf_size ||
    406	    buffer_size > sli4->rq_max_buf_size) {
    407		efc_log_err(sli4, "buffer_size %d out of range (%d-%d)\n",
    408			    buffer_size, sli4->rq_min_buf_size,
    409			    sli4->rq_max_buf_size);
    410		return -EIO;
    411	}
    412	rq->buffer_size = cpu_to_le32(buffer_size);
    413
    414	rq->cq_id = cpu_to_le16(cq_id);
    415
    416	for (p = 0, addr = qmem->phys;
    417			p < num_pages;
    418			p++, addr += SLI_PAGE_SIZE) {
    419		rq->page_phys_addr[p].low  = cpu_to_le32(lower_32_bits(addr));
    420		rq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
    421	}
    422
    423	return 0;
    424}
    425
    426static int
    427sli_cmd_rq_create_v2(struct sli4 *sli4, u32 num_rqs,
    428		     struct sli4_queue *qs[], u32 base_cq_id,
    429		     u32 header_buffer_size,
    430		     u32 payload_buffer_size, struct efc_dma *dma)
    431{
    432	struct sli4_rqst_rq_create_v2 *req = NULL;
    433	u32 i, p, offset = 0;
    434	u32 payload_size, page_count;
    435	uintptr_t addr;
    436	u32 num_pages;
    437	__le32 len;
    438
    439	page_count =  sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE) * num_rqs;
    440
    441	/* Payload length must accommodate both request and response */
    442	payload_size = max(SLI4_RQST_CMDSZ(rq_create_v2) +
    443			   SZ_DMAADDR * page_count,
    444			   sizeof(struct sli4_rsp_cmn_create_queue_set));
    445
    446	dma->size = payload_size;
    447	dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
    448				       &dma->phys, GFP_KERNEL);
    449	if (!dma->virt)
    450		return -EIO;
    451
    452	memset(dma->virt, 0, payload_size);
    453
    454	req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma);
    455	if (!req)
    456		return -EIO;
    457
    458	len =  SLI4_RQST_PYLD_LEN_VAR(rq_create_v2, SZ_DMAADDR * page_count);
    459	sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC,
    460			 CMD_V2, len);
    461	/* Fill Payload fields */
    462	req->dim_dfd_dnb |= SLI4_RQCREATEV2_DNB;
    463	num_pages = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE);
    464	req->num_pages = cpu_to_le16(num_pages);
    465	req->rqe_count = cpu_to_le16(qs[0]->dma.size / SLI4_RQE_SIZE);
    466	req->rqe_size_byte |= SLI4_RQE_SIZE_8;
    467	req->page_size = SLI4_RQ_PAGE_SIZE_4096;
    468	req->rq_count = num_rqs;
    469	req->base_cq_id = cpu_to_le16(base_cq_id);
    470	req->hdr_buffer_size = cpu_to_le16(header_buffer_size);
    471	req->payload_buffer_size = cpu_to_le16(payload_buffer_size);
    472
    473	for (i = 0; i < num_rqs; i++) {
    474		for (p = 0, addr = qs[i]->dma.phys; p < num_pages;
    475		     p++, addr += SLI_PAGE_SIZE) {
    476			req->page_phys_addr[offset].low =
    477					cpu_to_le32(lower_32_bits(addr));
    478			req->page_phys_addr[offset].high =
    479					cpu_to_le32(upper_32_bits(addr));
    480			offset++;
    481		}
    482	}
    483
    484	return 0;
    485}
    486
    487static void
    488__sli_queue_destroy(struct sli4 *sli4, struct sli4_queue *q)
    489{
    490	if (!q->dma.size)
    491		return;
    492
    493	dma_free_coherent(&sli4->pci->dev, q->dma.size,
    494			  q->dma.virt, q->dma.phys);
    495	memset(&q->dma, 0, sizeof(struct efc_dma));
    496}
    497
    498int
    499__sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
    500		 size_t size, u32 n_entries, u32 align)
    501{
    502	if (q->dma.virt) {
    503		efc_log_err(sli4, "%s failed\n", __func__);
    504		return -EIO;
    505	}
    506
    507	memset(q, 0, sizeof(struct sli4_queue));
    508
    509	q->dma.size = size * n_entries;
    510	q->dma.virt = dma_alloc_coherent(&sli4->pci->dev, q->dma.size,
    511					 &q->dma.phys, GFP_KERNEL);
    512	if (!q->dma.virt) {
    513		memset(&q->dma, 0, sizeof(struct efc_dma));
    514		efc_log_err(sli4, "%s allocation failed\n", SLI4_QNAME[qtype]);
    515		return -EIO;
    516	}
    517
    518	memset(q->dma.virt, 0, size * n_entries);
    519
    520	spin_lock_init(&q->lock);
    521
    522	q->type = qtype;
    523	q->size = size;
    524	q->length = n_entries;
    525
    526	if (q->type == SLI4_QTYPE_EQ || q->type == SLI4_QTYPE_CQ) {
    527		/* For prism, phase will be flipped after
    528		 * a sweep through eq and cq
    529		 */
    530		q->phase = 1;
    531	}
    532
     533	/* Limit to half the queue size per interrupt */
    534	q->proc_limit = n_entries / 2;
    535
    536	if (q->type == SLI4_QTYPE_EQ)
    537		q->posted_limit = q->length / 2;
    538	else
    539		q->posted_limit = 64;
    540
    541	return 0;
    542}
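/*
 * A hypothetical call sketch (not part of the driver) showing the
 * expected pattern; entry size times entry count determines q->dma.size.
 */
#if 0
	struct sli4_queue eq;

	/* 1024 four-byte EQEs -> one 4 KiB page of DMA memory */
	if (__sli_queue_init(sli4, &eq, SLI4_QTYPE_EQ, sizeof(u32),
			     1024, SLI_PAGE_SIZE))
		return -EIO;
#endif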
    543
    544int
    545sli_fc_rq_alloc(struct sli4 *sli4, struct sli4_queue *q,
    546		u32 n_entries, u32 buffer_size,
    547		struct sli4_queue *cq, bool is_hdr)
    548{
    549	if (__sli_queue_init(sli4, q, SLI4_QTYPE_RQ, SLI4_RQE_SIZE,
    550			     n_entries, SLI_PAGE_SIZE))
    551		return -EIO;
    552
    553	if (sli_cmd_rq_create_v1(sli4, sli4->bmbx.virt, &q->dma, cq->id,
    554				 buffer_size))
    555		goto error;
    556
    557	if (__sli_create_queue(sli4, q))
    558		goto error;
    559
    560	if (is_hdr && q->id & 1) {
    561		efc_log_info(sli4, "bad header RQ_ID %d\n", q->id);
    562		goto error;
     563	} else if (!is_hdr && (q->id & 1) == 0) {
    564		efc_log_info(sli4, "bad data RQ_ID %d\n", q->id);
    565		goto error;
    566	}
    567
    568	if (is_hdr)
    569		q->u.flag |= SLI4_QUEUE_FLAG_HDR;
    570	else
    571		q->u.flag &= ~SLI4_QUEUE_FLAG_HDR;
    572
    573	return 0;
    574
    575error:
    576	__sli_queue_destroy(sli4, q);
    577	return -EIO;
    578}
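/*
 * In an RQ pair the header RQ gets an even queue id and the matching
 * payload RQ the odd id, which is exactly what the parity checks above
 * enforce (and what sli_fc_rq_set_alloc() below relies on when it sets
 * SLI4_QUEUE_FLAG_HDR from the low bit of the returned id).
 */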
    579
    580int
    581sli_fc_rq_set_alloc(struct sli4 *sli4, u32 num_rq_pairs,
    582		    struct sli4_queue *qs[], u32 base_cq_id,
    583		    u32 n_entries, u32 header_buffer_size,
    584		    u32 payload_buffer_size)
    585{
    586	u32 i;
    587	struct efc_dma dma = {0};
    588	struct sli4_rsp_cmn_create_queue_set *rsp = NULL;
    589	void __iomem *db_regaddr = NULL;
    590	u32 num_rqs = num_rq_pairs * 2;
    591
    592	for (i = 0; i < num_rqs; i++) {
    593		if (__sli_queue_init(sli4, qs[i], SLI4_QTYPE_RQ,
    594				     SLI4_RQE_SIZE, n_entries,
    595				     SLI_PAGE_SIZE)) {
    596			goto error;
    597		}
    598	}
    599
    600	if (sli_cmd_rq_create_v2(sli4, num_rqs, qs, base_cq_id,
    601				 header_buffer_size, payload_buffer_size,
    602				 &dma)) {
    603		goto error;
    604	}
    605
    606	if (sli_bmbx_command(sli4)) {
    607		efc_log_err(sli4, "bootstrap mailbox write failed RQSet\n");
    608		goto error;
    609	}
    610
    611	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
    612		db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG;
    613	else
    614		db_regaddr = sli4->reg[0] + SLI4_RQ_DB_REG;
    615
    616	rsp = dma.virt;
    617	if (rsp->hdr.status) {
    618		efc_log_err(sli4, "bad create RQSet status=%#x addl=%#x\n",
    619			    rsp->hdr.status, rsp->hdr.additional_status);
    620		goto error;
    621	}
    622
    623	for (i = 0; i < num_rqs; i++) {
    624		qs[i]->id = i + le16_to_cpu(rsp->q_id);
    625		if ((qs[i]->id & 1) == 0)
    626			qs[i]->u.flag |= SLI4_QUEUE_FLAG_HDR;
    627		else
    628			qs[i]->u.flag &= ~SLI4_QUEUE_FLAG_HDR;
    629
    630		qs[i]->db_regaddr = db_regaddr;
    631	}
    632
    633	dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys);
    634
    635	return 0;
    636
    637error:
    638	for (i = 0; i < num_rqs; i++)
    639		__sli_queue_destroy(sli4, qs[i]);
    640
    641	if (dma.virt)
    642		dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt,
    643				  dma.phys);
    644
    645	return -EIO;
    646}
    647
    648static int
    649sli_res_sli_config(struct sli4 *sli4, void *buf)
    650{
    651	struct sli4_cmd_sli_config *sli_config = buf;
    652
    653	/* sanity check */
    654	if (!buf || sli_config->hdr.command !=
    655		    SLI4_MBX_CMD_SLI_CONFIG) {
    656		efc_log_err(sli4, "bad parameter buf=%p cmd=%#x\n", buf,
    657			    buf ? sli_config->hdr.command : -1);
    658		return -EIO;
    659	}
    660
    661	if (le16_to_cpu(sli_config->hdr.status))
    662		return le16_to_cpu(sli_config->hdr.status);
    663
    664	if (le32_to_cpu(sli_config->dw1_flags) & SLI4_SLICONF_EMB)
    665		return sli_config->payload.embed[4];
    666
    667	efc_log_info(sli4, "external buffers not supported\n");
    668	return -EIO;
    669}
    670
    671int
    672__sli_create_queue(struct sli4 *sli4, struct sli4_queue *q)
    673{
    674	struct sli4_rsp_cmn_create_queue *res_q = NULL;
    675
    676	if (sli_bmbx_command(sli4)) {
     677		efc_log_crit(sli4, "bootstrap mailbox write failed %s\n",
    678			     SLI4_QNAME[q->type]);
    679		return -EIO;
    680	}
    681	if (sli_res_sli_config(sli4, sli4->bmbx.virt)) {
    682		efc_log_err(sli4, "bad status create %s\n",
    683			    SLI4_QNAME[q->type]);
    684		return -EIO;
    685	}
    686	res_q = (void *)((u8 *)sli4->bmbx.virt +
    687			offsetof(struct sli4_cmd_sli_config, payload));
    688
    689	if (res_q->hdr.status) {
    690		efc_log_err(sli4, "bad create %s status=%#x addl=%#x\n",
    691			    SLI4_QNAME[q->type], res_q->hdr.status,
    692			    res_q->hdr.additional_status);
    693		return -EIO;
    694	}
    695	q->id = le16_to_cpu(res_q->q_id);
    696	switch (q->type) {
    697	case SLI4_QTYPE_EQ:
    698		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
    699			q->db_regaddr = sli4->reg[1] + SLI4_IF6_EQ_DB_REG;
    700		else
    701			q->db_regaddr =	sli4->reg[0] + SLI4_EQCQ_DB_REG;
    702		break;
    703	case SLI4_QTYPE_CQ:
    704		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
    705			q->db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG;
    706		else
    707			q->db_regaddr =	sli4->reg[0] + SLI4_EQCQ_DB_REG;
    708		break;
    709	case SLI4_QTYPE_MQ:
    710		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
    711			q->db_regaddr = sli4->reg[1] + SLI4_IF6_MQ_DB_REG;
    712		else
    713			q->db_regaddr =	sli4->reg[0] + SLI4_MQ_DB_REG;
    714		break;
    715	case SLI4_QTYPE_RQ:
    716		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
    717			q->db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG;
    718		else
    719			q->db_regaddr =	sli4->reg[0] + SLI4_RQ_DB_REG;
    720		break;
    721	case SLI4_QTYPE_WQ:
    722		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
    723			q->db_regaddr = sli4->reg[1] + SLI4_IF6_WQ_DB_REG;
    724		else
    725			q->db_regaddr =	sli4->reg[0] + SLI4_IO_WQ_DB_REG;
    726		break;
    727	default:
    728		break;
    729	}
    730
    731	return 0;
    732}
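/*
 * The switch above encodes the doorbell layout: if_type 6 parts expose
 * a separate doorbell per queue type in the second mapped register
 * region (sli4->reg[1]), while older interface types use sli4->reg[0]
 * and share a single SLI4_EQCQ_DB_REG between EQs and CQs.
 */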
    733
    734int
    735sli_get_queue_entry_size(struct sli4 *sli4, u32 qtype)
    736{
    737	u32 size = 0;
    738
    739	switch (qtype) {
    740	case SLI4_QTYPE_EQ:
    741		size = sizeof(u32);
    742		break;
    743	case SLI4_QTYPE_CQ:
    744		size = 16;
    745		break;
    746	case SLI4_QTYPE_MQ:
    747		size = 256;
    748		break;
    749	case SLI4_QTYPE_WQ:
    750		size = sli4->wqe_size;
    751		break;
    752	case SLI4_QTYPE_RQ:
    753		size = SLI4_RQE_SIZE;
    754		break;
    755	default:
    756		efc_log_info(sli4, "unknown queue type %d\n", qtype);
    757		return -1;
    758	}
    759	return size;
    760}
    761
    762int
    763sli_queue_alloc(struct sli4 *sli4, u32 qtype,
    764		struct sli4_queue *q, u32 n_entries,
    765		     struct sli4_queue *assoc)
    766{
    767	int size;
    768	u32 align = 0;
    769
    770	/* get queue size */
    771	size = sli_get_queue_entry_size(sli4, qtype);
    772	if (size < 0)
    773		return -EIO;
    774	align = SLI_PAGE_SIZE;
    775
    776	if (__sli_queue_init(sli4, q, qtype, size, n_entries, align))
    777		return -EIO;
    778
    779	switch (qtype) {
    780	case SLI4_QTYPE_EQ:
    781		if (!sli_cmd_common_create_eq(sli4, sli4->bmbx.virt, &q->dma) &&
    782		    !__sli_create_queue(sli4, q))
    783			return 0;
    784
    785		break;
    786	case SLI4_QTYPE_CQ:
    787		if (!sli_cmd_common_create_cq(sli4, sli4->bmbx.virt, &q->dma,
    788					      assoc ? assoc->id : 0) &&
    789		    !__sli_create_queue(sli4, q))
    790			return 0;
    791
    792		break;
    793	case SLI4_QTYPE_MQ:
    794		assoc->u.flag |= SLI4_QUEUE_FLAG_MQ;
    795		if (!sli_cmd_common_create_mq_ext(sli4, sli4->bmbx.virt,
    796						  &q->dma, assoc->id) &&
    797		    !__sli_create_queue(sli4, q))
    798			return 0;
    799
    800		break;
    801	case SLI4_QTYPE_WQ:
    802		if (!sli_cmd_wq_create(sli4, sli4->bmbx.virt, &q->dma,
    803				       assoc ? assoc->id : 0) &&
    804		    !__sli_create_queue(sli4, q))
    805			return 0;
    806
    807		break;
    808	default:
    809		efc_log_info(sli4, "unknown queue type %d\n", qtype);
    810	}
    811
    812	__sli_queue_destroy(sli4, q);
    813	return -EIO;
    814}
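/*
 * A hypothetical setup sketch (not part of the driver): queues are
 * created in dependency order, since each child queue needs its
 * parent's id (EQ before CQ, CQ before MQ/WQ).
 */
#if 0
	sli_queue_alloc(sli4, SLI4_QTYPE_EQ, &eq, 1024, NULL);
	sli_queue_alloc(sli4, SLI4_QTYPE_CQ, &cq, 1024, &eq);
	sli_queue_alloc(sli4, SLI4_QTYPE_WQ, &wq, 1024, &cq);
#endif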
    815
    816static int sli_cmd_cq_set_create(struct sli4 *sli4,
    817				 struct sli4_queue *qs[], u32 num_cqs,
    818				 struct sli4_queue *eqs[],
    819				 struct efc_dma *dma)
    820{
    821	struct sli4_rqst_cmn_create_cq_set_v0 *req = NULL;
    822	uintptr_t addr;
    823	u32 i, offset = 0,  page_bytes = 0, payload_size;
    824	u32 p = 0, page_size = 0, n_cqe = 0, num_pages_cq;
    825	u32 dw5_flags = 0;
    826	u16 dw6w1_flags = 0;
    827	__le32 req_len;
    828
    829	n_cqe = qs[0]->dma.size / SLI4_CQE_BYTES;
    830	switch (n_cqe) {
    831	case 256:
    832	case 512:
    833	case 1024:
    834	case 2048:
    835		page_size = 1;
    836		break;
    837	case 4096:
    838		page_size = 2;
    839		break;
    840	default:
    841		return -EIO;
    842	}
    843
    844	page_bytes = page_size * SLI_PAGE_SIZE;
    845	num_pages_cq = sli_page_count(qs[0]->dma.size, page_bytes);
    846	payload_size = max(SLI4_RQST_CMDSZ(cmn_create_cq_set_v0) +
    847			   (SZ_DMAADDR * num_pages_cq * num_cqs),
    848			   sizeof(struct sli4_rsp_cmn_create_queue_set));
    849
    850	dma->size = payload_size;
    851	dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
    852				       &dma->phys, GFP_KERNEL);
    853	if (!dma->virt)
    854		return -EIO;
    855
    856	memset(dma->virt, 0, payload_size);
    857
    858	req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma);
    859	if (!req)
    860		return -EIO;
    861
    862	req_len = SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_set_v0,
    863					 SZ_DMAADDR * num_pages_cq * num_cqs);
    864	sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_CREATE_CQ_SET, SLI4_SUBSYSTEM_FC,
    865			 CMD_V0, req_len);
    866	req->page_size = page_size;
    867
    868	req->num_pages = cpu_to_le16(num_pages_cq);
    869	switch (num_pages_cq) {
    870	case 1:
    871		dw5_flags |= SLI4_CQ_CNT_VAL(256);
    872		break;
    873	case 2:
    874		dw5_flags |= SLI4_CQ_CNT_VAL(512);
    875		break;
    876	case 4:
    877		dw5_flags |= SLI4_CQ_CNT_VAL(1024);
    878		break;
    879	case 8:
    880		dw5_flags |= SLI4_CQ_CNT_VAL(LARGE);
    881		dw6w1_flags |= (n_cqe & SLI4_CREATE_CQSETV0_CQE_COUNT);
    882		break;
    883	default:
    884		efc_log_info(sli4, "num_pages %d not valid\n", num_pages_cq);
    885		return -EIO;
    886	}
    887
    888	dw5_flags |= SLI4_CREATE_CQSETV0_EVT;
    889	dw5_flags |= SLI4_CREATE_CQSETV0_VALID;
    890	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
    891		dw5_flags |= SLI4_CREATE_CQSETV0_AUTOVALID;
    892
    893	dw6w1_flags &= ~SLI4_CREATE_CQSETV0_ARM;
    894
    895	req->dw5_flags = cpu_to_le32(dw5_flags);
    896	req->dw6w1_flags = cpu_to_le16(dw6w1_flags);
    897
    898	req->num_cq_req = cpu_to_le16(num_cqs);
    899
    900	/* Fill page addresses of all the CQs. */
    901	for (i = 0; i < num_cqs; i++) {
    902		req->eq_id[i] = cpu_to_le16(eqs[i]->id);
    903		for (p = 0, addr = qs[i]->dma.phys; p < num_pages_cq;
    904		     p++, addr += page_bytes) {
    905			req->page_phys_addr[offset].low =
    906				cpu_to_le32(lower_32_bits(addr));
    907			req->page_phys_addr[offset].high =
    908				cpu_to_le32(upper_32_bits(addr));
    909			offset++;
    910		}
    911	}
    912
    913	return 0;
    914}
    915
    916int
    917sli_cq_alloc_set(struct sli4 *sli4, struct sli4_queue *qs[],
    918		 u32 num_cqs, u32 n_entries, struct sli4_queue *eqs[])
    919{
    920	u32 i;
    921	struct efc_dma dma = {0};
    922	struct sli4_rsp_cmn_create_queue_set *res;
    923	void __iomem *db_regaddr;
    924
     925	/* Allocate page-aligned DMA memory for each CQ */
    926	for (i = 0; i < num_cqs; i++) {
    927		if (__sli_queue_init(sli4, qs[i], SLI4_QTYPE_CQ, SLI4_CQE_BYTES,
    928				     n_entries, SLI_PAGE_SIZE))
    929			goto error;
    930	}
    931
    932	if (sli_cmd_cq_set_create(sli4, qs, num_cqs, eqs, &dma))
    933		goto error;
    934
    935	if (sli_bmbx_command(sli4))
    936		goto error;
    937
    938	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
    939		db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG;
    940	else
    941		db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG;
    942
    943	res = dma.virt;
    944	if (res->hdr.status) {
    945		efc_log_err(sli4, "bad create CQSet status=%#x addl=%#x\n",
    946			    res->hdr.status, res->hdr.additional_status);
    947		goto error;
    948	}
    949
    950	/* Check if we got all requested CQs. */
    951	if (le16_to_cpu(res->num_q_allocated) != num_cqs) {
     952		efc_log_crit(sli4, "Requested CQ count doesn't match.\n");
    953		goto error;
    954	}
    955	/* Fill the resp cq ids. */
    956	for (i = 0; i < num_cqs; i++) {
    957		qs[i]->id = le16_to_cpu(res->q_id) + i;
    958		qs[i]->db_regaddr = db_regaddr;
    959	}
    960
    961	dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys);
    962
    963	return 0;
    964
    965error:
    966	for (i = 0; i < num_cqs; i++)
    967		__sli_queue_destroy(sli4, qs[i]);
    968
    969	if (dma.virt)
    970		dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt,
    971				  dma.phys);
    972
    973	return -EIO;
    974}
    975
    976static int
    977sli_cmd_common_destroy_q(struct sli4 *sli4, u8 opc, u8 subsystem, u16 q_id)
    978{
    979	struct sli4_rqst_cmn_destroy_q *req;
    980
    981	/* Payload length must accommodate both request and response */
    982	req = sli_config_cmd_init(sli4, sli4->bmbx.virt,
    983				  SLI4_CFG_PYLD_LENGTH(cmn_destroy_q), NULL);
    984	if (!req)
    985		return -EIO;
    986
    987	sli_cmd_fill_hdr(&req->hdr, opc, subsystem,
    988			 CMD_V0, SLI4_RQST_PYLD_LEN(cmn_destroy_q));
    989	req->q_id = cpu_to_le16(q_id);
    990
    991	return 0;
    992}
    993
    994int
    995sli_queue_free(struct sli4 *sli4, struct sli4_queue *q,
    996	       u32 destroy_queues, u32 free_memory)
    997{
    998	int rc = 0;
    999	u8 opcode, subsystem;
   1000	struct sli4_rsp_hdr *res;
   1001
   1002	if (!q) {
   1003		efc_log_err(sli4, "bad parameter sli4=%p q=%p\n", sli4, q);
   1004		return -EIO;
   1005	}
   1006
   1007	if (!destroy_queues)
   1008		goto free_mem;
   1009
   1010	switch (q->type) {
   1011	case SLI4_QTYPE_EQ:
   1012		opcode = SLI4_CMN_DESTROY_EQ;
   1013		subsystem = SLI4_SUBSYSTEM_COMMON;
   1014		break;
   1015	case SLI4_QTYPE_CQ:
   1016		opcode = SLI4_CMN_DESTROY_CQ;
   1017		subsystem = SLI4_SUBSYSTEM_COMMON;
   1018		break;
   1019	case SLI4_QTYPE_MQ:
   1020		opcode = SLI4_CMN_DESTROY_MQ;
   1021		subsystem = SLI4_SUBSYSTEM_COMMON;
   1022		break;
   1023	case SLI4_QTYPE_WQ:
   1024		opcode = SLI4_OPC_WQ_DESTROY;
   1025		subsystem = SLI4_SUBSYSTEM_FC;
   1026		break;
   1027	case SLI4_QTYPE_RQ:
   1028		opcode = SLI4_OPC_RQ_DESTROY;
   1029		subsystem = SLI4_SUBSYSTEM_FC;
   1030		break;
   1031	default:
   1032		efc_log_info(sli4, "bad queue type %d\n", q->type);
   1033		rc = -EIO;
   1034		goto free_mem;
   1035	}
   1036
   1037	rc = sli_cmd_common_destroy_q(sli4, opcode, subsystem, q->id);
   1038	if (rc)
   1039		goto free_mem;
   1040
   1041	rc = sli_bmbx_command(sli4);
   1042	if (rc)
   1043		goto free_mem;
   1044
   1045	rc = sli_res_sli_config(sli4, sli4->bmbx.virt);
   1046	if (rc)
   1047		goto free_mem;
   1048
   1049	res = (void *)((u8 *)sli4->bmbx.virt +
   1050			     offsetof(struct sli4_cmd_sli_config, payload));
   1051	if (res->status) {
   1052		efc_log_err(sli4, "destroy %s st=%#x addl=%#x\n",
   1053			    SLI4_QNAME[q->type], res->status,
   1054			    res->additional_status);
   1055		rc = -EIO;
   1056		goto free_mem;
   1057	}
   1058
   1059free_mem:
   1060	if (free_memory)
   1061		__sli_queue_destroy(sli4, q);
   1062
   1063	return rc;
   1064}
   1065
   1066int
   1067sli_queue_eq_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
   1068{
   1069	u32 val;
   1070	unsigned long flags = 0;
   1071	u32 a = arm ? SLI4_EQCQ_ARM : SLI4_EQCQ_UNARM;
   1072
   1073	spin_lock_irqsave(&q->lock, flags);
   1074	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
   1075		val = sli_format_if6_eq_db_data(q->n_posted, q->id, a);
   1076	else
   1077		val = sli_format_eq_db_data(q->n_posted, q->id, a);
   1078
   1079	writel(val, q->db_regaddr);
   1080	q->n_posted = 0;
   1081	spin_unlock_irqrestore(&q->lock, flags);
   1082
   1083	return 0;
   1084}
   1085
   1086int
   1087sli_queue_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
   1088{
   1089	u32 val = 0;
   1090	unsigned long flags = 0;
   1091	u32 a = arm ? SLI4_EQCQ_ARM : SLI4_EQCQ_UNARM;
   1092
   1093	spin_lock_irqsave(&q->lock, flags);
   1094
   1095	switch (q->type) {
   1096	case SLI4_QTYPE_EQ:
   1097		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
   1098			val = sli_format_if6_eq_db_data(q->n_posted, q->id, a);
   1099		else
   1100			val = sli_format_eq_db_data(q->n_posted, q->id, a);
   1101
   1102		writel(val, q->db_regaddr);
   1103		q->n_posted = 0;
   1104		break;
   1105	case SLI4_QTYPE_CQ:
   1106		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
   1107			val = sli_format_if6_cq_db_data(q->n_posted, q->id, a);
   1108		else
   1109			val = sli_format_cq_db_data(q->n_posted, q->id, a);
   1110
   1111		writel(val, q->db_regaddr);
   1112		q->n_posted = 0;
   1113		break;
   1114	default:
   1115		efc_log_info(sli4, "should only be used for EQ/CQ, not %s\n",
   1116			     SLI4_QNAME[q->type]);
   1117	}
   1118
   1119	spin_unlock_irqrestore(&q->lock, flags);
   1120
   1121	return 0;
   1122}
   1123
   1124int
   1125sli_wq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
   1126{
   1127	u8 *qe = q->dma.virt;
   1128	u32 qindex;
   1129	u32 val = 0;
   1130
   1131	qindex = q->index;
   1132	qe += q->index * q->size;
   1133
   1134	if (sli4->params.perf_wq_id_association)
   1135		sli_set_wq_id_association(entry, q->id);
   1136
   1137	memcpy(qe, entry, q->size);
   1138	val = sli_format_wq_db_data(q->id);
   1139
   1140	writel(val, q->db_regaddr);
   1141	q->index = (q->index + 1) & (q->length - 1);
   1142
   1143	return qindex;
   1144}
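/*
 * The wrap arithmetic (q->index + 1) & (q->length - 1) relies on queue
 * lengths being powers of two; e.g. with length == 1024, index 1023
 * advances to 1024 & 1023 == 0.
 */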
   1145
   1146int
   1147sli_mq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
   1148{
   1149	u8 *qe = q->dma.virt;
   1150	u32 qindex;
   1151	u32 val = 0;
   1152	unsigned long flags;
   1153
   1154	spin_lock_irqsave(&q->lock, flags);
   1155	qindex = q->index;
   1156	qe += q->index * q->size;
   1157
   1158	memcpy(qe, entry, q->size);
   1159	val = sli_format_mq_db_data(q->id);
   1160	writel(val, q->db_regaddr);
   1161	q->index = (q->index + 1) & (q->length - 1);
   1162	spin_unlock_irqrestore(&q->lock, flags);
   1163
   1164	return qindex;
   1165}
   1166
   1167int
   1168sli_rq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
   1169{
   1170	u8 *qe = q->dma.virt;
   1171	u32 qindex;
   1172	u32 val = 0;
   1173
   1174	qindex = q->index;
   1175	qe += q->index * q->size;
   1176
   1177	memcpy(qe, entry, q->size);
   1178
   1179	/*
   1180	 * In RQ-pair, an RQ either contains the FC header
   1181	 * (i.e. is_hdr == TRUE) or the payload.
   1182	 *
   1183	 * Don't ring doorbell for payload RQ
   1184	 */
   1185	if (!(q->u.flag & SLI4_QUEUE_FLAG_HDR))
   1186		goto skip;
   1187
   1188	val = sli_format_rq_db_data(q->id);
   1189	writel(val, q->db_regaddr);
   1190skip:
   1191	q->index = (q->index + 1) & (q->length - 1);
   1192
   1193	return qindex;
   1194}
   1195
   1196int
   1197sli_eq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
   1198{
   1199	u8 *qe = q->dma.virt;
   1200	unsigned long flags = 0;
   1201	u16 wflags = 0;
   1202
   1203	spin_lock_irqsave(&q->lock, flags);
   1204
   1205	qe += q->index * q->size;
   1206
   1207	/* Check if eqe is valid */
   1208	wflags = le16_to_cpu(((struct sli4_eqe *)qe)->dw0w0_flags);
   1209
   1210	if ((wflags & SLI4_EQE_VALID) != q->phase) {
   1211		spin_unlock_irqrestore(&q->lock, flags);
   1212		return -EIO;
   1213	}
   1214
   1215	if (sli4->if_type != SLI4_INTF_IF_TYPE_6) {
   1216		wflags &= ~SLI4_EQE_VALID;
   1217		((struct sli4_eqe *)qe)->dw0w0_flags = cpu_to_le16(wflags);
   1218	}
   1219
   1220	memcpy(entry, qe, q->size);
   1221	q->index = (q->index + 1) & (q->length - 1);
   1222	q->n_posted++;
   1223	/*
   1224	 * For prism, the phase value will be used
   1225	 * to check the validity of eq/cq entries.
   1226	 * The value toggles after a complete sweep
   1227	 * through the queue.
   1228	 */
   1229
   1230	if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0)
   1231		q->phase ^= (u16)0x1;
   1232
   1233	spin_unlock_irqrestore(&q->lock, flags);
   1234
   1235	return 0;
   1236}
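/*
 * On if_type 6 (Prism) the hardware never clears the EQE valid bit, so
 * software instead compares it with q->phase, which toggles each time
 * the index wraps to 0; on older parts phase stays at its initial value
 * and the driver clears the valid bit by hand after consuming each
 * entry, as the branch above shows.
 */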
   1237
   1238int
   1239sli_cq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
   1240{
   1241	u8 *qe = q->dma.virt;
   1242	unsigned long flags = 0;
   1243	u32 dwflags = 0;
   1244	bool valid_bit_set;
   1245
   1246	spin_lock_irqsave(&q->lock, flags);
   1247
   1248	qe += q->index * q->size;
   1249
   1250	/* Check if cqe is valid */
   1251	dwflags = le32_to_cpu(((struct sli4_mcqe *)qe)->dw3_flags);
   1252	valid_bit_set = (dwflags & SLI4_MCQE_VALID) != 0;
   1253
   1254	if (valid_bit_set != q->phase) {
   1255		spin_unlock_irqrestore(&q->lock, flags);
   1256		return -EIO;
   1257	}
   1258
   1259	if (sli4->if_type != SLI4_INTF_IF_TYPE_6) {
   1260		dwflags &= ~SLI4_MCQE_VALID;
   1261		((struct sli4_mcqe *)qe)->dw3_flags = cpu_to_le32(dwflags);
   1262	}
   1263
   1264	memcpy(entry, qe, q->size);
   1265	q->index = (q->index + 1) & (q->length - 1);
   1266	q->n_posted++;
   1267	/*
   1268	 * For prism, the phase value will be used
   1269	 * to check the validity of eq/cq entries.
   1270	 * The value toggles after a complete sweep
   1271	 * through the queue.
   1272	 */
   1273
   1274	if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0)
   1275		q->phase ^= (u16)0x1;
   1276
   1277	spin_unlock_irqrestore(&q->lock, flags);
   1278
   1279	return 0;
   1280}
   1281
   1282int
   1283sli_mq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
   1284{
   1285	u8 *qe = q->dma.virt;
   1286	unsigned long flags = 0;
   1287
   1288	spin_lock_irqsave(&q->lock, flags);
   1289
   1290	qe += q->u.r_idx * q->size;
   1291
   1292	/* Check if mqe is valid */
   1293	if (q->index == q->u.r_idx) {
   1294		spin_unlock_irqrestore(&q->lock, flags);
   1295		return -EIO;
   1296	}
   1297
   1298	memcpy(entry, qe, q->size);
   1299	q->u.r_idx = (q->u.r_idx + 1) & (q->length - 1);
   1300
   1301	spin_unlock_irqrestore(&q->lock, flags);
   1302
   1303	return 0;
   1304}
   1305
   1306int
   1307sli_eq_parse(struct sli4 *sli4, u8 *buf, u16 *cq_id)
   1308{
   1309	struct sli4_eqe *eqe = (void *)buf;
   1310	int rc = 0;
   1311	u16 flags = 0;
   1312	u16 majorcode;
   1313	u16 minorcode;
   1314
   1315	if (!buf || !cq_id) {
   1316		efc_log_err(sli4, "bad parameters sli4=%p buf=%p cq_id=%p\n",
   1317			    sli4, buf, cq_id);
   1318		return -EIO;
   1319	}
   1320
   1321	flags = le16_to_cpu(eqe->dw0w0_flags);
   1322	majorcode = (flags & SLI4_EQE_MJCODE) >> 1;
   1323	minorcode = (flags & SLI4_EQE_MNCODE) >> 4;
   1324	switch (majorcode) {
   1325	case SLI4_MAJOR_CODE_STANDARD:
   1326		*cq_id = le16_to_cpu(eqe->resource_id);
   1327		break;
   1328	case SLI4_MAJOR_CODE_SENTINEL:
   1329		efc_log_info(sli4, "sentinel EQE\n");
   1330		rc = SLI4_EQE_STATUS_EQ_FULL;
   1331		break;
   1332	default:
   1333		efc_log_info(sli4, "Unsupported EQE: major %x minor %x\n",
   1334			     majorcode, minorcode);
   1335		rc = -EIO;
   1336	}
   1337
   1338	return rc;
   1339}
   1340
   1341int
   1342sli_cq_parse(struct sli4 *sli4, struct sli4_queue *cq, u8 *cqe,
   1343	     enum sli4_qentry *etype, u16 *q_id)
   1344{
   1345	int rc = 0;
   1346
   1347	if (!cq || !cqe || !etype) {
   1348		efc_log_err(sli4, "bad params sli4=%p cq=%p cqe=%p etype=%p q_id=%p\n",
   1349			    sli4, cq, cqe, etype, q_id);
   1350		return -EINVAL;
   1351	}
   1352
   1353	/* Parse a CQ entry to retrieve the event type and the queue id */
   1354	if (cq->u.flag & SLI4_QUEUE_FLAG_MQ) {
   1355		struct sli4_mcqe	*mcqe = (void *)cqe;
   1356
   1357		if (le32_to_cpu(mcqe->dw3_flags) & SLI4_MCQE_AE) {
   1358			*etype = SLI4_QENTRY_ASYNC;
   1359		} else {
   1360			*etype = SLI4_QENTRY_MQ;
   1361			rc = sli_cqe_mq(sli4, mcqe);
   1362		}
   1363		*q_id = -1;
   1364	} else {
   1365		rc = sli_fc_cqe_parse(sli4, cq, cqe, etype, q_id);
   1366	}
   1367
   1368	return rc;
   1369}
   1370
   1371int
   1372sli_abort_wqe(struct sli4 *sli, void *buf, enum sli4_abort_type type,
   1373	      bool send_abts, u32 ids, u32 mask, u16 tag, u16 cq_id)
   1374{
   1375	struct sli4_abort_wqe *abort = buf;
   1376
   1377	memset(buf, 0, sli->wqe_size);
   1378
   1379	switch (type) {
   1380	case SLI4_ABORT_XRI:
   1381		abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG;
   1382		if (mask) {
   1383			efc_log_warn(sli, "%#x aborting XRI %#x warning non-zero mask",
   1384				     mask, ids);
   1385			mask = 0;
   1386		}
   1387		break;
   1388	case SLI4_ABORT_ABORT_ID:
   1389		abort->criteria = SLI4_ABORT_CRITERIA_ABORT_TAG;
   1390		break;
   1391	case SLI4_ABORT_REQUEST_ID:
   1392		abort->criteria = SLI4_ABORT_CRITERIA_REQUEST_TAG;
   1393		break;
   1394	default:
   1395		efc_log_info(sli, "unsupported type %#x\n", type);
   1396		return -EIO;
   1397	}
   1398
   1399	abort->ia_ir_byte |= send_abts ? 0 : 1;
   1400
   1401	/* Suppress ABTS retries */
   1402	abort->ia_ir_byte |= SLI4_ABRT_WQE_IR;
   1403
   1404	abort->t_mask = cpu_to_le32(mask);
   1405	abort->t_tag  = cpu_to_le32(ids);
   1406	abort->command = SLI4_WQE_ABORT;
   1407	abort->request_tag = cpu_to_le16(tag);
   1408
   1409	abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD);
   1410
   1411	abort->cq_id = cpu_to_le16(cq_id);
   1412	abort->cmdtype_wqec_byte |= SLI4_CMD_ABORT_WQE;
   1413
   1414	return 0;
   1415}
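/*
 * A hypothetical call sketch (not part of the driver): aborting an
 * exchange by its XRI tag; the mask must be zero for this criteria.
 */
#if 0
	sli_abort_wqe(sli, wqe_buf, SLI4_ABORT_XRI, false,
		      xri, 0, tag, SLI4_CQ_DEFAULT);
#endif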
   1416
   1417int
   1418sli_els_request64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
   1419		      struct sli_els_params *params)
   1420{
   1421	struct sli4_els_request64_wqe *els = buf;
   1422	struct sli4_sge *sge = sgl->virt;
   1423	bool is_fabric = false;
   1424	struct sli4_bde *bptr;
   1425
   1426	memset(buf, 0, sli->wqe_size);
   1427
   1428	bptr = &els->els_request_payload;
   1429	if (sli->params.sgl_pre_registered) {
   1430		els->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_REQ_WQE_XBL;
   1431
   1432		els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_DBDE;
   1433		bptr->bde_type_buflen =
   1434			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   1435				    (params->xmit_len & SLI4_BDE_LEN_MASK));
   1436
   1437		bptr->u.data.low  = sge[0].buffer_address_low;
   1438		bptr->u.data.high = sge[0].buffer_address_high;
   1439	} else {
   1440		els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_XBL;
   1441
   1442		bptr->bde_type_buflen =
   1443			cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
   1444				    ((2 * sizeof(struct sli4_sge)) &
   1445				     SLI4_BDE_LEN_MASK));
   1446		bptr->u.blp.low  = cpu_to_le32(lower_32_bits(sgl->phys));
   1447		bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys));
   1448	}
   1449
   1450	els->els_request_payload_length = cpu_to_le32(params->xmit_len);
   1451	els->max_response_payload_length = cpu_to_le32(params->rsp_len);
   1452
   1453	els->xri_tag = cpu_to_le16(params->xri);
   1454	els->timer = params->timeout;
   1455	els->class_byte |= SLI4_GENERIC_CLASS_CLASS_3;
   1456
   1457	els->command = SLI4_WQE_ELS_REQUEST64;
   1458
   1459	els->request_tag = cpu_to_le16(params->tag);
   1460
   1461	els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_IOD;
   1462
   1463	els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_QOSD;
   1464
   1465	/* figure out the ELS_ID value from the request buffer */
   1466
   1467	switch (params->cmd) {
   1468	case ELS_LOGO:
   1469		els->cmdtype_elsid_byte |=
   1470			SLI4_ELS_REQUEST64_LOGO << SLI4_REQ_WQE_ELSID_SHFT;
   1471		if (params->rpi_registered) {
   1472			els->ct_byte |=
   1473			SLI4_GENERIC_CONTEXT_RPI << SLI4_REQ_WQE_CT_SHFT;
   1474			els->context_tag = cpu_to_le16(params->rpi);
   1475		} else {
   1476			els->ct_byte |=
   1477			SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
   1478			els->context_tag = cpu_to_le16(params->vpi);
   1479		}
   1480		if (params->d_id == FC_FID_FLOGI)
   1481			is_fabric = true;
   1482		break;
   1483	case ELS_FDISC:
   1484		if (params->d_id == FC_FID_FLOGI)
   1485			is_fabric = true;
   1486		if (params->s_id == 0) {
   1487			els->cmdtype_elsid_byte |=
   1488			SLI4_ELS_REQUEST64_FDISC << SLI4_REQ_WQE_ELSID_SHFT;
   1489			is_fabric = true;
   1490		} else {
   1491			els->cmdtype_elsid_byte |=
   1492			SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT;
   1493		}
   1494		els->ct_byte |=
   1495			SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
   1496		els->context_tag = cpu_to_le16(params->vpi);
   1497		els->sid_sp_dword |= cpu_to_le32(1 << SLI4_REQ_WQE_SP_SHFT);
   1498		break;
   1499	case ELS_FLOGI:
   1500		els->ct_byte |=
   1501			SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
   1502		els->context_tag = cpu_to_le16(params->vpi);
    1503		/*
    1504		 * Set SP here, since we haven't done a REG_VPI yet;
    1505		 * this may need to be skipped once VFI/VPI
    1506		 * registration has completed.
    1507		 *
    1508		 * Use the FC_ID of the SPORT if it has been allocated,
    1509		 * otherwise use an S_ID of zero.
    1510		 */
   1511		els->sid_sp_dword |= cpu_to_le32(1 << SLI4_REQ_WQE_SP_SHFT);
   1512		if (params->s_id != U32_MAX)
   1513			els->sid_sp_dword |= cpu_to_le32(params->s_id);
   1514		break;
   1515	case ELS_PLOGI:
   1516		els->cmdtype_elsid_byte |=
   1517			SLI4_ELS_REQUEST64_PLOGI << SLI4_REQ_WQE_ELSID_SHFT;
   1518		els->ct_byte |=
   1519			SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
   1520		els->context_tag = cpu_to_le16(params->vpi);
   1521		break;
   1522	case ELS_SCR:
   1523		els->cmdtype_elsid_byte |=
   1524			SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT;
   1525		els->ct_byte |=
   1526			SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
   1527		els->context_tag = cpu_to_le16(params->vpi);
   1528		break;
   1529	default:
   1530		els->cmdtype_elsid_byte |=
   1531			SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT;
   1532		if (params->rpi_registered) {
   1533			els->ct_byte |= (SLI4_GENERIC_CONTEXT_RPI <<
   1534					 SLI4_REQ_WQE_CT_SHFT);
    1535			els->context_tag = cpu_to_le16(params->rpi);
   1536		} else {
   1537			els->ct_byte |=
   1538			SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
   1539			els->context_tag = cpu_to_le16(params->vpi);
   1540		}
   1541		break;
   1542	}
   1543
   1544	if (is_fabric)
   1545		els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_CMD_FABRIC;
   1546	else
   1547		els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_CMD_NON_FABRIC;
   1548
   1549	els->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
   1550
   1551	if (((els->ct_byte & SLI4_REQ_WQE_CT) >> SLI4_REQ_WQE_CT_SHFT) !=
   1552					SLI4_GENERIC_CONTEXT_RPI)
   1553		els->remote_id_dword = cpu_to_le32(params->d_id);
   1554
   1555	if (((els->ct_byte & SLI4_REQ_WQE_CT) >> SLI4_REQ_WQE_CT_SHFT) ==
   1556					SLI4_GENERIC_CONTEXT_VPI)
   1557		els->temporary_rpi = cpu_to_le16(params->rpi);
   1558
   1559	return 0;
   1560}
   1561
   1562int
   1563sli_fcp_icmnd64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, u16 xri,
   1564		    u16 tag, u16 cq_id, u32 rpi, u32 rnode_fcid, u8 timeout)
   1565{
   1566	struct sli4_fcp_icmnd64_wqe *icmnd = buf;
   1567	struct sli4_sge *sge = NULL;
   1568	struct sli4_bde *bptr;
   1569	u32 len;
   1570
   1571	memset(buf, 0, sli->wqe_size);
   1572
   1573	if (!sgl || !sgl->virt) {
   1574		efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
   1575			    sgl, sgl ? sgl->virt : NULL);
   1576		return -EIO;
   1577	}
   1578	sge = sgl->virt;
   1579	bptr = &icmnd->bde;
   1580	if (sli->params.sgl_pre_registered) {
   1581		icmnd->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_ICMD_WQE_XBL;
   1582
   1583		icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_DBDE;
   1584		bptr->bde_type_buflen =
   1585			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   1586				    (le32_to_cpu(sge[0].buffer_length) &
   1587				     SLI4_BDE_LEN_MASK));
   1588
   1589		bptr->u.data.low  = sge[0].buffer_address_low;
   1590		bptr->u.data.high = sge[0].buffer_address_high;
   1591	} else {
   1592		icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_XBL;
   1593
   1594		bptr->bde_type_buflen =
   1595			cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
   1596				    (sgl->size & SLI4_BDE_LEN_MASK));
   1597
   1598		bptr->u.blp.low  = cpu_to_le32(lower_32_bits(sgl->phys));
   1599		bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys));
   1600	}
   1601
   1602	len = le32_to_cpu(sge[0].buffer_length) +
   1603	      le32_to_cpu(sge[1].buffer_length);
   1604	icmnd->payload_offset_length = cpu_to_le16(len);
   1605	icmnd->xri_tag = cpu_to_le16(xri);
   1606	icmnd->context_tag = cpu_to_le16(rpi);
   1607	icmnd->timer = timeout;
   1608
   1609	/* WQE word 4 contains read transfer length */
   1610	icmnd->class_pu_byte |= 2 << SLI4_ICMD_WQE_PU_SHFT;
   1611	icmnd->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
   1612	icmnd->command = SLI4_WQE_FCP_ICMND64;
   1613	icmnd->dif_ct_bs_byte |=
   1614		SLI4_GENERIC_CONTEXT_RPI << SLI4_ICMD_WQE_CT_SHFT;
   1615
   1616	icmnd->abort_tag = cpu_to_le32(xri);
   1617
   1618	icmnd->request_tag = cpu_to_le16(tag);
   1619	icmnd->len_loc1_byte |= SLI4_ICMD_WQE_LEN_LOC_BIT1;
   1620	icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_LEN_LOC_BIT2;
   1621	icmnd->cmd_type_byte |= SLI4_CMD_FCP_ICMND64_WQE;
   1622	icmnd->cq_id = cpu_to_le16(cq_id);
   1623
   1624	return  0;
   1625}
   1626
   1627int
   1628sli_fcp_iread64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
   1629		    u32 first_data_sge, u32 xfer_len, u16 xri, u16 tag,
   1630		    u16 cq_id, u32 rpi, u32 rnode_fcid,
   1631		    u8 dif, u8 bs, u8 timeout)
   1632{
   1633	struct sli4_fcp_iread64_wqe *iread = buf;
   1634	struct sli4_sge *sge = NULL;
   1635	struct sli4_bde *bptr;
   1636	u32 sge_flags, len;
   1637
   1638	memset(buf, 0, sli->wqe_size);
   1639
   1640	if (!sgl || !sgl->virt) {
   1641		efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
   1642			    sgl, sgl ? sgl->virt : NULL);
   1643		return -EIO;
   1644	}
   1645
   1646	sge = sgl->virt;
   1647	bptr = &iread->bde;
   1648	if (sli->params.sgl_pre_registered) {
   1649		iread->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_IR_WQE_XBL;
   1650
   1651		iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_DBDE;
   1652
   1653		bptr->bde_type_buflen =
   1654			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   1655				    (le32_to_cpu(sge[0].buffer_length) &
   1656				     SLI4_BDE_LEN_MASK));
   1657
   1658		bptr->u.blp.low  = sge[0].buffer_address_low;
   1659		bptr->u.blp.high = sge[0].buffer_address_high;
   1660	} else {
   1661		iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_XBL;
   1662
   1663		bptr->bde_type_buflen =
   1664			cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
   1665				    (sgl->size & SLI4_BDE_LEN_MASK));
   1666
   1667		bptr->u.blp.low  =
   1668				cpu_to_le32(lower_32_bits(sgl->phys));
   1669		bptr->u.blp.high =
   1670				cpu_to_le32(upper_32_bits(sgl->phys));
   1671
   1672		/*
   1673		 * fill out fcp_cmnd buffer len and change resp buffer to be of
   1674		 * type "skip" (note: response will still be written to sge[1]
   1675		 * if necessary)
   1676		 */
   1677		len = le32_to_cpu(sge[0].buffer_length);
   1678		iread->fcp_cmd_buffer_length = cpu_to_le16(len);
   1679
   1680		sge_flags = le32_to_cpu(sge[1].dw2_flags);
   1681		sge_flags &= (~SLI4_SGE_TYPE_MASK);
   1682		sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
   1683		sge[1].dw2_flags = cpu_to_le32(sge_flags);
   1684	}
   1685
   1686	len = le32_to_cpu(sge[0].buffer_length) +
   1687	      le32_to_cpu(sge[1].buffer_length);
   1688	iread->payload_offset_length = cpu_to_le16(len);
   1689	iread->total_transfer_length = cpu_to_le32(xfer_len);
   1690
   1691	iread->xri_tag = cpu_to_le16(xri);
   1692	iread->context_tag = cpu_to_le16(rpi);
   1693
   1694	iread->timer = timeout;
   1695
   1696	/* WQE word 4 contains read transfer length */
   1697	iread->class_pu_byte |= 2 << SLI4_IR_WQE_PU_SHFT;
   1698	iread->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
   1699	iread->command = SLI4_WQE_FCP_IREAD64;
   1700	iread->dif_ct_bs_byte |=
   1701		SLI4_GENERIC_CONTEXT_RPI << SLI4_IR_WQE_CT_SHFT;
   1702	iread->dif_ct_bs_byte |= dif;
   1703	iread->dif_ct_bs_byte  |= bs << SLI4_IR_WQE_BS_SHFT;
   1704
   1705	iread->abort_tag = cpu_to_le32(xri);
   1706
   1707	iread->request_tag = cpu_to_le16(tag);
   1708	iread->len_loc1_byte |= SLI4_IR_WQE_LEN_LOC_BIT1;
   1709	iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_LEN_LOC_BIT2;
   1710	iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_IOD;
   1711	iread->cmd_type_byte |= SLI4_CMD_FCP_IREAD64_WQE;
   1712	iread->cq_id = cpu_to_le16(cq_id);
   1713
   1714	if (sli->params.perf_hint) {
   1715		bptr = &iread->first_data_bde;
   1716		bptr->bde_type_buflen =	cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   1717			  (le32_to_cpu(sge[first_data_sge].buffer_length) &
   1718			     SLI4_BDE_LEN_MASK));
   1719		bptr->u.data.low =
   1720			sge[first_data_sge].buffer_address_low;
   1721		bptr->u.data.high =
   1722			sge[first_data_sge].buffer_address_high;
   1723	}
   1724
   1725	return  0;
   1726}
   1727
   1728int
   1729sli_fcp_iwrite64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
   1730		     u32 first_data_sge, u32 xfer_len,
   1731		     u32 first_burst, u16 xri, u16 tag,
   1732		     u16 cq_id, u32 rpi,
   1733		     u32 rnode_fcid,
   1734		     u8 dif, u8 bs, u8 timeout)
   1735{
   1736	struct sli4_fcp_iwrite64_wqe *iwrite = buf;
   1737	struct sli4_sge *sge = NULL;
   1738	struct sli4_bde *bptr;
   1739	u32 sge_flags, min, len;
   1740
   1741	memset(buf, 0, sli->wqe_size);
   1742
   1743	if (!sgl || !sgl->virt) {
   1744		efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
   1745			    sgl, sgl ? sgl->virt : NULL);
   1746		return -EIO;
   1747	}
   1748	sge = sgl->virt;
   1749	bptr = &iwrite->bde;
   1750	if (sli->params.sgl_pre_registered) {
   1751		iwrite->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_IWR_WQE_XBL;
   1752
   1753		iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_DBDE;
   1754		bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   1755		       (le32_to_cpu(sge[0].buffer_length) & SLI4_BDE_LEN_MASK));
   1756		bptr->u.data.low  = sge[0].buffer_address_low;
   1757		bptr->u.data.high = sge[0].buffer_address_high;
   1758	} else {
   1759		iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_XBL;
   1760
   1761		bptr->bde_type_buflen =	cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   1762					(sgl->size & SLI4_BDE_LEN_MASK));
   1763
   1764		bptr->u.blp.low  = cpu_to_le32(lower_32_bits(sgl->phys));
   1765		bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys));
   1766
   1767		/*
   1768		 * fill out fcp_cmnd buffer len and change resp buffer to be of
   1769		 * type "skip" (note: response will still be written to sge[1]
   1770		 * if necessary)
   1771		 */
   1772		len = le32_to_cpu(sge[0].buffer_length);
   1773		iwrite->fcp_cmd_buffer_length = cpu_to_le16(len);
   1774		sge_flags = le32_to_cpu(sge[1].dw2_flags);
   1775		sge_flags &= ~SLI4_SGE_TYPE_MASK;
   1776		sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
   1777		sge[1].dw2_flags = cpu_to_le32(sge_flags);
   1778	}
   1779
   1780	len = le32_to_cpu(sge[0].buffer_length) +
   1781	      le32_to_cpu(sge[1].buffer_length);
   1782	iwrite->payload_offset_length = cpu_to_le16(len);
   1783	iwrite->total_transfer_length = cpu_to_le16(xfer_len);
   1784	min = (xfer_len < first_burst) ? xfer_len : first_burst;
   1785	iwrite->initial_transfer_length = cpu_to_le16(min);
   1786
   1787	iwrite->xri_tag = cpu_to_le16(xri);
   1788	iwrite->context_tag = cpu_to_le16(rpi);
   1789
   1790	iwrite->timer = timeout;
    1791	/* WQE word 4 contains the write transfer length */
   1792	iwrite->class_pu_byte |= 2 << SLI4_IWR_WQE_PU_SHFT;
   1793	iwrite->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
   1794	iwrite->command = SLI4_WQE_FCP_IWRITE64;
   1795	iwrite->dif_ct_bs_byte |=
   1796			SLI4_GENERIC_CONTEXT_RPI << SLI4_IWR_WQE_CT_SHFT;
   1797	iwrite->dif_ct_bs_byte |= dif;
   1798	iwrite->dif_ct_bs_byte |= bs << SLI4_IWR_WQE_BS_SHFT;
   1799
   1800	iwrite->abort_tag = cpu_to_le32(xri);
   1801
   1802	iwrite->request_tag = cpu_to_le16(tag);
   1803	iwrite->len_loc1_byte |= SLI4_IWR_WQE_LEN_LOC_BIT1;
   1804	iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_LEN_LOC_BIT2;
   1805	iwrite->cmd_type_byte |= SLI4_CMD_FCP_IWRITE64_WQE;
   1806	iwrite->cq_id = cpu_to_le16(cq_id);
   1807
   1808	if (sli->params.perf_hint) {
   1809		bptr = &iwrite->first_data_bde;
   1810
   1811		bptr->bde_type_buflen =	cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   1812			 (le32_to_cpu(sge[first_data_sge].buffer_length) &
   1813			     SLI4_BDE_LEN_MASK));
   1814
   1815		bptr->u.data.low = sge[first_data_sge].buffer_address_low;
   1816		bptr->u.data.high = sge[first_data_sge].buffer_address_high;
   1817	}
   1818
    1819	return 0;
   1820}
   1821
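/**
 * sli_fcp_treceive64_wqe() - Write an FCP_TRECEIVE64 work queue entry.
 *
 * @sli: SLI context pointer.
 * @buf: Destination buffer for the WQE.
 * @sgl: DMA memory for the scatter gather list.
 * @first_data_sge: Index of the first data SGE (used with performance hints).
 * @cq_id: The id of the completion queue where the WQE response is sent.
 * @dif: T10 DIF operation, or 0 to disable.
 * @bs: T10 DIF block size, or 0 if DIF is disabled.
 * @params: FCP target parameters (XRI, RPI, tag, offset and flags).
 * Return: status -EIO/0.
 */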
   1822int
   1823sli_fcp_treceive64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
   1824		       u32 first_data_sge, u16 cq_id, u8 dif, u8 bs,
   1825		       struct sli_fcp_tgt_params *params)
   1826{
   1827	struct sli4_fcp_treceive64_wqe *trecv = buf;
   1828	struct sli4_fcp_128byte_wqe *trecv_128 = buf;
   1829	struct sli4_sge *sge = NULL;
   1830	struct sli4_bde *bptr;
   1831
   1832	memset(buf, 0, sli->wqe_size);
   1833
   1834	if (!sgl || !sgl->virt) {
   1835		efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
   1836			    sgl, sgl ? sgl->virt : NULL);
   1837		return -EIO;
   1838	}
   1839	sge = sgl->virt;
   1840	bptr = &trecv->bde;
   1841	if (sli->params.sgl_pre_registered) {
   1842		trecv->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_TRCV_WQE_XBL;
   1843
   1844		trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_DBDE;
   1845
   1846		bptr->bde_type_buflen =
   1847			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   1848				    (le32_to_cpu(sge[0].buffer_length)
   1849					& SLI4_BDE_LEN_MASK));
   1850
   1851		bptr->u.data.low  = sge[0].buffer_address_low;
   1852		bptr->u.data.high = sge[0].buffer_address_high;
   1853
   1854		trecv->payload_offset_length = sge[0].buffer_length;
   1855	} else {
   1856		trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_XBL;
   1857
   1858		/* if data is a single physical address, use a BDE */
   1859		if (!dif &&
   1860		    params->xmit_len <= le32_to_cpu(sge[2].buffer_length)) {
   1861			trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_DBDE;
   1862			bptr->bde_type_buflen =
   1863			      cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   1864					  (le32_to_cpu(sge[2].buffer_length)
   1865					  & SLI4_BDE_LEN_MASK));
   1866
   1867			bptr->u.data.low = sge[2].buffer_address_low;
   1868			bptr->u.data.high = sge[2].buffer_address_high;
   1869		} else {
   1870			bptr->bde_type_buflen =
   1871				cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
   1872				(sgl->size & SLI4_BDE_LEN_MASK));
   1873			bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys));
   1874			bptr->u.blp.high =
   1875				cpu_to_le32(upper_32_bits(sgl->phys));
   1876		}
   1877	}
   1878
   1879	trecv->relative_offset = cpu_to_le32(params->offset);
   1880
   1881	if (params->flags & SLI4_IO_CONTINUATION)
   1882		trecv->eat_xc_ccpe |= SLI4_TRCV_WQE_XC;
   1883
   1884	trecv->xri_tag = cpu_to_le16(params->xri);
   1885
   1886	trecv->context_tag = cpu_to_le16(params->rpi);
   1887
   1888	/* WQE uses relative offset */
   1889	trecv->class_ar_pu_byte |= 1 << SLI4_TRCV_WQE_PU_SHFT;
   1890
   1891	if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE)
   1892		trecv->class_ar_pu_byte |= SLI4_TRCV_WQE_AR;
   1893
   1894	trecv->command = SLI4_WQE_FCP_TRECEIVE64;
   1895	trecv->class_ar_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
   1896	trecv->dif_ct_bs_byte |=
   1897		SLI4_GENERIC_CONTEXT_RPI << SLI4_TRCV_WQE_CT_SHFT;
   1898	trecv->dif_ct_bs_byte |= bs << SLI4_TRCV_WQE_BS_SHFT;
   1899
   1900	trecv->remote_xid = cpu_to_le16(params->ox_id);
   1901
   1902	trecv->request_tag = cpu_to_le16(params->tag);
   1903
   1904	trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_IOD;
   1905
   1906	trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_LEN_LOC_BIT2;
   1907
   1908	trecv->cmd_type_byte |= SLI4_CMD_FCP_TRECEIVE64_WQE;
   1909
   1910	trecv->cq_id = cpu_to_le16(cq_id);
   1911
   1912	trecv->fcp_data_receive_length = cpu_to_le32(params->xmit_len);
   1913
   1914	if (sli->params.perf_hint) {
   1915		bptr = &trecv->first_data_bde;
   1916
   1917		bptr->bde_type_buflen =
   1918			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   1919			    (le32_to_cpu(sge[first_data_sge].buffer_length) &
   1920			     SLI4_BDE_LEN_MASK));
   1921		bptr->u.data.low = sge[first_data_sge].buffer_address_low;
   1922		bptr->u.data.high = sge[first_data_sge].buffer_address_high;
   1923	}
   1924
   1925	/* The upper 7 bits of csctl is the priority */
   1926	if (params->cs_ctl & SLI4_MASK_CCP) {
   1927		trecv->eat_xc_ccpe |= SLI4_TRCV_WQE_CCPE;
   1928		trecv->ccp = (params->cs_ctl & SLI4_MASK_CCP);
   1929	}
   1930
   1931	if (params->app_id && sli->wqe_size == SLI4_WQE_EXT_BYTES &&
   1932	    !(trecv->eat_xc_ccpe & SLI4_TRSP_WQE_EAT)) {
   1933		trecv->lloc1_appid |= SLI4_TRCV_WQE_APPID;
   1934		trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_WQES;
   1935		trecv_128->dw[31] = params->app_id;
   1936	}
   1937	return 0;
   1938}
   1939
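/**
 * sli_fcp_cont_treceive64_wqe() - Write an FCP_CONT_TRECEIVE64 WQE.
 *
 * Builds a regular FCP_TRECEIVE64 WQE, then overwrites the command code
 * and fills in the secondary XRI for sequence continuation.
 *
 * @sli: SLI context pointer.
 * @buf: Destination buffer for the WQE.
 * @sgl: DMA memory for the scatter gather list.
 * @first_data_sge: Index of the first data SGE.
 * @sec_xri: Secondary XRI for this exchange.
 * @cq_id: The id of the completion queue where the WQE response is sent.
 * @dif: T10 DIF operation, or 0 to disable.
 * @bs: T10 DIF block size, or 0 if DIF is disabled.
 * @params: FCP target parameters.
 * Return: status -EIO/0.
 */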
   1940int
   1941sli_fcp_cont_treceive64_wqe(struct sli4 *sli, void *buf,
   1942			    struct efc_dma *sgl, u32 first_data_sge,
   1943			    u16 sec_xri, u16 cq_id, u8 dif, u8 bs,
   1944			    struct sli_fcp_tgt_params *params)
   1945{
   1946	int rc;
   1947
   1948	rc = sli_fcp_treceive64_wqe(sli, buf, sgl, first_data_sge,
   1949				    cq_id, dif, bs, params);
   1950	if (!rc) {
   1951		struct sli4_fcp_treceive64_wqe *trecv = buf;
   1952
   1953		trecv->command = SLI4_WQE_FCP_CONT_TRECEIVE64;
   1954		trecv->dword5.sec_xri_tag = cpu_to_le16(sec_xri);
   1955	}
   1956	return rc;
   1957}
   1958
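/**
 * sli_fcp_trsp64_wqe() - Write an FCP_TRSP64 work queue entry.
 *
 * @sli4: SLI context pointer.
 * @buf: Destination buffer for the WQE.
 * @sgl: DMA memory for the response payload (not referenced when the
 *       auto good response flag is set).
 * @cq_id: The id of the completion queue where the WQE response is sent.
 * @port_owned: Nonzero if the XRI is port owned; forces a DBDE response.
 * @params: FCP target parameters.
 * Return: status 0.
 */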
   1959int
   1960sli_fcp_trsp64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
   1961		   u16 cq_id, u8 port_owned, struct sli_fcp_tgt_params *params)
   1962{
   1963	struct sli4_fcp_trsp64_wqe *trsp = buf;
   1964	struct sli4_fcp_128byte_wqe *trsp_128 = buf;
   1965
   1966	memset(buf, 0, sli4->wqe_size);
   1967
   1968	if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE) {
   1969		trsp->class_ag_byte |= SLI4_TRSP_WQE_AG;
   1970	} else {
   1971		struct sli4_sge	*sge = sgl->virt;
   1972		struct sli4_bde *bptr;
   1973
   1974		if (sli4->params.sgl_pre_registered || port_owned)
   1975			trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_DBDE;
   1976		else
   1977			trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_XBL;
   1978		bptr = &trsp->bde;
   1979
   1980		bptr->bde_type_buflen =
   1981			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   1982				     (le32_to_cpu(sge[0].buffer_length) &
   1983				      SLI4_BDE_LEN_MASK));
   1984		bptr->u.data.low  = sge[0].buffer_address_low;
   1985		bptr->u.data.high = sge[0].buffer_address_high;
   1986
   1987		trsp->fcp_response_length = cpu_to_le32(params->xmit_len);
   1988	}
   1989
   1990	if (params->flags & SLI4_IO_CONTINUATION)
   1991		trsp->eat_xc_ccpe |= SLI4_TRSP_WQE_XC;
   1992
   1993	trsp->xri_tag = cpu_to_le16(params->xri);
   1994	trsp->rpi = cpu_to_le16(params->rpi);
   1995
   1996	trsp->command = SLI4_WQE_FCP_TRSP64;
   1997	trsp->class_ag_byte |= SLI4_GENERIC_CLASS_CLASS_3;
   1998
   1999	trsp->remote_xid = cpu_to_le16(params->ox_id);
   2000	trsp->request_tag = cpu_to_le16(params->tag);
   2001	if (params->flags & SLI4_IO_DNRX)
   2002		trsp->ct_dnrx_byte |= SLI4_TRSP_WQE_DNRX;
   2003	else
   2004		trsp->ct_dnrx_byte &= ~SLI4_TRSP_WQE_DNRX;
   2005
   2006	trsp->lloc1_appid |= 0x1;
   2007	trsp->cq_id = cpu_to_le16(cq_id);
   2008	trsp->cmd_type_byte = SLI4_CMD_FCP_TRSP64_WQE;
   2009
   2010	/* The upper 7 bits of csctl is the priority */
   2011	if (params->cs_ctl & SLI4_MASK_CCP) {
   2012		trsp->eat_xc_ccpe |= SLI4_TRSP_WQE_CCPE;
   2013		trsp->ccp = (params->cs_ctl & SLI4_MASK_CCP);
   2014	}
   2015
   2016	if (params->app_id && sli4->wqe_size == SLI4_WQE_EXT_BYTES &&
   2017	    !(trsp->eat_xc_ccpe & SLI4_TRSP_WQE_EAT)) {
   2018		trsp->lloc1_appid |= SLI4_TRSP_WQE_APPID;
   2019		trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_WQES;
   2020		trsp_128->dw[31] = params->app_id;
   2021	}
   2022	return 0;
   2023}
   2024
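/**
 * sli_fcp_tsend64_wqe() - Write an FCP_TSEND64 work queue entry.
 *
 * @sli4: SLI context pointer.
 * @buf: Destination buffer for the WQE.
 * @sgl: DMA memory for the scatter gather list.
 * @first_data_sge: Index of the first data SGE (used with performance hints).
 * @cq_id: The id of the completion queue where the WQE response is sent.
 * @dif: T10 DIF operation, or 0 to disable.
 * @bs: T10 DIF block size, or 0 if DIF is disabled.
 * @params: FCP target parameters.
 * Return: status -EIO/0.
 */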
   2025int
   2026sli_fcp_tsend64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
   2027		    u32 first_data_sge, u16 cq_id, u8 dif, u8 bs,
   2028		    struct sli_fcp_tgt_params *params)
   2029{
   2030	struct sli4_fcp_tsend64_wqe *tsend = buf;
   2031	struct sli4_fcp_128byte_wqe *tsend_128 = buf;
   2032	struct sli4_sge *sge = NULL;
   2033	struct sli4_bde *bptr;
   2034
   2035	memset(buf, 0, sli4->wqe_size);
   2036
   2037	if (!sgl || !sgl->virt) {
   2038		efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n",
   2039			    sgl, sgl ? sgl->virt : NULL);
   2040		return -EIO;
   2041	}
   2042	sge = sgl->virt;
   2043
   2044	bptr = &tsend->bde;
   2045	if (sli4->params.sgl_pre_registered) {
   2046		tsend->ll_qd_xbl_hlm_iod_dbde &= ~SLI4_TSEND_WQE_XBL;
   2047
   2048		tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_DBDE;
   2049
   2050		bptr->bde_type_buflen =
   2051			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   2052				   (le32_to_cpu(sge[2].buffer_length) &
   2053				    SLI4_BDE_LEN_MASK));
   2054
    2055		/* TSEND64_WQE specifies the first two SGEs are skipped (3rd is
   2056		 * valid)
   2057		 */
   2058		bptr->u.data.low  = sge[2].buffer_address_low;
   2059		bptr->u.data.high = sge[2].buffer_address_high;
   2060	} else {
   2061		tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_XBL;
   2062
   2063		/* if data is a single physical address, use a BDE */
   2064		if (!dif &&
   2065		    params->xmit_len <= le32_to_cpu(sge[2].buffer_length)) {
   2066			tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_DBDE;
   2067
   2068			bptr->bde_type_buflen =
   2069			    cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   2070					(le32_to_cpu(sge[2].buffer_length) &
   2071					SLI4_BDE_LEN_MASK));
   2072			/*
    2073			 * TSEND64_WQE specifies the first two SGEs are skipped
   2074			 * (i.e. 3rd is valid)
   2075			 */
   2076			bptr->u.data.low =
   2077				sge[2].buffer_address_low;
   2078			bptr->u.data.high =
   2079				sge[2].buffer_address_high;
   2080		} else {
   2081			bptr->bde_type_buflen =
   2082				cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
   2083					    (sgl->size &
   2084					     SLI4_BDE_LEN_MASK));
   2085			bptr->u.blp.low =
   2086				cpu_to_le32(lower_32_bits(sgl->phys));
   2087			bptr->u.blp.high =
   2088				cpu_to_le32(upper_32_bits(sgl->phys));
   2089		}
   2090	}
   2091
   2092	tsend->relative_offset = cpu_to_le32(params->offset);
   2093
   2094	if (params->flags & SLI4_IO_CONTINUATION)
   2095		tsend->dw10byte2 |= SLI4_TSEND_XC;
   2096
   2097	tsend->xri_tag = cpu_to_le16(params->xri);
   2098
   2099	tsend->rpi = cpu_to_le16(params->rpi);
   2100	/* WQE uses relative offset */
   2101	tsend->class_pu_ar_byte |= 1 << SLI4_TSEND_WQE_PU_SHFT;
   2102
   2103	if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE)
   2104		tsend->class_pu_ar_byte |= SLI4_TSEND_WQE_AR;
   2105
   2106	tsend->command = SLI4_WQE_FCP_TSEND64;
   2107	tsend->class_pu_ar_byte |= SLI4_GENERIC_CLASS_CLASS_3;
   2108	tsend->ct_byte |= SLI4_GENERIC_CONTEXT_RPI << SLI4_TSEND_CT_SHFT;
   2109	tsend->ct_byte |= dif;
   2110	tsend->ct_byte |= bs << SLI4_TSEND_BS_SHFT;
   2111
   2112	tsend->remote_xid = cpu_to_le16(params->ox_id);
   2113
   2114	tsend->request_tag = cpu_to_le16(params->tag);
   2115
   2116	tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_LEN_LOC_BIT2;
   2117
   2118	tsend->cq_id = cpu_to_le16(cq_id);
   2119
   2120	tsend->cmd_type_byte |= SLI4_CMD_FCP_TSEND64_WQE;
   2121
   2122	tsend->fcp_data_transmit_length = cpu_to_le32(params->xmit_len);
   2123
   2124	if (sli4->params.perf_hint) {
   2125		bptr = &tsend->first_data_bde;
   2126		bptr->bde_type_buflen =
   2127			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   2128			    (le32_to_cpu(sge[first_data_sge].buffer_length) &
   2129			     SLI4_BDE_LEN_MASK));
   2130		bptr->u.data.low =
   2131			sge[first_data_sge].buffer_address_low;
   2132		bptr->u.data.high =
   2133			sge[first_data_sge].buffer_address_high;
   2134	}
   2135
   2136	/* The upper 7 bits of csctl is the priority */
   2137	if (params->cs_ctl & SLI4_MASK_CCP) {
   2138		tsend->dw10byte2 |= SLI4_TSEND_CCPE;
   2139		tsend->ccp = (params->cs_ctl & SLI4_MASK_CCP);
   2140	}
   2141
   2142	if (params->app_id && sli4->wqe_size == SLI4_WQE_EXT_BYTES &&
   2143	    !(tsend->dw10byte2 & SLI4_TSEND_EAT)) {
   2144		tsend->dw10byte0 |= SLI4_TSEND_APPID_VALID;
   2145		tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQES;
   2146		tsend_128->dw[31] = params->app_id;
   2147	}
   2148	return 0;
   2149}
   2150
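/**
 * sli_gen_request64_wqe() - Write a GEN_REQUEST64 work queue entry.
 *
 * @sli4: SLI context pointer.
 * @buf: Destination buffer for the WQE.
 * @sgl: DMA memory for the request and response buffers.
 * @params: CT parameters (XRI, RPI, payload lengths, timeout and the
 *          df_ctl/type/r_ctl frame header fields).
 * Return: status -EIO/0.
 */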
   2151int
   2152sli_gen_request64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
   2153		      struct sli_ct_params *params)
   2154{
   2155	struct sli4_gen_request64_wqe *gen = buf;
   2156	struct sli4_sge *sge = NULL;
   2157	struct sli4_bde *bptr;
   2158
   2159	memset(buf, 0, sli4->wqe_size);
   2160
   2161	if (!sgl || !sgl->virt) {
   2162		efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n",
   2163			    sgl, sgl ? sgl->virt : NULL);
   2164		return -EIO;
   2165	}
   2166	sge = sgl->virt;
   2167	bptr = &gen->bde;
   2168
   2169	if (sli4->params.sgl_pre_registered) {
   2170		gen->dw10flags1 &= ~SLI4_GEN_REQ64_WQE_XBL;
   2171
   2172		gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_DBDE;
   2173		bptr->bde_type_buflen =
   2174			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   2175				    (params->xmit_len & SLI4_BDE_LEN_MASK));
   2176
   2177		bptr->u.data.low  = sge[0].buffer_address_low;
   2178		bptr->u.data.high = sge[0].buffer_address_high;
   2179	} else {
   2180		gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_XBL;
   2181
   2182		bptr->bde_type_buflen =
   2183			cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
   2184				    ((2 * sizeof(struct sli4_sge)) &
   2185				     SLI4_BDE_LEN_MASK));
   2186
   2187		bptr->u.blp.low =
   2188			cpu_to_le32(lower_32_bits(sgl->phys));
   2189		bptr->u.blp.high =
   2190			cpu_to_le32(upper_32_bits(sgl->phys));
   2191	}
   2192
   2193	gen->request_payload_length = cpu_to_le32(params->xmit_len);
   2194	gen->max_response_payload_length = cpu_to_le32(params->rsp_len);
   2195
   2196	gen->df_ctl = params->df_ctl;
   2197	gen->type = params->type;
   2198	gen->r_ctl = params->r_ctl;
   2199
   2200	gen->xri_tag = cpu_to_le16(params->xri);
   2201
   2202	gen->ct_byte = SLI4_GENERIC_CONTEXT_RPI << SLI4_GEN_REQ64_CT_SHFT;
   2203	gen->context_tag = cpu_to_le16(params->rpi);
   2204
   2205	gen->class_byte = SLI4_GENERIC_CLASS_CLASS_3;
   2206
   2207	gen->command = SLI4_WQE_GEN_REQUEST64;
   2208
   2209	gen->timer = params->timeout;
   2210
   2211	gen->request_tag = cpu_to_le16(params->tag);
   2212
   2213	gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_IOD;
   2214
   2215	gen->dw10flags0 |= SLI4_GEN_REQ64_WQE_QOSD;
   2216
   2217	gen->cmd_type_byte = SLI4_CMD_GEN_REQUEST64_WQE;
   2218
   2219	gen->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
   2220
   2221	return 0;
   2222}
   2223
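/**
 * sli_send_frame_wqe() - Write a SEND_FRAME work queue entry.
 *
 * @sli: SLI context pointer.
 * @buf: Destination buffer for the WQE.
 * @sof: Start-of-frame delimiter value.
 * @eof: End-of-frame delimiter value.
 * @hdr: Pointer to the six-word FC frame header.
 * @payload: DMA memory for the frame payload.
 * @req_len: Frame length in bytes.
 * @timeout: Time, in seconds, before the IO times out.
 * @xri: XRI for this exchange.
 * @req_tag: IO tag value.
 * Return: status 0.
 */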
   2224int
   2225sli_send_frame_wqe(struct sli4 *sli, void *buf, u8 sof, u8 eof, u32 *hdr,
   2226		   struct efc_dma *payload, u32 req_len, u8 timeout, u16 xri,
   2227		   u16 req_tag)
   2228{
   2229	struct sli4_send_frame_wqe *sf = buf;
   2230
   2231	memset(buf, 0, sli->wqe_size);
   2232
   2233	sf->dw10flags1 |= SLI4_SF_WQE_DBDE;
   2234	sf->bde.bde_type_buflen = cpu_to_le32(req_len &
   2235					      SLI4_BDE_LEN_MASK);
   2236	sf->bde.u.data.low = cpu_to_le32(lower_32_bits(payload->phys));
   2237	sf->bde.u.data.high = cpu_to_le32(upper_32_bits(payload->phys));
   2238
   2239	/* Copy FC header */
   2240	sf->fc_header_0_1[0] = cpu_to_le32(hdr[0]);
   2241	sf->fc_header_0_1[1] = cpu_to_le32(hdr[1]);
   2242	sf->fc_header_2_5[0] = cpu_to_le32(hdr[2]);
   2243	sf->fc_header_2_5[1] = cpu_to_le32(hdr[3]);
   2244	sf->fc_header_2_5[2] = cpu_to_le32(hdr[4]);
   2245	sf->fc_header_2_5[3] = cpu_to_le32(hdr[5]);
   2246
   2247	sf->frame_length = cpu_to_le32(req_len);
   2248
   2249	sf->xri_tag = cpu_to_le16(xri);
   2250	sf->dw7flags0 &= ~SLI4_SF_PU;
   2251	sf->context_tag = 0;
   2252
   2253	sf->ct_byte &= ~SLI4_SF_CT;
   2254	sf->command = SLI4_WQE_SEND_FRAME;
   2255	sf->dw7flags0 |= SLI4_GENERIC_CLASS_CLASS_3;
   2256	sf->timer = timeout;
   2257
   2258	sf->request_tag = cpu_to_le16(req_tag);
   2259	sf->eof = eof;
   2260	sf->sof = sof;
   2261
   2262	sf->dw10flags1 &= ~SLI4_SF_QOSD;
   2263	sf->dw10flags0 |= SLI4_SF_LEN_LOC_BIT1;
   2264	sf->dw10flags2 &= ~SLI4_SF_XC;
   2265
   2266	sf->dw10flags1 |= SLI4_SF_XBL;
   2267
   2268	sf->cmd_type_byte |= SLI4_CMD_SEND_FRAME_WQE;
   2269	sf->cq_id = cpu_to_le16(0xffff);
   2270
   2271	return 0;
   2272}
   2273
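/**
 * sli_xmit_bls_rsp64_wqe() - Write an XMIT_BLS_RSP work queue entry.
 *
 * @sli: SLI context pointer.
 * @buf: Destination buffer for the WQE.
 * @payload: BLS accept or reject payload.
 * @params: BLS parameters (XRI, tag, RPI or VPI plus S_ID/D_ID).
 * Return: status -EIO/0.
 */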
   2274int
   2275sli_xmit_bls_rsp64_wqe(struct sli4 *sli, void *buf,
   2276		       struct sli_bls_payload *payload,
   2277		       struct sli_bls_params *params)
   2278{
   2279	struct sli4_xmit_bls_rsp_wqe *bls = buf;
   2280	u32 dw_ridflags = 0;
   2281
   2282	/*
   2283	 * Callers can either specify RPI or S_ID, but not both
   2284	 */
   2285	if (params->rpi_registered && params->s_id != U32_MAX) {
   2286		efc_log_info(sli, "S_ID specified for attached remote node %d\n",
   2287			     params->rpi);
   2288		return -EIO;
   2289	}
   2290
   2291	memset(buf, 0, sli->wqe_size);
   2292
   2293	if (payload->type == SLI4_SLI_BLS_ACC) {
   2294		bls->payload_word0 =
   2295			cpu_to_le32((payload->u.acc.seq_id_last << 16) |
   2296				    (payload->u.acc.seq_id_validity << 24));
   2297		bls->high_seq_cnt = payload->u.acc.high_seq_cnt;
   2298		bls->low_seq_cnt = payload->u.acc.low_seq_cnt;
   2299	} else if (payload->type == SLI4_SLI_BLS_RJT) {
   2300		bls->payload_word0 =
   2301				cpu_to_le32(*((u32 *)&payload->u.rjt));
   2302		dw_ridflags |= SLI4_BLS_RSP_WQE_AR;
   2303	} else {
   2304		efc_log_info(sli, "bad BLS type %#x\n", payload->type);
   2305		return -EIO;
   2306	}
   2307
   2308	bls->ox_id = payload->ox_id;
   2309	bls->rx_id = payload->rx_id;
   2310
   2311	if (params->rpi_registered) {
   2312		bls->dw8flags0 |=
   2313		SLI4_GENERIC_CONTEXT_RPI << SLI4_BLS_RSP_WQE_CT_SHFT;
   2314		bls->context_tag = cpu_to_le16(params->rpi);
   2315	} else {
   2316		bls->dw8flags0 |=
   2317		SLI4_GENERIC_CONTEXT_VPI << SLI4_BLS_RSP_WQE_CT_SHFT;
   2318		bls->context_tag = cpu_to_le16(params->vpi);
   2319
    2320		bls->local_n_port_id_dword |=
    2321			cpu_to_le32(params->s_id & 0x00ffffff);
   2326
   2327		dw_ridflags = (dw_ridflags & ~SLI4_BLS_RSP_RID) |
   2328			       (params->d_id & SLI4_BLS_RSP_RID);
   2329
   2330		bls->temporary_rpi = cpu_to_le16(params->rpi);
   2331	}
   2332
   2333	bls->xri_tag = cpu_to_le16(params->xri);
   2334
   2335	bls->dw8flags1 |= SLI4_GENERIC_CLASS_CLASS_3;
   2336
   2337	bls->command = SLI4_WQE_XMIT_BLS_RSP;
   2338
   2339	bls->request_tag = cpu_to_le16(params->tag);
   2340
   2341	bls->dw11flags1 |= SLI4_BLS_RSP_WQE_QOSD;
   2342
   2343	bls->remote_id_dword = cpu_to_le32(dw_ridflags);
   2344	bls->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
   2345
   2346	bls->dw12flags0 |= SLI4_CMD_XMIT_BLS_RSP64_WQE;
   2347
   2348	return 0;
   2349}
   2350
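/**
 * sli_xmit_els_rsp64_wqe() - Write an XMIT_ELS_RSP64 work queue entry.
 *
 * @sli: SLI context pointer.
 * @buf: Destination buffer for the WQE.
 * @rsp: DMA memory for the ELS response payload.
 * @params: ELS parameters (XRI, tag, ox_id, RPI or VPI plus S_ID/D_ID).
 * Return: status 0.
 */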
   2351int
   2352sli_xmit_els_rsp64_wqe(struct sli4 *sli, void *buf, struct efc_dma *rsp,
   2353		       struct sli_els_params *params)
   2354{
   2355	struct sli4_xmit_els_rsp64_wqe *els = buf;
   2356
   2357	memset(buf, 0, sli->wqe_size);
   2358
   2359	if (sli->params.sgl_pre_registered)
   2360		els->flags2 |= SLI4_ELS_DBDE;
   2361	else
   2362		els->flags2 |= SLI4_ELS_XBL;
   2363
   2364	els->els_response_payload.bde_type_buflen =
   2365		cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   2366			    (params->rsp_len & SLI4_BDE_LEN_MASK));
   2367	els->els_response_payload.u.data.low =
   2368		cpu_to_le32(lower_32_bits(rsp->phys));
   2369	els->els_response_payload.u.data.high =
   2370		cpu_to_le32(upper_32_bits(rsp->phys));
   2371
   2372	els->els_response_payload_length = cpu_to_le32(params->rsp_len);
   2373
   2374	els->xri_tag = cpu_to_le16(params->xri);
   2375
   2376	els->class_byte |= SLI4_GENERIC_CLASS_CLASS_3;
   2377
   2378	els->command = SLI4_WQE_ELS_RSP64;
   2379
   2380	els->request_tag = cpu_to_le16(params->tag);
   2381
   2382	els->ox_id = cpu_to_le16(params->ox_id);
   2383
   2384	els->flags2 |= SLI4_ELS_QOSD;
   2385
   2386	els->cmd_type_wqec = SLI4_ELS_REQUEST64_CMD_GEN;
   2387
   2388	els->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
   2389
   2390	if (params->rpi_registered) {
   2391		els->ct_byte |=
   2392			SLI4_GENERIC_CONTEXT_RPI << SLI4_ELS_CT_OFFSET;
   2393		els->context_tag = cpu_to_le16(params->rpi);
   2394		return 0;
   2395	}
   2396
   2397	els->ct_byte |= SLI4_GENERIC_CONTEXT_VPI << SLI4_ELS_CT_OFFSET;
   2398	els->context_tag = cpu_to_le16(params->vpi);
   2399	els->rid_dw = cpu_to_le32(params->d_id & SLI4_ELS_RID);
   2400	els->temporary_rpi = cpu_to_le16(params->rpi);
   2401	if (params->s_id != U32_MAX) {
   2402		els->sid_dw |=
   2403		      cpu_to_le32(SLI4_ELS_SP | (params->s_id & SLI4_ELS_SID));
   2404	}
   2405
   2406	return 0;
   2407}
   2408
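/**
 * sli_xmit_sequence64_wqe() - Write an XMIT_SEQUENCE64 work queue entry.
 *
 * @sli4: SLI context pointer.
 * @buf: Destination buffer for the WQE.
 * @payload: DMA memory for the sequence payload.
 * @params: CT parameters (XRI, RPI, D_ID, timeout and the
 *          df_ctl/type/r_ctl frame header fields).
 * Return: status -EIO/0.
 */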
   2409int
   2410sli_xmit_sequence64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *payload,
   2411			struct sli_ct_params *params)
   2412{
   2413	struct sli4_xmit_sequence64_wqe *xmit = buf;
   2414
   2415	memset(buf, 0, sli4->wqe_size);
   2416
   2417	if (!payload || !payload->virt) {
   2418		efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n",
   2419			    payload, payload ? payload->virt : NULL);
   2420		return -EIO;
   2421	}
   2422
   2423	if (sli4->params.sgl_pre_registered)
   2424		xmit->dw10w0 |= cpu_to_le16(SLI4_SEQ_WQE_DBDE);
   2425	else
   2426		xmit->dw10w0 |= cpu_to_le16(SLI4_SEQ_WQE_XBL);
   2427
   2428	xmit->bde.bde_type_buflen =
   2429		cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   2430			(params->rsp_len & SLI4_BDE_LEN_MASK));
   2431	xmit->bde.u.data.low  =
   2432			cpu_to_le32(lower_32_bits(payload->phys));
   2433	xmit->bde.u.data.high =
   2434			cpu_to_le32(upper_32_bits(payload->phys));
   2435	xmit->sequence_payload_len = cpu_to_le32(params->rsp_len);
   2436
   2437	xmit->remote_n_port_id_dword |= cpu_to_le32(params->d_id & 0x00ffffff);
   2438
   2439	xmit->relative_offset = 0;
   2440
   2441	/* sequence initiative - this matches what is seen from
   2442	 * FC switches in response to FCGS commands
   2443	 */
    2444	xmit->dw5flags0 &= ~SLI4_SEQ_WQE_SI;
    2445	xmit->dw5flags0 &= ~SLI4_SEQ_WQE_FT;	/* force transmit */
    2446	xmit->dw5flags0 &= ~SLI4_SEQ_WQE_XO;	/* exchange responder */
    2447	xmit->dw5flags0 |= SLI4_SEQ_WQE_LS;	/* last in sequence */
   2448	xmit->df_ctl = params->df_ctl;
   2449	xmit->type = params->type;
   2450	xmit->r_ctl = params->r_ctl;
   2451
   2452	xmit->xri_tag = cpu_to_le16(params->xri);
   2453	xmit->context_tag = cpu_to_le16(params->rpi);
   2454
   2455	xmit->dw7flags0 &= ~SLI4_SEQ_WQE_DIF;
   2456	xmit->dw7flags0 |=
   2457		SLI4_GENERIC_CONTEXT_RPI << SLI4_SEQ_WQE_CT_SHIFT;
   2458	xmit->dw7flags0 &= ~SLI4_SEQ_WQE_BS;
   2459
   2460	xmit->command = SLI4_WQE_XMIT_SEQUENCE64;
   2461	xmit->dw7flags1 |= SLI4_GENERIC_CLASS_CLASS_3;
   2462	xmit->dw7flags1 &= ~SLI4_SEQ_WQE_PU;
   2463	xmit->timer = params->timeout;
   2464
   2465	xmit->abort_tag = 0;
   2466	xmit->request_tag = cpu_to_le16(params->tag);
   2467	xmit->remote_xid = cpu_to_le16(params->ox_id);
   2468
   2469	xmit->dw10w0 |=
   2470	cpu_to_le16(SLI4_ELS_REQUEST64_DIR_READ << SLI4_SEQ_WQE_IOD_SHIFT);
   2471
   2472	xmit->cmd_type_wqec_byte |= SLI4_CMD_XMIT_SEQUENCE64_WQE;
   2473
   2474	xmit->dw10w0 |= cpu_to_le16(2 << SLI4_SEQ_WQE_LEN_LOC_SHIFT);
   2475
   2476	xmit->cq_id = cpu_to_le16(0xFFFF);
   2477
   2478	return 0;
   2479}
   2480
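/**
 * sli_requeue_xri_wqe() - Write a REQUEUE_XRI work queue entry.
 *
 * @sli4: SLI context pointer.
 * @buf: Destination buffer for the WQE.
 * @xri: XRI to requeue.
 * @tag: IO tag value.
 * @cq_id: The id of the completion queue where the WQE response is sent.
 * Return: status 0.
 */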
   2481int
   2482sli_requeue_xri_wqe(struct sli4 *sli4, void *buf, u16 xri, u16 tag, u16 cq_id)
   2483{
   2484	struct sli4_requeue_xri_wqe *requeue = buf;
   2485
   2486	memset(buf, 0, sli4->wqe_size);
   2487
   2488	requeue->command = SLI4_WQE_REQUEUE_XRI;
   2489	requeue->xri_tag = cpu_to_le16(xri);
   2490	requeue->request_tag = cpu_to_le16(tag);
   2491	requeue->flags2 |= cpu_to_le16(SLI4_REQU_XRI_WQE_XC);
   2492	requeue->flags1 |= cpu_to_le16(SLI4_REQU_XRI_WQE_QOSD);
   2493	requeue->cq_id = cpu_to_le16(cq_id);
   2494	requeue->cmd_type_wqec_byte = SLI4_CMD_REQUEUE_XRI_WQE;
   2495	return 0;
   2496}
   2497
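/**
 * sli_fc_process_link_attention() - Process an FC link attention ACQE.
 *
 * Decodes the link state, event type and topology from the ACQE and
 * delivers a struct sli4_link_event to the registered link callback.
 *
 * @sli4: SLI context pointer.
 * @acqe: Pointer to the asynchronous completion queue entry.
 * Return: status -EIO/0.
 */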
   2498int
   2499sli_fc_process_link_attention(struct sli4 *sli4, void *acqe)
   2500{
   2501	struct sli4_link_attention *link_attn = acqe;
   2502	struct sli4_link_event event = { 0 };
   2503
   2504	efc_log_info(sli4, "link=%d attn_type=%#x top=%#x speed=%#x pfault=%#x\n",
   2505		     link_attn->link_number, link_attn->attn_type,
   2506		     link_attn->topology, link_attn->port_speed,
   2507		     link_attn->port_fault);
   2508	efc_log_info(sli4, "shared_lnk_status=%#x logl_lnk_speed=%#x evttag=%#x\n",
   2509		     link_attn->shared_link_status,
   2510		     le16_to_cpu(link_attn->logical_link_speed),
   2511		     le32_to_cpu(link_attn->event_tag));
   2512
   2513	if (!sli4->link)
   2514		return -EIO;
   2515
   2516	event.medium   = SLI4_LINK_MEDIUM_FC;
   2517
   2518	switch (link_attn->attn_type) {
   2519	case SLI4_LNK_ATTN_TYPE_LINK_UP:
   2520		event.status = SLI4_LINK_STATUS_UP;
   2521		break;
   2522	case SLI4_LNK_ATTN_TYPE_LINK_DOWN:
   2523		event.status = SLI4_LINK_STATUS_DOWN;
   2524		break;
   2525	case SLI4_LNK_ATTN_TYPE_NO_HARD_ALPA:
   2526		efc_log_info(sli4, "attn_type: no hard alpa\n");
   2527		event.status = SLI4_LINK_STATUS_NO_ALPA;
   2528		break;
   2529	default:
   2530		efc_log_info(sli4, "attn_type: unknown\n");
   2531		break;
   2532	}
   2533
   2534	switch (link_attn->event_type) {
   2535	case SLI4_EVENT_LINK_ATTENTION:
   2536		break;
   2537	case SLI4_EVENT_SHARED_LINK_ATTENTION:
   2538		efc_log_info(sli4, "event_type: FC shared link event\n");
   2539		break;
   2540	default:
   2541		efc_log_info(sli4, "event_type: unknown\n");
   2542		break;
   2543	}
   2544
   2545	switch (link_attn->topology) {
   2546	case SLI4_LNK_ATTN_P2P:
   2547		event.topology = SLI4_LINK_TOPO_NON_FC_AL;
   2548		break;
   2549	case SLI4_LNK_ATTN_FC_AL:
   2550		event.topology = SLI4_LINK_TOPO_FC_AL;
   2551		break;
   2552	case SLI4_LNK_ATTN_INTERNAL_LOOPBACK:
   2553		efc_log_info(sli4, "topology Internal loopback\n");
   2554		event.topology = SLI4_LINK_TOPO_LOOPBACK_INTERNAL;
   2555		break;
   2556	case SLI4_LNK_ATTN_SERDES_LOOPBACK:
   2557		efc_log_info(sli4, "topology serdes loopback\n");
   2558		event.topology = SLI4_LINK_TOPO_LOOPBACK_EXTERNAL;
   2559		break;
   2560	default:
   2561		efc_log_info(sli4, "topology: unknown\n");
   2562		break;
   2563	}
   2564
   2565	event.speed = link_attn->port_speed * 1000;
   2566
   2567	sli4->link(sli4->link_arg, (void *)&event);
   2568
   2569	return 0;
   2570}
   2571
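/**
 * sli_fc_cqe_parse() - Parse an FC completion queue entry.
 *
 * @sli4: SLI context pointer.
 * @cq: Completion queue the entry was read from.
 * @cqe: Pointer to the CQE.
 * @etype: Destination for the decoded queue entry type.
 * @r_id: Destination for the resource id (request tag, RQ id or XRI).
 * Return: CQE status, or -EINVAL for unhandled completion codes.
 */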
   2572int
   2573sli_fc_cqe_parse(struct sli4 *sli4, struct sli4_queue *cq,
   2574		 u8 *cqe, enum sli4_qentry *etype, u16 *r_id)
   2575{
   2576	u8 code = cqe[SLI4_CQE_CODE_OFFSET];
   2577	int rc;
   2578
   2579	switch (code) {
   2580	case SLI4_CQE_CODE_WORK_REQUEST_COMPLETION:
   2581	{
   2582		struct sli4_fc_wcqe *wcqe = (void *)cqe;
   2583
   2584		*etype = SLI4_QENTRY_WQ;
   2585		*r_id = le16_to_cpu(wcqe->request_tag);
   2586		rc = wcqe->status;
   2587
   2588		/* Flag errors except for FCP_RSP_FAILURE */
   2589		if (rc && rc != SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE) {
   2590			efc_log_info(sli4, "WCQE: status=%#x hw_status=%#x tag=%#x\n",
   2591				     wcqe->status, wcqe->hw_status,
   2592				     le16_to_cpu(wcqe->request_tag));
   2593			efc_log_info(sli4, "w1=%#x w2=%#x xb=%d\n",
   2594				     le32_to_cpu(wcqe->wqe_specific_1),
   2595				     le32_to_cpu(wcqe->wqe_specific_2),
   2596				     (wcqe->flags & SLI4_WCQE_XB));
   2597			efc_log_info(sli4, "      %08X %08X %08X %08X\n",
   2598				     ((u32 *)cqe)[0], ((u32 *)cqe)[1],
   2599				     ((u32 *)cqe)[2], ((u32 *)cqe)[3]);
   2600		}
   2601
   2602		break;
   2603	}
   2604	case SLI4_CQE_CODE_RQ_ASYNC:
   2605	{
   2606		struct sli4_fc_async_rcqe *rcqe = (void *)cqe;
   2607
   2608		*etype = SLI4_QENTRY_RQ;
   2609		*r_id = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID;
   2610		rc = rcqe->status;
   2611		break;
   2612	}
   2613	case SLI4_CQE_CODE_RQ_ASYNC_V1:
   2614	{
   2615		struct sli4_fc_async_rcqe_v1 *rcqe = (void *)cqe;
   2616
   2617		*etype = SLI4_QENTRY_RQ;
   2618		*r_id = le16_to_cpu(rcqe->rq_id);
   2619		rc = rcqe->status;
   2620		break;
   2621	}
   2622	case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD:
   2623	{
   2624		struct sli4_fc_optimized_write_cmd_cqe *optcqe = (void *)cqe;
   2625
   2626		*etype = SLI4_QENTRY_OPT_WRITE_CMD;
   2627		*r_id = le16_to_cpu(optcqe->rq_id);
   2628		rc = optcqe->status;
   2629		break;
   2630	}
   2631	case SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA:
   2632	{
   2633		struct sli4_fc_optimized_write_data_cqe *dcqe = (void *)cqe;
   2634
   2635		*etype = SLI4_QENTRY_OPT_WRITE_DATA;
   2636		*r_id = le16_to_cpu(dcqe->xri);
   2637		rc = dcqe->status;
   2638
   2639		/* Flag errors */
   2640		if (rc != SLI4_FC_WCQE_STATUS_SUCCESS) {
   2641			efc_log_info(sli4, "Optimized DATA CQE: status=%#x\n",
   2642				     dcqe->status);
   2643			efc_log_info(sli4, "hstat=%#x xri=%#x dpl=%#x w3=%#x xb=%d\n",
   2644				     dcqe->hw_status, le16_to_cpu(dcqe->xri),
   2645				     le32_to_cpu(dcqe->total_data_placed),
   2646				     ((u32 *)cqe)[3],
   2647				     (dcqe->flags & SLI4_OCQE_XB));
   2648		}
   2649		break;
   2650	}
   2651	case SLI4_CQE_CODE_RQ_COALESCING:
   2652	{
   2653		struct sli4_fc_coalescing_rcqe *rcqe = (void *)cqe;
   2654
   2655		*etype = SLI4_QENTRY_RQ;
   2656		*r_id = le16_to_cpu(rcqe->rq_id);
   2657		rc = rcqe->status;
   2658		break;
   2659	}
   2660	case SLI4_CQE_CODE_XRI_ABORTED:
   2661	{
   2662		struct sli4_fc_xri_aborted_cqe *xa = (void *)cqe;
   2663
   2664		*etype = SLI4_QENTRY_XABT;
   2665		*r_id = le16_to_cpu(xa->xri);
   2666		rc = 0;
   2667		break;
   2668	}
   2669	case SLI4_CQE_CODE_RELEASE_WQE:
   2670	{
   2671		struct sli4_fc_wqec *wqec = (void *)cqe;
   2672
   2673		*etype = SLI4_QENTRY_WQ_RELEASE;
   2674		*r_id = le16_to_cpu(wqec->wq_id);
   2675		rc = 0;
   2676		break;
   2677	}
   2678	default:
   2679		efc_log_info(sli4, "CQE completion code %d not handled\n",
   2680			     code);
   2681		*etype = SLI4_QENTRY_MAX;
   2682		*r_id = U16_MAX;
   2683		rc = -EINVAL;
   2684	}
   2685
   2686	return rc;
   2687}
   2688
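/**
 * sli_fc_response_length() - Return the response length from a WCQE.
 *
 * @sli4: SLI context pointer.
 * @cqe: Pointer to the work completion queue entry.
 * Return: WQE-specific word 1, the response length in bytes.
 */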
   2689u32
   2690sli_fc_response_length(struct sli4 *sli4, u8 *cqe)
   2691{
   2692	struct sli4_fc_wcqe *wcqe = (void *)cqe;
   2693
   2694	return le32_to_cpu(wcqe->wqe_specific_1);
   2695}
   2696
   2697u32
   2698sli_fc_io_length(struct sli4 *sli4, u8 *cqe)
   2699{
   2700	struct sli4_fc_wcqe *wcqe = (void *)cqe;
   2701
   2702	return le32_to_cpu(wcqe->wqe_specific_1);
   2703}
   2704
   2705int
   2706sli_fc_els_did(struct sli4 *sli4, u8 *cqe, u32 *d_id)
   2707{
   2708	struct sli4_fc_wcqe *wcqe = (void *)cqe;
   2709
   2710	*d_id = 0;
   2711
   2712	if (wcqe->status)
   2713		return -EIO;
   2714	*d_id = le32_to_cpu(wcqe->wqe_specific_2) & 0x00ffffff;
   2715	return 0;
   2716}
   2717
   2718u32
   2719sli_fc_ext_status(struct sli4 *sli4, u8 *cqe)
   2720{
   2721	struct sli4_fc_wcqe *wcqe = (void *)cqe;
   2722	u32	mask;
   2723
   2724	switch (wcqe->status) {
   2725	case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
   2726		mask = U32_MAX;
   2727		break;
   2728	case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
   2729	case SLI4_FC_WCQE_STATUS_CMD_REJECT:
   2730		mask = 0xff;
   2731		break;
   2732	case SLI4_FC_WCQE_STATUS_NPORT_RJT:
   2733	case SLI4_FC_WCQE_STATUS_FABRIC_RJT:
   2734	case SLI4_FC_WCQE_STATUS_NPORT_BSY:
   2735	case SLI4_FC_WCQE_STATUS_FABRIC_BSY:
   2736	case SLI4_FC_WCQE_STATUS_LS_RJT:
   2737		mask = U32_MAX;
   2738		break;
   2739	case SLI4_FC_WCQE_STATUS_DI_ERROR:
   2740		mask = U32_MAX;
   2741		break;
   2742	default:
   2743		mask = 0;
   2744	}
   2745
   2746	return le32_to_cpu(wcqe->wqe_specific_2) & mask;
   2747}
   2748
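/**
 * sli_fc_rqe_rqid_and_index() - Retrieve the RQ id and index from an RQE.
 *
 * @sli4: SLI context pointer.
 * @cqe: Pointer to the receive queue completion entry.
 * @rq_id: Destination for the receive queue id.
 * @index: Destination for the RQ element index; U32_MAX on failure.
 * Return: 0 on success, else the RQE status or -EIO.
 */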
   2749int
   2750sli_fc_rqe_rqid_and_index(struct sli4 *sli4, u8 *cqe, u16 *rq_id, u32 *index)
   2751{
   2752	int rc = -EIO;
   2753	u8 code = 0;
   2754	u16 rq_element_index;
   2755
   2756	*rq_id = 0;
   2757	*index = U32_MAX;
   2758
   2759	code = cqe[SLI4_CQE_CODE_OFFSET];
   2760
   2761	/* Retrieve the RQ index from the completion */
   2762	if (code == SLI4_CQE_CODE_RQ_ASYNC) {
   2763		struct sli4_fc_async_rcqe *rcqe = (void *)cqe;
   2764
   2765		*rq_id = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID;
   2766		rq_element_index =
   2767		le16_to_cpu(rcqe->rq_elmt_indx_word) & SLI4_RACQE_RQ_EL_INDX;
   2768		*index = rq_element_index;
   2769		if (rcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
   2770			rc = 0;
   2771		} else {
   2772			rc = rcqe->status;
   2773			efc_log_info(sli4, "status=%02x (%s) rq_id=%d\n",
   2774				     rcqe->status,
   2775				     sli_fc_get_status_string(rcqe->status),
   2776				     le16_to_cpu(rcqe->fcfi_rq_id_word) &
   2777				     SLI4_RACQE_RQ_ID);
   2778
   2779			efc_log_info(sli4, "pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
   2780				     le16_to_cpu(rcqe->data_placement_length),
   2781				     rcqe->sof_byte, rcqe->eof_byte,
   2782				     rcqe->hdpl_byte & SLI4_RACQE_HDPL);
   2783		}
   2784	} else if (code == SLI4_CQE_CODE_RQ_ASYNC_V1) {
   2785		struct sli4_fc_async_rcqe_v1 *rcqe_v1 = (void *)cqe;
   2786
   2787		*rq_id = le16_to_cpu(rcqe_v1->rq_id);
   2788		rq_element_index =
   2789			(le16_to_cpu(rcqe_v1->rq_elmt_indx_word) &
   2790			 SLI4_RACQE_RQ_EL_INDX);
   2791		*index = rq_element_index;
   2792		if (rcqe_v1->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
   2793			rc = 0;
   2794		} else {
   2795			rc = rcqe_v1->status;
   2796			efc_log_info(sli4, "status=%02x (%s) rq_id=%d, index=%x\n",
   2797				     rcqe_v1->status,
   2798				     sli_fc_get_status_string(rcqe_v1->status),
   2799				     le16_to_cpu(rcqe_v1->rq_id), rq_element_index);
   2800
   2801			efc_log_info(sli4, "pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
   2802				     le16_to_cpu(rcqe_v1->data_placement_length),
    2803				     rcqe_v1->sof_byte, rcqe_v1->eof_byte,
    2804				     rcqe_v1->hdpl_byte & SLI4_RACQE_HDPL);
   2805		}
   2806	} else if (code == SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD) {
   2807		struct sli4_fc_optimized_write_cmd_cqe *optcqe = (void *)cqe;
   2808
   2809		*rq_id = le16_to_cpu(optcqe->rq_id);
   2810		*index = le16_to_cpu(optcqe->w1) & SLI4_OCQE_RQ_EL_INDX;
   2811		if (optcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
   2812			rc = 0;
   2813		} else {
   2814			rc = optcqe->status;
   2815			efc_log_info(sli4, "stat=%02x (%s) rqid=%d, idx=%x pdpl=%x\n",
   2816				     optcqe->status,
   2817				     sli_fc_get_status_string(optcqe->status),
   2818				     le16_to_cpu(optcqe->rq_id), *index,
   2819				     le16_to_cpu(optcqe->data_placement_length));
   2820
   2821			efc_log_info(sli4, "hdpl=%x oox=%d agxr=%d xri=0x%x rpi=%x\n",
   2822				     (optcqe->hdpl_vld & SLI4_OCQE_HDPL),
   2823				     (optcqe->flags1 & SLI4_OCQE_OOX),
   2824				     (optcqe->flags1 & SLI4_OCQE_AGXR),
   2825				     optcqe->xri, le16_to_cpu(optcqe->rpi));
   2826		}
   2827	} else if (code == SLI4_CQE_CODE_RQ_COALESCING) {
   2828		struct sli4_fc_coalescing_rcqe  *rcqe = (void *)cqe;
   2829
   2830		rq_element_index = (le16_to_cpu(rcqe->rq_elmt_indx_word) &
   2831				    SLI4_RCQE_RQ_EL_INDX);
   2832
   2833		*rq_id = le16_to_cpu(rcqe->rq_id);
   2834		if (rcqe->status == SLI4_FC_COALESCE_RQ_SUCCESS) {
   2835			*index = rq_element_index;
   2836			rc = 0;
   2837		} else {
   2838			*index = U32_MAX;
   2839			rc = rcqe->status;
   2840
   2841			efc_log_info(sli4, "stat=%02x (%s) rq_id=%d, idx=%x\n",
   2842				     rcqe->status,
   2843				     sli_fc_get_status_string(rcqe->status),
   2844				     le16_to_cpu(rcqe->rq_id), rq_element_index);
   2845			efc_log_info(sli4, "rq_id=%#x sdpl=%x\n",
   2846				     le16_to_cpu(rcqe->rq_id),
   2847				     le16_to_cpu(rcqe->seq_placement_length));
   2848		}
   2849	} else {
   2850		struct sli4_fc_async_rcqe *rcqe = (void *)cqe;
   2851
   2852		*index = U32_MAX;
   2853		rc = rcqe->status;
   2854
   2855		efc_log_info(sli4, "status=%02x rq_id=%d, index=%x pdpl=%x\n",
   2856			     rcqe->status,
   2857			     le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID,
   2858			     (le16_to_cpu(rcqe->rq_elmt_indx_word) & SLI4_RACQE_RQ_EL_INDX),
   2859			     le16_to_cpu(rcqe->data_placement_length));
   2860		efc_log_info(sli4, "sof=%02x eof=%02x hdpl=%x\n",
   2861			     rcqe->sof_byte, rcqe->eof_byte,
   2862			     rcqe->hdpl_byte & SLI4_RACQE_HDPL);
   2863	}
   2864
   2865	return rc;
   2866}
   2867
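/**
 * sli_bmbx_wait() - Wait for the bootstrap mailbox ready bit.
 *
 * @sli4: SLI context pointer.
 * @msec: Maximum time to wait, in milliseconds.
 * Return: 0 if the ready bit was set, -EIO on timeout.
 */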
   2868static int
   2869sli_bmbx_wait(struct sli4 *sli4, u32 msec)
   2870{
   2871	u32 val;
   2872	unsigned long end;
   2873
   2874	/* Wait for the bootstrap mailbox to report "ready" */
   2875	end = jiffies + msecs_to_jiffies(msec);
   2876	do {
   2877		val = readl(sli4->reg[0] + SLI4_BMBX_REG);
   2878		if (val & SLI4_BMBX_RDY)
   2879			return 0;
   2880
   2881		usleep_range(1000, 2000);
   2882	} while (time_before(jiffies, end));
   2883
   2884	return -EIO;
   2885}
   2886
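/**
 * sli_bmbx_write() - Post the bootstrap mailbox buffer to the port.
 *
 * Writes the high and low halves of the mailbox physical address to the
 * BMBX register, waiting for the ready bit between the two writes.
 *
 * @sli4: SLI context pointer.
 * Return: status -EIO/0.
 */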
   2887static int
   2888sli_bmbx_write(struct sli4 *sli4)
   2889{
   2890	u32 val;
   2891
   2892	/* write buffer location to bootstrap mailbox register */
   2893	val = sli_bmbx_write_hi(sli4->bmbx.phys);
   2894	writel(val, (sli4->reg[0] + SLI4_BMBX_REG));
   2895
   2896	if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) {
   2897		efc_log_crit(sli4, "BMBX WRITE_HI failed\n");
   2898		return -EIO;
   2899	}
   2900	val = sli_bmbx_write_lo(sli4->bmbx.phys);
   2901	writel(val, (sli4->reg[0] + SLI4_BMBX_REG));
   2902
   2903	/* wait for SLI Port to set ready bit */
   2904	return sli_bmbx_wait(sli4, SLI4_BMBX_TIMEOUT_MSEC);
   2905}
   2906
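/**
 * sli_bmbx_command() - Submit a command through the bootstrap mailbox.
 *
 * Refuses to submit if the chip reports an error state; otherwise posts
 * the mailbox buffer and checks the resulting MQ completion entry.
 *
 * @sli4: SLI context pointer.
 * Return: status -EIO/0.
 */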
   2907int
   2908sli_bmbx_command(struct sli4 *sli4)
   2909{
   2910	void *cqe = (u8 *)sli4->bmbx.virt + SLI4_BMBX_SIZE;
   2911
   2912	if (sli_fw_error_status(sli4) > 0) {
    2913		efc_log_crit(sli4, "Chip is in an error state - Mailbox command rejected");
   2914		efc_log_crit(sli4, " status=%#x error1=%#x error2=%#x\n",
   2915			     sli_reg_read_status(sli4),
   2916			     sli_reg_read_err1(sli4),
   2917			     sli_reg_read_err2(sli4));
   2918		return -EIO;
   2919	}
   2920
   2921	/* Submit a command to the bootstrap mailbox and check the status */
   2922	if (sli_bmbx_write(sli4)) {
   2923		efc_log_crit(sli4, "bmbx write fail phys=%pad reg=%#x\n",
   2924			     &sli4->bmbx.phys, readl(sli4->reg[0] + SLI4_BMBX_REG));
   2925		return -EIO;
   2926	}
   2927
   2928	/* check completion queue entry status */
   2929	if (le32_to_cpu(((struct sli4_mcqe *)cqe)->dw3_flags) &
   2930	    SLI4_MCQE_VALID) {
   2931		return sli_cqe_mq(sli4, cqe);
   2932	}
   2933	efc_log_crit(sli4, "invalid or wrong type\n");
   2934	return -EIO;
   2935}
   2936
   2937int
   2938sli_cmd_config_link(struct sli4 *sli4, void *buf)
   2939{
   2940	struct sli4_cmd_config_link *config_link = buf;
   2941
   2942	memset(buf, 0, SLI4_BMBX_SIZE);
   2943
   2944	config_link->hdr.command = SLI4_MBX_CMD_CONFIG_LINK;
   2945
   2946	/* Port interprets zero in a field as "use default value" */
   2947
   2948	return 0;
   2949}
   2950
   2951int
   2952sli_cmd_down_link(struct sli4 *sli4, void *buf)
   2953{
   2954	struct sli4_mbox_command_header *hdr = buf;
   2955
   2956	memset(buf, 0, SLI4_BMBX_SIZE);
   2957
   2958	hdr->command = SLI4_MBX_CMD_DOWN_LINK;
   2959
   2960	/* Port interprets zero in a field as "use default value" */
   2961
   2962	return 0;
   2963}
   2964
   2965int
   2966sli_cmd_dump_type4(struct sli4 *sli4, void *buf, u16 wki)
   2967{
   2968	struct sli4_cmd_dump4 *cmd = buf;
   2969
   2970	memset(buf, 0, SLI4_BMBX_SIZE);
   2971
   2972	cmd->hdr.command = SLI4_MBX_CMD_DUMP;
   2973	cmd->type_dword = cpu_to_le32(0x4);
   2974	cmd->wki_selection = cpu_to_le16(wki);
   2975	return 0;
   2976}
   2977
   2978int
   2979sli_cmd_common_read_transceiver_data(struct sli4 *sli4, void *buf, u32 page_num,
   2980				     struct efc_dma *dma)
   2981{
   2982	struct sli4_rqst_cmn_read_transceiver_data *req = NULL;
   2983	u32 psize;
   2984
   2985	if (!dma)
   2986		psize = SLI4_CFG_PYLD_LENGTH(cmn_read_transceiver_data);
   2987	else
   2988		psize = dma->size;
   2989
   2990	req = sli_config_cmd_init(sli4, buf, psize, dma);
   2991	if (!req)
   2992		return -EIO;
   2993
   2994	sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_READ_TRANS_DATA,
   2995			 SLI4_SUBSYSTEM_COMMON, CMD_V0,
   2996			 SLI4_RQST_PYLD_LEN(cmn_read_transceiver_data));
   2997
   2998	req->page_number = cpu_to_le32(page_num);
   2999	req->port = cpu_to_le32(sli4->port_number);
   3000
   3001	return 0;
   3002}
   3003
   3004int
   3005sli_cmd_read_link_stats(struct sli4 *sli4, void *buf, u8 req_ext_counters,
   3006			u8 clear_overflow_flags,
   3007			u8 clear_all_counters)
   3008{
   3009	struct sli4_cmd_read_link_stats *cmd = buf;
   3010	u32 flags;
   3011
   3012	memset(buf, 0, SLI4_BMBX_SIZE);
   3013
   3014	cmd->hdr.command = SLI4_MBX_CMD_READ_LNK_STAT;
   3015
   3016	flags = 0;
   3017	if (req_ext_counters)
   3018		flags |= SLI4_READ_LNKSTAT_REC;
   3019	if (clear_all_counters)
   3020		flags |= SLI4_READ_LNKSTAT_CLRC;
   3021	if (clear_overflow_flags)
   3022		flags |= SLI4_READ_LNKSTAT_CLOF;
   3023
   3024	cmd->dw1_flags = cpu_to_le32(flags);
   3025	return 0;
   3026}
   3027
   3028int
   3029sli_cmd_read_status(struct sli4 *sli4, void *buf, u8 clear_counters)
   3030{
   3031	struct sli4_cmd_read_status *cmd = buf;
   3032	u32 flags = 0;
   3033
   3034	memset(buf, 0, SLI4_BMBX_SIZE);
   3035
   3036	cmd->hdr.command = SLI4_MBX_CMD_READ_STATUS;
   3037	if (clear_counters)
   3038		flags |= SLI4_READSTATUS_CLEAR_COUNTERS;
   3039	else
   3040		flags &= ~SLI4_READSTATUS_CLEAR_COUNTERS;
   3041
   3042	cmd->dw1_flags = cpu_to_le32(flags);
   3043	return 0;
   3044}
   3045
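/**
 * sli_cmd_init_link() - Write an INIT_LINK command to the provided buffer.
 *
 * @sli4: SLI context pointer.
 * @buf: Destination buffer for the command.
 * @speed: Requested link speed (SLI4_LINK_SPEED_*); must be valid for
 *         the configured topology.
 * @reset_alpa: AL_PA to select during loop initialization, or zero.
 * Return: status -EIO/0.
 */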
   3046int
   3047sli_cmd_init_link(struct sli4 *sli4, void *buf, u32 speed, u8 reset_alpa)
   3048{
   3049	struct sli4_cmd_init_link *init_link = buf;
   3050	u32 flags = 0;
   3051
   3052	memset(buf, 0, SLI4_BMBX_SIZE);
   3053
   3054	init_link->hdr.command = SLI4_MBX_CMD_INIT_LINK;
   3055
   3056	init_link->sel_reset_al_pa_dword =
   3057				cpu_to_le32(reset_alpa);
   3058	flags &= ~SLI4_INIT_LINK_F_LOOPBACK;
   3059
   3060	init_link->link_speed_sel_code = cpu_to_le32(speed);
   3061	switch (speed) {
   3062	case SLI4_LINK_SPEED_1G:
   3063	case SLI4_LINK_SPEED_2G:
   3064	case SLI4_LINK_SPEED_4G:
   3065	case SLI4_LINK_SPEED_8G:
   3066	case SLI4_LINK_SPEED_16G:
   3067	case SLI4_LINK_SPEED_32G:
   3068	case SLI4_LINK_SPEED_64G:
   3069		flags |= SLI4_INIT_LINK_F_FIXED_SPEED;
   3070		break;
   3071	case SLI4_LINK_SPEED_10G:
   3072		efc_log_info(sli4, "unsupported FC speed %d\n", speed);
   3073		init_link->flags0 = cpu_to_le32(flags);
   3074		return -EIO;
   3075	}
   3076
   3077	switch (sli4->topology) {
   3078	case SLI4_READ_CFG_TOPO_FC:
   3079		/* Attempt P2P but failover to FC-AL */
   3080		flags |= SLI4_INIT_LINK_F_FAIL_OVER;
   3081		flags |= SLI4_INIT_LINK_F_P2P_FAIL_OVER;
   3082		break;
   3083	case SLI4_READ_CFG_TOPO_FC_AL:
   3084		flags |= SLI4_INIT_LINK_F_FCAL_ONLY;
   3085		if (speed == SLI4_LINK_SPEED_16G ||
   3086		    speed == SLI4_LINK_SPEED_32G) {
   3087			efc_log_info(sli4, "unsupported FC-AL speed %d\n",
   3088				     speed);
   3089			init_link->flags0 = cpu_to_le32(flags);
   3090			return -EIO;
   3091		}
   3092		break;
   3093	case SLI4_READ_CFG_TOPO_NON_FC_AL:
   3094		flags |= SLI4_INIT_LINK_F_P2P_ONLY;
   3095		break;
   3096	default:
    3098		efc_log_info(sli4, "unsupported topology %#x\n", sli4->topology);
   3100		init_link->flags0 = cpu_to_le32(flags);
   3101		return -EIO;
   3102	}
   3103
   3104	flags &= ~SLI4_INIT_LINK_F_UNFAIR;
   3105	flags &= ~SLI4_INIT_LINK_F_NO_LIRP;
   3106	flags &= ~SLI4_INIT_LINK_F_LOOP_VALID_CHK;
   3107	flags &= ~SLI4_INIT_LINK_F_NO_LISA;
   3108	flags &= ~SLI4_INIT_LINK_F_PICK_HI_ALPA;
   3109	init_link->flags0 = cpu_to_le32(flags);
   3110
   3111	return 0;
   3112}
   3113
   3114int
   3115sli_cmd_init_vfi(struct sli4 *sli4, void *buf, u16 vfi, u16 fcfi, u16 vpi)
   3116{
   3117	struct sli4_cmd_init_vfi *init_vfi = buf;
   3118	u16 flags = 0;
   3119
   3120	memset(buf, 0, SLI4_BMBX_SIZE);
   3121
   3122	init_vfi->hdr.command = SLI4_MBX_CMD_INIT_VFI;
   3123	init_vfi->vfi = cpu_to_le16(vfi);
   3124	init_vfi->fcfi = cpu_to_le16(fcfi);
   3125
   3126	/*
   3127	 * If the VPI is valid, initialize it at the same time as
   3128	 * the VFI
   3129	 */
   3130	if (vpi != U16_MAX) {
   3131		flags |= SLI4_INIT_VFI_FLAG_VP;
   3132		init_vfi->flags0_word = cpu_to_le16(flags);
   3133		init_vfi->vpi = cpu_to_le16(vpi);
   3134	}
   3135
   3136	return 0;
   3137}
   3138
   3139int
   3140sli_cmd_init_vpi(struct sli4 *sli4, void *buf, u16 vpi, u16 vfi)
   3141{
   3142	struct sli4_cmd_init_vpi *init_vpi = buf;
   3143
   3144	memset(buf, 0, SLI4_BMBX_SIZE);
   3145
   3146	init_vpi->hdr.command = SLI4_MBX_CMD_INIT_VPI;
   3147	init_vpi->vpi = cpu_to_le16(vpi);
   3148	init_vpi->vfi = cpu_to_le16(vfi);
   3149
   3150	return 0;
   3151}
   3152
   3153int
   3154sli_cmd_post_xri(struct sli4 *sli4, void *buf, u16 xri_base, u16 xri_count)
   3155{
   3156	struct sli4_cmd_post_xri *post_xri = buf;
   3157	u16 xri_count_flags = 0;
   3158
   3159	memset(buf, 0, SLI4_BMBX_SIZE);
   3160
   3161	post_xri->hdr.command = SLI4_MBX_CMD_POST_XRI;
   3162	post_xri->xri_base = cpu_to_le16(xri_base);
   3163	xri_count_flags = xri_count & SLI4_POST_XRI_COUNT;
   3164	xri_count_flags |= SLI4_POST_XRI_FLAG_ENX;
   3165	xri_count_flags |= SLI4_POST_XRI_FLAG_VAL;
   3166	post_xri->xri_count_flags = cpu_to_le16(xri_count_flags);
   3167
   3168	return 0;
   3169}
   3170
   3171int
   3172sli_cmd_release_xri(struct sli4 *sli4, void *buf, u8 num_xri)
   3173{
   3174	struct sli4_cmd_release_xri *release_xri = buf;
   3175
   3176	memset(buf, 0, SLI4_BMBX_SIZE);
   3177
   3178	release_xri->hdr.command = SLI4_MBX_CMD_RELEASE_XRI;
   3179	release_xri->xri_count_word = cpu_to_le16(num_xri &
   3180					SLI4_RELEASE_XRI_COUNT);
   3181
   3182	return 0;
   3183}
   3184
   3185static int
   3186sli_cmd_read_config(struct sli4 *sli4, void *buf)
   3187{
   3188	struct sli4_cmd_read_config *read_config = buf;
   3189
   3190	memset(buf, 0, SLI4_BMBX_SIZE);
   3191
   3192	read_config->hdr.command = SLI4_MBX_CMD_READ_CONFIG;
   3193
   3194	return 0;
   3195}
   3196
   3197int
   3198sli_cmd_read_nvparms(struct sli4 *sli4, void *buf)
   3199{
   3200	struct sli4_cmd_read_nvparms *read_nvparms = buf;
   3201
   3202	memset(buf, 0, SLI4_BMBX_SIZE);
   3203
   3204	read_nvparms->hdr.command = SLI4_MBX_CMD_READ_NVPARMS;
   3205
   3206	return 0;
   3207}
   3208
   3209int
   3210sli_cmd_write_nvparms(struct sli4 *sli4, void *buf, u8 *wwpn, u8 *wwnn,
   3211		      u8 hard_alpa, u32 preferred_d_id)
   3212{
   3213	struct sli4_cmd_write_nvparms *write_nvparms = buf;
   3214
   3215	memset(buf, 0, SLI4_BMBX_SIZE);
   3216
   3217	write_nvparms->hdr.command = SLI4_MBX_CMD_WRITE_NVPARMS;
   3218	memcpy(write_nvparms->wwpn, wwpn, 8);
   3219	memcpy(write_nvparms->wwnn, wwnn, 8);
   3220
   3221	write_nvparms->hard_alpa_d_id =
   3222			cpu_to_le32((preferred_d_id << 8) | hard_alpa);
   3223	return 0;
   3224}
   3225
   3226static int
   3227sli_cmd_read_rev(struct sli4 *sli4, void *buf, struct efc_dma *vpd)
   3228{
   3229	struct sli4_cmd_read_rev *read_rev = buf;
   3230
   3231	memset(buf, 0, SLI4_BMBX_SIZE);
   3232
   3233	read_rev->hdr.command = SLI4_MBX_CMD_READ_REV;
   3234
   3235	if (vpd && vpd->size) {
   3236		read_rev->flags0_word |= cpu_to_le16(SLI4_READ_REV_FLAG_VPD);
   3237
   3238		read_rev->available_length_dword =
   3239			cpu_to_le32(vpd->size &
   3240				    SLI4_READ_REV_AVAILABLE_LENGTH);
   3241
   3242		read_rev->hostbuf.low =
   3243				cpu_to_le32(lower_32_bits(vpd->phys));
   3244		read_rev->hostbuf.high =
   3245				cpu_to_le32(upper_32_bits(vpd->phys));
   3246	}
   3247
   3248	return 0;
   3249}
   3250
   3251int
   3252sli_cmd_read_sparm64(struct sli4 *sli4, void *buf, struct efc_dma *dma, u16 vpi)
   3253{
   3254	struct sli4_cmd_read_sparm64 *read_sparm64 = buf;
   3255
   3256	if (vpi == U16_MAX) {
    3257		efc_log_err(sli4, "special VPI not supported\n");
   3258		return -EIO;
   3259	}
   3260
   3261	if (!dma || !dma->phys) {
   3262		efc_log_err(sli4, "bad DMA buffer\n");
   3263		return -EIO;
   3264	}
   3265
   3266	memset(buf, 0, SLI4_BMBX_SIZE);
   3267
   3268	read_sparm64->hdr.command = SLI4_MBX_CMD_READ_SPARM64;
   3269
   3270	read_sparm64->bde_64.bde_type_buflen =
   3271			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   3272				    (dma->size & SLI4_BDE_LEN_MASK));
   3273	read_sparm64->bde_64.u.data.low =
   3274			cpu_to_le32(lower_32_bits(dma->phys));
   3275	read_sparm64->bde_64.u.data.high =
   3276			cpu_to_le32(upper_32_bits(dma->phys));
   3277
   3278	read_sparm64->vpi = cpu_to_le16(vpi);
   3279
   3280	return 0;
   3281}
   3282
   3283int
   3284sli_cmd_read_topology(struct sli4 *sli4, void *buf, struct efc_dma *dma)
   3285{
   3286	struct sli4_cmd_read_topology *read_topo = buf;
   3287
   3288	if (!dma || !dma->size)
   3289		return -EIO;
   3290
   3291	if (dma->size < SLI4_MIN_LOOP_MAP_BYTES) {
   3292		efc_log_err(sli4, "loop map buffer too small %zx\n", dma->size);
   3293		return -EIO;
   3294	}
   3295
   3296	memset(buf, 0, SLI4_BMBX_SIZE);
   3297
   3298	read_topo->hdr.command = SLI4_MBX_CMD_READ_TOPOLOGY;
   3299
   3300	memset(dma->virt, 0, dma->size);
   3301
   3302	read_topo->bde_loop_map.bde_type_buflen =
   3303					cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   3304					(dma->size & SLI4_BDE_LEN_MASK));
   3305	read_topo->bde_loop_map.u.data.low  =
   3306				cpu_to_le32(lower_32_bits(dma->phys));
   3307	read_topo->bde_loop_map.u.data.high =
   3308				cpu_to_le32(upper_32_bits(dma->phys));
   3309
   3310	return 0;
   3311}
   3312
   3313int
   3314sli_cmd_reg_fcfi(struct sli4 *sli4, void *buf, u16 index,
   3315		 struct sli4_cmd_rq_cfg *rq_cfg)
   3316{
   3317	struct sli4_cmd_reg_fcfi *reg_fcfi = buf;
   3318	u32 i;
   3319
   3320	memset(buf, 0, SLI4_BMBX_SIZE);
   3321
   3322	reg_fcfi->hdr.command = SLI4_MBX_CMD_REG_FCFI;
   3323
   3324	reg_fcfi->fcf_index = cpu_to_le16(index);
   3325
   3326	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
   3327		switch (i) {
   3328		case 0:
   3329			reg_fcfi->rqid0 = rq_cfg[0].rq_id;
   3330			break;
   3331		case 1:
   3332			reg_fcfi->rqid1 = rq_cfg[1].rq_id;
   3333			break;
   3334		case 2:
   3335			reg_fcfi->rqid2 = rq_cfg[2].rq_id;
   3336			break;
   3337		case 3:
   3338			reg_fcfi->rqid3 = rq_cfg[3].rq_id;
   3339			break;
   3340		}
   3341		reg_fcfi->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
   3342		reg_fcfi->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
   3343		reg_fcfi->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
   3344		reg_fcfi->rq_cfg[i].type_match = rq_cfg[i].type_match;
   3345	}
   3346
   3347	return 0;
   3348}
   3349
   3350int
   3351sli_cmd_reg_fcfi_mrq(struct sli4 *sli4, void *buf, u8 mode, u16 fcf_index,
   3352		     u8 rq_selection_policy, u8 mrq_bit_mask, u16 num_mrqs,
   3353		     struct sli4_cmd_rq_cfg *rq_cfg)
   3354{
   3355	struct sli4_cmd_reg_fcfi_mrq *reg_fcfi_mrq = buf;
   3356	u32 i;
   3357	u32 mrq_flags = 0;
   3358
   3359	memset(buf, 0, SLI4_BMBX_SIZE);
   3360
   3361	reg_fcfi_mrq->hdr.command = SLI4_MBX_CMD_REG_FCFI_MRQ;
   3362	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
   3363		reg_fcfi_mrq->fcf_index = cpu_to_le16(fcf_index);
   3364		goto done;
   3365	}
   3366
   3367	reg_fcfi_mrq->dw8_vlan = cpu_to_le32(SLI4_REGFCFI_MRQ_MODE);
   3368
   3369	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
   3370		reg_fcfi_mrq->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
   3371		reg_fcfi_mrq->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
   3372		reg_fcfi_mrq->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
   3373		reg_fcfi_mrq->rq_cfg[i].type_match = rq_cfg[i].type_match;
   3374
   3375		switch (i) {
   3376		case 3:
   3377			reg_fcfi_mrq->rqid3 = rq_cfg[i].rq_id;
   3378			break;
   3379		case 2:
   3380			reg_fcfi_mrq->rqid2 = rq_cfg[i].rq_id;
   3381			break;
   3382		case 1:
   3383			reg_fcfi_mrq->rqid1 = rq_cfg[i].rq_id;
   3384			break;
   3385		case 0:
   3386			reg_fcfi_mrq->rqid0 = rq_cfg[i].rq_id;
   3387			break;
   3388		}
   3389	}
   3390
   3391	mrq_flags = num_mrqs & SLI4_REGFCFI_MRQ_MASK_NUM_PAIRS;
   3392	mrq_flags |= (mrq_bit_mask << 8);
   3393	mrq_flags |= (rq_selection_policy << 12);
   3394	reg_fcfi_mrq->dw9_mrqflags = cpu_to_le32(mrq_flags);
   3395done:
   3396	return 0;
   3397}
   3398
   3399int
   3400sli_cmd_reg_rpi(struct sli4 *sli4, void *buf, u32 rpi, u32 vpi, u32 fc_id,
   3401		struct efc_dma *dma, u8 update, u8 enable_t10_pi)
   3402{
   3403	struct sli4_cmd_reg_rpi *reg_rpi = buf;
   3404	u32 rportid_flags = 0;
   3405
   3406	memset(buf, 0, SLI4_BMBX_SIZE);
   3407
   3408	reg_rpi->hdr.command = SLI4_MBX_CMD_REG_RPI;
   3409
   3410	reg_rpi->rpi = cpu_to_le16(rpi);
   3411
   3412	rportid_flags = fc_id & SLI4_REGRPI_REMOTE_N_PORTID;
   3413
   3414	if (update)
   3415		rportid_flags |= SLI4_REGRPI_UPD;
   3416	else
   3417		rportid_flags &= ~SLI4_REGRPI_UPD;
   3418
   3419	if (enable_t10_pi)
   3420		rportid_flags |= SLI4_REGRPI_ETOW;
   3421	else
   3422		rportid_flags &= ~SLI4_REGRPI_ETOW;
   3423
   3424	reg_rpi->dw2_rportid_flags = cpu_to_le32(rportid_flags);
   3425
   3426	reg_rpi->bde_64.bde_type_buflen =
   3427		cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   3428			    (SLI4_REG_RPI_BUF_LEN & SLI4_BDE_LEN_MASK));
   3429	reg_rpi->bde_64.u.data.low  =
   3430		cpu_to_le32(lower_32_bits(dma->phys));
   3431	reg_rpi->bde_64.u.data.high =
   3432		cpu_to_le32(upper_32_bits(dma->phys));
   3433
   3434	reg_rpi->vpi = cpu_to_le16(vpi);
   3435
   3436	return 0;
   3437}
   3438
   3439int
   3440sli_cmd_reg_vfi(struct sli4 *sli4, void *buf, size_t size,
   3441		u16 vfi, u16 fcfi, struct efc_dma dma,
   3442		u16 vpi, __be64 sli_wwpn, u32 fc_id)
   3443{
   3444	struct sli4_cmd_reg_vfi *reg_vfi = buf;
   3445
   3446	memset(buf, 0, SLI4_BMBX_SIZE);
   3447
   3448	reg_vfi->hdr.command = SLI4_MBX_CMD_REG_VFI;
   3449
   3450	reg_vfi->vfi = cpu_to_le16(vfi);
   3451
   3452	reg_vfi->fcfi = cpu_to_le16(fcfi);
   3453
   3454	reg_vfi->sparm.bde_type_buflen =
   3455		cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   3456			    (SLI4_REG_RPI_BUF_LEN & SLI4_BDE_LEN_MASK));
   3457	reg_vfi->sparm.u.data.low  =
   3458		cpu_to_le32(lower_32_bits(dma.phys));
   3459	reg_vfi->sparm.u.data.high =
   3460		cpu_to_le32(upper_32_bits(dma.phys));
   3461
   3462	reg_vfi->e_d_tov = cpu_to_le32(sli4->e_d_tov);
   3463	reg_vfi->r_a_tov = cpu_to_le32(sli4->r_a_tov);
   3464
   3465	reg_vfi->dw0w1_flags |= cpu_to_le16(SLI4_REGVFI_VP);
   3466	reg_vfi->vpi = cpu_to_le16(vpi);
   3467	memcpy(reg_vfi->wwpn, &sli_wwpn, sizeof(reg_vfi->wwpn));
   3468	reg_vfi->dw10_lportid_flags = cpu_to_le32(fc_id);
   3469
   3470	return 0;
   3471}
   3472
   3473int
   3474sli_cmd_reg_vpi(struct sli4 *sli4, void *buf, u32 fc_id, __be64 sli_wwpn,
   3475		u16 vpi, u16 vfi, bool update)
   3476{
   3477	struct sli4_cmd_reg_vpi *reg_vpi = buf;
   3478	u32 flags = 0;
   3479
   3480	memset(buf, 0, SLI4_BMBX_SIZE);
   3481
   3482	reg_vpi->hdr.command = SLI4_MBX_CMD_REG_VPI;
   3483
   3484	flags = (fc_id & SLI4_REGVPI_LOCAL_N_PORTID);
   3485	if (update)
   3486		flags |= SLI4_REGVPI_UPD;
   3487	else
   3488		flags &= ~SLI4_REGVPI_UPD;
   3489
   3490	reg_vpi->dw2_lportid_flags = cpu_to_le32(flags);
   3491	memcpy(reg_vpi->wwpn, &sli_wwpn, sizeof(reg_vpi->wwpn));
   3492	reg_vpi->vpi = cpu_to_le16(vpi);
   3493	reg_vpi->vfi = cpu_to_le16(vfi);
   3494
   3495	return 0;
   3496}
   3497
   3498static int
   3499sli_cmd_request_features(struct sli4 *sli4, void *buf, u32 features_mask,
   3500			 bool query)
   3501{
   3502	struct sli4_cmd_request_features *req_features = buf;
   3503
   3504	memset(buf, 0, SLI4_BMBX_SIZE);
   3505
   3506	req_features->hdr.command = SLI4_MBX_CMD_RQST_FEATURES;
   3507
   3508	if (query)
   3509		req_features->dw1_qry = cpu_to_le32(SLI4_REQFEAT_QRY);
   3510
   3511	req_features->cmd = cpu_to_le32(features_mask);
   3512
   3513	return 0;
   3514}
   3515
   3516int
   3517sli_cmd_unreg_fcfi(struct sli4 *sli4, void *buf, u16 indicator)
   3518{
   3519	struct sli4_cmd_unreg_fcfi *unreg_fcfi = buf;
   3520
   3521	memset(buf, 0, SLI4_BMBX_SIZE);
   3522
   3523	unreg_fcfi->hdr.command = SLI4_MBX_CMD_UNREG_FCFI;
   3524	unreg_fcfi->fcfi = cpu_to_le16(indicator);
   3525
   3526	return 0;
   3527}
   3528
   3529int
   3530sli_cmd_unreg_rpi(struct sli4 *sli4, void *buf, u16 indicator,
   3531		  enum sli4_resource which, u32 fc_id)
   3532{
   3533	struct sli4_cmd_unreg_rpi *unreg_rpi = buf;
   3534	u32 flags = 0;
   3535
   3536	memset(buf, 0, SLI4_BMBX_SIZE);
   3537
   3538	unreg_rpi->hdr.command = SLI4_MBX_CMD_UNREG_RPI;
   3539	switch (which) {
   3540	case SLI4_RSRC_RPI:
   3541		flags |= SLI4_UNREG_RPI_II_RPI;
   3542		if (fc_id == U32_MAX)
   3543			break;
   3544
   3545		flags |= SLI4_UNREG_RPI_DP;
   3546		unreg_rpi->dw2_dest_n_portid =
   3547			cpu_to_le32(fc_id & SLI4_UNREG_RPI_DEST_N_PORTID_MASK);
   3548		break;
   3549	case SLI4_RSRC_VPI:
   3550		flags |= SLI4_UNREG_RPI_II_VPI;
   3551		break;
   3552	case SLI4_RSRC_VFI:
   3553		flags |= SLI4_UNREG_RPI_II_VFI;
   3554		break;
   3555	case SLI4_RSRC_FCFI:
   3556		flags |= SLI4_UNREG_RPI_II_FCFI;
   3557		break;
   3558	default:
   3559		efc_log_info(sli4, "unknown type %#x\n", which);
   3560		return -EIO;
   3561	}
   3562
   3563	unreg_rpi->dw1w1_flags = cpu_to_le16(flags);
   3564	unreg_rpi->index = cpu_to_le16(indicator);
   3565
   3566	return 0;
   3567}
   3568
   3569int
   3570sli_cmd_unreg_vfi(struct sli4 *sli4, void *buf, u16 index, u32 which)
   3571{
   3572	struct sli4_cmd_unreg_vfi *unreg_vfi = buf;
   3573
   3574	memset(buf, 0, SLI4_BMBX_SIZE);
   3575
   3576	unreg_vfi->hdr.command = SLI4_MBX_CMD_UNREG_VFI;
   3577	switch (which) {
   3578	case SLI4_UNREG_TYPE_DOMAIN:
   3579		unreg_vfi->index = cpu_to_le16(index);
   3580		break;
   3581	case SLI4_UNREG_TYPE_FCF:
   3582		unreg_vfi->index = cpu_to_le16(index);
   3583		break;
   3584	case SLI4_UNREG_TYPE_ALL:
   3585		unreg_vfi->index = cpu_to_le16(U32_MAX);
   3586		break;
   3587	default:
   3588		return -EIO;
   3589	}
   3590
   3591	if (which != SLI4_UNREG_TYPE_DOMAIN)
   3592		unreg_vfi->dw2_flags = cpu_to_le16(SLI4_UNREG_VFI_II_FCFI);
   3593
   3594	return 0;
   3595}
   3596
   3597int
   3598sli_cmd_unreg_vpi(struct sli4 *sli4, void *buf, u16 indicator, u32 which)
   3599{
   3600	struct sli4_cmd_unreg_vpi *unreg_vpi = buf;
   3601	u32 flags = 0;
   3602
   3603	memset(buf, 0, SLI4_BMBX_SIZE);
   3604
   3605	unreg_vpi->hdr.command = SLI4_MBX_CMD_UNREG_VPI;
   3606	unreg_vpi->index = cpu_to_le16(indicator);
   3607	switch (which) {
   3608	case SLI4_UNREG_TYPE_PORT:
   3609		flags |= SLI4_UNREG_VPI_II_VPI;
   3610		break;
   3611	case SLI4_UNREG_TYPE_DOMAIN:
   3612		flags |= SLI4_UNREG_VPI_II_VFI;
   3613		break;
   3614	case SLI4_UNREG_TYPE_FCF:
   3615		flags |= SLI4_UNREG_VPI_II_FCFI;
   3616		break;
   3617	case SLI4_UNREG_TYPE_ALL:
   3618		/* override indicator */
   3619		unreg_vpi->index = cpu_to_le16(U32_MAX);
   3620		flags |= SLI4_UNREG_VPI_II_FCFI;
   3621		break;
   3622	default:
   3623		return -EIO;
   3624	}
   3625
   3626	unreg_vpi->dw2w0_flags = cpu_to_le16(flags);
   3627	return 0;
   3628}
   3629
   3630static int
   3631sli_cmd_common_modify_eq_delay(struct sli4 *sli4, void *buf,
   3632			       struct sli4_queue *q, int num_q, u32 shift,
   3633			       u32 delay_mult)
   3634{
   3635	struct sli4_rqst_cmn_modify_eq_delay *req = NULL;
   3636	int i;
   3637
   3638	req = sli_config_cmd_init(sli4, buf,
   3639			SLI4_CFG_PYLD_LENGTH(cmn_modify_eq_delay), NULL);
   3640	if (!req)
   3641		return -EIO;
   3642
   3643	sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_MODIFY_EQ_DELAY,
   3644			 SLI4_SUBSYSTEM_COMMON, CMD_V0,
   3645			 SLI4_RQST_PYLD_LEN(cmn_modify_eq_delay));
   3646	req->num_eq = cpu_to_le32(num_q);
   3647
   3648	for (i = 0; i < num_q; i++) {
   3649		req->eq_delay_record[i].eq_id = cpu_to_le32(q[i].id);
   3650		req->eq_delay_record[i].phase = cpu_to_le32(shift);
   3651		req->eq_delay_record[i].delay_multiplier =
   3652			cpu_to_le32(delay_mult);
   3653	}
   3654
   3655	return 0;
   3656}
   3657
   3658void
   3659sli4_cmd_lowlevel_set_watchdog(struct sli4 *sli4, void *buf,
   3660			       size_t size, u16 timeout)
   3661{
   3662	struct sli4_rqst_lowlevel_set_watchdog *req = NULL;
   3663
   3664	req = sli_config_cmd_init(sli4, buf,
   3665			SLI4_CFG_PYLD_LENGTH(lowlevel_set_watchdog), NULL);
   3666	if (!req)
   3667		return;
   3668
   3669	sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_LOWLEVEL_SET_WATCHDOG,
   3670			 SLI4_SUBSYSTEM_LOWLEVEL, CMD_V0,
   3671			 SLI4_RQST_PYLD_LEN(lowlevel_set_watchdog));
   3672	req->watchdog_timeout = cpu_to_le16(timeout);
   3673}
   3674
   3675static int
   3676sli_cmd_common_get_cntl_attributes(struct sli4 *sli4, void *buf,
   3677				   struct efc_dma *dma)
   3678{
   3679	struct sli4_rqst_hdr *hdr = NULL;
   3680
   3681	hdr = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(hdr), dma);
   3682	if (!hdr)
   3683		return -EIO;
   3684
   3685	hdr->opcode = SLI4_CMN_GET_CNTL_ATTRIBUTES;
   3686	hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
   3687	hdr->request_length = cpu_to_le32(dma->size);
   3688
   3689	return 0;
   3690}
   3691
   3692static int
   3693sli_cmd_common_get_cntl_addl_attributes(struct sli4 *sli4, void *buf,
   3694					struct efc_dma *dma)
   3695{
   3696	struct sli4_rqst_hdr *hdr = NULL;
   3697
   3698	hdr = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(hdr), dma);
   3699	if (!hdr)
   3700		return -EIO;
   3701
   3702	hdr->opcode = SLI4_CMN_GET_CNTL_ADDL_ATTRS;
   3703	hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
   3704	hdr->request_length = cpu_to_le32(dma->size);
   3705
   3706	return 0;
   3707}
   3708
   3709int
   3710sli_cmd_common_nop(struct sli4 *sli4, void *buf, uint64_t context)
   3711{
   3712	struct sli4_rqst_cmn_nop *nop = NULL;
   3713
   3714	nop = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_nop),
   3715				  NULL);
   3716	if (!nop)
   3717		return -EIO;
   3718
   3719	sli_cmd_fill_hdr(&nop->hdr, SLI4_CMN_NOP, SLI4_SUBSYSTEM_COMMON,
   3720			 CMD_V0, SLI4_RQST_PYLD_LEN(cmn_nop));
   3721
   3722	memcpy(&nop->context, &context, sizeof(context));
   3723
   3724	return 0;
   3725}
   3726
   3727int
   3728sli_cmd_common_get_resource_extent_info(struct sli4 *sli4, void *buf, u16 rtype)
   3729{
   3730	struct sli4_rqst_cmn_get_resource_extent_info *ext = NULL;
   3731
   3732	ext = sli_config_cmd_init(sli4, buf,
   3733			SLI4_RQST_CMDSZ(cmn_get_resource_extent_info), NULL);
   3734	if (!ext)
   3735		return -EIO;
   3736
   3737	sli_cmd_fill_hdr(&ext->hdr, SLI4_CMN_GET_RSC_EXTENT_INFO,
   3738			 SLI4_SUBSYSTEM_COMMON, CMD_V0,
   3739			 SLI4_RQST_PYLD_LEN(cmn_get_resource_extent_info));
   3740
   3741	ext->resource_type = cpu_to_le16(rtype);
   3742
   3743	return 0;
   3744}
   3745
   3746int
   3747sli_cmd_common_get_sli4_parameters(struct sli4 *sli4, void *buf)
   3748{
   3749	struct sli4_rqst_hdr *hdr = NULL;
   3750
   3751	hdr = sli_config_cmd_init(sli4, buf,
   3752			SLI4_CFG_PYLD_LENGTH(cmn_get_sli4_params), NULL);
   3753	if (!hdr)
   3754		return -EIO;
   3755
   3756	hdr->opcode = SLI4_CMN_GET_SLI4_PARAMS;
   3757	hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
   3758	hdr->request_length = SLI4_RQST_PYLD_LEN(cmn_get_sli4_params);
   3759
   3760	return 0;
   3761}
   3762
   3763static int
   3764sli_cmd_common_get_port_name(struct sli4 *sli4, void *buf)
   3765{
   3766	struct sli4_rqst_cmn_get_port_name *pname;
   3767
   3768	pname = sli_config_cmd_init(sli4, buf,
   3769			SLI4_CFG_PYLD_LENGTH(cmn_get_port_name), NULL);
   3770	if (!pname)
   3771		return -EIO;
   3772
   3773	sli_cmd_fill_hdr(&pname->hdr, SLI4_CMN_GET_PORT_NAME,
   3774			 SLI4_SUBSYSTEM_COMMON, CMD_V1,
   3775			 SLI4_RQST_PYLD_LEN(cmn_get_port_name));
   3776
   3777	/* Set the port type value (ethernet=0, FC=1) for V1 commands */
   3778	pname->port_type = SLI4_PORT_TYPE_FC;
   3779
   3780	return 0;
   3781}
   3782
   3783int
   3784sli_cmd_common_write_object(struct sli4 *sli4, void *buf, u16 noc,
   3785			    u16 eof, u32 desired_write_length,
   3786			    u32 offset, char *obj_name,
   3787			    struct efc_dma *dma)
   3788{
   3789	struct sli4_rqst_cmn_write_object *wr_obj = NULL;
   3790	struct sli4_bde *bde;
   3791	u32 dwflags = 0;
   3792
   3793	wr_obj = sli_config_cmd_init(sli4, buf,
   3794			SLI4_RQST_CMDSZ(cmn_write_object) + sizeof(*bde), NULL);
   3795	if (!wr_obj)
   3796		return -EIO;
   3797
   3798	sli_cmd_fill_hdr(&wr_obj->hdr, SLI4_CMN_WRITE_OBJECT,
   3799		SLI4_SUBSYSTEM_COMMON, CMD_V0,
   3800		SLI4_RQST_PYLD_LEN_VAR(cmn_write_object, sizeof(*bde)));
   3801
   3802	if (noc)
   3803		dwflags |= SLI4_RQ_DES_WRITE_LEN_NOC;
   3804	if (eof)
   3805		dwflags |= SLI4_RQ_DES_WRITE_LEN_EOF;
   3806	dwflags |= (desired_write_length & SLI4_RQ_DES_WRITE_LEN);
   3807
   3808	wr_obj->desired_write_len_dword = cpu_to_le32(dwflags);
   3809
   3810	wr_obj->write_offset = cpu_to_le32(offset);
   3811	strncpy(wr_obj->object_name, obj_name, sizeof(wr_obj->object_name) - 1);
   3812	wr_obj->host_buffer_descriptor_count = cpu_to_le32(1);
   3813
   3814	bde = (struct sli4_bde *)wr_obj->host_buffer_descriptor;
   3815
   3816	/* Set up the BDE to transfer desired_write_length bytes to the device */
   3817	bde->bde_type_buflen =
   3818		cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   3819			    (desired_write_length & SLI4_BDE_LEN_MASK));
   3820	bde->u.data.low = cpu_to_le32(lower_32_bits(dma->phys));
   3821	bde->u.data.high = cpu_to_le32(upper_32_bits(dma->phys));
   3822
   3823	return 0;
   3824}
   3825
   3826int
   3827sli_cmd_common_delete_object(struct sli4 *sli4, void *buf, char *obj_name)
   3828{
   3829	struct sli4_rqst_cmn_delete_object *req = NULL;
   3830
   3831	req = sli_config_cmd_init(sli4, buf,
   3832				  SLI4_RQST_CMDSZ(cmn_delete_object), NULL);
   3833	if (!req)
   3834		return -EIO;
   3835
   3836	sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_DELETE_OBJECT,
   3837			 SLI4_SUBSYSTEM_COMMON, CMD_V0,
   3838			 SLI4_RQST_PYLD_LEN(cmn_delete_object));
   3839
   3840	strncpy(req->object_name, obj_name, sizeof(req->object_name) - 1);
   3841	return 0;
   3842}
   3843
   3844int
   3845sli_cmd_common_read_object(struct sli4 *sli4, void *buf, u32 desired_read_len,
   3846			   u32 offset, char *obj_name, struct efc_dma *dma)
   3847{
   3848	struct sli4_rqst_cmn_read_object *rd_obj = NULL;
   3849	struct sli4_bde *bde;
   3850
   3851	rd_obj = sli_config_cmd_init(sli4, buf,
   3852			SLI4_RQST_CMDSZ(cmn_read_object) + sizeof(*bde), NULL);
   3853	if (!rd_obj)
   3854		return -EIO;
   3855
   3856	sli_cmd_fill_hdr(&rd_obj->hdr, SLI4_CMN_READ_OBJECT,
   3857		SLI4_SUBSYSTEM_COMMON, CMD_V0,
   3858		SLI4_RQST_PYLD_LEN_VAR(cmn_read_object, sizeof(*bde)));
   3859	rd_obj->desired_read_length_dword =
   3860		cpu_to_le32(desired_read_len & SLI4_REQ_DESIRE_READLEN);
   3861
   3862	rd_obj->read_offset = cpu_to_le32(offset);
   3863	strncpy(rd_obj->object_name, obj_name, sizeof(rd_obj->object_name) - 1);
   3864	rd_obj->host_buffer_descriptor_count = cpu_to_le32(1);
   3865
   3866	bde = (struct sli4_bde *)rd_obj->host_buffer_descriptor;
   3867
   3868	/* Set up the BDE to receive desired_read_len bytes from the device */
   3869	bde->bde_type_buflen =
   3870		cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
   3871			    (desired_read_len & SLI4_BDE_LEN_MASK));
   3872	if (dma) {
   3873		bde->u.data.low = cpu_to_le32(lower_32_bits(dma->phys));
   3874		bde->u.data.high = cpu_to_le32(upper_32_bits(dma->phys));
   3875	} else {
   3876		bde->u.data.low = 0;
   3877		bde->u.data.high = 0;
   3878	}
   3879
   3880	return 0;
   3881}
   3882
   3883int
   3884sli_cmd_dmtf_exec_clp_cmd(struct sli4 *sli4, void *buf, struct efc_dma *cmd,
   3885			  struct efc_dma *resp)
   3886{
   3887	struct sli4_rqst_dmtf_exec_clp_cmd *clp_cmd = NULL;
   3888
   3889	clp_cmd = sli_config_cmd_init(sli4, buf,
   3890				SLI4_RQST_CMDSZ(dmtf_exec_clp_cmd), NULL);
   3891	if (!clp_cmd)
   3892		return -EIO;
   3893
   3894	sli_cmd_fill_hdr(&clp_cmd->hdr, DMTF_EXEC_CLP_CMD, SLI4_SUBSYSTEM_DMTF,
   3895			 CMD_V0, SLI4_RQST_PYLD_LEN(dmtf_exec_clp_cmd));
   3896
   3897	clp_cmd->cmd_buf_length = cpu_to_le32(cmd->size);
   3898	clp_cmd->cmd_buf_addr_low =  cpu_to_le32(lower_32_bits(cmd->phys));
   3899	clp_cmd->cmd_buf_addr_high =  cpu_to_le32(upper_32_bits(cmd->phys));
   3900	clp_cmd->resp_buf_length = cpu_to_le32(resp->size);
   3901	clp_cmd->resp_buf_addr_low =  cpu_to_le32(lower_32_bits(resp->phys));
   3902	clp_cmd->resp_buf_addr_high =  cpu_to_le32(upper_32_bits(resp->phys));
   3903	return 0;
   3904}
   3905
   3906int
   3907sli_cmd_common_set_dump_location(struct sli4 *sli4, void *buf, bool query,
   3908				 bool is_buffer_list,
   3909				 struct efc_dma *buffer, u8 fdb)
   3910{
   3911	struct sli4_rqst_cmn_set_dump_location *set_dump_loc = NULL;
   3912	u32 buffer_length_flag = 0;
   3913
   3914	set_dump_loc = sli_config_cmd_init(sli4, buf,
   3915				SLI4_RQST_CMDSZ(cmn_set_dump_location), NULL);
   3916	if (!set_dump_loc)
   3917		return -EIO;
   3918
   3919	sli_cmd_fill_hdr(&set_dump_loc->hdr, SLI4_CMN_SET_DUMP_LOCATION,
   3920			 SLI4_SUBSYSTEM_COMMON, CMD_V0,
   3921			 SLI4_RQST_PYLD_LEN(cmn_set_dump_location));
   3922
   3923	if (is_buffer_list)
   3924		buffer_length_flag |= SLI4_CMN_SET_DUMP_BLP;
   3925
   3926	if (query)
   3927		buffer_length_flag |= SLI4_CMN_SET_DUMP_QRY;
   3928
   3929	if (fdb)
   3930		buffer_length_flag |= SLI4_CMN_SET_DUMP_FDB;
   3931
   3932	if (buffer) {
   3933		set_dump_loc->buf_addr_low =
   3934			cpu_to_le32(lower_32_bits(buffer->phys));
   3935		set_dump_loc->buf_addr_high =
   3936			cpu_to_le32(upper_32_bits(buffer->phys));
   3937
   3938		buffer_length_flag |=
   3939			buffer->len & SLI4_CMN_SET_DUMP_BUFFER_LEN;
   3940	} else {
   3941		set_dump_loc->buf_addr_low = 0;
   3942		set_dump_loc->buf_addr_high = 0;
   3943		set_dump_loc->buffer_length_dword = 0;
   3944	}
   3945	set_dump_loc->buffer_length_dword = cpu_to_le32(buffer_length_flag);
   3946	return 0;
   3947}
   3948
   3949int
   3950sli_cmd_common_set_features(struct sli4 *sli4, void *buf, u32 feature,
   3951			    u32 param_len, void *parameter)
   3952{
   3953	struct sli4_rqst_cmn_set_features *cmd = NULL;
   3954
   3955	cmd = sli_config_cmd_init(sli4, buf,
   3956				  SLI4_RQST_CMDSZ(cmn_set_features), NULL);
   3957	if (!cmd)
   3958		return -EIO;
   3959
   3960	sli_cmd_fill_hdr(&cmd->hdr, SLI4_CMN_SET_FEATURES,
   3961			 SLI4_SUBSYSTEM_COMMON, CMD_V0,
   3962			 SLI4_RQST_PYLD_LEN(cmn_set_features));
   3963
   3964	cmd->feature = cpu_to_le32(feature);
   3965	cmd->param_len = cpu_to_le32(param_len);
   3966	memcpy(cmd->params, parameter, param_len);
   3967
   3968	return 0;
   3969}
   3970
   3971int
   3972sli_cqe_mq(struct sli4 *sli4, void *buf)
   3973{
   3974	struct sli4_mcqe *mcqe = buf;
   3975	u32 dwflags = le32_to_cpu(mcqe->dw3_flags);
   3976	/*
   3977	 * Firmware can split mbx completions into two MCQEs: first with only
   3978	 * the "consumed" bit set and a second with the "complete" bit set.
   3979	 * Thus, ignore MCQE unless "complete" is set.
   3980	 */
   3981	if (!(dwflags & SLI4_MCQE_COMPLETED))
   3982		return SLI4_MCQE_STATUS_NOT_COMPLETED;
   3983
   3984	if (le16_to_cpu(mcqe->completion_status)) {
   3985		efc_log_info(sli4, "status(st=%#x ext=%#x con=%d cmp=%d ae=%d val=%d)\n",
   3986			     le16_to_cpu(mcqe->completion_status),
   3987			     le16_to_cpu(mcqe->extended_status),
   3988			     (dwflags & SLI4_MCQE_CONSUMED),
   3989			     (dwflags & SLI4_MCQE_COMPLETED),
   3990			     (dwflags & SLI4_MCQE_AE),
   3991			     (dwflags & SLI4_MCQE_VALID));
   3992	}
   3993
   3994	return le16_to_cpu(mcqe->completion_status);
   3995}
   3996
   3997int
   3998sli_cqe_async(struct sli4 *sli4, void *buf)
   3999{
   4000	struct sli4_acqe *acqe = buf;
   4001	int rc = -EIO;
   4002
   4003	if (!buf) {
   4004		efc_log_err(sli4, "bad parameter sli4=%p buf=%p\n", sli4, buf);
   4005		return -EIO;
   4006	}
   4007
   4008	switch (acqe->event_code) {
   4009	case SLI4_ACQE_EVENT_CODE_LINK_STATE:
   4010		efc_log_info(sli4, "Unsupported by FC link, evt code:%#x\n",
   4011			     acqe->event_code);
   4012		break;
   4013	case SLI4_ACQE_EVENT_CODE_GRP_5:
   4014		efc_log_info(sli4, "ACQE GRP5\n");
   4015		break;
   4016	case SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT:
   4017		efc_log_info(sli4, "ACQE SLI Port, type=0x%x, data1,2=0x%08x,0x%08x\n",
   4018			     acqe->event_type,
   4019			     le32_to_cpu(acqe->event_data[0]),
   4020			     le32_to_cpu(acqe->event_data[1]));
   4021		break;
   4022	case SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT:
   4023		rc = sli_fc_process_link_attention(sli4, buf);
   4024		break;
   4025	default:
   4026		efc_log_info(sli4, "ACQE unknown=%#x\n", acqe->event_code);
   4027	}
   4028
   4029	return rc;
   4030}
   4031
   4032bool
   4033sli_fw_ready(struct sli4 *sli4)
   4034{
   4035	u32 val;
   4036
   4037	/* Determine if the chip FW is in a ready state */
   4038	val = sli_reg_read_status(sli4);
   4039	return (val & SLI4_PORT_STATUS_RDY) ? 1 : 0;
   4040}
   4041
   4042static bool
   4043sli_wait_for_fw_ready(struct sli4 *sli4, u32 timeout_ms)
   4044{
   4045	unsigned long end;
   4046
   4047	end = jiffies + msecs_to_jiffies(timeout_ms);
   4048
   4049	do {
   4050		if (sli_fw_ready(sli4))
   4051			return true;
   4052
   4053		usleep_range(1000, 2000);
   4054	} while (time_before(jiffies, end));
   4055
   4056	return false;
   4057}
   4058
   4059static bool
   4060sli_sliport_reset(struct sli4 *sli4)
   4061{
   4062	bool rc;
   4063	u32 val;
   4064
   4065	val = SLI4_PORT_CTRL_IP;
   4066	/* Initialize port, endian */
   4067	writel(val, (sli4->reg[0] + SLI4_PORT_CTRL_REG));
   4068
   4069	rc = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC);
   4070	if (!rc)
   4071		efc_log_crit(sli4, "port failed to become ready after initialization\n");
   4072
   4073	return rc;
   4074}
   4075
   4076static bool
   4077sli_fw_init(struct sli4 *sli4)
   4078{
   4079	/*
   4080	 * Is firmware ready for operation?
   4081	 */
   4082	if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) {
   4083		efc_log_crit(sli4, "FW status is NOT ready\n");
   4084		return false;
   4085	}
   4086
   4087	/*
   4088	 * Reset port to a known state
   4089	 */
   4090	return sli_sliport_reset(sli4);
   4091}
   4092
   4093static int
   4094sli_request_features(struct sli4 *sli4, u32 *features, bool query)
   4095{
   4096	struct sli4_cmd_request_features *req_features = sli4->bmbx.virt;
   4097
   4098	if (sli_cmd_request_features(sli4, sli4->bmbx.virt, *features, query)) {
   4099		efc_log_err(sli4, "bad REQUEST_FEATURES write\n");
   4100		return -EIO;
   4101	}
   4102
   4103	if (sli_bmbx_command(sli4)) {
   4104		efc_log_crit(sli4, "bootstrap mailbox write fail\n");
   4105		return -EIO;
   4106	}
   4107
   4108	if (le16_to_cpu(req_features->hdr.status)) {
   4109		efc_log_err(sli4, "REQUEST_FEATURES bad status %#x\n",
   4110			    le16_to_cpu(req_features->hdr.status));
   4111		return -EIO;
   4112	}
   4113
   4114	*features = le32_to_cpu(req_features->resp);
   4115	return 0;
   4116}
   4117
   4118void
   4119sli_calc_max_qentries(struct sli4 *sli4)
   4120{
   4121	enum sli4_qtype q;
   4122	u32 qentries;
   4123
   4124	for (q = SLI4_QTYPE_EQ; q < SLI4_QTYPE_MAX; q++) {
   4125		sli4->qinfo.max_qentries[q] =
   4126			sli_convert_mask_to_count(sli4->qinfo.count_method[q],
   4127						  sli4->qinfo.count_mask[q]);
   4128	}
   4129
   4130	/* a single, contiguous DMA allocation will be made for each queue,
   4131	 * of size (max_qentries * queue entry size); since these can be large,
   4132	 * they may need checking against the OS max DMA allocation size
   4133	 */
   4134	for (q = SLI4_QTYPE_EQ; q < SLI4_QTYPE_MAX; q++) {
   4135		qentries = sli4->qinfo.max_qentries[q];
   4136
   4137		efc_log_info(sli4, "[%s]: max_qentries from %d to %d\n",
   4138			     SLI4_QNAME[q],
   4139			     sli4->qinfo.max_qentries[q], qentries);
   4140		sli4->qinfo.max_qentries[q] = qentries;
   4141	}
   4142}
   4143
   4144static int
   4145sli_get_read_config(struct sli4 *sli4)
   4146{
   4147	struct sli4_rsp_read_config *conf = sli4->bmbx.virt;
   4148	u32 i, total;
   4149	u32 *base;
   4150
   4151	if (sli_cmd_read_config(sli4, sli4->bmbx.virt)) {
   4152		efc_log_err(sli4, "bad READ_CONFIG write\n");
   4153		return -EIO;
   4154	}
   4155
   4156	if (sli_bmbx_command(sli4)) {
   4157		efc_log_crit(sli4, "bootstrap mailbox fail (READ_CONFIG)\n");
   4158		return -EIO;
   4159	}
   4160
   4161	if (le16_to_cpu(conf->hdr.status)) {
   4162		efc_log_err(sli4, "READ_CONFIG bad status %#x\n",
   4163			    le16_to_cpu(conf->hdr.status));
   4164		return -EIO;
   4165	}
   4166
   4167	sli4->params.has_extents =
   4168	  le32_to_cpu(conf->ext_dword) & SLI4_READ_CFG_RESP_RESOURCE_EXT;
   4169	if (sli4->params.has_extents) {
   4170		efc_log_err(sli4, "extents not supported\n");
   4171		return -EIO;
   4172	}
   4173
   4174	base = sli4->ext[0].base;
   4175	if (!base) {
   4176		int size = SLI4_RSRC_MAX * sizeof(u32);
   4177
   4178		base = kzalloc(size, GFP_KERNEL);
   4179		if (!base)
   4180			return -EIO;
   4181	}
   4182
   4183	for (i = 0; i < SLI4_RSRC_MAX; i++) {
   4184		sli4->ext[i].number = 1;
   4185		sli4->ext[i].n_alloc = 0;
   4186		sli4->ext[i].base = &base[i];
   4187	}
   4188
   4189	sli4->ext[SLI4_RSRC_VFI].base[0] = le16_to_cpu(conf->vfi_base);
   4190	sli4->ext[SLI4_RSRC_VFI].size = le16_to_cpu(conf->vfi_count);
   4191
   4192	sli4->ext[SLI4_RSRC_VPI].base[0] = le16_to_cpu(conf->vpi_base);
   4193	sli4->ext[SLI4_RSRC_VPI].size = le16_to_cpu(conf->vpi_count);
   4194
   4195	sli4->ext[SLI4_RSRC_RPI].base[0] = le16_to_cpu(conf->rpi_base);
   4196	sli4->ext[SLI4_RSRC_RPI].size = le16_to_cpu(conf->rpi_count);
   4197
   4198	sli4->ext[SLI4_RSRC_XRI].base[0] = le16_to_cpu(conf->xri_base);
   4199	sli4->ext[SLI4_RSRC_XRI].size = le16_to_cpu(conf->xri_count);
   4200
   4201	sli4->ext[SLI4_RSRC_FCFI].base[0] = 0;
   4202	sli4->ext[SLI4_RSRC_FCFI].size = le16_to_cpu(conf->fcfi_count);
   4203
   4204	for (i = 0; i < SLI4_RSRC_MAX; i++) {
   4205		total = sli4->ext[i].number * sli4->ext[i].size;
   4206		sli4->ext[i].use_map = bitmap_zalloc(total, GFP_KERNEL);
   4207		if (!sli4->ext[i].use_map) {
   4208			efc_log_err(sli4, "bitmap memory allocation failed %d\n",
   4209				    i);
   4210			return -EIO;
   4211		}
   4212		sli4->ext[i].map_size = total;
   4213	}
   4214
   4215	sli4->topology = (le32_to_cpu(conf->topology_dword) &
   4216			  SLI4_READ_CFG_RESP_TOPOLOGY) >> 24;
   4217	switch (sli4->topology) {
   4218	case SLI4_READ_CFG_TOPO_FC:
   4219		efc_log_info(sli4, "FC (unknown)\n");
   4220		break;
   4221	case SLI4_READ_CFG_TOPO_NON_FC_AL:
   4222		efc_log_info(sli4, "FC (direct attach)\n");
   4223		break;
   4224	case SLI4_READ_CFG_TOPO_FC_AL:
   4225		efc_log_info(sli4, "FC (arbitrated loop)\n");
   4226		break;
   4227	default:
   4228		efc_log_info(sli4, "bad topology %#x\n", sli4->topology);
   4229	}
   4230
   4231	sli4->e_d_tov = le16_to_cpu(conf->e_d_tov);
   4232	sli4->r_a_tov = le16_to_cpu(conf->r_a_tov);
   4233
   4234	sli4->link_module_type = le16_to_cpu(conf->lmt);
   4235
   4236	sli4->qinfo.max_qcount[SLI4_QTYPE_EQ] =	le16_to_cpu(conf->eq_count);
   4237	sli4->qinfo.max_qcount[SLI4_QTYPE_CQ] =	le16_to_cpu(conf->cq_count);
   4238	sli4->qinfo.max_qcount[SLI4_QTYPE_WQ] =	le16_to_cpu(conf->wq_count);
   4239	sli4->qinfo.max_qcount[SLI4_QTYPE_RQ] =	le16_to_cpu(conf->rq_count);
   4240
   4241	/*
   4242	 * READ_CONFIG doesn't give the max number of MQs. Applications
   4243	 * will typically want 1, but we may need another at some future
   4244	 * date. Dummy up a "max" MQ count here.
   4245	 */
   4246	sli4->qinfo.max_qcount[SLI4_QTYPE_MQ] = SLI4_USER_MQ_COUNT;
   4247	return 0;
   4248}
   4249
   4250static int
   4251sli_get_sli4_parameters(struct sli4 *sli4)
   4252{
   4253	struct sli4_rsp_cmn_get_sli4_params *parms;
   4254	u32 dw_loopback;
   4255	u32 dw_eq_pg_cnt;
   4256	u32 dw_cq_pg_cnt;
   4257	u32 dw_mq_pg_cnt;
   4258	u32 dw_wq_pg_cnt;
   4259	u32 dw_rq_pg_cnt;
   4260	u32 dw_sgl_pg_cnt;
   4261
   4262	if (sli_cmd_common_get_sli4_parameters(sli4, sli4->bmbx.virt))
   4263		return -EIO;
   4264
   4265	parms = (struct sli4_rsp_cmn_get_sli4_params *)
   4266		 (((u8 *)sli4->bmbx.virt) +
   4267		  offsetof(struct sli4_cmd_sli_config, payload.embed));
   4268
   4269	if (sli_bmbx_command(sli4)) {
   4270		efc_log_crit(sli4, "bootstrap mailbox write fail\n");
   4271		return -EIO;
   4272	}
   4273
   4274	if (parms->hdr.status) {
   4275		efc_log_err(sli4, "COMMON_GET_SLI4_PARAMETERS bad status %#x",
   4276			    parms->hdr.status);
   4277		efc_log_err(sli4, "additional status %#x\n",
   4278			    parms->hdr.additional_status);
   4279		return -EIO;
   4280	}
   4281
   4282	dw_loopback = le32_to_cpu(parms->dw16_loopback_scope);
   4283	dw_eq_pg_cnt = le32_to_cpu(parms->dw6_eq_page_cnt);
   4284	dw_cq_pg_cnt = le32_to_cpu(parms->dw8_cq_page_cnt);
   4285	dw_mq_pg_cnt = le32_to_cpu(parms->dw10_mq_page_cnt);
   4286	dw_wq_pg_cnt = le32_to_cpu(parms->dw12_wq_page_cnt);
   4287	dw_rq_pg_cnt = le32_to_cpu(parms->dw14_rq_page_cnt);
   4288
   4289	sli4->params.auto_reg =	(dw_loopback & SLI4_PARAM_AREG);
   4290	sli4->params.auto_xfer_rdy = (dw_loopback & SLI4_PARAM_AGXF);
   4291	sli4->params.hdr_template_req =	(dw_loopback & SLI4_PARAM_HDRR);
   4292	sli4->params.t10_dif_inline_capable = (dw_loopback & SLI4_PARAM_TIMM);
   4293	sli4->params.t10_dif_separate_capable =	(dw_loopback & SLI4_PARAM_TSMM);
   4294
   4295	sli4->params.mq_create_version = GET_Q_CREATE_VERSION(dw_mq_pg_cnt);
   4296	sli4->params.cq_create_version = GET_Q_CREATE_VERSION(dw_cq_pg_cnt);
   4297
   4298	sli4->rq_min_buf_size =	le16_to_cpu(parms->min_rq_buffer_size);
   4299	sli4->rq_max_buf_size = le32_to_cpu(parms->max_rq_buffer_size);
   4300
   4301	sli4->qinfo.qpage_count[SLI4_QTYPE_EQ] =
   4302		(dw_eq_pg_cnt & SLI4_PARAM_EQ_PAGE_CNT_MASK);
   4303	sli4->qinfo.qpage_count[SLI4_QTYPE_CQ] =
   4304		(dw_cq_pg_cnt & SLI4_PARAM_CQ_PAGE_CNT_MASK);
   4305	sli4->qinfo.qpage_count[SLI4_QTYPE_MQ] =
   4306		(dw_mq_pg_cnt & SLI4_PARAM_MQ_PAGE_CNT_MASK);
   4307	sli4->qinfo.qpage_count[SLI4_QTYPE_WQ] =
   4308		(dw_wq_pg_cnt & SLI4_PARAM_WQ_PAGE_CNT_MASK);
   4309	sli4->qinfo.qpage_count[SLI4_QTYPE_RQ] =
   4310		(dw_rq_pg_cnt & SLI4_PARAM_RQ_PAGE_CNT_MASK);
   4311
   4312	/* save count methods and masks for each queue type */
   4313
   4314	sli4->qinfo.count_mask[SLI4_QTYPE_EQ] =
   4315			le16_to_cpu(parms->eqe_count_mask);
   4316	sli4->qinfo.count_method[SLI4_QTYPE_EQ] =
   4317			GET_Q_CNT_METHOD(dw_eq_pg_cnt);
   4318
   4319	sli4->qinfo.count_mask[SLI4_QTYPE_CQ] =
   4320			le16_to_cpu(parms->cqe_count_mask);
   4321	sli4->qinfo.count_method[SLI4_QTYPE_CQ] =
   4322			GET_Q_CNT_METHOD(dw_cq_pg_cnt);
   4323
   4324	sli4->qinfo.count_mask[SLI4_QTYPE_MQ] =
   4325			le16_to_cpu(parms->mqe_count_mask);
   4326	sli4->qinfo.count_method[SLI4_QTYPE_MQ] =
   4327			GET_Q_CNT_METHOD(dw_mq_pg_cnt);
   4328
   4329	sli4->qinfo.count_mask[SLI4_QTYPE_WQ] =
   4330			le16_to_cpu(parms->wqe_count_mask);
   4331	sli4->qinfo.count_method[SLI4_QTYPE_WQ] =
   4332			GET_Q_CNT_METHOD(dw_wq_pg_cnt);
   4333
   4334	sli4->qinfo.count_mask[SLI4_QTYPE_RQ] =
   4335			le16_to_cpu(parms->rqe_count_mask);
   4336	sli4->qinfo.count_method[SLI4_QTYPE_RQ] =
   4337			GET_Q_CNT_METHOD(dw_rq_pg_cnt);
   4338
   4339	/* now calculate max queue entries */
   4340	sli_calc_max_qentries(sli4);
   4341
   4342	dw_sgl_pg_cnt = le32_to_cpu(parms->dw18_sgl_page_cnt);
   4343
   4344	/* max # of pages */
   4345	sli4->max_sgl_pages = (dw_sgl_pg_cnt & SLI4_PARAM_SGL_PAGE_CNT_MASK);
   4346
   4347	/* bit map of available sizes */
   4348	sli4->sgl_page_sizes = (dw_sgl_pg_cnt &
   4349				SLI4_PARAM_SGL_PAGE_SZS_MASK) >> 8;
   4350	/* ignore HLM here. Use value from REQUEST_FEATURES */
   4351	sli4->sge_supported_length = le32_to_cpu(parms->sge_supported_length);
   4352	sli4->params.sgl_pre_reg_required = (dw_loopback & SLI4_PARAM_SGLR);
   4353	/* default to using pre-registered SGL's */
   4354	sli4->params.sgl_pre_registered = true;
   4355
   4356	sli4->params.perf_hint = dw_loopback & SLI4_PARAM_PHON;
   4357	sli4->params.perf_wq_id_association = (dw_loopback & SLI4_PARAM_PHWQ);
   4358
   4359	sli4->rq_batch = (le16_to_cpu(parms->dw15w1_rq_db_window) &
   4360			  SLI4_PARAM_RQ_DB_WINDOW_MASK) >> 12;
   4361
   4362	/* Use the highest available WQE size. */
   4363	if (((dw_wq_pg_cnt & SLI4_PARAM_WQE_SZS_MASK) >> 8) &
   4364	    SLI4_128BYTE_WQE_SUPPORT)
   4365		sli4->wqe_size = SLI4_WQE_EXT_BYTES;
   4366	else
   4367		sli4->wqe_size = SLI4_WQE_BYTES;
   4368
   4369	return 0;
   4370}
   4371
   4372static int
   4373sli_get_ctrl_attributes(struct sli4 *sli4)
   4374{
   4375	struct sli4_rsp_cmn_get_cntl_attributes *attr;
   4376	struct sli4_rsp_cmn_get_cntl_addl_attributes *add_attr;
   4377	struct efc_dma data;
   4378	u32 psize;
   4379
   4380	/*
   4381	 * Issue COMMON_GET_CNTL_ATTRIBUTES to get port_number. Temporarily
   4382	 * uses VPD DMA buffer as the response won't fit in the embedded
   4383	 * buffer.
   4384	 */
   4385	memset(sli4->vpd_data.virt, 0, sli4->vpd_data.size);
   4386	if (sli_cmd_common_get_cntl_attributes(sli4, sli4->bmbx.virt,
   4387					       &sli4->vpd_data)) {
   4388		efc_log_err(sli4, "bad COMMON_GET_CNTL_ATTRIBUTES write\n");
   4389		return -EIO;
   4390	}
   4391
   4392	attr =	sli4->vpd_data.virt;
   4393
   4394	if (sli_bmbx_command(sli4)) {
   4395		efc_log_crit(sli4, "bootstrap mailbox write fail\n");
   4396		return -EIO;
   4397	}
   4398
   4399	if (attr->hdr.status) {
   4400		efc_log_err(sli4, "COMMON_GET_CNTL_ATTRIBUTES bad status %#x",
   4401			    attr->hdr.status);
   4402		efc_log_err(sli4, "additional status %#x\n",
   4403			    attr->hdr.additional_status);
   4404		return -EIO;
   4405	}
   4406
   4407	sli4->port_number = attr->port_num_type_flags & SLI4_CNTL_ATTR_PORTNUM;
   4408
   4409	memcpy(sli4->bios_version_string, attr->bios_version_str,
   4410	       sizeof(sli4->bios_version_string));
   4411
   4412	/* get additional attributes */
   4413	psize = sizeof(struct sli4_rsp_cmn_get_cntl_addl_attributes);
   4414	data.size = psize;
   4415	data.virt = dma_alloc_coherent(&sli4->pci->dev, data.size,
   4416				       &data.phys, GFP_KERNEL);
   4417	if (!data.virt) {
   4418		memset(&data, 0, sizeof(struct efc_dma));
   4419		efc_log_err(sli4, "Failed to allocate memory for GET_CNTL_ADDL_ATTR\n");
   4420		return -EIO;
   4421	}
   4422
   4423	if (sli_cmd_common_get_cntl_addl_attributes(sli4, sli4->bmbx.virt,
   4424						    &data)) {
   4425		efc_log_err(sli4, "bad GET_CNTL_ADDL_ATTR write\n");
   4426		dma_free_coherent(&sli4->pci->dev, data.size,
   4427				  data.virt, data.phys);
   4428		return -EIO;
   4429	}
   4430
   4431	if (sli_bmbx_command(sli4)) {
   4432		efc_log_crit(sli4, "mailbox fail (GET_CNTL_ADDL_ATTR)\n");
   4433		dma_free_coherent(&sli4->pci->dev, data.size,
   4434				  data.virt, data.phys);
   4435		return -EIO;
   4436	}
   4437
   4438	add_attr = data.virt;
   4439	if (add_attr->hdr.status) {
   4440		efc_log_err(sli4, "GET_CNTL_ADDL_ATTR bad status %#x\n",
   4441			    add_attr->hdr.status);
   4442		dma_free_coherent(&sli4->pci->dev, data.size,
   4443				  data.virt, data.phys);
   4444		return -EIO;
   4445	}
   4446
   4447	memcpy(sli4->ipl_name, add_attr->ipl_file_name, sizeof(sli4->ipl_name));
   4448
   4449	efc_log_info(sli4, "IPL:%s\n", (char *)sli4->ipl_name);
   4450
   4451	dma_free_coherent(&sli4->pci->dev, data.size, data.virt,
   4452			  data.phys);
   4453	memset(&data, 0, sizeof(struct efc_dma));
   4454	return 0;
   4455}
   4456
   4457static int
   4458sli_get_fw_rev(struct sli4 *sli4)
   4459{
   4460	struct sli4_cmd_read_rev	*read_rev = sli4->bmbx.virt;
   4461
   4462	if (sli_cmd_read_rev(sli4, sli4->bmbx.virt, &sli4->vpd_data))
   4463		return -EIO;
   4464
   4465	if (sli_bmbx_command(sli4)) {
   4466		efc_log_crit(sli4, "bootstrap mailbox write fail (READ_REV)\n");
   4467		return -EIO;
   4468	}
   4469
   4470	if (le16_to_cpu(read_rev->hdr.status)) {
   4471		efc_log_err(sli4, "READ_REV bad status %#x\n",
   4472			    le16_to_cpu(read_rev->hdr.status));
   4473		return -EIO;
   4474	}
   4475
   4476	sli4->fw_rev[0] = le32_to_cpu(read_rev->first_fw_id);
   4477	memcpy(sli4->fw_name[0], read_rev->first_fw_name,
   4478	       sizeof(sli4->fw_name[0]));
   4479
   4480	sli4->fw_rev[1] = le32_to_cpu(read_rev->second_fw_id);
   4481	memcpy(sli4->fw_name[1], read_rev->second_fw_name,
   4482	       sizeof(sli4->fw_name[1]));
   4483
   4484	sli4->hw_rev[0] = le32_to_cpu(read_rev->first_hw_rev);
   4485	sli4->hw_rev[1] = le32_to_cpu(read_rev->second_hw_rev);
   4486	sli4->hw_rev[2] = le32_to_cpu(read_rev->third_hw_rev);
   4487
   4488	efc_log_info(sli4, "FW1:%s (%08x) / FW2:%s (%08x)\n",
   4489		     read_rev->first_fw_name, le32_to_cpu(read_rev->first_fw_id),
   4490		     read_rev->second_fw_name, le32_to_cpu(read_rev->second_fw_id));
   4491
   4492	efc_log_info(sli4, "HW1: %08x / HW2: %08x\n",
   4493		     le32_to_cpu(read_rev->first_hw_rev),
   4494		     le32_to_cpu(read_rev->second_hw_rev));
   4495
   4496	/* Check that all VPD data was returned */
   4497	if (le32_to_cpu(read_rev->returned_vpd_length) !=
   4498	    le32_to_cpu(read_rev->actual_vpd_length)) {
   4499		efc_log_info(sli4, "VPD length: avail=%d return=%d actual=%d\n",
   4500			     le32_to_cpu(read_rev->available_length_dword) &
   4501				    SLI4_READ_REV_AVAILABLE_LENGTH,
   4502			     le32_to_cpu(read_rev->returned_vpd_length),
   4503			     le32_to_cpu(read_rev->actual_vpd_length));
   4504	}
   4505	sli4->vpd_length = le32_to_cpu(read_rev->returned_vpd_length);
   4506	return 0;
   4507}
   4508
   4509static int
   4510sli_get_config(struct sli4 *sli4)
   4511{
   4512	struct sli4_rsp_cmn_get_port_name *port_name;
   4513	struct sli4_cmd_read_nvparms *read_nvparms;
   4514
   4515	/*
   4516	 * Read the device configuration
   4517	 */
   4518	if (sli_get_read_config(sli4))
   4519		return -EIO;
   4520
   4521	if (sli_get_sli4_parameters(sli4))
   4522		return -EIO;
   4523
   4524	if (sli_get_ctrl_attributes(sli4))
   4525		return -EIO;
   4526
   4527	if (sli_cmd_common_get_port_name(sli4, sli4->bmbx.virt))
   4528		return -EIO;
   4529
   4530	port_name = (struct sli4_rsp_cmn_get_port_name *)
   4531		    (((u8 *)sli4->bmbx.virt) +
   4532		    offsetof(struct sli4_cmd_sli_config, payload.embed));
   4533
   4534	if (sli_bmbx_command(sli4)) {
   4535		efc_log_crit(sli4, "bootstrap mailbox fail (GET_PORT_NAME)\n");
   4536		return -EIO;
   4537	}
   4538
   4539	sli4->port_name[0] = port_name->port_name[sli4->port_number];
   4540	sli4->port_name[1] = '\0';
   4541
   4542	if (sli_get_fw_rev(sli4))
   4543		return -EIO;
   4544
   4545	if (sli_cmd_read_nvparms(sli4, sli4->bmbx.virt)) {
   4546		efc_log_err(sli4, "bad READ_NVPARMS write\n");
   4547		return -EIO;
   4548	}
   4549
   4550	if (sli_bmbx_command(sli4)) {
   4551		efc_log_crit(sli4, "bootstrap mailbox fail (READ_NVPARMS)\n");
   4552		return -EIO;
   4553	}
   4554
   4555	read_nvparms = sli4->bmbx.virt;
   4556	if (le16_to_cpu(read_nvparms->hdr.status)) {
   4557		efc_log_err(sli4, "READ_NVPARMS bad status %#x\n",
   4558			    le16_to_cpu(read_nvparms->hdr.status));
   4559		return -EIO;
   4560	}
   4561
   4562	memcpy(sli4->wwpn, read_nvparms->wwpn, sizeof(sli4->wwpn));
   4563	memcpy(sli4->wwnn, read_nvparms->wwnn, sizeof(sli4->wwnn));
   4564
   4565	efc_log_info(sli4, "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
   4566		     sli4->wwpn[0], sli4->wwpn[1], sli4->wwpn[2], sli4->wwpn[3],
   4567		     sli4->wwpn[4], sli4->wwpn[5], sli4->wwpn[6], sli4->wwpn[7]);
   4568	efc_log_info(sli4, "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
   4569		     sli4->wwnn[0], sli4->wwnn[1], sli4->wwnn[2], sli4->wwnn[3],
   4570		     sli4->wwnn[4], sli4->wwnn[5], sli4->wwnn[6], sli4->wwnn[7]);
   4571
   4572	return 0;
   4573}
   4574
   4575int
   4576	sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev,
   4577	  void __iomem *reg[])
   4578{
   4579	u32 intf = U32_MAX;
   4580	u32 pci_class_rev = 0;
   4581	u32 rev_id = 0;
   4582	u32 family = 0;
   4583	u32 asic_id = 0;
   4584	u32 i;
   4585	struct sli4_asic_entry_t *asic;
   4586
   4587	memset(sli4, 0, sizeof(struct sli4));
   4588
   4589	sli4->os = os;
   4590	sli4->pci = pdev;
   4591
   4592	for (i = 0; i < 6; i++)
   4593		sli4->reg[i] = reg[i];
   4594	/*
   4595	 * Read the SLI_INTF register to discover the register layout
   4596	 * and other capability information
   4597	 */
   4598	if (pci_read_config_dword(pdev, SLI4_INTF_REG, &intf))
   4599		return -EIO;
   4600
   4601	if ((intf & SLI4_INTF_VALID_MASK) != (u32)SLI4_INTF_VALID_VALUE) {
   4602		efc_log_err(sli4, "SLI_INTF is not valid\n");
   4603		return -EIO;
   4604	}
   4605
   4606	/* the driver only supports SLI-4 */
   4607	if ((intf & SLI4_INTF_REV_MASK) != SLI4_INTF_REV_S4) {
   4608		efc_log_err(sli4, "Unsupported SLI revision (intf=%#x)\n", intf);
   4609		return -EIO;
   4610	}
   4611
   4612	sli4->sli_family = intf & SLI4_INTF_FAMILY_MASK;
   4613
   4614	sli4->if_type = intf & SLI4_INTF_IF_TYPE_MASK;
   4615	efc_log_info(sli4, "status=%#x error1=%#x error2=%#x\n",
   4616		     sli_reg_read_status(sli4),
   4617		     sli_reg_read_err1(sli4),
   4618		     sli_reg_read_err2(sli4));
   4619
   4620	/*
   4621	 * set the ASIC type and revision
   4622	 */
   4623	if (pci_read_config_dword(pdev, PCI_CLASS_REVISION, &pci_class_rev))
   4624		return -EIO;
   4625
   4626	rev_id = pci_class_rev & 0xff;
   4627	family = sli4->sli_family;
   4628	if (family == SLI4_FAMILY_CHECK_ASIC_TYPE) {
   4629		if (!pci_read_config_dword(pdev, SLI4_ASIC_ID_REG, &asic_id))
   4630			family = asic_id & SLI4_ASIC_GEN_MASK;
   4631	}
   4632
   4633	for (i = 0, asic = sli4_asic_table; i < ARRAY_SIZE(sli4_asic_table);
   4634	     i++, asic++) {
   4635		if (rev_id == asic->rev_id && family == asic->family) {
   4636			sli4->asic_type = family;
   4637			sli4->asic_rev = rev_id;
   4638			break;
   4639		}
   4640	}
   4641	/* Fail if no matching asic type/rev was found */
   4642	if (!sli4->asic_type) {
   4643		efc_log_err(sli4, "no matching asic family/rev found: %02x/%02x\n",
   4644			    family, rev_id);
   4645		return -EIO;
   4646	}
   4647
   4648	/*
   4649	 * The bootstrap mailbox is equivalent to an MQ with a single 256-byte
   4650	 * entry, a CQ with a single 16-byte entry, and no event queue.
   4651	 * Alignment must be 16 bytes as the low order address bits in the
   4652	 * address register are also control / status.
   4653	 */
   4654	sli4->bmbx.size = SLI4_BMBX_SIZE + sizeof(struct sli4_mcqe);
   4655	sli4->bmbx.virt = dma_alloc_coherent(&pdev->dev, sli4->bmbx.size,
   4656					     &sli4->bmbx.phys, GFP_KERNEL);
   4657	if (!sli4->bmbx.virt) {
   4658		memset(&sli4->bmbx, 0, sizeof(struct efc_dma));
   4659		efc_log_err(sli4, "bootstrap mailbox allocation failed\n");
   4660		return -EIO;
   4661	}
   4662
   4663	if (sli4->bmbx.phys & SLI4_BMBX_MASK_LO) {
   4664		efc_log_err(sli4, "bad alignment for bootstrap mailbox\n");
   4665		return -EIO;
   4666	}
   4667
   4668	efc_log_info(sli4, "bmbx v=%p p=0x%x %08x s=%zd\n", sli4->bmbx.virt,
   4669		     upper_32_bits(sli4->bmbx.phys),
   4670		     lower_32_bits(sli4->bmbx.phys), sli4->bmbx.size);
   4671
   4672	/* 4096 is arbitrary. What should this value actually be? */
   4673	sli4->vpd_data.size = 4096;
   4674	sli4->vpd_data.virt = dma_alloc_coherent(&pdev->dev,
   4675						 sli4->vpd_data.size,
   4676						 &sli4->vpd_data.phys,
   4677						 GFP_KERNEL);
   4678	if (!sli4->vpd_data.virt) {
   4679		memset(&sli4->vpd_data, 0, sizeof(struct efc_dma));
   4680		/* Note that failure isn't fatal in this specific case */
   4681		efc_log_info(sli4, "VPD buffer allocation failed\n");
   4682	}
   4683
   4684	if (!sli_fw_init(sli4)) {
   4685		efc_log_err(sli4, "FW initialization failed\n");
   4686		return -EIO;
   4687	}
   4688
   4689	/*
   4690	 * Set one of fcpi(initiator), fcpt(target), fcpc(combined) to true
   4691	 * in addition to any other desired features
   4692	 */
   4693	sli4->features = (SLI4_REQFEAT_IAAB | SLI4_REQFEAT_NPIV |
   4694				 SLI4_REQFEAT_DIF | SLI4_REQFEAT_VF |
   4695				 SLI4_REQFEAT_FCPC | SLI4_REQFEAT_IAAR |
   4696				 SLI4_REQFEAT_HLM | SLI4_REQFEAT_PERFH |
   4697				 SLI4_REQFEAT_RXSEQ | SLI4_REQFEAT_RXRI |
   4698				 SLI4_REQFEAT_MRQP);
   4699
   4700	/* use performance hints if available */
   4701	if (sli4->params.perf_hint)
   4702		sli4->features |= SLI4_REQFEAT_PERFH;
   4703
   4704	if (sli_request_features(sli4, &sli4->features, true))
   4705		return -EIO;
   4706
   4707	if (sli_get_config(sli4))
   4708		return -EIO;
   4709
   4710	return 0;
   4711}
   4712
   4713int
   4714sli_init(struct sli4 *sli4)
   4715{
   4716	if (sli4->params.has_extents) {
   4717		efc_log_info(sli4, "extend allocation not supported\n");
   4718		return -EIO;
   4719	}
   4720
   4721	sli4->features &= (~SLI4_REQFEAT_HLM);
   4722	sli4->features &= (~SLI4_REQFEAT_RXSEQ);
   4723	sli4->features &= (~SLI4_REQFEAT_RXRI);
   4724
   4725	if (sli_request_features(sli4, &sli4->features, false))
   4726		return -EIO;
   4727
   4728	return 0;
   4729}
   4730
   4731int
   4732sli_reset(struct sli4 *sli4)
   4733{
   4734	u32	i;
   4735
   4736	if (!sli_fw_init(sli4)) {
   4737		efc_log_crit(sli4, "FW initialization failed\n");
   4738		return -EIO;
   4739	}
   4740
   4741	kfree(sli4->ext[0].base);
   4742	sli4->ext[0].base = NULL;
   4743
   4744	for (i = 0; i < SLI4_RSRC_MAX; i++) {
   4745		bitmap_free(sli4->ext[i].use_map);
   4746		sli4->ext[i].use_map = NULL;
   4747		sli4->ext[i].base = NULL;
   4748	}
   4749
   4750	return sli_get_config(sli4);
   4751}
   4752
   4753int
   4754sli_fw_reset(struct sli4 *sli4)
   4755{
   4756	/*
   4757	 * Firmware must be ready before issuing the reset.
   4758	 */
   4759	if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) {
   4760		efc_log_crit(sli4, "FW status is NOT ready\n");
   4761		return -EIO;
   4762	}
   4763
   4764	/* Lancer uses PHYDEV_CONTROL */
   4765	writel(SLI4_PHYDEV_CTRL_FRST, (sli4->reg[0] + SLI4_PHYDEV_CTRL_REG));
   4766
   4767	/* wait for the FW to become ready after the reset */
   4768	if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) {
   4769		efc_log_crit(sli4, "Failed to be ready after firmware reset\n");
   4770		return -EIO;
   4771	}
   4772	return 0;
   4773}
   4774
   4775void
   4776sli_teardown(struct sli4 *sli4)
   4777{
   4778	u32 i;
   4779
   4780	kfree(sli4->ext[0].base);
   4781	sli4->ext[0].base = NULL;
   4782
   4783	for (i = 0; i < SLI4_RSRC_MAX; i++) {
   4784		sli4->ext[i].base = NULL;
   4785
   4786		bitmap_free(sli4->ext[i].use_map);
   4787		sli4->ext[i].use_map = NULL;
   4788	}
   4789
   4790	if (!sli_sliport_reset(sli4))
   4791		efc_log_err(sli4, "FW deinitialization failed\n");
   4792
   4793	dma_free_coherent(&sli4->pci->dev, sli4->vpd_data.size,
   4794			  sli4->vpd_data.virt, sli4->vpd_data.phys);
   4795	memset(&sli4->vpd_data, 0, sizeof(struct efc_dma));
   4796
   4797	dma_free_coherent(&sli4->pci->dev, sli4->bmbx.size,
   4798			  sli4->bmbx.virt, sli4->bmbx.phys);
   4799	memset(&sli4->bmbx, 0, sizeof(struct efc_dma));
   4800}
   4801
   4802int
   4803sli_callback(struct sli4 *sli4, enum sli4_callback which,
   4804	     void *func, void *arg)
   4805{
   4806	if (!func) {
   4807		efc_log_err(sli4, "bad parameter sli4=%p which=%#x func=%p\n",
   4808			    sli4, which, func);
   4809		return -EIO;
   4810	}
   4811
   4812	switch (which) {
   4813	case SLI4_CB_LINK:
   4814		sli4->link = func;
   4815		sli4->link_arg = arg;
   4816		break;
   4817	default:
   4818		efc_log_info(sli4, "unknown callback %#x\n", which);
   4819		return -EIO;
   4820	}
   4821
   4822	return 0;
   4823}
   4824
   4825int
   4826sli_eq_modify_delay(struct sli4 *sli4, struct sli4_queue *eq,
   4827		    u32 num_eq, u32 shift, u32 delay_mult)
   4828{
   4829	sli_cmd_common_modify_eq_delay(sli4, sli4->bmbx.virt, eq, num_eq,
   4830				       shift, delay_mult);
   4831
   4832	if (sli_bmbx_command(sli4)) {
   4833		efc_log_crit(sli4, "bootstrap mailbox write fail (MODIFY EQ DELAY)\n");
   4834		return -EIO;
   4835	}
   4836	if (sli_res_sli_config(sli4, sli4->bmbx.virt)) {
   4837		efc_log_err(sli4, "bad status MODIFY EQ DELAY\n");
   4838		return -EIO;
   4839	}
   4840
   4841	return 0;
   4842}
   4843
   4844int
   4845sli_resource_alloc(struct sli4 *sli4, enum sli4_resource rtype,
   4846		   u32 *rid, u32 *index)
   4847{
   4848	int rc = 0;
   4849	u32 size;
   4850	u32 ext_idx;
   4851	u32 item_idx;
   4852	u32 position;
   4853
   4854	*rid = U32_MAX;
   4855	*index = U32_MAX;
   4856
   4857	switch (rtype) {
   4858	case SLI4_RSRC_VFI:
   4859	case SLI4_RSRC_VPI:
   4860	case SLI4_RSRC_RPI:
   4861	case SLI4_RSRC_XRI:
   4862		position =
   4863		find_first_zero_bit(sli4->ext[rtype].use_map,
   4864				    sli4->ext[rtype].map_size);
   4865		if (position >= sli4->ext[rtype].map_size) {
   4866			efc_log_err(sli4, "out of resource %d (alloc=%d)\n",
   4867				    rtype, sli4->ext[rtype].n_alloc);
   4868			rc = -EIO;
   4869			break;
   4870		}
   4871		set_bit(position, sli4->ext[rtype].use_map);
   4872		*index = position;
   4873
   4874		size = sli4->ext[rtype].size;
   4875
   4876		ext_idx = *index / size;
   4877		item_idx   = *index % size;
   4878
   4879		*rid = sli4->ext[rtype].base[ext_idx] + item_idx;
   4880
   4881		sli4->ext[rtype].n_alloc++;
   4882		break;
   4883	default:
   4884		rc = -EIO;
   4885	}
   4886
   4887	return rc;
   4888}
   4889
   4890int
   4891sli_resource_free(struct sli4 *sli4, enum sli4_resource rtype, u32 rid)
   4892{
   4893	int rc = -EIO;
   4894	u32 x;
   4895	u32 size, *base;
   4896
   4897	switch (rtype) {
   4898	case SLI4_RSRC_VFI:
   4899	case SLI4_RSRC_VPI:
   4900	case SLI4_RSRC_RPI:
   4901	case SLI4_RSRC_XRI:
   4902		/*
   4903		 * Figure out which extent contains the resource ID. I.e. find
   4904		 * the extent such that
   4905		 *   extent->base <= resource ID < extent->base + extent->size
   4906		 */
   4907		base = sli4->ext[rtype].base;
   4908		size = sli4->ext[rtype].size;
   4909
   4910		/*
   4911		 * In the case of FW reset, this may be cleared
   4912		 * but the force_free path will still attempt to
   4913		 * free the resource. Prevent a NULL pointer access.
   4914		 */
   4915		if (!base)
   4916			break;
   4917
   4918		for (x = 0; x < sli4->ext[rtype].number; x++) {
   4919			if ((rid < base[x] || (rid >= (base[x] + size))))
   4920				continue;
   4921
   4922			rid -= base[x];
   4923			clear_bit((x * size) + rid, sli4->ext[rtype].use_map);
   4924			rc = 0;
   4925			break;
   4926		}
   4927		break;
   4928	default:
   4929		break;
   4930	}
   4931
   4932	return rc;
   4933}
   4934
   4935int
   4936sli_resource_reset(struct sli4 *sli4, enum sli4_resource rtype)
   4937{
   4938	int rc = -EIO;
   4939	u32 i;
   4940
   4941	switch (rtype) {
   4942	case SLI4_RSRC_VFI:
   4943	case SLI4_RSRC_VPI:
   4944	case SLI4_RSRC_RPI:
   4945	case SLI4_RSRC_XRI:
   4946		for (i = 0; i < sli4->ext[rtype].map_size; i++)
   4947			clear_bit(i, sli4->ext[rtype].use_map);
   4948		rc = 0;
   4949		break;
   4950	default:
   4951		break;
   4952	}
   4953
   4954	return rc;
   4955}
   4956
   4957int sli_raise_ue(struct sli4 *sli4, u8 dump)
   4958{
   4959	u32 val = 0;
   4960
   4961	if (dump == SLI4_FUNC_DESC_DUMP) {
   4962		val = SLI4_PORT_CTRL_FDD | SLI4_PORT_CTRL_IP;
   4963		writel(val, (sli4->reg[0] + SLI4_PORT_CTRL_REG));
   4964	} else {
   4965		val = SLI4_PHYDEV_CTRL_FRST;
   4966
   4967		if (dump == SLI4_CHIP_LEVEL_DUMP)
   4968			val |= SLI4_PHYDEV_CTRL_DD;
   4969		writel(val, (sli4->reg[0] + SLI4_PHYDEV_CTRL_REG));
   4970	}
   4971
   4972	return 0;
   4973}
   4974
   4975int sli_dump_is_ready(struct sli4 *sli4)
   4976{
   4977	int rc = SLI4_DUMP_READY_STATUS_NOT_READY;
   4978	u32 port_val;
   4979	u32 bmbx_val;
   4980
   4981	/*
   4982	 * Ensure that the port is ready AND the mailbox is
   4983	 * ready before signaling that the dump is ready to go.
   4984	 */
   4985	port_val = sli_reg_read_status(sli4);
   4986	bmbx_val = readl(sli4->reg[0] + SLI4_BMBX_REG);
   4987
   4988	if ((bmbx_val & SLI4_BMBX_RDY) &&
   4989	    (port_val & SLI4_PORT_STATUS_RDY)) {
   4990		if (port_val & SLI4_PORT_STATUS_DIP)
   4991			rc = SLI4_DUMP_READY_STATUS_DD_PRESENT;
   4992		else if (port_val & SLI4_PORT_STATUS_FDP)
   4993			rc = SLI4_DUMP_READY_STATUS_FDB_PRESENT;
   4994	}
   4995
   4996	return rc;
   4997}
   4998
   4999bool sli_reset_required(struct sli4 *sli4)
   5000{
   5001	u32 val;
   5002
   5003	val = sli_reg_read_status(sli4);
   5004	return (val & SLI4_PORT_STATUS_RN);
   5005}
   5006
   5007int
   5008sli_cmd_post_sgl_pages(struct sli4 *sli4, void *buf, u16 xri,
   5009		       u32 xri_count, struct efc_dma *page0[],
   5010		       struct efc_dma *page1[], struct efc_dma *dma)
   5011{
   5012	struct sli4_rqst_post_sgl_pages *post = NULL;
   5013	u32 i;
   5014	__le32 req_len;
   5015
   5016	post = sli_config_cmd_init(sli4, buf,
   5017				   SLI4_CFG_PYLD_LENGTH(post_sgl_pages), dma);
   5018	if (!post)
   5019		return -EIO;
   5020
   5021	/*
   5022	 * Payload size: 4 bytes for xri_start + xri_count, plus two 64-bit
   5023	 * physical addresses (page0 and page1) for each of the xri_count
   5024	 * registered XRIs.
   5025	 */
   5026	req_len = cpu_to_le32(4 + (xri_count * (sizeof(uint64_t) * 2)));
   5027	sli_cmd_fill_hdr(&post->hdr, SLI4_OPC_POST_SGL_PAGES, SLI4_SUBSYSTEM_FC,
   5028			 CMD_V0, req_len);
   5029	post->xri_start = cpu_to_le16(xri);
   5030	post->xri_count = cpu_to_le16(xri_count);
   5031
   5032	for (i = 0; i < xri_count; i++) {
   5033		post->page_set[i].page0_low  =
   5034				cpu_to_le32(lower_32_bits(page0[i]->phys));
   5035		post->page_set[i].page0_high =
   5036				cpu_to_le32(upper_32_bits(page0[i]->phys));
   5037	}
   5038
   5039	if (page1) {
   5040		for (i = 0; i < xri_count; i++) {
   5041			post->page_set[i].page1_low =
   5042				cpu_to_le32(lower_32_bits(page1[i]->phys));
   5043			post->page_set[i].page1_high =
   5044				cpu_to_le32(upper_32_bits(page1[i]->phys));
   5045		}
   5046	}
   5047
   5048	return 0;
   5049}
   5050
   5051int
   5052sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf, struct efc_dma *dma,
   5053			   u16 rpi, struct efc_dma *payload_dma)
   5054{
   5055	struct sli4_rqst_post_hdr_templates *req = NULL;
   5056	uintptr_t phys = 0;
   5057	u32 i = 0;
   5058	u32 page_count, payload_size;
   5059
   5060	page_count = sli_page_count(dma->size, SLI_PAGE_SIZE);
   5061
   5062	payload_size = ((sizeof(struct sli4_rqst_post_hdr_templates) +
   5063		(page_count * SZ_DMAADDR)) - sizeof(struct sli4_rqst_hdr));
   5064
   5065	if (page_count > 16) {
   5066		/*
   5067		 * We can't fit more than 16 descriptors into an embedded mbox
   5068		 * command, so the request has to be non-embedded.
   5069		 */
   5070		payload_dma->size = payload_size;
   5071		payload_dma->virt = dma_alloc_coherent(&sli4->pci->dev,
   5072						       payload_dma->size,
   5073					     &payload_dma->phys, GFP_KERNEL);
   5074		if (!payload_dma->virt) {
   5075			memset(payload_dma, 0, sizeof(struct efc_dma));
   5076			efc_log_err(sli4, "mbox payload memory allocation fail\n");
   5077			return -EIO;
   5078		}
   5079		req = sli_config_cmd_init(sli4, buf, payload_size, payload_dma);
   5080	} else {
   5081		req = sli_config_cmd_init(sli4, buf, payload_size, NULL);
   5082	}
   5083
   5084	if (!req)
   5085		return -EIO;
   5086
   5087	if (rpi == U16_MAX)
   5088		rpi = sli4->ext[SLI4_RSRC_RPI].base[0];
   5089
   5090	sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_POST_HDR_TEMPLATES,
   5091			 SLI4_SUBSYSTEM_FC, CMD_V0,
   5092			 SLI4_RQST_PYLD_LEN(post_hdr_templates));
   5093
   5094	req->rpi_offset = cpu_to_le16(rpi);
   5095	req->page_count = cpu_to_le16(page_count);
   5096	phys = dma->phys;
   5097	for (i = 0; i < page_count; i++) {
   5098		req->page_descriptor[i].low  = cpu_to_le32(lower_32_bits(phys));
   5099		req->page_descriptor[i].high = cpu_to_le32(upper_32_bits(phys));
   5100
   5101		phys += SLI_PAGE_SIZE;
   5102	}
   5103
   5104	return 0;
   5105}
   5106
   5107u32
   5108sli_fc_get_rpi_requirements(struct sli4 *sli4, u32 n_rpi)
   5109{
   5110	u32 bytes = 0;
   5111
   5112	/* Check if header templates needed */
   5113	if (sli4->params.hdr_template_req)
   5114		/* round up to a page */
   5115		bytes = round_up(n_rpi * SLI4_HDR_TEMPLATE_SIZE, SLI_PAGE_SIZE);
   5116
   5117	return bytes;
   5118}
   5119
   5120const char *
   5121sli_fc_get_status_string(u32 status)
   5122{
   5123	static struct {
   5124		u32 code;
   5125		const char *label;
   5126	} lookup[] = {
   5127		{SLI4_FC_WCQE_STATUS_SUCCESS,		"SUCCESS"},
   5128		{SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE,	"FCP_RSP_FAILURE"},
   5129		{SLI4_FC_WCQE_STATUS_REMOTE_STOP,	"REMOTE_STOP"},
   5130		{SLI4_FC_WCQE_STATUS_LOCAL_REJECT,	"LOCAL_REJECT"},
   5131		{SLI4_FC_WCQE_STATUS_NPORT_RJT,		"NPORT_RJT"},
   5132		{SLI4_FC_WCQE_STATUS_FABRIC_RJT,	"FABRIC_RJT"},
   5133		{SLI4_FC_WCQE_STATUS_NPORT_BSY,		"NPORT_BSY"},
   5134		{SLI4_FC_WCQE_STATUS_FABRIC_BSY,	"FABRIC_BSY"},
   5135		{SLI4_FC_WCQE_STATUS_LS_RJT,		"LS_RJT"},
   5136		{SLI4_FC_WCQE_STATUS_CMD_REJECT,	"CMD_REJECT"},
   5137		{SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK,	"FCP_TGT_LENCHECK"},
   5138		{SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED, "BUF_LEN_EXCEEDED"},
   5139		{SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED,
   5140				"RQ_INSUFF_BUF_NEEDED"},
   5141		{SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC, "RQ_INSUFF_FRM_DESC"},
   5142		{SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE,	"RQ_DMA_FAILURE"},
   5143		{SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE,	"FCP_RSP_TRUNCATE"},
   5144		{SLI4_FC_WCQE_STATUS_DI_ERROR,		"DI_ERROR"},
   5145		{SLI4_FC_WCQE_STATUS_BA_RJT,		"BA_RJT"},
   5146		{SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED,
   5147				"RQ_INSUFF_XRI_NEEDED"},
   5148		{SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC, "INSUFF_XRI_DISC"},
   5149		{SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT,	"RX_ERROR_DETECT"},
   5150		{SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST,	"RX_ABORT_REQUEST"},
   5151		};
   5152	u32 i;
   5153
   5154	for (i = 0; i < ARRAY_SIZE(lookup); i++) {
   5155		if (status == lookup[i].code)
   5156			return lookup[i].label;
   5157	}
   5158	return "unknown";
   5159}