cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

netxen_nic_ctx.c (23285B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Copyright (C) 2003 - 2009 NetXen, Inc.
      4 * Copyright (C) 2009 - QLogic Corporation.
      5 * All rights reserved.
      6 */
      7
      8#include "netxen_nic_hw.h"
      9#include "netxen_nic.h"
     10
     11#define NXHAL_VERSION	1
     12
     13static u32
     14netxen_poll_rsp(struct netxen_adapter *adapter)
     15{
     16	u32 rsp = NX_CDRP_RSP_OK;
     17	int	timeout = 0;
     18
     19	do {
      20		/* give at least 1ms for firmware to respond */
     21		msleep(1);
     22
     23		if (++timeout > NX_OS_CRB_RETRY_COUNT)
     24			return NX_CDRP_RSP_TIMEOUT;
     25
     26		rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
     27	} while (!NX_CDRP_IS_RSP(rsp));
     28
     29	return rsp;
     30}
     31
     32static u32
     33netxen_issue_cmd(struct netxen_adapter *adapter, struct netxen_cmd_args *cmd)
     34{
     35	u32 rsp;
     36	u32 signature = 0;
     37	u32 rcode = NX_RCODE_SUCCESS;
     38
     39	signature = NX_CDRP_SIGNATURE_MAKE(adapter->ahw.pci_func,
     40						NXHAL_VERSION);
     41	/* Acquire semaphore before accessing CRB */
     42	if (netxen_api_lock(adapter))
     43		return NX_RCODE_TIMEOUT;
     44
     45	NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
     46
     47	NXWR32(adapter, NX_ARG1_CRB_OFFSET, cmd->req.arg1);
     48
     49	NXWR32(adapter, NX_ARG2_CRB_OFFSET, cmd->req.arg2);
     50
     51	NXWR32(adapter, NX_ARG3_CRB_OFFSET, cmd->req.arg3);
     52
     53	NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd->req.cmd));
     54
     55	rsp = netxen_poll_rsp(adapter);
     56
     57	if (rsp == NX_CDRP_RSP_TIMEOUT) {
     58		printk(KERN_ERR "%s: card response timeout.\n",
     59				netxen_nic_driver_name);
     60
     61		rcode = NX_RCODE_TIMEOUT;
     62	} else if (rsp == NX_CDRP_RSP_FAIL) {
     63		rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
     64
      65		printk(KERN_ERR "%s: failed card response code: 0x%x\n",
     66				netxen_nic_driver_name, rcode);
     67	} else if (rsp == NX_CDRP_RSP_OK) {
     68		cmd->rsp.cmd = NX_RCODE_SUCCESS;
     69		if (cmd->rsp.arg2)
     70			cmd->rsp.arg2 = NXRD32(adapter, NX_ARG2_CRB_OFFSET);
     71		if (cmd->rsp.arg3)
     72			cmd->rsp.arg3 = NXRD32(adapter, NX_ARG3_CRB_OFFSET);
     73	}
     74
     75	if (cmd->rsp.arg1)
     76		cmd->rsp.arg1 = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
     77	/* Release semaphore */
     78	netxen_api_unlock(adapter);
     79
     80	return rcode;
     81}
     82
     83static int
     84netxen_get_minidump_template_size(struct netxen_adapter *adapter)
     85{
     86	struct netxen_cmd_args cmd;
     87	memset(&cmd, 0, sizeof(cmd));
     88	cmd.req.cmd = NX_CDRP_CMD_TEMP_SIZE;
     89	memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
     90	netxen_issue_cmd(adapter, &cmd);
     91	if (cmd.rsp.cmd != NX_RCODE_SUCCESS) {
     92		dev_info(&adapter->pdev->dev,
     93			"Can't get template size %d\n", cmd.rsp.cmd);
     94		return -EIO;
     95	}
     96	adapter->mdump.md_template_size = cmd.rsp.arg2;
     97	adapter->mdump.md_template_ver = cmd.rsp.arg3;
     98	return 0;
     99}
    100
    101static int
    102netxen_get_minidump_template(struct netxen_adapter *adapter)
    103{
    104	dma_addr_t md_template_addr;
    105	void *addr;
    106	u32 size;
    107	struct netxen_cmd_args cmd;
    108	size = adapter->mdump.md_template_size;
    109
    110	if (size == 0) {
     111		dev_err(&adapter->pdev->dev, "Cannot capture minidump "
     112			"template. Invalid template size.\n");
    113		return NX_RCODE_INVALID_ARGS;
    114	}
    115
    116	addr = dma_alloc_coherent(&adapter->pdev->dev, size,
    117				  &md_template_addr, GFP_KERNEL);
    118	if (!addr) {
     119		dev_err(&adapter->pdev->dev, "Unable to allocate DMA-able memory for template.\n");
    120		return -ENOMEM;
    121	}
    122
    123	memset(&cmd, 0, sizeof(cmd));
    124	memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
    125	cmd.req.cmd = NX_CDRP_CMD_GET_TEMP_HDR;
    126	cmd.req.arg1 = LSD(md_template_addr);
    127	cmd.req.arg2 = MSD(md_template_addr);
    128	cmd.req.arg3 |= size;
    129	netxen_issue_cmd(adapter, &cmd);
    130
    131	if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) {
    132		memcpy(adapter->mdump.md_template, addr, size);
    133	} else {
     134		dev_err(&adapter->pdev->dev, "Failed to get minidump template, err_code: %d, requested_size: %d, actual_size: %d\n",
    135			cmd.rsp.cmd, size, cmd.rsp.arg2);
    136	}
    137	dma_free_coherent(&adapter->pdev->dev, size, addr, md_template_addr);
    138	return 0;
    139}
    140
    141static u32
    142netxen_check_template_checksum(struct netxen_adapter *adapter)
    143{
     144	u64 sum = 0;
     145	u32 *buff = adapter->mdump.md_template;
     146	int count = adapter->mdump.md_template_size / sizeof(uint32_t);
     147
     148	while (count-- > 0)
     149		sum += *buff++;
     150	while (sum >> 32)
     151		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
     152
     153	return ~sum;
    154}
    155
    156int
    157netxen_setup_minidump(struct netxen_adapter *adapter)
    158{
    159	int err = 0, i;
    160	u32 *template, *tmp_buf;
    161	err = netxen_get_minidump_template_size(adapter);
    162	if (err) {
    163		adapter->mdump.fw_supports_md = 0;
    164		if ((err == NX_RCODE_CMD_INVALID) ||
    165			(err == NX_RCODE_CMD_NOT_IMPL)) {
    166			dev_info(&adapter->pdev->dev,
    167				"Flashed firmware version does not support minidump, minimum version required is [ %u.%u.%u ]\n",
    168				NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR,
    169				NX_MD_SUPPORT_SUBVERSION);
    170		}
    171		return err;
    172	}
    173
    174	if (!adapter->mdump.md_template_size) {
     175		dev_err(&adapter->pdev->dev, "Error: Invalid template size, "
     176			"should be non-zero.\n");
    177		return -EIO;
    178	}
    179	adapter->mdump.md_template =
    180		kmalloc(adapter->mdump.md_template_size, GFP_KERNEL);
    181
    182	if (!adapter->mdump.md_template)
    183		return -ENOMEM;
    184
    185	err = netxen_get_minidump_template(adapter);
    186	if (err) {
    187		if (err == NX_RCODE_CMD_NOT_IMPL)
    188			adapter->mdump.fw_supports_md = 0;
    189		goto free_template;
    190	}
    191
    192	if (netxen_check_template_checksum(adapter)) {
    193		dev_err(&adapter->pdev->dev, "Minidump template checksum Error\n");
    194		err = -EIO;
    195		goto free_template;
    196	}
    197
    198	adapter->mdump.md_capture_mask = NX_DUMP_MASK_DEF;
    199	tmp_buf = (u32 *) adapter->mdump.md_template;
    200	template = (u32 *) adapter->mdump.md_template;
    201	for (i = 0; i < adapter->mdump.md_template_size/sizeof(u32); i++)
    202		*template++ = __le32_to_cpu(*tmp_buf++);
    203	adapter->mdump.md_capture_buff = NULL;
    204	adapter->mdump.fw_supports_md = 1;
    205	adapter->mdump.md_enabled = 0;
    206
    207	return err;
    208
    209free_template:
    210	kfree(adapter->mdump.md_template);
    211	adapter->mdump.md_template = NULL;
    212	return err;
    213}
    214
    215
    216int
    217nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
    218{
    219	u32 rcode = NX_RCODE_SUCCESS;
    220	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
    221	struct netxen_cmd_args cmd;
    222
    223	memset(&cmd, 0, sizeof(cmd));
    224	cmd.req.cmd = NX_CDRP_CMD_SET_MTU;
    225	cmd.req.arg1 = recv_ctx->context_id;
    226	cmd.req.arg2 = mtu;
    227	cmd.req.arg3 = 0;
    228
    229	if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
    230		rcode = netxen_issue_cmd(adapter, &cmd);
    231
    232	if (rcode != NX_RCODE_SUCCESS)
    233		return -EIO;
    234
    235	return 0;
    236}
    237
    238int
    239nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
    240			u32 speed, u32 duplex, u32 autoneg)
    241{
    242	struct netxen_cmd_args cmd;
    243
    244	memset(&cmd, 0, sizeof(cmd));
    245	cmd.req.cmd = NX_CDRP_CMD_CONFIG_GBE_PORT;
    246	cmd.req.arg1 = speed;
    247	cmd.req.arg2 = duplex;
    248	cmd.req.arg3 = autoneg;
    249	return netxen_issue_cmd(adapter, &cmd);
    250}
    251
    252static int
    253nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
    254{
    255	void *addr;
    256	nx_hostrq_rx_ctx_t *prq;
    257	nx_cardrsp_rx_ctx_t *prsp;
    258	nx_hostrq_rds_ring_t *prq_rds;
    259	nx_hostrq_sds_ring_t *prq_sds;
    260	nx_cardrsp_rds_ring_t *prsp_rds;
    261	nx_cardrsp_sds_ring_t *prsp_sds;
    262	struct nx_host_rds_ring *rds_ring;
    263	struct nx_host_sds_ring *sds_ring;
    264	struct netxen_cmd_args cmd;
    265
    266	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
    267	u64 phys_addr;
    268
    269	int i, nrds_rings, nsds_rings;
    270	size_t rq_size, rsp_size;
    271	u32 cap, reg, val;
    272
    273	int err;
    274
    275	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
    276
    277	nrds_rings = adapter->max_rds_rings;
    278	nsds_rings = adapter->max_sds_rings;
    279
    280	rq_size =
    281		SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
    282	rsp_size =
    283		SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);
    284
    285	addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
    286				  &hostrq_phys_addr, GFP_KERNEL);
    287	if (addr == NULL)
    288		return -ENOMEM;
    289	prq = addr;
    290
    291	addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
    292				  &cardrsp_phys_addr, GFP_KERNEL);
    293	if (addr == NULL) {
    294		err = -ENOMEM;
    295		goto out_free_rq;
    296	}
    297	prsp = addr;
    298
    299	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
    300
    301	cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
    302	cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);
    303
    304	if (adapter->flags & NETXEN_FW_MSS_CAP)
    305		cap |= NX_CAP0_HW_LRO_MSS;
    306
    307	prq->capabilities[0] = cpu_to_le32(cap);
    308	prq->host_int_crb_mode =
    309		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
    310	prq->host_rds_crb_mode =
    311		cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);
    312
    313	prq->num_rds_rings = cpu_to_le16(nrds_rings);
    314	prq->num_sds_rings = cpu_to_le16(nsds_rings);
    315	prq->rds_ring_offset = cpu_to_le32(0);
    316
    317	val = le32_to_cpu(prq->rds_ring_offset) +
    318		(sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
    319	prq->sds_ring_offset = cpu_to_le32(val);
    320
    321	prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
    322			le32_to_cpu(prq->rds_ring_offset));
    323
    324	for (i = 0; i < nrds_rings; i++) {
    325
    326		rds_ring = &recv_ctx->rds_rings[i];
    327
    328		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
    329		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
    330		prq_rds[i].ring_kind = cpu_to_le32(i);
    331		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
    332	}
    333
    334	prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
    335			le32_to_cpu(prq->sds_ring_offset));
    336
    337	for (i = 0; i < nsds_rings; i++) {
    338
    339		sds_ring = &recv_ctx->sds_rings[i];
    340
    341		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
    342		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
    343		prq_sds[i].msi_index = cpu_to_le16(i);
    344	}
    345
    346	phys_addr = hostrq_phys_addr;
    347	memset(&cmd, 0, sizeof(cmd));
    348	cmd.req.arg1 = (u32)(phys_addr >> 32);
    349	cmd.req.arg2 = (u32)(phys_addr & 0xffffffff);
    350	cmd.req.arg3 = rq_size;
    351	cmd.req.cmd = NX_CDRP_CMD_CREATE_RX_CTX;
    352	err = netxen_issue_cmd(adapter, &cmd);
    353	if (err) {
    354		printk(KERN_WARNING
     355			"Failed to create rx ctx in firmware: %d\n", err);
    356		goto out_free_rsp;
    357	}
    358
    359
    360	prsp_rds = ((nx_cardrsp_rds_ring_t *)
    361			 &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
    362
    363	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
    364		rds_ring = &recv_ctx->rds_rings[i];
    365
    366		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
    367		rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter,
    368				NETXEN_NIC_REG(reg - 0x200));
    369	}
    370
    371	prsp_sds = ((nx_cardrsp_sds_ring_t *)
    372			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
    373
    374	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
    375		sds_ring = &recv_ctx->sds_rings[i];
    376
    377		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
    378		sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter,
    379				NETXEN_NIC_REG(reg - 0x200));
    380
    381		reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
    382		sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter,
    383				NETXEN_NIC_REG(reg - 0x200));
    384	}
    385
    386	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
    387	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
    388	recv_ctx->virt_port = prsp->virt_port;
    389
    390out_free_rsp:
    391	dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
    392			  cardrsp_phys_addr);
    393out_free_rq:
    394	dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
    395	return err;
    396}
    397
    398static void
    399nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
    400{
    401	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
    402	struct netxen_cmd_args cmd;
    403
    404	memset(&cmd, 0, sizeof(cmd));
    405	cmd.req.arg1 = recv_ctx->context_id;
    406	cmd.req.arg2 = NX_DESTROY_CTX_RESET;
    407	cmd.req.arg3 = 0;
    408	cmd.req.cmd = NX_CDRP_CMD_DESTROY_RX_CTX;
    409
    410	if (netxen_issue_cmd(adapter, &cmd)) {
    411		printk(KERN_WARNING
    412			"%s: Failed to destroy rx ctx in firmware\n",
    413			netxen_nic_driver_name);
    414	}
    415}
    416
    417static int
    418nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
    419{
    420	nx_hostrq_tx_ctx_t	*prq;
    421	nx_hostrq_cds_ring_t	*prq_cds;
    422	nx_cardrsp_tx_ctx_t	*prsp;
    423	void	*rq_addr, *rsp_addr;
    424	size_t	rq_size, rsp_size;
    425	u32	temp;
    426	int	err = 0;
    427	u64	offset, phys_addr;
    428	dma_addr_t	rq_phys_addr, rsp_phys_addr;
    429	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
    430	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
    431	struct netxen_cmd_args cmd;
    432
    433	rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
    434	rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
    435				     &rq_phys_addr, GFP_KERNEL);
    436	if (!rq_addr)
    437		return -ENOMEM;
    438
    439	rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
    440	rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
    441				      &rsp_phys_addr, GFP_KERNEL);
    442	if (!rsp_addr) {
    443		err = -ENOMEM;
    444		goto out_free_rq;
    445	}
    446
    447	prq = rq_addr;
    448
    449	prsp = rsp_addr;
    450
    451	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
    452
    453	temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
    454	prq->capabilities[0] = cpu_to_le32(temp);
    455
    456	prq->host_int_crb_mode =
    457		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
    458
    459	prq->interrupt_ctl = 0;
    460	prq->msi_index = 0;
    461
    462	prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);
    463
    464	offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx);
    465	prq->cmd_cons_dma_addr = cpu_to_le64(offset);
    466
    467	prq_cds = &prq->cds_ring;
    468
    469	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
    470	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
    471
    472	phys_addr = rq_phys_addr;
    473	memset(&cmd, 0, sizeof(cmd));
    474	cmd.req.arg1 = (u32)(phys_addr >> 32);
    475	cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
    476	cmd.req.arg3 = rq_size;
    477	cmd.req.cmd = NX_CDRP_CMD_CREATE_TX_CTX;
    478	err = netxen_issue_cmd(adapter, &cmd);
    479
    480	if (err == NX_RCODE_SUCCESS) {
    481		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
    482		tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
    483				NETXEN_NIC_REG(temp - 0x200));
    484#if 0
    485		adapter->tx_state =
    486			le32_to_cpu(prsp->host_ctx_state);
    487#endif
    488		adapter->tx_context_id =
    489			le16_to_cpu(prsp->context_id);
    490	} else {
    491		printk(KERN_WARNING
     492			"Failed to create tx ctx in firmware: %d\n", err);
    493		err = -EIO;
    494	}
    495
    496	dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
    497			  rsp_phys_addr);
    498
    499out_free_rq:
    500	dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
    501
    502	return err;
    503}
    504
    505static void
    506nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
    507{
    508	struct netxen_cmd_args cmd;
    509
    510	memset(&cmd, 0, sizeof(cmd));
    511	cmd.req.arg1 = adapter->tx_context_id;
    512	cmd.req.arg2 = NX_DESTROY_CTX_RESET;
    513	cmd.req.arg3 = 0;
    514	cmd.req.cmd = NX_CDRP_CMD_DESTROY_TX_CTX;
    515	if (netxen_issue_cmd(adapter, &cmd)) {
    516		printk(KERN_WARNING
    517			"%s: Failed to destroy tx ctx in firmware\n",
    518			netxen_nic_driver_name);
    519	}
    520}
    521
    522int
    523nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val)
    524{
    525	u32 rcode;
    526	struct netxen_cmd_args cmd;
    527
    528	memset(&cmd, 0, sizeof(cmd));
    529	cmd.req.arg1 = reg;
    530	cmd.req.arg2 = 0;
    531	cmd.req.arg3 = 0;
    532	cmd.req.cmd = NX_CDRP_CMD_READ_PHY;
    533	cmd.rsp.arg1 = 1;
    534	rcode = netxen_issue_cmd(adapter, &cmd);
    535	if (rcode != NX_RCODE_SUCCESS)
    536		return -EIO;
    537
    538	if (val == NULL)
    539		return -EIO;
    540
    541	*val = cmd.rsp.arg1;
    542	return 0;
    543}
    544
    545int
    546nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val)
    547{
    548	u32 rcode;
    549	struct netxen_cmd_args cmd;
    550
    551	memset(&cmd, 0, sizeof(cmd));
    552	cmd.req.arg1 = reg;
    553	cmd.req.arg2 = val;
    554	cmd.req.arg3 = 0;
    555	cmd.req.cmd = NX_CDRP_CMD_WRITE_PHY;
    556	rcode = netxen_issue_cmd(adapter, &cmd);
    557	if (rcode != NX_RCODE_SUCCESS)
    558		return -EIO;
    559
    560	return 0;
    561}
    562
    563static u64 ctx_addr_sig_regs[][3] = {
    564	{NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
    565	{NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
    566	{NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
    567	{NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
    568};
    569
    570#define CRB_CTX_ADDR_REG_LO(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][0])
    571#define CRB_CTX_ADDR_REG_HI(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][2])
    572#define CRB_CTX_SIGNATURE_REG(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][1])
    573
    574#define lower32(x)	((u32)((x) & 0xffffffff))
    575#define upper32(x)	((u32)(((u64)(x) >> 32) & 0xffffffff))
    576
    577static struct netxen_recv_crb recv_crb_registers[] = {
    578	/* Instance 0 */
    579	{
    580		/* crb_rcv_producer: */
    581		{
    582			NETXEN_NIC_REG(0x100),
    583			/* Jumbo frames */
    584			NETXEN_NIC_REG(0x110),
    585			/* LRO */
    586			NETXEN_NIC_REG(0x120)
    587		},
    588		/* crb_sts_consumer: */
    589		{
    590			NETXEN_NIC_REG(0x138),
    591			NETXEN_NIC_REG_2(0x000),
    592			NETXEN_NIC_REG_2(0x004),
    593			NETXEN_NIC_REG_2(0x008),
    594		},
    595		/* sw_int_mask */
    596		{
    597			CRB_SW_INT_MASK_0,
    598			NETXEN_NIC_REG_2(0x044),
    599			NETXEN_NIC_REG_2(0x048),
    600			NETXEN_NIC_REG_2(0x04c),
    601		},
    602	},
    603	/* Instance 1 */
    604	{
    605		/* crb_rcv_producer: */
    606		{
    607			NETXEN_NIC_REG(0x144),
    608			/* Jumbo frames */
    609			NETXEN_NIC_REG(0x154),
    610			/* LRO */
    611			NETXEN_NIC_REG(0x164)
    612		},
    613		/* crb_sts_consumer: */
    614		{
    615			NETXEN_NIC_REG(0x17c),
    616			NETXEN_NIC_REG_2(0x020),
    617			NETXEN_NIC_REG_2(0x024),
    618			NETXEN_NIC_REG_2(0x028),
    619		},
    620		/* sw_int_mask */
    621		{
    622			CRB_SW_INT_MASK_1,
    623			NETXEN_NIC_REG_2(0x064),
    624			NETXEN_NIC_REG_2(0x068),
    625			NETXEN_NIC_REG_2(0x06c),
    626		},
    627	},
    628	/* Instance 2 */
    629	{
    630		/* crb_rcv_producer: */
    631		{
    632			NETXEN_NIC_REG(0x1d8),
    633			/* Jumbo frames */
    634			NETXEN_NIC_REG(0x1f8),
    635			/* LRO */
    636			NETXEN_NIC_REG(0x208)
    637		},
    638		/* crb_sts_consumer: */
    639		{
    640			NETXEN_NIC_REG(0x220),
    641			NETXEN_NIC_REG_2(0x03c),
    642			NETXEN_NIC_REG_2(0x03c),
    643			NETXEN_NIC_REG_2(0x03c),
    644		},
    645		/* sw_int_mask */
    646		{
    647			CRB_SW_INT_MASK_2,
    648			NETXEN_NIC_REG_2(0x03c),
    649			NETXEN_NIC_REG_2(0x03c),
    650			NETXEN_NIC_REG_2(0x03c),
    651		},
    652	},
    653	/* Instance 3 */
    654	{
    655		/* crb_rcv_producer: */
    656		{
    657			NETXEN_NIC_REG(0x22c),
    658			/* Jumbo frames */
    659			NETXEN_NIC_REG(0x23c),
    660			/* LRO */
    661			NETXEN_NIC_REG(0x24c)
    662		},
    663		/* crb_sts_consumer: */
    664		{
    665			NETXEN_NIC_REG(0x264),
    666			NETXEN_NIC_REG_2(0x03c),
    667			NETXEN_NIC_REG_2(0x03c),
    668			NETXEN_NIC_REG_2(0x03c),
    669		},
    670		/* sw_int_mask */
    671		{
    672			CRB_SW_INT_MASK_3,
    673			NETXEN_NIC_REG_2(0x03c),
    674			NETXEN_NIC_REG_2(0x03c),
    675			NETXEN_NIC_REG_2(0x03c),
    676		},
    677	},
    678};
    679
    680static int
    681netxen_init_old_ctx(struct netxen_adapter *adapter)
    682{
    683	struct netxen_recv_context *recv_ctx;
    684	struct nx_host_rds_ring *rds_ring;
    685	struct nx_host_sds_ring *sds_ring;
    686	struct nx_host_tx_ring *tx_ring;
    687	int ring;
    688	int port = adapter->portnum;
    689	struct netxen_ring_ctx *hwctx;
    690	u32 signature;
    691
    692	tx_ring = adapter->tx_ring;
    693	recv_ctx = &adapter->recv_ctx;
    694	hwctx = recv_ctx->hwctx;
    695
    696	hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
    697	hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);
    698
    699
    700	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
    701		rds_ring = &recv_ctx->rds_rings[ring];
    702
    703		hwctx->rcv_rings[ring].addr =
    704			cpu_to_le64(rds_ring->phys_addr);
    705		hwctx->rcv_rings[ring].size =
    706			cpu_to_le32(rds_ring->num_desc);
    707	}
    708
    709	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
    710		sds_ring = &recv_ctx->sds_rings[ring];
    711
    712		if (ring == 0) {
    713			hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
    714			hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
    715		}
    716		hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
    717		hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
    718		hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
    719	}
    720	hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings);
    721
    722	signature = (adapter->max_sds_rings > 1) ?
    723		NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;
    724
    725	NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
    726			lower32(recv_ctx->phys_addr));
    727	NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
    728			upper32(recv_ctx->phys_addr));
    729	NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
    730			signature | port);
    731	return 0;
    732}
    733
    734int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
    735{
    736	void *addr;
    737	int err = 0;
    738	int ring;
    739	struct netxen_recv_context *recv_ctx;
    740	struct nx_host_rds_ring *rds_ring;
    741	struct nx_host_sds_ring *sds_ring;
    742	struct nx_host_tx_ring *tx_ring;
    743
    744	struct pci_dev *pdev = adapter->pdev;
    745	struct net_device *netdev = adapter->netdev;
    746	int port = adapter->portnum;
    747
    748	recv_ctx = &adapter->recv_ctx;
    749	tx_ring = adapter->tx_ring;
    750
    751	addr = dma_alloc_coherent(&pdev->dev,
    752				  sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
    753				  &recv_ctx->phys_addr, GFP_KERNEL);
    754	if (addr == NULL) {
    755		dev_err(&pdev->dev, "failed to allocate hw context\n");
    756		return -ENOMEM;
    757	}
    758
    759	recv_ctx->hwctx = addr;
    760	recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
    761	recv_ctx->hwctx->cmd_consumer_offset =
    762		cpu_to_le64(recv_ctx->phys_addr +
    763			sizeof(struct netxen_ring_ctx));
    764	tx_ring->hw_consumer =
    765		(__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));
    766
    767	/* cmd desc ring */
    768	addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
    769				  &tx_ring->phys_addr, GFP_KERNEL);
    770
    771	if (addr == NULL) {
    772		dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
    773				netdev->name);
    774		err = -ENOMEM;
    775		goto err_out_free;
    776	}
    777
    778	tx_ring->desc_head = addr;
    779
    780	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
    781		rds_ring = &recv_ctx->rds_rings[ring];
    782		addr = dma_alloc_coherent(&adapter->pdev->dev,
    783					  RCV_DESC_RINGSIZE(rds_ring),
    784					  &rds_ring->phys_addr, GFP_KERNEL);
    785		if (addr == NULL) {
    786			dev_err(&pdev->dev,
    787				"%s: failed to allocate rds ring [%d]\n",
    788				netdev->name, ring);
    789			err = -ENOMEM;
    790			goto err_out_free;
    791		}
    792		rds_ring->desc_head = addr;
    793
    794		if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
    795			rds_ring->crb_rcv_producer =
    796				netxen_get_ioaddr(adapter,
    797			recv_crb_registers[port].crb_rcv_producer[ring]);
    798	}
    799
    800	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
    801		sds_ring = &recv_ctx->sds_rings[ring];
    802
    803		addr = dma_alloc_coherent(&adapter->pdev->dev,
    804					  STATUS_DESC_RINGSIZE(sds_ring),
    805					  &sds_ring->phys_addr, GFP_KERNEL);
    806		if (addr == NULL) {
    807			dev_err(&pdev->dev,
    808				"%s: failed to allocate sds ring [%d]\n",
    809				netdev->name, ring);
    810			err = -ENOMEM;
    811			goto err_out_free;
    812		}
    813		sds_ring->desc_head = addr;
    814
    815		if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
    816			sds_ring->crb_sts_consumer =
    817				netxen_get_ioaddr(adapter,
    818				recv_crb_registers[port].crb_sts_consumer[ring]);
    819
    820			sds_ring->crb_intr_mask =
    821				netxen_get_ioaddr(adapter,
    822				recv_crb_registers[port].sw_int_mask[ring]);
    823		}
    824	}
    825
    826
    827	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
    828		if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
    829			goto done;
    830		err = nx_fw_cmd_create_rx_ctx(adapter);
    831		if (err)
    832			goto err_out_free;
    833		err = nx_fw_cmd_create_tx_ctx(adapter);
    834		if (err)
    835			goto err_out_free;
    836	} else {
    837		err = netxen_init_old_ctx(adapter);
    838		if (err)
    839			goto err_out_free;
    840	}
    841
    842done:
    843	return 0;
    844
    845err_out_free:
    846	netxen_free_hw_resources(adapter);
    847	return err;
    848}
    849
    850void netxen_free_hw_resources(struct netxen_adapter *adapter)
    851{
    852	struct netxen_recv_context *recv_ctx;
    853	struct nx_host_rds_ring *rds_ring;
    854	struct nx_host_sds_ring *sds_ring;
    855	struct nx_host_tx_ring *tx_ring;
    856	int ring;
    857
    858	int port = adapter->portnum;
    859
    860	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
    861		if (!test_and_clear_bit(__NX_FW_ATTACHED, &adapter->state))
    862			goto done;
    863
    864		nx_fw_cmd_destroy_rx_ctx(adapter);
    865		nx_fw_cmd_destroy_tx_ctx(adapter);
    866	} else {
    867		netxen_api_lock(adapter);
    868		NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
    869				NETXEN_CTX_D3_RESET | port);
    870		netxen_api_unlock(adapter);
    871	}
    872
    873	/* Allow dma queues to drain after context reset */
    874	msleep(20);
    875
    876done:
    877	recv_ctx = &adapter->recv_ctx;
    878
    879	if (recv_ctx->hwctx != NULL) {
    880		dma_free_coherent(&adapter->pdev->dev,
    881				  sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
    882				  recv_ctx->hwctx, recv_ctx->phys_addr);
    883		recv_ctx->hwctx = NULL;
    884	}
    885
    886	tx_ring = adapter->tx_ring;
    887	if (tx_ring->desc_head != NULL) {
    888		dma_free_coherent(&adapter->pdev->dev,
    889				  TX_DESC_RINGSIZE(tx_ring),
    890				  tx_ring->desc_head, tx_ring->phys_addr);
    891		tx_ring->desc_head = NULL;
    892	}
    893
    894	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
    895		rds_ring = &recv_ctx->rds_rings[ring];
    896
    897		if (rds_ring->desc_head != NULL) {
    898			dma_free_coherent(&adapter->pdev->dev,
    899					  RCV_DESC_RINGSIZE(rds_ring),
    900					  rds_ring->desc_head,
    901					  rds_ring->phys_addr);
    902			rds_ring->desc_head = NULL;
    903		}
    904	}
    905
    906	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
    907		sds_ring = &recv_ctx->sds_rings[ring];
    908
    909		if (sds_ring->desc_head != NULL) {
    910			dma_free_coherent(&adapter->pdev->dev,
    911					  STATUS_DESC_RINGSIZE(sds_ring),
    912					  sds_ring->desc_head,
    913					  sds_ring->phys_addr);
    914			sds_ring->desc_head = NULL;
    915		}
    916	}
    917}
    918
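
The firmware command path above (netxen_issue_cmd() and its callers) follows one fixed CDRP mailbox handshake: take the API lock, write a per-PCI-function signature and up to three arguments into CRB registers, write the command code to the CDRP register, then poll that same register until the firmware flips the response bit or the retry budget runs out; detailed status is read back through ARG1. The sketch below restates that sequence as a standalone C function over an abstract register interface. It is an illustration only: the ex_* offsets, constants, and the crb_read()/crb_write()/sleep_ms() callbacks are placeholders invented for this example, whereas the real driver uses NXRD32()/NXWR32() with the NX_*_CRB_OFFSET constants, NX_CDRP_SIGNATURE_MAKE(), and netxen_api_lock()/netxen_api_unlock().

/*
 * Illustrative sketch of the CDRP mailbox handshake, modelled on
 * netxen_issue_cmd()/netxen_poll_rsp() above. All ex_* names are
 * placeholders for this example, not the driver's real symbols.
 */
#include <stdint.h>

#define EX_SIGN_OFF	0x00	/* signature register (placeholder offset) */
#define EX_ARG1_OFF	0x04	/* argument/result register 1 */
#define EX_ARG2_OFF	0x08
#define EX_ARG3_OFF	0x0c
#define EX_CDRP_OFF	0x10	/* command/response doorbell */

#define EX_CDRP_RSP_BIT	0x80000000u	/* "response posted" bit, cf. NX_CDRP_IS_RSP() */
#define EX_MAX_RETRIES	4000		/* plays the role of NX_OS_CRB_RETRY_COUNT */

struct ex_dev {
	uint32_t (*crb_read)(struct ex_dev *dev, uint32_t off);
	void (*crb_write)(struct ex_dev *dev, uint32_t off, uint32_t val);
	void (*sleep_ms)(unsigned int ms);
};

/* Returns the raw CDRP response word, or 0 on timeout. */
static uint32_t ex_issue_cmd(struct ex_dev *dev, uint32_t signature,
			     uint32_t cmd, uint32_t arg1, uint32_t arg2,
			     uint32_t arg3)
{
	uint32_t rsp;
	int i;

	/* 1. post the signature and the three arguments */
	dev->crb_write(dev, EX_SIGN_OFF, signature);
	dev->crb_write(dev, EX_ARG1_OFF, arg1);
	dev->crb_write(dev, EX_ARG2_OFF, arg2);
	dev->crb_write(dev, EX_ARG3_OFF, arg3);

	/* 2. ring the doorbell with the command code */
	dev->crb_write(dev, EX_CDRP_OFF, cmd);

	/* 3. poll until the firmware posts a response, ~1 ms per try */
	for (i = 0; i < EX_MAX_RETRIES; i++) {
		dev->sleep_ms(1);
		rsp = dev->crb_read(dev, EX_CDRP_OFF);
		if (rsp & EX_CDRP_RSP_BIT)
			return rsp;	/* detailed status comes back via ARG1 */
	}
	return 0;	/* timeout */
}

In the driver itself this handshake is always wrapped by the lock/unlock pair and the command code is packed with NX_CDRP_FORM_CMD(), so callers such as nx_fw_cmd_set_mtu() only fill in a struct netxen_cmd_args and check the returned status code.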