cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

t4_hw.c (314294B)


      1/*
      2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
      3 *
      4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
      5 *
      6 * This software is available to you under a choice of one of two
      7 * licenses.  You may choose to be licensed under the terms of the GNU
      8 * General Public License (GPL) Version 2, available from the file
      9 * COPYING in the main directory of this source tree, or the
     10 * OpenIB.org BSD license below:
     11 *
     12 *     Redistribution and use in source and binary forms, with or
     13 *     without modification, are permitted provided that the following
     14 *     conditions are met:
     15 *
     16 *      - Redistributions of source code must retain the above
     17 *        copyright notice, this list of conditions and the following
     18 *        disclaimer.
     19 *
     20 *      - Redistributions in binary form must reproduce the above
     21 *        copyright notice, this list of conditions and the following
     22 *        disclaimer in the documentation and/or other materials
     23 *        provided with the distribution.
     24 *
     25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
     26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
     28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
     29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
     30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
     31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     32 * SOFTWARE.
     33 */
     34
     35#include <linux/delay.h>
     36#include "cxgb4.h"
     37#include "t4_regs.h"
     38#include "t4_values.h"
     39#include "t4fw_api.h"
     40#include "t4fw_version.h"
     41
     42/**
     43 *	t4_wait_op_done_val - wait until an operation is completed
     44 *	@adapter: the adapter performing the operation
     45 *	@reg: the register to check for completion
     46 *	@mask: a single-bit field within @reg that indicates completion
     47 *	@polarity: the value of the field when the operation is completed
     48 *	@attempts: number of check iterations
     49 *	@delay: delay in usecs between iterations
     50 *	@valp: where to store the value of the register at completion time
     51 *
     52 *	Wait until an operation is completed by checking a bit in a register
     53 *	up to @attempts times.  If @valp is not NULL the value of the register
     54 *	at the time it indicated completion is stored there.  Returns 0 if the
     55 *	operation completes and	-EAGAIN	otherwise.
     56 */
     57static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
     58			       int polarity, int attempts, int delay, u32 *valp)
     59{
     60	while (1) {
     61		u32 val = t4_read_reg(adapter, reg);
     62
     63		if (!!(val & mask) == polarity) {
     64			if (valp)
     65				*valp = val;
     66			return 0;
     67		}
     68		if (--attempts == 0)
     69			return -EAGAIN;
     70		if (delay)
     71			udelay(delay);
     72	}
     73}
     74
     75static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
     76				  int polarity, int attempts, int delay)
     77{
     78	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
     79				   delay, NULL);
     80}
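/* Example (illustrative, not part of the original file): waiting for a
 * "busy" bit at mask 0x4 to clear is a polarity-0 wait.  While the register
 * reads e.g. 0x1c, val & mask = 0x4 and !!(val & mask) = 1, so the loop
 * keeps polling; once it reads 0x18, !!(val & mask) = 0 matches the
 * requested polarity and 0 is returned.  The worst case is therefore
 * roughly @attempts * @delay microseconds before -EAGAIN comes back.
 */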
     81
     82/**
     83 *	t4_set_reg_field - set a register field to a value
     84 *	@adapter: the adapter to program
     85 *	@addr: the register address
     86 *	@mask: specifies the portion of the register to modify
     87 *	@val: the new value for the register field
     88 *
     89 *	Sets a register field specified by the supplied mask to the
     90 *	given value.
     91 */
     92void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
     93		      u32 val)
     94{
     95	u32 v = t4_read_reg(adapter, addr) & ~mask;
     96
     97	t4_write_reg(adapter, addr, v | val);
     98	(void) t4_read_reg(adapter, addr);      /* flush */
     99}
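/* Example (illustrative, not part of the original file): setting bits 11:8
 * of a register that currently reads 0x0000a5f0 to the field value 0x3 is
 * t4_set_reg_field(adap, addr, 0x00000f00, 0x3 << 8):
 *
 *	v           = 0x0000a5f0 & ~0x00000f00 = 0x0000a0f0
 *	write value = v | 0x00000300           = 0x0000a3f0
 *
 * Note that @val must already be shifted into the field position; the
 * trailing read only flushes the posted write.  A minimal stand-alone model
 * of the same read-modify-write, with set_field() a hypothetical name:
 */
#if 0	/* illustrative sketch only, not part of the original driver */
#include <stdint.h>
#include <assert.h>

/* Clear the masked bits, then OR in the new (pre-shifted) value. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | val;
}

int main(void)
{
	assert(set_field(0x0000a5f0, 0x00000f00, 0x3 << 8) == 0x0000a3f0);
	return 0;
}
#endif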
    100
    101/**
    102 *	t4_read_indirect - read indirectly addressed registers
    103 *	@adap: the adapter
    104 *	@addr_reg: register holding the indirect address
    105 *	@data_reg: register holding the value of the indirect register
    106 *	@vals: where the read register values are stored
    107 *	@nregs: how many indirect registers to read
    108 *	@start_idx: index of first indirect register to read
    109 *
    110 *	Reads registers that are accessed indirectly through an address/data
    111 *	register pair.
    112 */
    113void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
    114			     unsigned int data_reg, u32 *vals,
    115			     unsigned int nregs, unsigned int start_idx)
    116{
    117	while (nregs--) {
    118		t4_write_reg(adap, addr_reg, start_idx);
    119		*vals++ = t4_read_reg(adap, data_reg);
    120		start_idx++;
    121	}
    122}
    123
    124/**
    125 *	t4_write_indirect - write indirectly addressed registers
    126 *	@adap: the adapter
    127 *	@addr_reg: register holding the indirect addresses
    128 *	@data_reg: register holding the value for the indirect registers
    129 *	@vals: values to write
    130 *	@nregs: how many indirect registers to write
    131 *	@start_idx: address of first indirect register to write
    132 *
    133 *	Writes a sequential block of registers that are accessed indirectly
    134 *	through an address/data register pair.
    135 */
    136void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
    137		       unsigned int data_reg, const u32 *vals,
    138		       unsigned int nregs, unsigned int start_idx)
    139{
    140	while (nregs--) {
    141		t4_write_reg(adap, addr_reg, start_idx++);
    142		t4_write_reg(adap, data_reg, *vals++);
    143	}
    144}
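/* Example (illustrative, not part of the original file): a call such as
 * t4_read_indirect(adap, addr_reg, data_reg, vals, 3, 5) performs
 *
 *	write addr_reg <- 5,  vals[0] = read data_reg
 *	write addr_reg <- 6,  vals[1] = read data_reg
 *	write addr_reg <- 7,  vals[2] = read data_reg
 *
 * i.e. the address register selects which indirect register the single
 * data register currently exposes; t4_write_indirect() is the mirror
 * image for writes.
 */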
    145
    146/*
    147 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
    148 * mechanism.  This guarantees that we get the real value even if we're
    149 * operating within a Virtual Machine and the Hypervisor is trapping our
    150 * Configuration Space accesses.
    151 */
    152void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
    153{
    154	u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
    155
    156	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
    157		req |= ENABLE_F;
    158	else
    159		req |= T6_ENABLE_F;
    160
    161	if (is_t4(adap->params.chip))
    162		req |= LOCALCFG_F;
    163
    164	t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
    165	*val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
    166
    167	/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
    168	 * Configuration Space read.  (None of the other fields matter when
    169	 * ENABLE is 0 so a simple register write is easier than a
    170	 * read-modify-write via t4_set_reg_field().)
    171	 */
    172	t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
    173}
    174
    175/*
    176 * t4_report_fw_error - report firmware error
    177 * @adap: the adapter
    178 *
    179 * The adapter firmware can indicate error conditions to the host.
    180 * If the firmware has indicated an error, print out the reason for
    181 * the firmware error.
    182 */
    183static void t4_report_fw_error(struct adapter *adap)
    184{
    185	static const char *const reason[] = {
    186		"Crash",                        /* PCIE_FW_EVAL_CRASH */
    187		"During Device Preparation",    /* PCIE_FW_EVAL_PREP */
    188		"During Device Configuration",  /* PCIE_FW_EVAL_CONF */
    189		"During Device Initialization", /* PCIE_FW_EVAL_INIT */
    190		"Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
    191		"Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
    192		"Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
    193		"Reserved",                     /* reserved */
    194	};
    195	u32 pcie_fw;
    196
    197	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
    198	if (pcie_fw & PCIE_FW_ERR_F) {
    199		dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
    200			reason[PCIE_FW_EVAL_G(pcie_fw)]);
    201		adap->flags &= ~CXGB4_FW_OK;
    202	}
    203}
    204
    205/*
    206 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
    207 */
    208static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
    209			 u32 mbox_addr)
    210{
    211	for ( ; nflit; nflit--, mbox_addr += 8)
    212		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
    213}
    214
    215/*
    216 * Handle a FW assertion reported in a mailbox.
    217 */
    218static void fw_asrt(struct adapter *adap, u32 mbox_addr)
    219{
    220	struct fw_debug_cmd asrt;
    221
    222	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
    223	dev_alert(adap->pdev_dev,
    224		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
    225		  asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
    226		  be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
    227}
    228
    229/**
    230 *	t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
    231 *	@adapter: the adapter
    232 *	@cmd: the Firmware Mailbox Command or Reply
    233 *	@size: command length in bytes
    234 *	@access: the time (ms) needed to access the Firmware Mailbox
    235 *	@execute: the time (ms) the command spent being executed
    236 */
    237static void t4_record_mbox(struct adapter *adapter,
    238			   const __be64 *cmd, unsigned int size,
    239			   int access, int execute)
    240{
    241	struct mbox_cmd_log *log = adapter->mbox_log;
    242	struct mbox_cmd *entry;
    243	int i;
    244
    245	entry = mbox_cmd_log_entry(log, log->cursor++);
    246	if (log->cursor == log->size)
    247		log->cursor = 0;
    248
    249	for (i = 0; i < size / 8; i++)
    250		entry->cmd[i] = be64_to_cpu(cmd[i]);
    251	while (i < MBOX_LEN / 8)
    252		entry->cmd[i++] = 0;
    253	entry->timestamp = jiffies;
    254	entry->seqno = log->seqno++;
    255	entry->access = access;
    256	entry->execute = execute;
    257}
    258
    259/**
    260 *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
    261 *	@adap: the adapter
    262 *	@mbox: index of the mailbox to use
    263 *	@cmd: the command to write
    264 *	@size: command length in bytes
    265 *	@rpl: where to optionally store the reply
    266 *	@sleep_ok: if true we may sleep while awaiting command completion
    267 *	@timeout: time to wait for command to finish before timing out
    268 *
    269 *	Sends the given command to FW through the selected mailbox and waits
    270 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
    271 *	store the FW's reply to the command.  The command and its optional
    272 *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
    273 *	to respond.  @sleep_ok determines whether we may sleep while awaiting
    274 *	the response.  If sleeping is allowed we use progressive backoff
    275 *	otherwise we spin.
    276 *
    277 *	The return value is 0 on success or a negative errno on failure.  A
    278 *	failure can happen either because we are not able to execute the
    279 *	command or FW executes it but signals an error.  In the latter case
    280 *	the return value is the error code indicated by FW (negated).
    281 */
    282int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
    283			    int size, void *rpl, bool sleep_ok, int timeout)
    284{
    285	static const int delay[] = {
    286		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
    287	};
    288
    289	struct mbox_list entry;
    290	u16 access = 0;
    291	u16 execute = 0;
    292	u32 v;
    293	u64 res;
    294	int i, ms, delay_idx, ret;
    295	const __be64 *p = cmd;
    296	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
    297	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
    298	__be64 cmd_rpl[MBOX_LEN / 8];
    299	u32 pcie_fw;
    300
    301	if ((size & 15) || size > MBOX_LEN)
    302		return -EINVAL;
    303
    304	/*
    305	 * If the device is off-line, as in EEH, commands will time out.
    306	 * Fail them early so we don't waste time waiting.
    307	 */
    308	if (adap->pdev->error_state != pci_channel_io_normal)
    309		return -EIO;
    310
    311	/* If we have a negative timeout, that implies that we can't sleep. */
    312	if (timeout < 0) {
    313		sleep_ok = false;
    314		timeout = -timeout;
    315	}
    316
    317	/* Queue ourselves onto the mailbox access list.  When our entry is at
    318	 * the front of the list, we have rights to access the mailbox.  So we
    319	 * wait [for a while] till we're at the front [or bail out with an
    320	 * EBUSY] ...
    321	 */
    322	spin_lock_bh(&adap->mbox_lock);
    323	list_add_tail(&entry.list, &adap->mlist.list);
    324	spin_unlock_bh(&adap->mbox_lock);
    325
    326	delay_idx = 0;
    327	ms = delay[0];
    328
    329	for (i = 0; ; i += ms) {
    330		/* If we've waited too long, return a busy indication.  This
    331		 * really ought to be based on our initial position in the
    332		 * mailbox access list but this is a start.  We very rarely
    333		 * contend on access to the mailbox ...
    334		 */
    335		pcie_fw = t4_read_reg(adap, PCIE_FW_A);
    336		if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
    337			spin_lock_bh(&adap->mbox_lock);
    338			list_del(&entry.list);
    339			spin_unlock_bh(&adap->mbox_lock);
    340			ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
    341			t4_record_mbox(adap, cmd, size, access, ret);
    342			return ret;
    343		}
    344
    345		/* If we're at the head, break out and start the mailbox
    346		 * protocol.
    347		 */
    348		if (list_first_entry(&adap->mlist.list, struct mbox_list,
    349				     list) == &entry)
    350			break;
    351
    352		/* Delay for a bit before checking again ... */
    353		if (sleep_ok) {
    354			ms = delay[delay_idx];  /* last element may repeat */
    355			if (delay_idx < ARRAY_SIZE(delay) - 1)
    356				delay_idx++;
    357			msleep(ms);
    358		} else {
    359			mdelay(ms);
    360		}
    361	}
    362
    363	/* Loop trying to get ownership of the mailbox.  Return an error
    364	 * if we can't gain ownership.
    365	 */
    366	v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
    367	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
    368		v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
    369	if (v != MBOX_OWNER_DRV) {
    370		spin_lock_bh(&adap->mbox_lock);
    371		list_del(&entry.list);
    372		spin_unlock_bh(&adap->mbox_lock);
    373		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
    374		t4_record_mbox(adap, cmd, size, access, ret);
    375		return ret;
    376	}
    377
    378	/* Copy in the new mailbox command and send it on its way ... */
    379	t4_record_mbox(adap, cmd, size, access, 0);
    380	for (i = 0; i < size; i += 8)
    381		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
    382
    383	t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
    384	t4_read_reg(adap, ctl_reg);          /* flush write */
    385
    386	delay_idx = 0;
    387	ms = delay[0];
    388
    389	for (i = 0;
    390	     !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
    391	     i < timeout;
    392	     i += ms) {
    393		if (sleep_ok) {
    394			ms = delay[delay_idx];  /* last element may repeat */
    395			if (delay_idx < ARRAY_SIZE(delay) - 1)
    396				delay_idx++;
    397			msleep(ms);
    398		} else
    399			mdelay(ms);
    400
    401		v = t4_read_reg(adap, ctl_reg);
    402		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
    403			if (!(v & MBMSGVALID_F)) {
    404				t4_write_reg(adap, ctl_reg, 0);
    405				continue;
    406			}
    407
    408			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
    409			res = be64_to_cpu(cmd_rpl[0]);
    410
    411			if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
    412				fw_asrt(adap, data_reg);
    413				res = FW_CMD_RETVAL_V(EIO);
    414			} else if (rpl) {
    415				memcpy(rpl, cmd_rpl, size);
    416			}
    417
    418			t4_write_reg(adap, ctl_reg, 0);
    419
    420			execute = i + ms;
    421			t4_record_mbox(adap, cmd_rpl,
    422				       MBOX_LEN, access, execute);
    423			spin_lock_bh(&adap->mbox_lock);
    424			list_del(&entry.list);
    425			spin_unlock_bh(&adap->mbox_lock);
    426			return -FW_CMD_RETVAL_G((int)res);
    427		}
    428	}
    429
    430	ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
    431	t4_record_mbox(adap, cmd, size, access, ret);
    432	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
    433		*(const u8 *)cmd, mbox);
    434	t4_report_fw_error(adap);
    435	spin_lock_bh(&adap->mbox_lock);
    436	list_del(&entry.list);
    437	spin_unlock_bh(&adap->mbox_lock);
    438	t4_fatal_err(adap);
    439	return ret;
    440}
    441
    442int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
    443		    void *rpl, bool sleep_ok)
    444{
    445	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
    446				       FW_CMD_MAX_TIMEOUT);
    447}
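/* Example (illustrative, not part of the original file): with @sleep_ok set,
 * the wait loops above walk the delay[] table {1, 1, 3, 5, 10, 10, 20, 50,
 * 100, 200} and then keep repeating the final 200 ms entry, so the waited
 * time accumulates as 1, 2, 5, 10, 20, 30, 50, 100, 200, 400, 600, ... ms
 * until the reply arrives, PCIE_FW_ERR_F is raised, or the accumulated time
 * in i exceeds the timeout.  Without @sleep_ok the same table drives
 * mdelay() busy-waits instead of msleep().
 */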
    448
    449static int t4_edc_err_read(struct adapter *adap, int idx)
    450{
    451	u32 edc_ecc_err_addr_reg;
    452	u32 rdata_reg;
    453
    454	if (is_t4(adap->params.chip)) {
    455		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
    456		return 0;
    457	}
    458	if (idx != 0 && idx != 1) {
    459		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
    460		return 0;
    461	}
    462
    463	edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
    464	rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
    465
    466	CH_WARN(adap,
    467		"edc%d err addr 0x%x: 0x%x.\n",
    468		idx, edc_ecc_err_addr_reg,
    469		t4_read_reg(adap, edc_ecc_err_addr_reg));
    470	CH_WARN(adap,
    471		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
    472		rdata_reg,
    473		(unsigned long long)t4_read_reg64(adap, rdata_reg),
    474		(unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
    475		(unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
    476		(unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
    477		(unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
    478		(unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
    479		(unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
    480		(unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
    481		(unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
    482
    483	return 0;
    484}
    485
    486/**
    487 * t4_memory_rw_init - Get memory window relative offset, base, and size.
    488 * @adap: the adapter
    489 * @win: PCI-E Memory Window to use
    490 * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_HMA or MEM_MC
    491 * @mem_off: memory relative offset with respect to @mtype.
    492 * @mem_base: configured memory base address.
    493 * @mem_aperture: configured memory window aperture.
    494 *
    495 * Get the configured memory window's relative offset, base, and size.
    496 */
    497int t4_memory_rw_init(struct adapter *adap, int win, int mtype, u32 *mem_off,
    498		      u32 *mem_base, u32 *mem_aperture)
    499{
    500	u32 edc_size, mc_size, mem_reg;
    501
    502	/* Offset into the region of memory which is being accessed
    503	 * MEM_EDC0 = 0
    504	 * MEM_EDC1 = 1
    505	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
    506	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
    507	 * MEM_HMA  = 4
    508	 */
    509	edc_size  = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
    510	if (mtype == MEM_HMA) {
    511		*mem_off = 2 * (edc_size * 1024 * 1024);
    512	} else if (mtype != MEM_MC1) {
    513		*mem_off = (mtype * (edc_size * 1024 * 1024));
    514	} else {
    515		mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
    516						      MA_EXT_MEMORY0_BAR_A));
    517		*mem_off = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
    518	}
    519
    520	/* Each PCI-E Memory Window is programmed with a window size -- or
    521	 * "aperture" -- which controls the granularity of its mapping onto
    522	 * adapter memory.  We need to grab that aperture in order to know
    523	 * how to use the specified window.  The window is also programmed
    524	 * with the base address of the Memory Window in BAR0's address
    525	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
    526	 * the address is relative to BAR0.
    527	 */
    528	mem_reg = t4_read_reg(adap,
    529			      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
    530						  win));
    531	/* a dead adapter will return 0xffffffff for PIO reads */
    532	if (mem_reg == 0xffffffff)
    533		return -ENXIO;
    534
    535	*mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
    536	*mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
    537	if (is_t4(adap->params.chip))
    538		*mem_base -= adap->t4_bar0;
    539
    540	return 0;
    541}
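/* Example (illustrative, not part of the original file; assumes MEM_MC0 has
 * the same value 2 as MEM_MC per the enumeration above): with edc_size of
 * 256 MiB and mc_size of 4096 MiB, the offsets computed above come out as
 * MEM_EDC0 at 0, MEM_EDC1 at 256 MiB, MEM_MC at 512 MiB, MEM_MC1 at
 * 512 MiB + 4 GiB and MEM_HMA at 512 MiB (2 * edc_size).  The aperture is
 * likewise a power of two: a decoded exponent of 16 from
 * WINDOW_G(mem_reg) + WINDOW_SHIFT_X gives a 64 KiB window, and
 * t4_setup_memwin() later in this file programs the inverse encoding via
 * WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X).
 */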
    542
    543/**
    544 * t4_memory_update_win - Move memory window to specified address.
    545 * @adap: the adapter
    546 * @win: PCI-E Memory Window to use
    547 * @addr: location to move.
    548 *
    549 * Move memory window to specified address.
    550 */
    551void t4_memory_update_win(struct adapter *adap, int win, u32 addr)
    552{
    553	t4_write_reg(adap,
    554		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
    555		     addr);
    556	/* Read it back to ensure that changes propagate before we
    557	 * attempt to use the new value.
    558	 */
    559	t4_read_reg(adap,
    560		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
    561}
    562
    563/**
    564 * t4_memory_rw_residual - Read/Write residual data.
    565 * @adap: the adapter
    566 * @off: relative offset within residual to start read/write.
    567 * @addr: address within indicated memory type.
    568 * @buf: host memory buffer
    569 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
    570 *
    571 * Read/Write residual data less than 32-bits.
    572 */
    573void t4_memory_rw_residual(struct adapter *adap, u32 off, u32 addr, u8 *buf,
    574			   int dir)
    575{
    576	union {
    577		u32 word;
    578		char byte[4];
    579	} last;
    580	unsigned char *bp;
    581	int i;
    582
    583	if (dir == T4_MEMORY_READ) {
    584		last.word = le32_to_cpu((__force __le32)
    585					t4_read_reg(adap, addr));
    586		for (bp = (unsigned char *)buf, i = off; i < 4; i++)
    587			bp[i] = last.byte[i];
    588	} else {
    589		last.word = *buf;
    590		for (i = off; i < 4; i++)
    591			last.byte[i] = 0;
    592		t4_write_reg(adap, addr,
    593			     (__force u32)cpu_to_le32(last.word));
    594	}
    595}
    596
    597/**
    598 *	t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
    599 *	@adap: the adapter
    600 *	@win: PCI-E Memory Window to use
    601 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
    602 *	@addr: address within indicated memory type
    603 *	@len: amount of memory to transfer
    604 *	@hbuf: host memory buffer
    605 *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
    606 *
    607 *	Reads/writes an [almost] arbitrary memory region in the firmware: the
    608 *	firmware memory address and host buffer must be aligned on 32-bit
    609 *	boundaries; the length may be arbitrary.  The memory is transferred as
    610 *	a raw byte sequence from/to the firmware's memory.  If this memory
    611 *	contains data structures which contain multi-byte integers, it's the
    612 *	caller's responsibility to perform appropriate byte order conversions.
    613 */
    614int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
    615		 u32 len, void *hbuf, int dir)
    616{
    617	u32 pos, offset, resid, memoffset;
    618	u32 win_pf, mem_aperture, mem_base;
    619	u32 *buf;
    620	int ret;
    621
    622	/* Argument sanity checks ...
    623	 */
    624	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
    625		return -EINVAL;
    626	buf = (u32 *)hbuf;
    627
    628	/* It's convenient to be able to handle lengths which aren't a
    629	 * multiple of 32-bits because we often end up transferring files to
    630	 * the firmware.  So we'll handle that by normalizing the length here
    631	 * and then handling any residual transfer at the end.
    632	 */
    633	resid = len & 0x3;
    634	len -= resid;
    635
    636	ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
    637				&mem_aperture);
    638	if (ret)
    639		return ret;
    640
    641	/* Determine the PCIE_MEM_ACCESS_OFFSET */
    642	addr = addr + memoffset;
    643
    644	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
    645
    646	/* Calculate our initial PCI-E Memory Window Position and Offset into
    647	 * that Window.
    648	 */
    649	pos = addr & ~(mem_aperture - 1);
    650	offset = addr - pos;
    651
    652	/* Set up initial PCI-E Memory Window to cover the start of our
    653	 * transfer.
    654	 */
    655	t4_memory_update_win(adap, win, pos | win_pf);
    656
    657	/* Transfer data to/from the adapter as long as there's an integral
    658	 * number of 32-bit transfers to complete.
    659	 *
    660	 * A note on Endianness issues:
    661	 *
    662	 * The "register" reads and writes below from/to the PCI-E Memory
    663	 * Window invoke the standard adapter Big-Endian to PCI-E Link
     664	 * Little-Endian "swizzle."  As a result, if we have the following
    665	 * data in adapter memory:
    666	 *
    667	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
    668	 *     Address:      i+0  i+1  i+2  i+3
    669	 *
    670	 * Then a read of the adapter memory via the PCI-E Memory Window
    671	 * will yield:
    672	 *
    673	 *     x = readl(i)
    674	 *         31                  0
    675	 *         [ b3 | b2 | b1 | b0 ]
    676	 *
    677	 * If this value is stored into local memory on a Little-Endian system
    678	 * it will show up correctly in local memory as:
    679	 *
    680	 *     ( ..., b0, b1, b2, b3, ... )
    681	 *
    682	 * But on a Big-Endian system, the store will show up in memory
    683	 * incorrectly swizzled as:
    684	 *
    685	 *     ( ..., b3, b2, b1, b0, ... )
    686	 *
    687	 * So we need to account for this in the reads and writes to the
    688	 * PCI-E Memory Window below by undoing the register read/write
     689	 * swizzles.
    690	 */
    691	while (len > 0) {
    692		if (dir == T4_MEMORY_READ)
    693			*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
    694						mem_base + offset));
    695		else
    696			t4_write_reg(adap, mem_base + offset,
    697				     (__force u32)cpu_to_le32(*buf++));
    698		offset += sizeof(__be32);
    699		len -= sizeof(__be32);
    700
    701		/* If we've reached the end of our current window aperture,
    702		 * move the PCI-E Memory Window on to the next.  Note that
    703		 * doing this here after "len" may be 0 allows us to set up
    704		 * the PCI-E Memory Window for a possible final residual
    705		 * transfer below ...
    706		 */
    707		if (offset == mem_aperture) {
    708			pos += mem_aperture;
    709			offset = 0;
    710			t4_memory_update_win(adap, win, pos | win_pf);
    711		}
    712	}
    713
    714	/* If the original transfer had a length which wasn't a multiple of
    715	 * 32-bits, now's where we need to finish off the transfer of the
    716	 * residual amount.  The PCI-E Memory Window has already been moved
    717	 * above (if necessary) to cover this final transfer.
    718	 */
    719	if (resid)
    720		t4_memory_rw_residual(adap, resid, mem_base + offset,
    721				      (u8 *)buf, dir);
    722
    723	return 0;
    724}
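/* Example (illustrative, not part of the original file): with a 64 KiB
 * aperture (mem_aperture = 0x10000), reading 16 bytes from adapter address
 * 0xfff8 (after adding memoffset) starts at pos = 0xfff8 & ~0xffff = 0x0
 * with offset = 0xfff8.  After two 32-bit transfers offset reaches 0x10000
 * == mem_aperture, so the window is moved to pos = 0x10000 and offset
 * resets to 0 for the remaining two transfers.  Any residual 1-3 bytes of a
 * non-multiple-of-4 length would then be handled by t4_memory_rw_residual()
 * through the already-positioned window.
 */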
    725
    726/* Return the specified PCI-E Configuration Space register from our Physical
    727 * Function.  We try first via a Firmware LDST Command since we prefer to let
    728 * the firmware own all of these registers, but if that fails we go for it
    729 * directly ourselves.
    730 */
    731u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
    732{
    733	u32 val, ldst_addrspace;
    734
    735	/* If fw_attach != 0, construct and send the Firmware LDST Command to
    736	 * retrieve the specified PCI-E Configuration Space register.
    737	 */
    738	struct fw_ldst_cmd ldst_cmd;
    739	int ret;
    740
    741	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
    742	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
    743	ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
    744					       FW_CMD_REQUEST_F |
    745					       FW_CMD_READ_F |
    746					       ldst_addrspace);
    747	ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
    748	ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
    749	ldst_cmd.u.pcie.ctrl_to_fn =
    750		(FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
    751	ldst_cmd.u.pcie.r = reg;
    752
    753	/* If the LDST Command succeeds, return the result, otherwise
    754	 * fall through to reading it directly ourselves ...
    755	 */
    756	ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
    757			 &ldst_cmd);
    758	if (ret == 0)
    759		val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
    760	else
    761		/* Read the desired Configuration Space register via the PCI-E
    762		 * Backdoor mechanism.
    763		 */
    764		t4_hw_pci_read_cfg4(adap, reg, &val);
    765	return val;
    766}
    767
    768/* Get the window based on base passed to it.
    769 * Window aperture is currently unhandled, but there is no use case for it
    770 * right now
    771 */
    772static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
    773			 u32 memwin_base)
    774{
    775	u32 ret;
    776
    777	if (is_t4(adap->params.chip)) {
    778		u32 bar0;
    779
    780		/* Truncation intentional: we only read the bottom 32-bits of
    781		 * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
    782		 * mechanism to read BAR0 instead of using
    783		 * pci_resource_start() because we could be operating from
    784		 * within a Virtual Machine which is trapping our accesses to
    785		 * our Configuration Space and we need to set up the PCI-E
    786		 * Memory Window decoders with the actual addresses which will
    787		 * be coming across the PCI-E link.
    788		 */
    789		bar0 = t4_read_pcie_cfg4(adap, pci_base);
    790		bar0 &= pci_mask;
    791		adap->t4_bar0 = bar0;
    792
    793		ret = bar0 + memwin_base;
    794	} else {
    795		/* For T5, only relative offset inside the PCIe BAR is passed */
    796		ret = memwin_base;
    797	}
    798	return ret;
    799}
    800
    801/* Get the default utility window (win0) used by everyone */
    802u32 t4_get_util_window(struct adapter *adap)
    803{
    804	return t4_get_window(adap, PCI_BASE_ADDRESS_0,
    805			     PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
    806}
    807
    808/* Set up memory window for accessing adapter memory ranges.  (Read
    809 * back MA register to ensure that changes propagate before we attempt
    810 * to use the new values.)
    811 */
    812void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
    813{
    814	t4_write_reg(adap,
    815		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
    816		     memwin_base | BIR_V(0) |
    817		     WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
    818	t4_read_reg(adap,
    819		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
    820}
    821
    822/**
    823 *	t4_get_regs_len - return the size of the chips register set
    824 *	@adapter: the adapter
    825 *
    826 *	Returns the size of the chip's BAR0 register space.
    827 */
    828unsigned int t4_get_regs_len(struct adapter *adapter)
    829{
    830	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
    831
    832	switch (chip_version) {
    833	case CHELSIO_T4:
    834		return T4_REGMAP_SIZE;
    835
    836	case CHELSIO_T5:
    837	case CHELSIO_T6:
    838		return T5_REGMAP_SIZE;
    839	}
    840
    841	dev_err(adapter->pdev_dev,
    842		"Unsupported chip version %d\n", chip_version);
    843	return 0;
    844}
    845
    846/**
    847 *	t4_get_regs - read chip registers into provided buffer
    848 *	@adap: the adapter
    849 *	@buf: register buffer
    850 *	@buf_size: size (in bytes) of register buffer
    851 *
    852 *	If the provided register buffer isn't large enough for the chip's
    853 *	full register range, the register dump will be truncated to the
    854 *	register buffer's size.
    855 */
    856void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
    857{
    858	static const unsigned int t4_reg_ranges[] = {
    859		0x1008, 0x1108,
    860		0x1180, 0x1184,
    861		0x1190, 0x1194,
    862		0x11a0, 0x11a4,
    863		0x11b0, 0x11b4,
    864		0x11fc, 0x123c,
    865		0x1300, 0x173c,
    866		0x1800, 0x18fc,
    867		0x3000, 0x30d8,
    868		0x30e0, 0x30e4,
    869		0x30ec, 0x5910,
    870		0x5920, 0x5924,
    871		0x5960, 0x5960,
    872		0x5968, 0x5968,
    873		0x5970, 0x5970,
    874		0x5978, 0x5978,
    875		0x5980, 0x5980,
    876		0x5988, 0x5988,
    877		0x5990, 0x5990,
    878		0x5998, 0x5998,
    879		0x59a0, 0x59d4,
    880		0x5a00, 0x5ae0,
    881		0x5ae8, 0x5ae8,
    882		0x5af0, 0x5af0,
    883		0x5af8, 0x5af8,
    884		0x6000, 0x6098,
    885		0x6100, 0x6150,
    886		0x6200, 0x6208,
    887		0x6240, 0x6248,
    888		0x6280, 0x62b0,
    889		0x62c0, 0x6338,
    890		0x6370, 0x638c,
    891		0x6400, 0x643c,
    892		0x6500, 0x6524,
    893		0x6a00, 0x6a04,
    894		0x6a14, 0x6a38,
    895		0x6a60, 0x6a70,
    896		0x6a78, 0x6a78,
    897		0x6b00, 0x6b0c,
    898		0x6b1c, 0x6b84,
    899		0x6bf0, 0x6bf8,
    900		0x6c00, 0x6c0c,
    901		0x6c1c, 0x6c84,
    902		0x6cf0, 0x6cf8,
    903		0x6d00, 0x6d0c,
    904		0x6d1c, 0x6d84,
    905		0x6df0, 0x6df8,
    906		0x6e00, 0x6e0c,
    907		0x6e1c, 0x6e84,
    908		0x6ef0, 0x6ef8,
    909		0x6f00, 0x6f0c,
    910		0x6f1c, 0x6f84,
    911		0x6ff0, 0x6ff8,
    912		0x7000, 0x700c,
    913		0x701c, 0x7084,
    914		0x70f0, 0x70f8,
    915		0x7100, 0x710c,
    916		0x711c, 0x7184,
    917		0x71f0, 0x71f8,
    918		0x7200, 0x720c,
    919		0x721c, 0x7284,
    920		0x72f0, 0x72f8,
    921		0x7300, 0x730c,
    922		0x731c, 0x7384,
    923		0x73f0, 0x73f8,
    924		0x7400, 0x7450,
    925		0x7500, 0x7530,
    926		0x7600, 0x760c,
    927		0x7614, 0x761c,
    928		0x7680, 0x76cc,
    929		0x7700, 0x7798,
    930		0x77c0, 0x77fc,
    931		0x7900, 0x79fc,
    932		0x7b00, 0x7b58,
    933		0x7b60, 0x7b84,
    934		0x7b8c, 0x7c38,
    935		0x7d00, 0x7d38,
    936		0x7d40, 0x7d80,
    937		0x7d8c, 0x7ddc,
    938		0x7de4, 0x7e04,
    939		0x7e10, 0x7e1c,
    940		0x7e24, 0x7e38,
    941		0x7e40, 0x7e44,
    942		0x7e4c, 0x7e78,
    943		0x7e80, 0x7ea4,
    944		0x7eac, 0x7edc,
    945		0x7ee8, 0x7efc,
    946		0x8dc0, 0x8e04,
    947		0x8e10, 0x8e1c,
    948		0x8e30, 0x8e78,
    949		0x8ea0, 0x8eb8,
    950		0x8ec0, 0x8f6c,
    951		0x8fc0, 0x9008,
    952		0x9010, 0x9058,
    953		0x9060, 0x9060,
    954		0x9068, 0x9074,
    955		0x90fc, 0x90fc,
    956		0x9400, 0x9408,
    957		0x9410, 0x9458,
    958		0x9600, 0x9600,
    959		0x9608, 0x9638,
    960		0x9640, 0x96bc,
    961		0x9800, 0x9808,
    962		0x9820, 0x983c,
    963		0x9850, 0x9864,
    964		0x9c00, 0x9c6c,
    965		0x9c80, 0x9cec,
    966		0x9d00, 0x9d6c,
    967		0x9d80, 0x9dec,
    968		0x9e00, 0x9e6c,
    969		0x9e80, 0x9eec,
    970		0x9f00, 0x9f6c,
    971		0x9f80, 0x9fec,
    972		0xd004, 0xd004,
    973		0xd010, 0xd03c,
    974		0xdfc0, 0xdfe0,
    975		0xe000, 0xea7c,
    976		0xf000, 0x11110,
    977		0x11118, 0x11190,
    978		0x19040, 0x1906c,
    979		0x19078, 0x19080,
    980		0x1908c, 0x190e4,
    981		0x190f0, 0x190f8,
    982		0x19100, 0x19110,
    983		0x19120, 0x19124,
    984		0x19150, 0x19194,
    985		0x1919c, 0x191b0,
    986		0x191d0, 0x191e8,
    987		0x19238, 0x1924c,
    988		0x193f8, 0x1943c,
    989		0x1944c, 0x19474,
    990		0x19490, 0x194e0,
    991		0x194f0, 0x194f8,
    992		0x19800, 0x19c08,
    993		0x19c10, 0x19c90,
    994		0x19ca0, 0x19ce4,
    995		0x19cf0, 0x19d40,
    996		0x19d50, 0x19d94,
    997		0x19da0, 0x19de8,
    998		0x19df0, 0x19e40,
    999		0x19e50, 0x19e90,
   1000		0x19ea0, 0x19f4c,
   1001		0x1a000, 0x1a004,
   1002		0x1a010, 0x1a06c,
   1003		0x1a0b0, 0x1a0e4,
   1004		0x1a0ec, 0x1a0f4,
   1005		0x1a100, 0x1a108,
   1006		0x1a114, 0x1a120,
   1007		0x1a128, 0x1a130,
   1008		0x1a138, 0x1a138,
   1009		0x1a190, 0x1a1c4,
   1010		0x1a1fc, 0x1a1fc,
   1011		0x1e040, 0x1e04c,
   1012		0x1e284, 0x1e28c,
   1013		0x1e2c0, 0x1e2c0,
   1014		0x1e2e0, 0x1e2e0,
   1015		0x1e300, 0x1e384,
   1016		0x1e3c0, 0x1e3c8,
   1017		0x1e440, 0x1e44c,
   1018		0x1e684, 0x1e68c,
   1019		0x1e6c0, 0x1e6c0,
   1020		0x1e6e0, 0x1e6e0,
   1021		0x1e700, 0x1e784,
   1022		0x1e7c0, 0x1e7c8,
   1023		0x1e840, 0x1e84c,
   1024		0x1ea84, 0x1ea8c,
   1025		0x1eac0, 0x1eac0,
   1026		0x1eae0, 0x1eae0,
   1027		0x1eb00, 0x1eb84,
   1028		0x1ebc0, 0x1ebc8,
   1029		0x1ec40, 0x1ec4c,
   1030		0x1ee84, 0x1ee8c,
   1031		0x1eec0, 0x1eec0,
   1032		0x1eee0, 0x1eee0,
   1033		0x1ef00, 0x1ef84,
   1034		0x1efc0, 0x1efc8,
   1035		0x1f040, 0x1f04c,
   1036		0x1f284, 0x1f28c,
   1037		0x1f2c0, 0x1f2c0,
   1038		0x1f2e0, 0x1f2e0,
   1039		0x1f300, 0x1f384,
   1040		0x1f3c0, 0x1f3c8,
   1041		0x1f440, 0x1f44c,
   1042		0x1f684, 0x1f68c,
   1043		0x1f6c0, 0x1f6c0,
   1044		0x1f6e0, 0x1f6e0,
   1045		0x1f700, 0x1f784,
   1046		0x1f7c0, 0x1f7c8,
   1047		0x1f840, 0x1f84c,
   1048		0x1fa84, 0x1fa8c,
   1049		0x1fac0, 0x1fac0,
   1050		0x1fae0, 0x1fae0,
   1051		0x1fb00, 0x1fb84,
   1052		0x1fbc0, 0x1fbc8,
   1053		0x1fc40, 0x1fc4c,
   1054		0x1fe84, 0x1fe8c,
   1055		0x1fec0, 0x1fec0,
   1056		0x1fee0, 0x1fee0,
   1057		0x1ff00, 0x1ff84,
   1058		0x1ffc0, 0x1ffc8,
   1059		0x20000, 0x2002c,
   1060		0x20100, 0x2013c,
   1061		0x20190, 0x201a0,
   1062		0x201a8, 0x201b8,
   1063		0x201c4, 0x201c8,
   1064		0x20200, 0x20318,
   1065		0x20400, 0x204b4,
   1066		0x204c0, 0x20528,
   1067		0x20540, 0x20614,
   1068		0x21000, 0x21040,
   1069		0x2104c, 0x21060,
   1070		0x210c0, 0x210ec,
   1071		0x21200, 0x21268,
   1072		0x21270, 0x21284,
   1073		0x212fc, 0x21388,
   1074		0x21400, 0x21404,
   1075		0x21500, 0x21500,
   1076		0x21510, 0x21518,
   1077		0x2152c, 0x21530,
   1078		0x2153c, 0x2153c,
   1079		0x21550, 0x21554,
   1080		0x21600, 0x21600,
   1081		0x21608, 0x2161c,
   1082		0x21624, 0x21628,
   1083		0x21630, 0x21634,
   1084		0x2163c, 0x2163c,
   1085		0x21700, 0x2171c,
   1086		0x21780, 0x2178c,
   1087		0x21800, 0x21818,
   1088		0x21820, 0x21828,
   1089		0x21830, 0x21848,
   1090		0x21850, 0x21854,
   1091		0x21860, 0x21868,
   1092		0x21870, 0x21870,
   1093		0x21878, 0x21898,
   1094		0x218a0, 0x218a8,
   1095		0x218b0, 0x218c8,
   1096		0x218d0, 0x218d4,
   1097		0x218e0, 0x218e8,
   1098		0x218f0, 0x218f0,
   1099		0x218f8, 0x21a18,
   1100		0x21a20, 0x21a28,
   1101		0x21a30, 0x21a48,
   1102		0x21a50, 0x21a54,
   1103		0x21a60, 0x21a68,
   1104		0x21a70, 0x21a70,
   1105		0x21a78, 0x21a98,
   1106		0x21aa0, 0x21aa8,
   1107		0x21ab0, 0x21ac8,
   1108		0x21ad0, 0x21ad4,
   1109		0x21ae0, 0x21ae8,
   1110		0x21af0, 0x21af0,
   1111		0x21af8, 0x21c18,
   1112		0x21c20, 0x21c20,
   1113		0x21c28, 0x21c30,
   1114		0x21c38, 0x21c38,
   1115		0x21c80, 0x21c98,
   1116		0x21ca0, 0x21ca8,
   1117		0x21cb0, 0x21cc8,
   1118		0x21cd0, 0x21cd4,
   1119		0x21ce0, 0x21ce8,
   1120		0x21cf0, 0x21cf0,
   1121		0x21cf8, 0x21d7c,
   1122		0x21e00, 0x21e04,
   1123		0x22000, 0x2202c,
   1124		0x22100, 0x2213c,
   1125		0x22190, 0x221a0,
   1126		0x221a8, 0x221b8,
   1127		0x221c4, 0x221c8,
   1128		0x22200, 0x22318,
   1129		0x22400, 0x224b4,
   1130		0x224c0, 0x22528,
   1131		0x22540, 0x22614,
   1132		0x23000, 0x23040,
   1133		0x2304c, 0x23060,
   1134		0x230c0, 0x230ec,
   1135		0x23200, 0x23268,
   1136		0x23270, 0x23284,
   1137		0x232fc, 0x23388,
   1138		0x23400, 0x23404,
   1139		0x23500, 0x23500,
   1140		0x23510, 0x23518,
   1141		0x2352c, 0x23530,
   1142		0x2353c, 0x2353c,
   1143		0x23550, 0x23554,
   1144		0x23600, 0x23600,
   1145		0x23608, 0x2361c,
   1146		0x23624, 0x23628,
   1147		0x23630, 0x23634,
   1148		0x2363c, 0x2363c,
   1149		0x23700, 0x2371c,
   1150		0x23780, 0x2378c,
   1151		0x23800, 0x23818,
   1152		0x23820, 0x23828,
   1153		0x23830, 0x23848,
   1154		0x23850, 0x23854,
   1155		0x23860, 0x23868,
   1156		0x23870, 0x23870,
   1157		0x23878, 0x23898,
   1158		0x238a0, 0x238a8,
   1159		0x238b0, 0x238c8,
   1160		0x238d0, 0x238d4,
   1161		0x238e0, 0x238e8,
   1162		0x238f0, 0x238f0,
   1163		0x238f8, 0x23a18,
   1164		0x23a20, 0x23a28,
   1165		0x23a30, 0x23a48,
   1166		0x23a50, 0x23a54,
   1167		0x23a60, 0x23a68,
   1168		0x23a70, 0x23a70,
   1169		0x23a78, 0x23a98,
   1170		0x23aa0, 0x23aa8,
   1171		0x23ab0, 0x23ac8,
   1172		0x23ad0, 0x23ad4,
   1173		0x23ae0, 0x23ae8,
   1174		0x23af0, 0x23af0,
   1175		0x23af8, 0x23c18,
   1176		0x23c20, 0x23c20,
   1177		0x23c28, 0x23c30,
   1178		0x23c38, 0x23c38,
   1179		0x23c80, 0x23c98,
   1180		0x23ca0, 0x23ca8,
   1181		0x23cb0, 0x23cc8,
   1182		0x23cd0, 0x23cd4,
   1183		0x23ce0, 0x23ce8,
   1184		0x23cf0, 0x23cf0,
   1185		0x23cf8, 0x23d7c,
   1186		0x23e00, 0x23e04,
   1187		0x24000, 0x2402c,
   1188		0x24100, 0x2413c,
   1189		0x24190, 0x241a0,
   1190		0x241a8, 0x241b8,
   1191		0x241c4, 0x241c8,
   1192		0x24200, 0x24318,
   1193		0x24400, 0x244b4,
   1194		0x244c0, 0x24528,
   1195		0x24540, 0x24614,
   1196		0x25000, 0x25040,
   1197		0x2504c, 0x25060,
   1198		0x250c0, 0x250ec,
   1199		0x25200, 0x25268,
   1200		0x25270, 0x25284,
   1201		0x252fc, 0x25388,
   1202		0x25400, 0x25404,
   1203		0x25500, 0x25500,
   1204		0x25510, 0x25518,
   1205		0x2552c, 0x25530,
   1206		0x2553c, 0x2553c,
   1207		0x25550, 0x25554,
   1208		0x25600, 0x25600,
   1209		0x25608, 0x2561c,
   1210		0x25624, 0x25628,
   1211		0x25630, 0x25634,
   1212		0x2563c, 0x2563c,
   1213		0x25700, 0x2571c,
   1214		0x25780, 0x2578c,
   1215		0x25800, 0x25818,
   1216		0x25820, 0x25828,
   1217		0x25830, 0x25848,
   1218		0x25850, 0x25854,
   1219		0x25860, 0x25868,
   1220		0x25870, 0x25870,
   1221		0x25878, 0x25898,
   1222		0x258a0, 0x258a8,
   1223		0x258b0, 0x258c8,
   1224		0x258d0, 0x258d4,
   1225		0x258e0, 0x258e8,
   1226		0x258f0, 0x258f0,
   1227		0x258f8, 0x25a18,
   1228		0x25a20, 0x25a28,
   1229		0x25a30, 0x25a48,
   1230		0x25a50, 0x25a54,
   1231		0x25a60, 0x25a68,
   1232		0x25a70, 0x25a70,
   1233		0x25a78, 0x25a98,
   1234		0x25aa0, 0x25aa8,
   1235		0x25ab0, 0x25ac8,
   1236		0x25ad0, 0x25ad4,
   1237		0x25ae0, 0x25ae8,
   1238		0x25af0, 0x25af0,
   1239		0x25af8, 0x25c18,
   1240		0x25c20, 0x25c20,
   1241		0x25c28, 0x25c30,
   1242		0x25c38, 0x25c38,
   1243		0x25c80, 0x25c98,
   1244		0x25ca0, 0x25ca8,
   1245		0x25cb0, 0x25cc8,
   1246		0x25cd0, 0x25cd4,
   1247		0x25ce0, 0x25ce8,
   1248		0x25cf0, 0x25cf0,
   1249		0x25cf8, 0x25d7c,
   1250		0x25e00, 0x25e04,
   1251		0x26000, 0x2602c,
   1252		0x26100, 0x2613c,
   1253		0x26190, 0x261a0,
   1254		0x261a8, 0x261b8,
   1255		0x261c4, 0x261c8,
   1256		0x26200, 0x26318,
   1257		0x26400, 0x264b4,
   1258		0x264c0, 0x26528,
   1259		0x26540, 0x26614,
   1260		0x27000, 0x27040,
   1261		0x2704c, 0x27060,
   1262		0x270c0, 0x270ec,
   1263		0x27200, 0x27268,
   1264		0x27270, 0x27284,
   1265		0x272fc, 0x27388,
   1266		0x27400, 0x27404,
   1267		0x27500, 0x27500,
   1268		0x27510, 0x27518,
   1269		0x2752c, 0x27530,
   1270		0x2753c, 0x2753c,
   1271		0x27550, 0x27554,
   1272		0x27600, 0x27600,
   1273		0x27608, 0x2761c,
   1274		0x27624, 0x27628,
   1275		0x27630, 0x27634,
   1276		0x2763c, 0x2763c,
   1277		0x27700, 0x2771c,
   1278		0x27780, 0x2778c,
   1279		0x27800, 0x27818,
   1280		0x27820, 0x27828,
   1281		0x27830, 0x27848,
   1282		0x27850, 0x27854,
   1283		0x27860, 0x27868,
   1284		0x27870, 0x27870,
   1285		0x27878, 0x27898,
   1286		0x278a0, 0x278a8,
   1287		0x278b0, 0x278c8,
   1288		0x278d0, 0x278d4,
   1289		0x278e0, 0x278e8,
   1290		0x278f0, 0x278f0,
   1291		0x278f8, 0x27a18,
   1292		0x27a20, 0x27a28,
   1293		0x27a30, 0x27a48,
   1294		0x27a50, 0x27a54,
   1295		0x27a60, 0x27a68,
   1296		0x27a70, 0x27a70,
   1297		0x27a78, 0x27a98,
   1298		0x27aa0, 0x27aa8,
   1299		0x27ab0, 0x27ac8,
   1300		0x27ad0, 0x27ad4,
   1301		0x27ae0, 0x27ae8,
   1302		0x27af0, 0x27af0,
   1303		0x27af8, 0x27c18,
   1304		0x27c20, 0x27c20,
   1305		0x27c28, 0x27c30,
   1306		0x27c38, 0x27c38,
   1307		0x27c80, 0x27c98,
   1308		0x27ca0, 0x27ca8,
   1309		0x27cb0, 0x27cc8,
   1310		0x27cd0, 0x27cd4,
   1311		0x27ce0, 0x27ce8,
   1312		0x27cf0, 0x27cf0,
   1313		0x27cf8, 0x27d7c,
   1314		0x27e00, 0x27e04,
   1315	};
   1316
   1317	static const unsigned int t5_reg_ranges[] = {
   1318		0x1008, 0x10c0,
   1319		0x10cc, 0x10f8,
   1320		0x1100, 0x1100,
   1321		0x110c, 0x1148,
   1322		0x1180, 0x1184,
   1323		0x1190, 0x1194,
   1324		0x11a0, 0x11a4,
   1325		0x11b0, 0x11b4,
   1326		0x11fc, 0x123c,
   1327		0x1280, 0x173c,
   1328		0x1800, 0x18fc,
   1329		0x3000, 0x3028,
   1330		0x3060, 0x30b0,
   1331		0x30b8, 0x30d8,
   1332		0x30e0, 0x30fc,
   1333		0x3140, 0x357c,
   1334		0x35a8, 0x35cc,
   1335		0x35ec, 0x35ec,
   1336		0x3600, 0x5624,
   1337		0x56cc, 0x56ec,
   1338		0x56f4, 0x5720,
   1339		0x5728, 0x575c,
   1340		0x580c, 0x5814,
   1341		0x5890, 0x589c,
   1342		0x58a4, 0x58ac,
   1343		0x58b8, 0x58bc,
   1344		0x5940, 0x59c8,
   1345		0x59d0, 0x59dc,
   1346		0x59fc, 0x5a18,
   1347		0x5a60, 0x5a70,
   1348		0x5a80, 0x5a9c,
   1349		0x5b94, 0x5bfc,
   1350		0x6000, 0x6020,
   1351		0x6028, 0x6040,
   1352		0x6058, 0x609c,
   1353		0x60a8, 0x614c,
   1354		0x7700, 0x7798,
   1355		0x77c0, 0x78fc,
   1356		0x7b00, 0x7b58,
   1357		0x7b60, 0x7b84,
   1358		0x7b8c, 0x7c54,
   1359		0x7d00, 0x7d38,
   1360		0x7d40, 0x7d80,
   1361		0x7d8c, 0x7ddc,
   1362		0x7de4, 0x7e04,
   1363		0x7e10, 0x7e1c,
   1364		0x7e24, 0x7e38,
   1365		0x7e40, 0x7e44,
   1366		0x7e4c, 0x7e78,
   1367		0x7e80, 0x7edc,
   1368		0x7ee8, 0x7efc,
   1369		0x8dc0, 0x8de0,
   1370		0x8df8, 0x8e04,
   1371		0x8e10, 0x8e84,
   1372		0x8ea0, 0x8f84,
   1373		0x8fc0, 0x9058,
   1374		0x9060, 0x9060,
   1375		0x9068, 0x90f8,
   1376		0x9400, 0x9408,
   1377		0x9410, 0x9470,
   1378		0x9600, 0x9600,
   1379		0x9608, 0x9638,
   1380		0x9640, 0x96f4,
   1381		0x9800, 0x9808,
   1382		0x9810, 0x9864,
   1383		0x9c00, 0x9c6c,
   1384		0x9c80, 0x9cec,
   1385		0x9d00, 0x9d6c,
   1386		0x9d80, 0x9dec,
   1387		0x9e00, 0x9e6c,
   1388		0x9e80, 0x9eec,
   1389		0x9f00, 0x9f6c,
   1390		0x9f80, 0xa020,
   1391		0xd000, 0xd004,
   1392		0xd010, 0xd03c,
   1393		0xdfc0, 0xdfe0,
   1394		0xe000, 0x1106c,
   1395		0x11074, 0x11088,
   1396		0x1109c, 0x1117c,
   1397		0x11190, 0x11204,
   1398		0x19040, 0x1906c,
   1399		0x19078, 0x19080,
   1400		0x1908c, 0x190e8,
   1401		0x190f0, 0x190f8,
   1402		0x19100, 0x19110,
   1403		0x19120, 0x19124,
   1404		0x19150, 0x19194,
   1405		0x1919c, 0x191b0,
   1406		0x191d0, 0x191e8,
   1407		0x19238, 0x19290,
   1408		0x193f8, 0x19428,
   1409		0x19430, 0x19444,
   1410		0x1944c, 0x1946c,
   1411		0x19474, 0x19474,
   1412		0x19490, 0x194cc,
   1413		0x194f0, 0x194f8,
   1414		0x19c00, 0x19c08,
   1415		0x19c10, 0x19c60,
   1416		0x19c94, 0x19ce4,
   1417		0x19cf0, 0x19d40,
   1418		0x19d50, 0x19d94,
   1419		0x19da0, 0x19de8,
   1420		0x19df0, 0x19e10,
   1421		0x19e50, 0x19e90,
   1422		0x19ea0, 0x19f24,
   1423		0x19f34, 0x19f34,
   1424		0x19f40, 0x19f50,
   1425		0x19f90, 0x19fb4,
   1426		0x19fc4, 0x19fe4,
   1427		0x1a000, 0x1a004,
   1428		0x1a010, 0x1a06c,
   1429		0x1a0b0, 0x1a0e4,
   1430		0x1a0ec, 0x1a0f8,
   1431		0x1a100, 0x1a108,
   1432		0x1a114, 0x1a130,
   1433		0x1a138, 0x1a1c4,
   1434		0x1a1fc, 0x1a1fc,
   1435		0x1e008, 0x1e00c,
   1436		0x1e040, 0x1e044,
   1437		0x1e04c, 0x1e04c,
   1438		0x1e284, 0x1e290,
   1439		0x1e2c0, 0x1e2c0,
   1440		0x1e2e0, 0x1e2e0,
   1441		0x1e300, 0x1e384,
   1442		0x1e3c0, 0x1e3c8,
   1443		0x1e408, 0x1e40c,
   1444		0x1e440, 0x1e444,
   1445		0x1e44c, 0x1e44c,
   1446		0x1e684, 0x1e690,
   1447		0x1e6c0, 0x1e6c0,
   1448		0x1e6e0, 0x1e6e0,
   1449		0x1e700, 0x1e784,
   1450		0x1e7c0, 0x1e7c8,
   1451		0x1e808, 0x1e80c,
   1452		0x1e840, 0x1e844,
   1453		0x1e84c, 0x1e84c,
   1454		0x1ea84, 0x1ea90,
   1455		0x1eac0, 0x1eac0,
   1456		0x1eae0, 0x1eae0,
   1457		0x1eb00, 0x1eb84,
   1458		0x1ebc0, 0x1ebc8,
   1459		0x1ec08, 0x1ec0c,
   1460		0x1ec40, 0x1ec44,
   1461		0x1ec4c, 0x1ec4c,
   1462		0x1ee84, 0x1ee90,
   1463		0x1eec0, 0x1eec0,
   1464		0x1eee0, 0x1eee0,
   1465		0x1ef00, 0x1ef84,
   1466		0x1efc0, 0x1efc8,
   1467		0x1f008, 0x1f00c,
   1468		0x1f040, 0x1f044,
   1469		0x1f04c, 0x1f04c,
   1470		0x1f284, 0x1f290,
   1471		0x1f2c0, 0x1f2c0,
   1472		0x1f2e0, 0x1f2e0,
   1473		0x1f300, 0x1f384,
   1474		0x1f3c0, 0x1f3c8,
   1475		0x1f408, 0x1f40c,
   1476		0x1f440, 0x1f444,
   1477		0x1f44c, 0x1f44c,
   1478		0x1f684, 0x1f690,
   1479		0x1f6c0, 0x1f6c0,
   1480		0x1f6e0, 0x1f6e0,
   1481		0x1f700, 0x1f784,
   1482		0x1f7c0, 0x1f7c8,
   1483		0x1f808, 0x1f80c,
   1484		0x1f840, 0x1f844,
   1485		0x1f84c, 0x1f84c,
   1486		0x1fa84, 0x1fa90,
   1487		0x1fac0, 0x1fac0,
   1488		0x1fae0, 0x1fae0,
   1489		0x1fb00, 0x1fb84,
   1490		0x1fbc0, 0x1fbc8,
   1491		0x1fc08, 0x1fc0c,
   1492		0x1fc40, 0x1fc44,
   1493		0x1fc4c, 0x1fc4c,
   1494		0x1fe84, 0x1fe90,
   1495		0x1fec0, 0x1fec0,
   1496		0x1fee0, 0x1fee0,
   1497		0x1ff00, 0x1ff84,
   1498		0x1ffc0, 0x1ffc8,
   1499		0x30000, 0x30030,
   1500		0x30100, 0x30144,
   1501		0x30190, 0x301a0,
   1502		0x301a8, 0x301b8,
   1503		0x301c4, 0x301c8,
   1504		0x301d0, 0x301d0,
   1505		0x30200, 0x30318,
   1506		0x30400, 0x304b4,
   1507		0x304c0, 0x3052c,
   1508		0x30540, 0x3061c,
   1509		0x30800, 0x30828,
   1510		0x30834, 0x30834,
   1511		0x308c0, 0x30908,
   1512		0x30910, 0x309ac,
   1513		0x30a00, 0x30a14,
   1514		0x30a1c, 0x30a2c,
   1515		0x30a44, 0x30a50,
   1516		0x30a74, 0x30a74,
   1517		0x30a7c, 0x30afc,
   1518		0x30b08, 0x30c24,
   1519		0x30d00, 0x30d00,
   1520		0x30d08, 0x30d14,
   1521		0x30d1c, 0x30d20,
   1522		0x30d3c, 0x30d3c,
   1523		0x30d48, 0x30d50,
   1524		0x31200, 0x3120c,
   1525		0x31220, 0x31220,
   1526		0x31240, 0x31240,
   1527		0x31600, 0x3160c,
   1528		0x31a00, 0x31a1c,
   1529		0x31e00, 0x31e20,
   1530		0x31e38, 0x31e3c,
   1531		0x31e80, 0x31e80,
   1532		0x31e88, 0x31ea8,
   1533		0x31eb0, 0x31eb4,
   1534		0x31ec8, 0x31ed4,
   1535		0x31fb8, 0x32004,
   1536		0x32200, 0x32200,
   1537		0x32208, 0x32240,
   1538		0x32248, 0x32280,
   1539		0x32288, 0x322c0,
   1540		0x322c8, 0x322fc,
   1541		0x32600, 0x32630,
   1542		0x32a00, 0x32abc,
   1543		0x32b00, 0x32b10,
   1544		0x32b20, 0x32b30,
   1545		0x32b40, 0x32b50,
   1546		0x32b60, 0x32b70,
   1547		0x33000, 0x33028,
   1548		0x33030, 0x33048,
   1549		0x33060, 0x33068,
   1550		0x33070, 0x3309c,
   1551		0x330f0, 0x33128,
   1552		0x33130, 0x33148,
   1553		0x33160, 0x33168,
   1554		0x33170, 0x3319c,
   1555		0x331f0, 0x33238,
   1556		0x33240, 0x33240,
   1557		0x33248, 0x33250,
   1558		0x3325c, 0x33264,
   1559		0x33270, 0x332b8,
   1560		0x332c0, 0x332e4,
   1561		0x332f8, 0x33338,
   1562		0x33340, 0x33340,
   1563		0x33348, 0x33350,
   1564		0x3335c, 0x33364,
   1565		0x33370, 0x333b8,
   1566		0x333c0, 0x333e4,
   1567		0x333f8, 0x33428,
   1568		0x33430, 0x33448,
   1569		0x33460, 0x33468,
   1570		0x33470, 0x3349c,
   1571		0x334f0, 0x33528,
   1572		0x33530, 0x33548,
   1573		0x33560, 0x33568,
   1574		0x33570, 0x3359c,
   1575		0x335f0, 0x33638,
   1576		0x33640, 0x33640,
   1577		0x33648, 0x33650,
   1578		0x3365c, 0x33664,
   1579		0x33670, 0x336b8,
   1580		0x336c0, 0x336e4,
   1581		0x336f8, 0x33738,
   1582		0x33740, 0x33740,
   1583		0x33748, 0x33750,
   1584		0x3375c, 0x33764,
   1585		0x33770, 0x337b8,
   1586		0x337c0, 0x337e4,
   1587		0x337f8, 0x337fc,
   1588		0x33814, 0x33814,
   1589		0x3382c, 0x3382c,
   1590		0x33880, 0x3388c,
   1591		0x338e8, 0x338ec,
   1592		0x33900, 0x33928,
   1593		0x33930, 0x33948,
   1594		0x33960, 0x33968,
   1595		0x33970, 0x3399c,
   1596		0x339f0, 0x33a38,
   1597		0x33a40, 0x33a40,
   1598		0x33a48, 0x33a50,
   1599		0x33a5c, 0x33a64,
   1600		0x33a70, 0x33ab8,
   1601		0x33ac0, 0x33ae4,
   1602		0x33af8, 0x33b10,
   1603		0x33b28, 0x33b28,
   1604		0x33b3c, 0x33b50,
   1605		0x33bf0, 0x33c10,
   1606		0x33c28, 0x33c28,
   1607		0x33c3c, 0x33c50,
   1608		0x33cf0, 0x33cfc,
   1609		0x34000, 0x34030,
   1610		0x34100, 0x34144,
   1611		0x34190, 0x341a0,
   1612		0x341a8, 0x341b8,
   1613		0x341c4, 0x341c8,
   1614		0x341d0, 0x341d0,
   1615		0x34200, 0x34318,
   1616		0x34400, 0x344b4,
   1617		0x344c0, 0x3452c,
   1618		0x34540, 0x3461c,
   1619		0x34800, 0x34828,
   1620		0x34834, 0x34834,
   1621		0x348c0, 0x34908,
   1622		0x34910, 0x349ac,
   1623		0x34a00, 0x34a14,
   1624		0x34a1c, 0x34a2c,
   1625		0x34a44, 0x34a50,
   1626		0x34a74, 0x34a74,
   1627		0x34a7c, 0x34afc,
   1628		0x34b08, 0x34c24,
   1629		0x34d00, 0x34d00,
   1630		0x34d08, 0x34d14,
   1631		0x34d1c, 0x34d20,
   1632		0x34d3c, 0x34d3c,
   1633		0x34d48, 0x34d50,
   1634		0x35200, 0x3520c,
   1635		0x35220, 0x35220,
   1636		0x35240, 0x35240,
   1637		0x35600, 0x3560c,
   1638		0x35a00, 0x35a1c,
   1639		0x35e00, 0x35e20,
   1640		0x35e38, 0x35e3c,
   1641		0x35e80, 0x35e80,
   1642		0x35e88, 0x35ea8,
   1643		0x35eb0, 0x35eb4,
   1644		0x35ec8, 0x35ed4,
   1645		0x35fb8, 0x36004,
   1646		0x36200, 0x36200,
   1647		0x36208, 0x36240,
   1648		0x36248, 0x36280,
   1649		0x36288, 0x362c0,
   1650		0x362c8, 0x362fc,
   1651		0x36600, 0x36630,
   1652		0x36a00, 0x36abc,
   1653		0x36b00, 0x36b10,
   1654		0x36b20, 0x36b30,
   1655		0x36b40, 0x36b50,
   1656		0x36b60, 0x36b70,
   1657		0x37000, 0x37028,
   1658		0x37030, 0x37048,
   1659		0x37060, 0x37068,
   1660		0x37070, 0x3709c,
   1661		0x370f0, 0x37128,
   1662		0x37130, 0x37148,
   1663		0x37160, 0x37168,
   1664		0x37170, 0x3719c,
   1665		0x371f0, 0x37238,
   1666		0x37240, 0x37240,
   1667		0x37248, 0x37250,
   1668		0x3725c, 0x37264,
   1669		0x37270, 0x372b8,
   1670		0x372c0, 0x372e4,
   1671		0x372f8, 0x37338,
   1672		0x37340, 0x37340,
   1673		0x37348, 0x37350,
   1674		0x3735c, 0x37364,
   1675		0x37370, 0x373b8,
   1676		0x373c0, 0x373e4,
   1677		0x373f8, 0x37428,
   1678		0x37430, 0x37448,
   1679		0x37460, 0x37468,
   1680		0x37470, 0x3749c,
   1681		0x374f0, 0x37528,
   1682		0x37530, 0x37548,
   1683		0x37560, 0x37568,
   1684		0x37570, 0x3759c,
   1685		0x375f0, 0x37638,
   1686		0x37640, 0x37640,
   1687		0x37648, 0x37650,
   1688		0x3765c, 0x37664,
   1689		0x37670, 0x376b8,
   1690		0x376c0, 0x376e4,
   1691		0x376f8, 0x37738,
   1692		0x37740, 0x37740,
   1693		0x37748, 0x37750,
   1694		0x3775c, 0x37764,
   1695		0x37770, 0x377b8,
   1696		0x377c0, 0x377e4,
   1697		0x377f8, 0x377fc,
   1698		0x37814, 0x37814,
   1699		0x3782c, 0x3782c,
   1700		0x37880, 0x3788c,
   1701		0x378e8, 0x378ec,
   1702		0x37900, 0x37928,
   1703		0x37930, 0x37948,
   1704		0x37960, 0x37968,
   1705		0x37970, 0x3799c,
   1706		0x379f0, 0x37a38,
   1707		0x37a40, 0x37a40,
   1708		0x37a48, 0x37a50,
   1709		0x37a5c, 0x37a64,
   1710		0x37a70, 0x37ab8,
   1711		0x37ac0, 0x37ae4,
   1712		0x37af8, 0x37b10,
   1713		0x37b28, 0x37b28,
   1714		0x37b3c, 0x37b50,
   1715		0x37bf0, 0x37c10,
   1716		0x37c28, 0x37c28,
   1717		0x37c3c, 0x37c50,
   1718		0x37cf0, 0x37cfc,
   1719		0x38000, 0x38030,
   1720		0x38100, 0x38144,
   1721		0x38190, 0x381a0,
   1722		0x381a8, 0x381b8,
   1723		0x381c4, 0x381c8,
   1724		0x381d0, 0x381d0,
   1725		0x38200, 0x38318,
   1726		0x38400, 0x384b4,
   1727		0x384c0, 0x3852c,
   1728		0x38540, 0x3861c,
   1729		0x38800, 0x38828,
   1730		0x38834, 0x38834,
   1731		0x388c0, 0x38908,
   1732		0x38910, 0x389ac,
   1733		0x38a00, 0x38a14,
   1734		0x38a1c, 0x38a2c,
   1735		0x38a44, 0x38a50,
   1736		0x38a74, 0x38a74,
   1737		0x38a7c, 0x38afc,
   1738		0x38b08, 0x38c24,
   1739		0x38d00, 0x38d00,
   1740		0x38d08, 0x38d14,
   1741		0x38d1c, 0x38d20,
   1742		0x38d3c, 0x38d3c,
   1743		0x38d48, 0x38d50,
   1744		0x39200, 0x3920c,
   1745		0x39220, 0x39220,
   1746		0x39240, 0x39240,
   1747		0x39600, 0x3960c,
   1748		0x39a00, 0x39a1c,
   1749		0x39e00, 0x39e20,
   1750		0x39e38, 0x39e3c,
   1751		0x39e80, 0x39e80,
   1752		0x39e88, 0x39ea8,
   1753		0x39eb0, 0x39eb4,
   1754		0x39ec8, 0x39ed4,
   1755		0x39fb8, 0x3a004,
   1756		0x3a200, 0x3a200,
   1757		0x3a208, 0x3a240,
   1758		0x3a248, 0x3a280,
   1759		0x3a288, 0x3a2c0,
   1760		0x3a2c8, 0x3a2fc,
   1761		0x3a600, 0x3a630,
   1762		0x3aa00, 0x3aabc,
   1763		0x3ab00, 0x3ab10,
   1764		0x3ab20, 0x3ab30,
   1765		0x3ab40, 0x3ab50,
   1766		0x3ab60, 0x3ab70,
   1767		0x3b000, 0x3b028,
   1768		0x3b030, 0x3b048,
   1769		0x3b060, 0x3b068,
   1770		0x3b070, 0x3b09c,
   1771		0x3b0f0, 0x3b128,
   1772		0x3b130, 0x3b148,
   1773		0x3b160, 0x3b168,
   1774		0x3b170, 0x3b19c,
   1775		0x3b1f0, 0x3b238,
   1776		0x3b240, 0x3b240,
   1777		0x3b248, 0x3b250,
   1778		0x3b25c, 0x3b264,
   1779		0x3b270, 0x3b2b8,
   1780		0x3b2c0, 0x3b2e4,
   1781		0x3b2f8, 0x3b338,
   1782		0x3b340, 0x3b340,
   1783		0x3b348, 0x3b350,
   1784		0x3b35c, 0x3b364,
   1785		0x3b370, 0x3b3b8,
   1786		0x3b3c0, 0x3b3e4,
   1787		0x3b3f8, 0x3b428,
   1788		0x3b430, 0x3b448,
   1789		0x3b460, 0x3b468,
   1790		0x3b470, 0x3b49c,
   1791		0x3b4f0, 0x3b528,
   1792		0x3b530, 0x3b548,
   1793		0x3b560, 0x3b568,
   1794		0x3b570, 0x3b59c,
   1795		0x3b5f0, 0x3b638,
   1796		0x3b640, 0x3b640,
   1797		0x3b648, 0x3b650,
   1798		0x3b65c, 0x3b664,
   1799		0x3b670, 0x3b6b8,
   1800		0x3b6c0, 0x3b6e4,
   1801		0x3b6f8, 0x3b738,
   1802		0x3b740, 0x3b740,
   1803		0x3b748, 0x3b750,
   1804		0x3b75c, 0x3b764,
   1805		0x3b770, 0x3b7b8,
   1806		0x3b7c0, 0x3b7e4,
   1807		0x3b7f8, 0x3b7fc,
   1808		0x3b814, 0x3b814,
   1809		0x3b82c, 0x3b82c,
   1810		0x3b880, 0x3b88c,
   1811		0x3b8e8, 0x3b8ec,
   1812		0x3b900, 0x3b928,
   1813		0x3b930, 0x3b948,
   1814		0x3b960, 0x3b968,
   1815		0x3b970, 0x3b99c,
   1816		0x3b9f0, 0x3ba38,
   1817		0x3ba40, 0x3ba40,
   1818		0x3ba48, 0x3ba50,
   1819		0x3ba5c, 0x3ba64,
   1820		0x3ba70, 0x3bab8,
   1821		0x3bac0, 0x3bae4,
   1822		0x3baf8, 0x3bb10,
   1823		0x3bb28, 0x3bb28,
   1824		0x3bb3c, 0x3bb50,
   1825		0x3bbf0, 0x3bc10,
   1826		0x3bc28, 0x3bc28,
   1827		0x3bc3c, 0x3bc50,
   1828		0x3bcf0, 0x3bcfc,
   1829		0x3c000, 0x3c030,
   1830		0x3c100, 0x3c144,
   1831		0x3c190, 0x3c1a0,
   1832		0x3c1a8, 0x3c1b8,
   1833		0x3c1c4, 0x3c1c8,
   1834		0x3c1d0, 0x3c1d0,
   1835		0x3c200, 0x3c318,
   1836		0x3c400, 0x3c4b4,
   1837		0x3c4c0, 0x3c52c,
   1838		0x3c540, 0x3c61c,
   1839		0x3c800, 0x3c828,
   1840		0x3c834, 0x3c834,
   1841		0x3c8c0, 0x3c908,
   1842		0x3c910, 0x3c9ac,
   1843		0x3ca00, 0x3ca14,
   1844		0x3ca1c, 0x3ca2c,
   1845		0x3ca44, 0x3ca50,
   1846		0x3ca74, 0x3ca74,
   1847		0x3ca7c, 0x3cafc,
   1848		0x3cb08, 0x3cc24,
   1849		0x3cd00, 0x3cd00,
   1850		0x3cd08, 0x3cd14,
   1851		0x3cd1c, 0x3cd20,
   1852		0x3cd3c, 0x3cd3c,
   1853		0x3cd48, 0x3cd50,
   1854		0x3d200, 0x3d20c,
   1855		0x3d220, 0x3d220,
   1856		0x3d240, 0x3d240,
   1857		0x3d600, 0x3d60c,
   1858		0x3da00, 0x3da1c,
   1859		0x3de00, 0x3de20,
   1860		0x3de38, 0x3de3c,
   1861		0x3de80, 0x3de80,
   1862		0x3de88, 0x3dea8,
   1863		0x3deb0, 0x3deb4,
   1864		0x3dec8, 0x3ded4,
   1865		0x3dfb8, 0x3e004,
   1866		0x3e200, 0x3e200,
   1867		0x3e208, 0x3e240,
   1868		0x3e248, 0x3e280,
   1869		0x3e288, 0x3e2c0,
   1870		0x3e2c8, 0x3e2fc,
   1871		0x3e600, 0x3e630,
   1872		0x3ea00, 0x3eabc,
   1873		0x3eb00, 0x3eb10,
   1874		0x3eb20, 0x3eb30,
   1875		0x3eb40, 0x3eb50,
   1876		0x3eb60, 0x3eb70,
   1877		0x3f000, 0x3f028,
   1878		0x3f030, 0x3f048,
   1879		0x3f060, 0x3f068,
   1880		0x3f070, 0x3f09c,
   1881		0x3f0f0, 0x3f128,
   1882		0x3f130, 0x3f148,
   1883		0x3f160, 0x3f168,
   1884		0x3f170, 0x3f19c,
   1885		0x3f1f0, 0x3f238,
   1886		0x3f240, 0x3f240,
   1887		0x3f248, 0x3f250,
   1888		0x3f25c, 0x3f264,
   1889		0x3f270, 0x3f2b8,
   1890		0x3f2c0, 0x3f2e4,
   1891		0x3f2f8, 0x3f338,
   1892		0x3f340, 0x3f340,
   1893		0x3f348, 0x3f350,
   1894		0x3f35c, 0x3f364,
   1895		0x3f370, 0x3f3b8,
   1896		0x3f3c0, 0x3f3e4,
   1897		0x3f3f8, 0x3f428,
   1898		0x3f430, 0x3f448,
   1899		0x3f460, 0x3f468,
   1900		0x3f470, 0x3f49c,
   1901		0x3f4f0, 0x3f528,
   1902		0x3f530, 0x3f548,
   1903		0x3f560, 0x3f568,
   1904		0x3f570, 0x3f59c,
   1905		0x3f5f0, 0x3f638,
   1906		0x3f640, 0x3f640,
   1907		0x3f648, 0x3f650,
   1908		0x3f65c, 0x3f664,
   1909		0x3f670, 0x3f6b8,
   1910		0x3f6c0, 0x3f6e4,
   1911		0x3f6f8, 0x3f738,
   1912		0x3f740, 0x3f740,
   1913		0x3f748, 0x3f750,
   1914		0x3f75c, 0x3f764,
   1915		0x3f770, 0x3f7b8,
   1916		0x3f7c0, 0x3f7e4,
   1917		0x3f7f8, 0x3f7fc,
   1918		0x3f814, 0x3f814,
   1919		0x3f82c, 0x3f82c,
   1920		0x3f880, 0x3f88c,
   1921		0x3f8e8, 0x3f8ec,
   1922		0x3f900, 0x3f928,
   1923		0x3f930, 0x3f948,
   1924		0x3f960, 0x3f968,
   1925		0x3f970, 0x3f99c,
   1926		0x3f9f0, 0x3fa38,
   1927		0x3fa40, 0x3fa40,
   1928		0x3fa48, 0x3fa50,
   1929		0x3fa5c, 0x3fa64,
   1930		0x3fa70, 0x3fab8,
   1931		0x3fac0, 0x3fae4,
   1932		0x3faf8, 0x3fb10,
   1933		0x3fb28, 0x3fb28,
   1934		0x3fb3c, 0x3fb50,
   1935		0x3fbf0, 0x3fc10,
   1936		0x3fc28, 0x3fc28,
   1937		0x3fc3c, 0x3fc50,
   1938		0x3fcf0, 0x3fcfc,
   1939		0x40000, 0x4000c,
   1940		0x40040, 0x40050,
   1941		0x40060, 0x40068,
   1942		0x4007c, 0x4008c,
   1943		0x40094, 0x400b0,
   1944		0x400c0, 0x40144,
   1945		0x40180, 0x4018c,
   1946		0x40200, 0x40254,
   1947		0x40260, 0x40264,
   1948		0x40270, 0x40288,
   1949		0x40290, 0x40298,
   1950		0x402ac, 0x402c8,
   1951		0x402d0, 0x402e0,
   1952		0x402f0, 0x402f0,
   1953		0x40300, 0x4033c,
   1954		0x403f8, 0x403fc,
   1955		0x41304, 0x413c4,
   1956		0x41400, 0x4140c,
   1957		0x41414, 0x4141c,
   1958		0x41480, 0x414d0,
   1959		0x44000, 0x44054,
   1960		0x4405c, 0x44078,
   1961		0x440c0, 0x44174,
   1962		0x44180, 0x441ac,
   1963		0x441b4, 0x441b8,
   1964		0x441c0, 0x44254,
   1965		0x4425c, 0x44278,
   1966		0x442c0, 0x44374,
   1967		0x44380, 0x443ac,
   1968		0x443b4, 0x443b8,
   1969		0x443c0, 0x44454,
   1970		0x4445c, 0x44478,
   1971		0x444c0, 0x44574,
   1972		0x44580, 0x445ac,
   1973		0x445b4, 0x445b8,
   1974		0x445c0, 0x44654,
   1975		0x4465c, 0x44678,
   1976		0x446c0, 0x44774,
   1977		0x44780, 0x447ac,
   1978		0x447b4, 0x447b8,
   1979		0x447c0, 0x44854,
   1980		0x4485c, 0x44878,
   1981		0x448c0, 0x44974,
   1982		0x44980, 0x449ac,
   1983		0x449b4, 0x449b8,
   1984		0x449c0, 0x449fc,
   1985		0x45000, 0x45004,
   1986		0x45010, 0x45030,
   1987		0x45040, 0x45060,
   1988		0x45068, 0x45068,
   1989		0x45080, 0x45084,
   1990		0x450a0, 0x450b0,
   1991		0x45200, 0x45204,
   1992		0x45210, 0x45230,
   1993		0x45240, 0x45260,
   1994		0x45268, 0x45268,
   1995		0x45280, 0x45284,
   1996		0x452a0, 0x452b0,
   1997		0x460c0, 0x460e4,
   1998		0x47000, 0x4703c,
   1999		0x47044, 0x4708c,
   2000		0x47200, 0x47250,
   2001		0x47400, 0x47408,
   2002		0x47414, 0x47420,
   2003		0x47600, 0x47618,
   2004		0x47800, 0x47814,
   2005		0x48000, 0x4800c,
   2006		0x48040, 0x48050,
   2007		0x48060, 0x48068,
   2008		0x4807c, 0x4808c,
   2009		0x48094, 0x480b0,
   2010		0x480c0, 0x48144,
   2011		0x48180, 0x4818c,
   2012		0x48200, 0x48254,
   2013		0x48260, 0x48264,
   2014		0x48270, 0x48288,
   2015		0x48290, 0x48298,
   2016		0x482ac, 0x482c8,
   2017		0x482d0, 0x482e0,
   2018		0x482f0, 0x482f0,
   2019		0x48300, 0x4833c,
   2020		0x483f8, 0x483fc,
   2021		0x49304, 0x493c4,
   2022		0x49400, 0x4940c,
   2023		0x49414, 0x4941c,
   2024		0x49480, 0x494d0,
   2025		0x4c000, 0x4c054,
   2026		0x4c05c, 0x4c078,
   2027		0x4c0c0, 0x4c174,
   2028		0x4c180, 0x4c1ac,
   2029		0x4c1b4, 0x4c1b8,
   2030		0x4c1c0, 0x4c254,
   2031		0x4c25c, 0x4c278,
   2032		0x4c2c0, 0x4c374,
   2033		0x4c380, 0x4c3ac,
   2034		0x4c3b4, 0x4c3b8,
   2035		0x4c3c0, 0x4c454,
   2036		0x4c45c, 0x4c478,
   2037		0x4c4c0, 0x4c574,
   2038		0x4c580, 0x4c5ac,
   2039		0x4c5b4, 0x4c5b8,
   2040		0x4c5c0, 0x4c654,
   2041		0x4c65c, 0x4c678,
   2042		0x4c6c0, 0x4c774,
   2043		0x4c780, 0x4c7ac,
   2044		0x4c7b4, 0x4c7b8,
   2045		0x4c7c0, 0x4c854,
   2046		0x4c85c, 0x4c878,
   2047		0x4c8c0, 0x4c974,
   2048		0x4c980, 0x4c9ac,
   2049		0x4c9b4, 0x4c9b8,
   2050		0x4c9c0, 0x4c9fc,
   2051		0x4d000, 0x4d004,
   2052		0x4d010, 0x4d030,
   2053		0x4d040, 0x4d060,
   2054		0x4d068, 0x4d068,
   2055		0x4d080, 0x4d084,
   2056		0x4d0a0, 0x4d0b0,
   2057		0x4d200, 0x4d204,
   2058		0x4d210, 0x4d230,
   2059		0x4d240, 0x4d260,
   2060		0x4d268, 0x4d268,
   2061		0x4d280, 0x4d284,
   2062		0x4d2a0, 0x4d2b0,
   2063		0x4e0c0, 0x4e0e4,
   2064		0x4f000, 0x4f03c,
   2065		0x4f044, 0x4f08c,
   2066		0x4f200, 0x4f250,
   2067		0x4f400, 0x4f408,
   2068		0x4f414, 0x4f420,
   2069		0x4f600, 0x4f618,
   2070		0x4f800, 0x4f814,
   2071		0x50000, 0x50084,
   2072		0x50090, 0x500cc,
   2073		0x50400, 0x50400,
   2074		0x50800, 0x50884,
   2075		0x50890, 0x508cc,
   2076		0x50c00, 0x50c00,
   2077		0x51000, 0x5101c,
   2078		0x51300, 0x51308,
   2079	};
   2080
   2081	static const unsigned int t6_reg_ranges[] = {
   2082		0x1008, 0x101c,
   2083		0x1024, 0x10a8,
   2084		0x10b4, 0x10f8,
   2085		0x1100, 0x1114,
   2086		0x111c, 0x112c,
   2087		0x1138, 0x113c,
   2088		0x1144, 0x114c,
   2089		0x1180, 0x1184,
   2090		0x1190, 0x1194,
   2091		0x11a0, 0x11a4,
   2092		0x11b0, 0x11b4,
   2093		0x11fc, 0x123c,
   2094		0x1254, 0x1274,
   2095		0x1280, 0x133c,
   2096		0x1800, 0x18fc,
   2097		0x3000, 0x302c,
   2098		0x3060, 0x30b0,
   2099		0x30b8, 0x30d8,
   2100		0x30e0, 0x30fc,
   2101		0x3140, 0x357c,
   2102		0x35a8, 0x35cc,
   2103		0x35ec, 0x35ec,
   2104		0x3600, 0x5624,
   2105		0x56cc, 0x56ec,
   2106		0x56f4, 0x5720,
   2107		0x5728, 0x575c,
   2108		0x580c, 0x5814,
   2109		0x5890, 0x589c,
   2110		0x58a4, 0x58ac,
   2111		0x58b8, 0x58bc,
   2112		0x5940, 0x595c,
   2113		0x5980, 0x598c,
   2114		0x59b0, 0x59c8,
   2115		0x59d0, 0x59dc,
   2116		0x59fc, 0x5a18,
   2117		0x5a60, 0x5a6c,
   2118		0x5a80, 0x5a8c,
   2119		0x5a94, 0x5a9c,
   2120		0x5b94, 0x5bfc,
   2121		0x5c10, 0x5e48,
   2122		0x5e50, 0x5e94,
   2123		0x5ea0, 0x5eb0,
   2124		0x5ec0, 0x5ec0,
   2125		0x5ec8, 0x5ed0,
   2126		0x5ee0, 0x5ee0,
   2127		0x5ef0, 0x5ef0,
   2128		0x5f00, 0x5f00,
   2129		0x6000, 0x6020,
   2130		0x6028, 0x6040,
   2131		0x6058, 0x609c,
   2132		0x60a8, 0x619c,
   2133		0x7700, 0x7798,
   2134		0x77c0, 0x7880,
   2135		0x78cc, 0x78fc,
   2136		0x7b00, 0x7b58,
   2137		0x7b60, 0x7b84,
   2138		0x7b8c, 0x7c54,
   2139		0x7d00, 0x7d38,
   2140		0x7d40, 0x7d84,
   2141		0x7d8c, 0x7ddc,
   2142		0x7de4, 0x7e04,
   2143		0x7e10, 0x7e1c,
   2144		0x7e24, 0x7e38,
   2145		0x7e40, 0x7e44,
   2146		0x7e4c, 0x7e78,
   2147		0x7e80, 0x7edc,
   2148		0x7ee8, 0x7efc,
   2149		0x8dc0, 0x8de4,
   2150		0x8df8, 0x8e04,
   2151		0x8e10, 0x8e84,
   2152		0x8ea0, 0x8f88,
   2153		0x8fb8, 0x9058,
   2154		0x9060, 0x9060,
   2155		0x9068, 0x90f8,
   2156		0x9100, 0x9124,
   2157		0x9400, 0x9470,
   2158		0x9600, 0x9600,
   2159		0x9608, 0x9638,
   2160		0x9640, 0x9704,
   2161		0x9710, 0x971c,
   2162		0x9800, 0x9808,
   2163		0x9810, 0x9864,
   2164		0x9c00, 0x9c6c,
   2165		0x9c80, 0x9cec,
   2166		0x9d00, 0x9d6c,
   2167		0x9d80, 0x9dec,
   2168		0x9e00, 0x9e6c,
   2169		0x9e80, 0x9eec,
   2170		0x9f00, 0x9f6c,
   2171		0x9f80, 0xa020,
   2172		0xd000, 0xd03c,
   2173		0xd100, 0xd118,
   2174		0xd200, 0xd214,
   2175		0xd220, 0xd234,
   2176		0xd240, 0xd254,
   2177		0xd260, 0xd274,
   2178		0xd280, 0xd294,
   2179		0xd2a0, 0xd2b4,
   2180		0xd2c0, 0xd2d4,
   2181		0xd2e0, 0xd2f4,
   2182		0xd300, 0xd31c,
   2183		0xdfc0, 0xdfe0,
   2184		0xe000, 0xf008,
   2185		0xf010, 0xf018,
   2186		0xf020, 0xf028,
   2187		0x11000, 0x11014,
   2188		0x11048, 0x1106c,
   2189		0x11074, 0x11088,
   2190		0x11098, 0x11120,
   2191		0x1112c, 0x1117c,
   2192		0x11190, 0x112e0,
   2193		0x11300, 0x1130c,
   2194		0x12000, 0x1206c,
   2195		0x19040, 0x1906c,
   2196		0x19078, 0x19080,
   2197		0x1908c, 0x190e8,
   2198		0x190f0, 0x190f8,
   2199		0x19100, 0x19110,
   2200		0x19120, 0x19124,
   2201		0x19150, 0x19194,
   2202		0x1919c, 0x191b0,
   2203		0x191d0, 0x191e8,
   2204		0x19238, 0x19290,
   2205		0x192a4, 0x192b0,
   2206		0x192bc, 0x192bc,
   2207		0x19348, 0x1934c,
   2208		0x193f8, 0x19418,
   2209		0x19420, 0x19428,
   2210		0x19430, 0x19444,
   2211		0x1944c, 0x1946c,
   2212		0x19474, 0x19474,
   2213		0x19490, 0x194cc,
   2214		0x194f0, 0x194f8,
   2215		0x19c00, 0x19c48,
   2216		0x19c50, 0x19c80,
   2217		0x19c94, 0x19c98,
   2218		0x19ca0, 0x19cbc,
   2219		0x19ce4, 0x19ce4,
   2220		0x19cf0, 0x19cf8,
   2221		0x19d00, 0x19d28,
   2222		0x19d50, 0x19d78,
   2223		0x19d94, 0x19d98,
   2224		0x19da0, 0x19dc8,
   2225		0x19df0, 0x19e10,
   2226		0x19e50, 0x19e6c,
   2227		0x19ea0, 0x19ebc,
   2228		0x19ec4, 0x19ef4,
   2229		0x19f04, 0x19f2c,
   2230		0x19f34, 0x19f34,
   2231		0x19f40, 0x19f50,
   2232		0x19f90, 0x19fac,
   2233		0x19fc4, 0x19fc8,
   2234		0x19fd0, 0x19fe4,
   2235		0x1a000, 0x1a004,
   2236		0x1a010, 0x1a06c,
   2237		0x1a0b0, 0x1a0e4,
   2238		0x1a0ec, 0x1a0f8,
   2239		0x1a100, 0x1a108,
   2240		0x1a114, 0x1a130,
   2241		0x1a138, 0x1a1c4,
   2242		0x1a1fc, 0x1a1fc,
   2243		0x1e008, 0x1e00c,
   2244		0x1e040, 0x1e044,
   2245		0x1e04c, 0x1e04c,
   2246		0x1e284, 0x1e290,
   2247		0x1e2c0, 0x1e2c0,
   2248		0x1e2e0, 0x1e2e0,
   2249		0x1e300, 0x1e384,
   2250		0x1e3c0, 0x1e3c8,
   2251		0x1e408, 0x1e40c,
   2252		0x1e440, 0x1e444,
   2253		0x1e44c, 0x1e44c,
   2254		0x1e684, 0x1e690,
   2255		0x1e6c0, 0x1e6c0,
   2256		0x1e6e0, 0x1e6e0,
   2257		0x1e700, 0x1e784,
   2258		0x1e7c0, 0x1e7c8,
   2259		0x1e808, 0x1e80c,
   2260		0x1e840, 0x1e844,
   2261		0x1e84c, 0x1e84c,
   2262		0x1ea84, 0x1ea90,
   2263		0x1eac0, 0x1eac0,
   2264		0x1eae0, 0x1eae0,
   2265		0x1eb00, 0x1eb84,
   2266		0x1ebc0, 0x1ebc8,
   2267		0x1ec08, 0x1ec0c,
   2268		0x1ec40, 0x1ec44,
   2269		0x1ec4c, 0x1ec4c,
   2270		0x1ee84, 0x1ee90,
   2271		0x1eec0, 0x1eec0,
   2272		0x1eee0, 0x1eee0,
   2273		0x1ef00, 0x1ef84,
   2274		0x1efc0, 0x1efc8,
   2275		0x1f008, 0x1f00c,
   2276		0x1f040, 0x1f044,
   2277		0x1f04c, 0x1f04c,
   2278		0x1f284, 0x1f290,
   2279		0x1f2c0, 0x1f2c0,
   2280		0x1f2e0, 0x1f2e0,
   2281		0x1f300, 0x1f384,
   2282		0x1f3c0, 0x1f3c8,
   2283		0x1f408, 0x1f40c,
   2284		0x1f440, 0x1f444,
   2285		0x1f44c, 0x1f44c,
   2286		0x1f684, 0x1f690,
   2287		0x1f6c0, 0x1f6c0,
   2288		0x1f6e0, 0x1f6e0,
   2289		0x1f700, 0x1f784,
   2290		0x1f7c0, 0x1f7c8,
   2291		0x1f808, 0x1f80c,
   2292		0x1f840, 0x1f844,
   2293		0x1f84c, 0x1f84c,
   2294		0x1fa84, 0x1fa90,
   2295		0x1fac0, 0x1fac0,
   2296		0x1fae0, 0x1fae0,
   2297		0x1fb00, 0x1fb84,
   2298		0x1fbc0, 0x1fbc8,
   2299		0x1fc08, 0x1fc0c,
   2300		0x1fc40, 0x1fc44,
   2301		0x1fc4c, 0x1fc4c,
   2302		0x1fe84, 0x1fe90,
   2303		0x1fec0, 0x1fec0,
   2304		0x1fee0, 0x1fee0,
   2305		0x1ff00, 0x1ff84,
   2306		0x1ffc0, 0x1ffc8,
   2307		0x30000, 0x30030,
   2308		0x30100, 0x30168,
   2309		0x30190, 0x301a0,
   2310		0x301a8, 0x301b8,
   2311		0x301c4, 0x301c8,
   2312		0x301d0, 0x301d0,
   2313		0x30200, 0x30320,
   2314		0x30400, 0x304b4,
   2315		0x304c0, 0x3052c,
   2316		0x30540, 0x3061c,
   2317		0x30800, 0x308a0,
   2318		0x308c0, 0x30908,
   2319		0x30910, 0x309b8,
   2320		0x30a00, 0x30a04,
   2321		0x30a0c, 0x30a14,
   2322		0x30a1c, 0x30a2c,
   2323		0x30a44, 0x30a50,
   2324		0x30a74, 0x30a74,
   2325		0x30a7c, 0x30afc,
   2326		0x30b08, 0x30c24,
   2327		0x30d00, 0x30d14,
   2328		0x30d1c, 0x30d3c,
   2329		0x30d44, 0x30d4c,
   2330		0x30d54, 0x30d74,
   2331		0x30d7c, 0x30d7c,
   2332		0x30de0, 0x30de0,
   2333		0x30e00, 0x30ed4,
   2334		0x30f00, 0x30fa4,
   2335		0x30fc0, 0x30fc4,
   2336		0x31000, 0x31004,
   2337		0x31080, 0x310fc,
   2338		0x31208, 0x31220,
   2339		0x3123c, 0x31254,
   2340		0x31300, 0x31300,
   2341		0x31308, 0x3131c,
   2342		0x31338, 0x3133c,
   2343		0x31380, 0x31380,
   2344		0x31388, 0x313a8,
   2345		0x313b4, 0x313b4,
   2346		0x31400, 0x31420,
   2347		0x31438, 0x3143c,
   2348		0x31480, 0x31480,
   2349		0x314a8, 0x314a8,
   2350		0x314b0, 0x314b4,
   2351		0x314c8, 0x314d4,
   2352		0x31a40, 0x31a4c,
   2353		0x31af0, 0x31b20,
   2354		0x31b38, 0x31b3c,
   2355		0x31b80, 0x31b80,
   2356		0x31ba8, 0x31ba8,
   2357		0x31bb0, 0x31bb4,
   2358		0x31bc8, 0x31bd4,
   2359		0x32140, 0x3218c,
   2360		0x321f0, 0x321f4,
   2361		0x32200, 0x32200,
   2362		0x32218, 0x32218,
   2363		0x32400, 0x32400,
   2364		0x32408, 0x3241c,
   2365		0x32618, 0x32620,
   2366		0x32664, 0x32664,
   2367		0x326a8, 0x326a8,
   2368		0x326ec, 0x326ec,
   2369		0x32a00, 0x32abc,
   2370		0x32b00, 0x32b18,
   2371		0x32b20, 0x32b38,
   2372		0x32b40, 0x32b58,
   2373		0x32b60, 0x32b78,
   2374		0x32c00, 0x32c00,
   2375		0x32c08, 0x32c3c,
   2376		0x33000, 0x3302c,
   2377		0x33034, 0x33050,
   2378		0x33058, 0x33058,
   2379		0x33060, 0x3308c,
   2380		0x3309c, 0x330ac,
   2381		0x330c0, 0x330c0,
   2382		0x330c8, 0x330d0,
   2383		0x330d8, 0x330e0,
   2384		0x330ec, 0x3312c,
   2385		0x33134, 0x33150,
   2386		0x33158, 0x33158,
   2387		0x33160, 0x3318c,
   2388		0x3319c, 0x331ac,
   2389		0x331c0, 0x331c0,
   2390		0x331c8, 0x331d0,
   2391		0x331d8, 0x331e0,
   2392		0x331ec, 0x33290,
   2393		0x33298, 0x332c4,
   2394		0x332e4, 0x33390,
   2395		0x33398, 0x333c4,
   2396		0x333e4, 0x3342c,
   2397		0x33434, 0x33450,
   2398		0x33458, 0x33458,
   2399		0x33460, 0x3348c,
   2400		0x3349c, 0x334ac,
   2401		0x334c0, 0x334c0,
   2402		0x334c8, 0x334d0,
   2403		0x334d8, 0x334e0,
   2404		0x334ec, 0x3352c,
   2405		0x33534, 0x33550,
   2406		0x33558, 0x33558,
   2407		0x33560, 0x3358c,
   2408		0x3359c, 0x335ac,
   2409		0x335c0, 0x335c0,
   2410		0x335c8, 0x335d0,
   2411		0x335d8, 0x335e0,
   2412		0x335ec, 0x33690,
   2413		0x33698, 0x336c4,
   2414		0x336e4, 0x33790,
   2415		0x33798, 0x337c4,
   2416		0x337e4, 0x337fc,
   2417		0x33814, 0x33814,
   2418		0x33854, 0x33868,
   2419		0x33880, 0x3388c,
   2420		0x338c0, 0x338d0,
   2421		0x338e8, 0x338ec,
   2422		0x33900, 0x3392c,
   2423		0x33934, 0x33950,
   2424		0x33958, 0x33958,
   2425		0x33960, 0x3398c,
   2426		0x3399c, 0x339ac,
   2427		0x339c0, 0x339c0,
   2428		0x339c8, 0x339d0,
   2429		0x339d8, 0x339e0,
   2430		0x339ec, 0x33a90,
   2431		0x33a98, 0x33ac4,
   2432		0x33ae4, 0x33b10,
   2433		0x33b24, 0x33b28,
   2434		0x33b38, 0x33b50,
   2435		0x33bf0, 0x33c10,
   2436		0x33c24, 0x33c28,
   2437		0x33c38, 0x33c50,
   2438		0x33cf0, 0x33cfc,
   2439		0x34000, 0x34030,
   2440		0x34100, 0x34168,
   2441		0x34190, 0x341a0,
   2442		0x341a8, 0x341b8,
   2443		0x341c4, 0x341c8,
   2444		0x341d0, 0x341d0,
   2445		0x34200, 0x34320,
   2446		0x34400, 0x344b4,
   2447		0x344c0, 0x3452c,
   2448		0x34540, 0x3461c,
   2449		0x34800, 0x348a0,
   2450		0x348c0, 0x34908,
   2451		0x34910, 0x349b8,
   2452		0x34a00, 0x34a04,
   2453		0x34a0c, 0x34a14,
   2454		0x34a1c, 0x34a2c,
   2455		0x34a44, 0x34a50,
   2456		0x34a74, 0x34a74,
   2457		0x34a7c, 0x34afc,
   2458		0x34b08, 0x34c24,
   2459		0x34d00, 0x34d14,
   2460		0x34d1c, 0x34d3c,
   2461		0x34d44, 0x34d4c,
   2462		0x34d54, 0x34d74,
   2463		0x34d7c, 0x34d7c,
   2464		0x34de0, 0x34de0,
   2465		0x34e00, 0x34ed4,
   2466		0x34f00, 0x34fa4,
   2467		0x34fc0, 0x34fc4,
   2468		0x35000, 0x35004,
   2469		0x35080, 0x350fc,
   2470		0x35208, 0x35220,
   2471		0x3523c, 0x35254,
   2472		0x35300, 0x35300,
   2473		0x35308, 0x3531c,
   2474		0x35338, 0x3533c,
   2475		0x35380, 0x35380,
   2476		0x35388, 0x353a8,
   2477		0x353b4, 0x353b4,
   2478		0x35400, 0x35420,
   2479		0x35438, 0x3543c,
   2480		0x35480, 0x35480,
   2481		0x354a8, 0x354a8,
   2482		0x354b0, 0x354b4,
   2483		0x354c8, 0x354d4,
   2484		0x35a40, 0x35a4c,
   2485		0x35af0, 0x35b20,
   2486		0x35b38, 0x35b3c,
   2487		0x35b80, 0x35b80,
   2488		0x35ba8, 0x35ba8,
   2489		0x35bb0, 0x35bb4,
   2490		0x35bc8, 0x35bd4,
   2491		0x36140, 0x3618c,
   2492		0x361f0, 0x361f4,
   2493		0x36200, 0x36200,
   2494		0x36218, 0x36218,
   2495		0x36400, 0x36400,
   2496		0x36408, 0x3641c,
   2497		0x36618, 0x36620,
   2498		0x36664, 0x36664,
   2499		0x366a8, 0x366a8,
   2500		0x366ec, 0x366ec,
   2501		0x36a00, 0x36abc,
   2502		0x36b00, 0x36b18,
   2503		0x36b20, 0x36b38,
   2504		0x36b40, 0x36b58,
   2505		0x36b60, 0x36b78,
   2506		0x36c00, 0x36c00,
   2507		0x36c08, 0x36c3c,
   2508		0x37000, 0x3702c,
   2509		0x37034, 0x37050,
   2510		0x37058, 0x37058,
   2511		0x37060, 0x3708c,
   2512		0x3709c, 0x370ac,
   2513		0x370c0, 0x370c0,
   2514		0x370c8, 0x370d0,
   2515		0x370d8, 0x370e0,
   2516		0x370ec, 0x3712c,
   2517		0x37134, 0x37150,
   2518		0x37158, 0x37158,
   2519		0x37160, 0x3718c,
   2520		0x3719c, 0x371ac,
   2521		0x371c0, 0x371c0,
   2522		0x371c8, 0x371d0,
   2523		0x371d8, 0x371e0,
   2524		0x371ec, 0x37290,
   2525		0x37298, 0x372c4,
   2526		0x372e4, 0x37390,
   2527		0x37398, 0x373c4,
   2528		0x373e4, 0x3742c,
   2529		0x37434, 0x37450,
   2530		0x37458, 0x37458,
   2531		0x37460, 0x3748c,
   2532		0x3749c, 0x374ac,
   2533		0x374c0, 0x374c0,
   2534		0x374c8, 0x374d0,
   2535		0x374d8, 0x374e0,
   2536		0x374ec, 0x3752c,
   2537		0x37534, 0x37550,
   2538		0x37558, 0x37558,
   2539		0x37560, 0x3758c,
   2540		0x3759c, 0x375ac,
   2541		0x375c0, 0x375c0,
   2542		0x375c8, 0x375d0,
   2543		0x375d8, 0x375e0,
   2544		0x375ec, 0x37690,
   2545		0x37698, 0x376c4,
   2546		0x376e4, 0x37790,
   2547		0x37798, 0x377c4,
   2548		0x377e4, 0x377fc,
   2549		0x37814, 0x37814,
   2550		0x37854, 0x37868,
   2551		0x37880, 0x3788c,
   2552		0x378c0, 0x378d0,
   2553		0x378e8, 0x378ec,
   2554		0x37900, 0x3792c,
   2555		0x37934, 0x37950,
   2556		0x37958, 0x37958,
   2557		0x37960, 0x3798c,
   2558		0x3799c, 0x379ac,
   2559		0x379c0, 0x379c0,
   2560		0x379c8, 0x379d0,
   2561		0x379d8, 0x379e0,
   2562		0x379ec, 0x37a90,
   2563		0x37a98, 0x37ac4,
   2564		0x37ae4, 0x37b10,
   2565		0x37b24, 0x37b28,
   2566		0x37b38, 0x37b50,
   2567		0x37bf0, 0x37c10,
   2568		0x37c24, 0x37c28,
   2569		0x37c38, 0x37c50,
   2570		0x37cf0, 0x37cfc,
   2571		0x40040, 0x40040,
   2572		0x40080, 0x40084,
   2573		0x40100, 0x40100,
   2574		0x40140, 0x401bc,
   2575		0x40200, 0x40214,
   2576		0x40228, 0x40228,
   2577		0x40240, 0x40258,
   2578		0x40280, 0x40280,
   2579		0x40304, 0x40304,
   2580		0x40330, 0x4033c,
   2581		0x41304, 0x413c8,
   2582		0x413d0, 0x413dc,
   2583		0x413f0, 0x413f0,
   2584		0x41400, 0x4140c,
   2585		0x41414, 0x4141c,
   2586		0x41480, 0x414d0,
   2587		0x44000, 0x4407c,
   2588		0x440c0, 0x441ac,
   2589		0x441b4, 0x4427c,
   2590		0x442c0, 0x443ac,
   2591		0x443b4, 0x4447c,
   2592		0x444c0, 0x445ac,
   2593		0x445b4, 0x4467c,
   2594		0x446c0, 0x447ac,
   2595		0x447b4, 0x4487c,
   2596		0x448c0, 0x449ac,
   2597		0x449b4, 0x44a7c,
   2598		0x44ac0, 0x44bac,
   2599		0x44bb4, 0x44c7c,
   2600		0x44cc0, 0x44dac,
   2601		0x44db4, 0x44e7c,
   2602		0x44ec0, 0x44fac,
   2603		0x44fb4, 0x4507c,
   2604		0x450c0, 0x451ac,
   2605		0x451b4, 0x451fc,
   2606		0x45800, 0x45804,
   2607		0x45810, 0x45830,
   2608		0x45840, 0x45860,
   2609		0x45868, 0x45868,
   2610		0x45880, 0x45884,
   2611		0x458a0, 0x458b0,
   2612		0x45a00, 0x45a04,
   2613		0x45a10, 0x45a30,
   2614		0x45a40, 0x45a60,
   2615		0x45a68, 0x45a68,
   2616		0x45a80, 0x45a84,
   2617		0x45aa0, 0x45ab0,
   2618		0x460c0, 0x460e4,
   2619		0x47000, 0x4703c,
   2620		0x47044, 0x4708c,
   2621		0x47200, 0x47250,
   2622		0x47400, 0x47408,
   2623		0x47414, 0x47420,
   2624		0x47600, 0x47618,
   2625		0x47800, 0x47814,
   2626		0x47820, 0x4782c,
   2627		0x50000, 0x50084,
   2628		0x50090, 0x500cc,
   2629		0x50300, 0x50384,
   2630		0x50400, 0x50400,
   2631		0x50800, 0x50884,
   2632		0x50890, 0x508cc,
   2633		0x50b00, 0x50b84,
   2634		0x50c00, 0x50c00,
   2635		0x51000, 0x51020,
   2636		0x51028, 0x510b0,
   2637		0x51300, 0x51324,
   2638	};
   2639
   2640	u32 *buf_end = (u32 *)((char *)buf + buf_size);
   2641	const unsigned int *reg_ranges;
   2642	int reg_ranges_size, range;
   2643	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
   2644
   2645	/* Select the right set of register ranges to dump depending on the
   2646	 * adapter chip type.
   2647	 */
   2648	switch (chip_version) {
   2649	case CHELSIO_T4:
   2650		reg_ranges = t4_reg_ranges;
   2651		reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
   2652		break;
   2653
   2654	case CHELSIO_T5:
   2655		reg_ranges = t5_reg_ranges;
   2656		reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
   2657		break;
   2658
   2659	case CHELSIO_T6:
   2660		reg_ranges = t6_reg_ranges;
   2661		reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
   2662		break;
   2663
   2664	default:
   2665		dev_err(adap->pdev_dev,
   2666			"Unsupported chip version %d\n", chip_version);
   2667		return;
   2668	}
   2669
   2670	/* Clear the register buffer and insert the appropriate register
   2671	 * values selected by the above register ranges.
   2672	 */
   2673	memset(buf, 0, buf_size);
   2674	for (range = 0; range < reg_ranges_size; range += 2) {
   2675		unsigned int reg = reg_ranges[range];
   2676		unsigned int last_reg = reg_ranges[range + 1];
   2677		u32 *bufp = (u32 *)((char *)buf + reg);
   2678
   2679		/* Iterate across the register range filling in the register
   2680		 * buffer but don't write past the end of the register buffer.
   2681		 */
   2682		while (reg <= last_reg && bufp < buf_end) {
   2683			*bufp++ = t4_read_reg(adap, reg);
   2684			reg += sizeof(u32);
   2685		}
   2686	}
   2687}
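
/*
 * Worked example (illustrative): for a range pair such as { 0x1008, 0x101c }
 * the loop above performs (0x101c - 0x1008) / 4 + 1 = 6 register reads and
 * stores them at byte offsets 0x1008 .. 0x101c of @buf, so the dump buffer
 * mirrors the register address space; offsets not covered by any range keep
 * the zeroes written by the memset().
 */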
   2688
   2689#define EEPROM_STAT_ADDR   0x7bfc
   2690#define VPD_BASE           0x400
   2691#define VPD_BASE_OLD       0
   2692#define VPD_LEN            1024
   2693
   2694/**
   2695 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
   2696 * @phys_addr: the physical EEPROM address
   2697 * @fn: the PCI function number
   2698 * @sz: size of function-specific area
   2699 *
   2700 * Translate a physical EEPROM address to virtual.  The first 1K is
    2701 * accessed through virtual addresses starting at 31K, while the rest is
   2702 * accessed through virtual addresses starting at 0.
   2703 *
   2704 * The mapping is as follows:
   2705 * [0..1K) -> [31K..32K)
   2706 * [1K..1K+A) -> [31K-A..31K)
   2707 * [1K+A..ES) -> [0..ES-A-1K)
   2708 *
   2709 * where A = @fn * @sz, and ES = EEPROM size.
   2710 */
   2711int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
   2712{
   2713	fn *= sz;
   2714	if (phys_addr < 1024)
   2715		return phys_addr + (31 << 10);
   2716	if (phys_addr < 1024 + fn)
   2717		return 31744 - fn + phys_addr - 1024;
   2718	if (phys_addr < EEPROMSIZE)
   2719		return phys_addr - 1024 - fn;
   2720	return -EINVAL;
   2721}
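
/*
 * Worked example for the mapping above (fn and sz values are illustrative):
 * with @sz = 0x800 and @fn = 1, A = 0x800 (2K).  Then:
 *   phys 0x0000 -> 0x7c00                               ([0..1K)    -> [31K..32K))
 *   phys 0x0400 -> 31744 - 2048 + 1024 - 1024 = 0x7400  ([1K..1K+A) -> [31K-A..31K))
 *   phys 0x0c00 -> 3072 - 1024 - 2048 = 0x0000          ([1K+A..ES) -> [0..ES-A-1K))
 */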
   2722
   2723/**
   2724 *	t4_seeprom_wp - enable/disable EEPROM write protection
   2725 *	@adapter: the adapter
   2726 *	@enable: whether to enable or disable write protection
   2727 *
   2728 *	Enables or disables write protection on the serial EEPROM.
   2729 */
   2730int t4_seeprom_wp(struct adapter *adapter, bool enable)
   2731{
   2732	unsigned int v = enable ? 0xc : 0;
   2733	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
   2734	return ret < 0 ? ret : 0;
   2735}
   2736
   2737/**
   2738 *	t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
   2739 *	@adapter: adapter to read
   2740 *	@p: where to store the parameters
   2741 *
   2742 *	Reads card parameters stored in VPD EEPROM.
   2743 */
   2744int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
   2745{
   2746	unsigned int id_len, pn_len, sn_len, na_len;
   2747	int id, sn, pn, na, addr, ret = 0;
   2748	u8 *vpd, base_val = 0;
   2749
   2750	vpd = vmalloc(VPD_LEN);
   2751	if (!vpd)
   2752		return -ENOMEM;
   2753
   2754	/* Card information normally starts at VPD_BASE but early cards had
   2755	 * it at 0.
   2756	 */
   2757	ret = pci_read_vpd(adapter->pdev, VPD_BASE, 1, &base_val);
   2758	if (ret < 0)
   2759		goto out;
   2760
   2761	addr = base_val == PCI_VPD_LRDT_ID_STRING ? VPD_BASE : VPD_BASE_OLD;
   2762
   2763	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
   2764	if (ret < 0)
   2765		goto out;
   2766
   2767	ret = pci_vpd_find_id_string(vpd, VPD_LEN, &id_len);
   2768	if (ret < 0)
   2769		goto out;
   2770	id = ret;
   2771
   2772	ret = pci_vpd_check_csum(vpd, VPD_LEN);
   2773	if (ret) {
   2774		dev_err(adapter->pdev_dev, "VPD checksum incorrect or missing\n");
   2775		ret = -EINVAL;
   2776		goto out;
   2777	}
   2778
   2779	ret = pci_vpd_find_ro_info_keyword(vpd, VPD_LEN,
   2780					   PCI_VPD_RO_KEYWORD_SERIALNO, &sn_len);
   2781	if (ret < 0)
   2782		goto out;
   2783	sn = ret;
   2784
   2785	ret = pci_vpd_find_ro_info_keyword(vpd, VPD_LEN,
   2786					   PCI_VPD_RO_KEYWORD_PARTNO, &pn_len);
   2787	if (ret < 0)
   2788		goto out;
   2789	pn = ret;
   2790
   2791	ret = pci_vpd_find_ro_info_keyword(vpd, VPD_LEN, "NA", &na_len);
   2792	if (ret < 0)
   2793		goto out;
   2794	na = ret;
   2795
   2796	memcpy(p->id, vpd + id, min_t(unsigned int, id_len, ID_LEN));
   2797	strim(p->id);
   2798	memcpy(p->sn, vpd + sn, min_t(unsigned int, sn_len, SERNUM_LEN));
   2799	strim(p->sn);
   2800	memcpy(p->pn, vpd + pn, min_t(unsigned int, pn_len, PN_LEN));
   2801	strim(p->pn);
   2802	memcpy(p->na, vpd + na, min_t(unsigned int, na_len, MACADDR_LEN));
   2803	strim(p->na);
   2804
   2805out:
   2806	vfree(vpd);
   2807	if (ret < 0) {
   2808		dev_err(adapter->pdev_dev, "error reading VPD\n");
   2809		return ret;
   2810	}
   2811
   2812	return 0;
   2813}
   2814
   2815/**
   2816 *	t4_get_vpd_params - read VPD parameters & retrieve Core Clock
   2817 *	@adapter: adapter to read
   2818 *	@p: where to store the parameters
   2819 *
   2820 *	Reads card parameters stored in VPD EEPROM and retrieves the Core
   2821 *	Clock.  This can only be called after a connection to the firmware
   2822 *	is established.
   2823 */
   2824int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
   2825{
   2826	u32 cclk_param, cclk_val;
   2827	int ret;
   2828
   2829	/* Grab the raw VPD parameters.
   2830	 */
   2831	ret = t4_get_raw_vpd_params(adapter, p);
   2832	if (ret)
   2833		return ret;
   2834
   2835	/* Ask firmware for the Core Clock since it knows how to translate the
   2836	 * Reference Clock ('V2') VPD field into a Core Clock value ...
   2837	 */
   2838	cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
   2839		      FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
   2840	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
   2841			      1, &cclk_param, &cclk_val);
   2842
   2843	if (ret)
   2844		return ret;
   2845	p->cclk = cclk_val;
   2846
   2847	return 0;
   2848}
   2849
   2850/**
    2851 *	t4_get_pfres - retrieve PF resource limits
   2852 *	@adapter: the adapter
   2853 *
   2854 *	Retrieves configured resource limits and capabilities for a physical
    2855 *	function.  The results are stored in @adapter->params.pfres.
   2856 */
   2857int t4_get_pfres(struct adapter *adapter)
   2858{
   2859	struct pf_resources *pfres = &adapter->params.pfres;
   2860	struct fw_pfvf_cmd cmd, rpl;
   2861	int v;
   2862	u32 word;
   2863
    2864	/* Execute PFVF Read command to get PF resource limits; bail out early
   2865	 * with error on command failure.
   2866	 */
   2867	memset(&cmd, 0, sizeof(cmd));
   2868	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
   2869				    FW_CMD_REQUEST_F |
   2870				    FW_CMD_READ_F |
   2871				    FW_PFVF_CMD_PFN_V(adapter->pf) |
   2872				    FW_PFVF_CMD_VFN_V(0));
   2873	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
   2874	v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
   2875	if (v != FW_SUCCESS)
   2876		return v;
   2877
   2878	/* Extract PF resource limits and return success.
   2879	 */
   2880	word = be32_to_cpu(rpl.niqflint_niq);
   2881	pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
   2882	pfres->niq = FW_PFVF_CMD_NIQ_G(word);
   2883
   2884	word = be32_to_cpu(rpl.type_to_neq);
   2885	pfres->neq = FW_PFVF_CMD_NEQ_G(word);
   2886	pfres->pmask = FW_PFVF_CMD_PMASK_G(word);
   2887
   2888	word = be32_to_cpu(rpl.tc_to_nexactf);
   2889	pfres->tc = FW_PFVF_CMD_TC_G(word);
   2890	pfres->nvi = FW_PFVF_CMD_NVI_G(word);
   2891	pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
   2892
   2893	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
   2894	pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
   2895	pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
   2896	pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
   2897
   2898	return 0;
   2899}
   2900
   2901/* serial flash and firmware constants */
   2902enum {
   2903	SF_ATTEMPTS = 10,             /* max retries for SF operations */
   2904
   2905	/* flash command opcodes */
   2906	SF_PROG_PAGE    = 2,          /* program page */
   2907	SF_WR_DISABLE   = 4,          /* disable writes */
   2908	SF_RD_STATUS    = 5,          /* read status register */
   2909	SF_WR_ENABLE    = 6,          /* enable writes */
   2910	SF_RD_DATA_FAST = 0xb,        /* read flash */
   2911	SF_RD_ID        = 0x9f,       /* read ID */
   2912	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
   2913};
   2914
   2915/**
   2916 *	sf1_read - read data from the serial flash
   2917 *	@adapter: the adapter
   2918 *	@byte_cnt: number of bytes to read
   2919 *	@cont: whether another operation will be chained
   2920 *	@lock: whether to lock SF for PL access only
   2921 *	@valp: where to store the read data
   2922 *
   2923 *	Reads up to 4 bytes of data from the serial flash.  The location of
   2924 *	the read needs to be specified prior to calling this by issuing the
   2925 *	appropriate commands to the serial flash.
   2926 */
   2927static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
   2928		    int lock, u32 *valp)
   2929{
   2930	int ret;
   2931
   2932	if (!byte_cnt || byte_cnt > 4)
   2933		return -EINVAL;
   2934	if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
   2935		return -EBUSY;
   2936	t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
   2937		     SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
   2938	ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
   2939	if (!ret)
   2940		*valp = t4_read_reg(adapter, SF_DATA_A);
   2941	return ret;
   2942}
   2943
   2944/**
   2945 *	sf1_write - write data to the serial flash
   2946 *	@adapter: the adapter
   2947 *	@byte_cnt: number of bytes to write
   2948 *	@cont: whether another operation will be chained
   2949 *	@lock: whether to lock SF for PL access only
   2950 *	@val: value to write
   2951 *
   2952 *	Writes up to 4 bytes of data to the serial flash.  The location of
   2953 *	the write needs to be specified prior to calling this by issuing the
   2954 *	appropriate commands to the serial flash.
   2955 */
   2956static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
   2957		     int lock, u32 val)
   2958{
   2959	if (!byte_cnt || byte_cnt > 4)
   2960		return -EINVAL;
   2961	if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
   2962		return -EBUSY;
   2963	t4_write_reg(adapter, SF_DATA_A, val);
   2964	t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
   2965		     SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
   2966	return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
   2967}
   2968
   2969/**
   2970 *	flash_wait_op - wait for a flash operation to complete
   2971 *	@adapter: the adapter
   2972 *	@attempts: max number of polls of the status register
   2973 *	@delay: delay between polls in ms
   2974 *
   2975 *	Wait for a flash operation to complete by polling the status register.
   2976 */
   2977static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
   2978{
   2979	int ret;
   2980	u32 status;
   2981
   2982	while (1) {
   2983		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
   2984		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
   2985			return ret;
   2986		if (!(status & 1))
   2987			return 0;
   2988		if (--attempts == 0)
   2989			return -EAGAIN;
   2990		if (delay)
   2991			msleep(delay);
   2992	}
   2993}
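
/*
 * Note on the poll loop above: each iteration issues SF_RD_STATUS (with
 * @cont set so the flash command stays active), reads back a single status
 * byte, and waits for bit 0 to clear before returning 0.  Bit 0 is treated
 * as the "operation in progress" flag, which matches the usual serial-flash
 * status-register convention; after @attempts polls the loop gives up with
 * -EAGAIN.
 */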
   2994
   2995/**
   2996 *	t4_read_flash - read words from serial flash
   2997 *	@adapter: the adapter
   2998 *	@addr: the start address for the read
   2999 *	@nwords: how many 32-bit words to read
   3000 *	@data: where to store the read data
   3001 *	@byte_oriented: whether to store data as bytes or as words
   3002 *
   3003 *	Read the specified number of 32-bit words from the serial flash.
   3004 *	If @byte_oriented is set the read data is stored as a byte array
   3005 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
   3006 *	natural endianness.
   3007 */
   3008int t4_read_flash(struct adapter *adapter, unsigned int addr,
   3009		  unsigned int nwords, u32 *data, int byte_oriented)
   3010{
   3011	int ret;
   3012
   3013	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
   3014		return -EINVAL;
   3015
   3016	addr = swab32(addr) | SF_RD_DATA_FAST;
   3017
   3018	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
   3019	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
   3020		return ret;
   3021
   3022	for ( ; nwords; nwords--, data++) {
   3023		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
   3024		if (nwords == 1)
   3025			t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
   3026		if (ret)
   3027			return ret;
   3028		if (byte_oriented)
   3029			*data = (__force __u32)(cpu_to_be32(*data));
   3030	}
   3031	return 0;
   3032}
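
/*
 * Command-word layout (illustrative): for addr = 0x123456 the value sent by
 * sf1_write() above is swab32(0x123456) | SF_RD_DATA_FAST = 0x5634120b,
 * i.e. byte 0 carries the fast-read opcode 0x0b and bytes 1..3 carry the
 * 24-bit flash address most-significant byte first (0x12, 0x34, 0x56).  The
 * single discarded byte read immediately after the command presumably
 * covers the dummy byte the fast-read opcode requires before data is
 * returned.
 */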
   3033
   3034/**
   3035 *	t4_write_flash - write up to a page of data to the serial flash
   3036 *	@adapter: the adapter
   3037 *	@addr: the start address to write
   3038 *	@n: length of data to write in bytes
   3039 *	@data: the data to write
   3040 *	@byte_oriented: whether to store data as bytes or as words
   3041 *
   3042 *	Writes up to a page of data (256 bytes) to the serial flash starting
   3043 *	at the given address.  All the data must be written to the same page.
    3044 *	If @byte_oriented is set the write data is stored as a byte stream
    3045 *	(i.e., it matches what is on disk), otherwise in big-endian.
   3046 */
   3047static int t4_write_flash(struct adapter *adapter, unsigned int addr,
   3048			  unsigned int n, const u8 *data, bool byte_oriented)
   3049{
   3050	unsigned int i, c, left, val, offset = addr & 0xff;
   3051	u32 buf[64];
   3052	int ret;
   3053
   3054	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
   3055		return -EINVAL;
   3056
   3057	val = swab32(addr) | SF_PROG_PAGE;
   3058
   3059	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
   3060	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
   3061		goto unlock;
   3062
   3063	for (left = n; left; left -= c, data += c) {
   3064		c = min(left, 4U);
   3065		for (val = 0, i = 0; i < c; ++i) {
   3066			if (byte_oriented)
   3067				val = (val << 8) + data[i];
   3068			else
   3069				val = (val << 8) + data[c - i - 1];
   3070		}
   3071
   3072		ret = sf1_write(adapter, c, c != left, 1, val);
   3073		if (ret)
   3074			goto unlock;
   3075	}
   3076	ret = flash_wait_op(adapter, 8, 1);
   3077	if (ret)
   3078		goto unlock;
   3079
   3080	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
   3081
   3082	/* Read the page to verify the write succeeded */
   3083	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
   3084			    byte_oriented);
   3085	if (ret)
   3086		return ret;
   3087
   3088	if (memcmp(data - n, (u8 *)buf + offset, n)) {
   3089		dev_err(adapter->pdev_dev,
   3090			"failed to correctly write the flash page at %#x\n",
   3091			addr);
   3092		return -EIO;
   3093	}
   3094	return 0;
   3095
   3096unlock:
   3097	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
   3098	return ret;
   3099}
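
/*
 * Page-boundary check (illustrative): @offset is the position within the
 * 256-byte program page (addr & 0xff), so a call with addr = 0x100c0 and
 * n = 0x50 is rejected because 0xc0 + 0x50 = 0x110 crosses into the next
 * page, while the same data written as two calls split at 0x10100 would be
 * accepted.
 */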
   3100
   3101/**
   3102 *	t4_get_fw_version - read the firmware version
   3103 *	@adapter: the adapter
   3104 *	@vers: where to place the version
   3105 *
   3106 *	Reads the FW version from flash.
   3107 */
   3108int t4_get_fw_version(struct adapter *adapter, u32 *vers)
   3109{
   3110	return t4_read_flash(adapter, FLASH_FW_START +
   3111			     offsetof(struct fw_hdr, fw_ver), 1,
   3112			     vers, 0);
   3113}
   3114
   3115/**
   3116 *	t4_get_bs_version - read the firmware bootstrap version
   3117 *	@adapter: the adapter
   3118 *	@vers: where to place the version
   3119 *
   3120 *	Reads the FW Bootstrap version from flash.
   3121 */
   3122int t4_get_bs_version(struct adapter *adapter, u32 *vers)
   3123{
   3124	return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
   3125			     offsetof(struct fw_hdr, fw_ver), 1,
   3126			     vers, 0);
   3127}
   3128
   3129/**
   3130 *	t4_get_tp_version - read the TP microcode version
   3131 *	@adapter: the adapter
   3132 *	@vers: where to place the version
   3133 *
   3134 *	Reads the TP microcode version from flash.
   3135 */
   3136int t4_get_tp_version(struct adapter *adapter, u32 *vers)
   3137{
   3138	return t4_read_flash(adapter, FLASH_FW_START +
   3139			     offsetof(struct fw_hdr, tp_microcode_ver),
   3140			     1, vers, 0);
   3141}
   3142
   3143/**
   3144 *	t4_get_exprom_version - return the Expansion ROM version (if any)
   3145 *	@adap: the adapter
   3146 *	@vers: where to place the version
   3147 *
   3148 *	Reads the Expansion ROM header from FLASH and returns the version
   3149 *	number (if present) through the @vers return value pointer.  We return
   3150 *	this in the Firmware Version Format since it's convenient.  Return
   3151 *	0 on success, -ENOENT if no Expansion ROM is present.
   3152 */
   3153int t4_get_exprom_version(struct adapter *adap, u32 *vers)
   3154{
   3155	struct exprom_header {
   3156		unsigned char hdr_arr[16];	/* must start with 0x55aa */
   3157		unsigned char hdr_ver[4];	/* Expansion ROM version */
   3158	} *hdr;
   3159	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
   3160					   sizeof(u32))];
   3161	int ret;
   3162
   3163	ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
   3164			    ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
   3165			    0);
   3166	if (ret)
   3167		return ret;
   3168
   3169	hdr = (struct exprom_header *)exprom_header_buf;
   3170	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
   3171		return -ENOENT;
   3172
   3173	*vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
   3174		 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
   3175		 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
   3176		 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
   3177	return 0;
   3178}
   3179
   3180/**
   3181 *      t4_get_vpd_version - return the VPD version
   3182 *      @adapter: the adapter
   3183 *      @vers: where to place the version
   3184 *
   3185 *      Reads the VPD via the Firmware interface (thus this can only be called
   3186 *      once we're ready to issue Firmware commands).  The format of the
   3187 *      VPD version is adapter specific.  Returns 0 on success, an error on
   3188 *      failure.
   3189 *
   3190 *      Note that early versions of the Firmware didn't include the ability
   3191 *      to retrieve the VPD version, so we zero-out the return-value parameter
   3192 *      in that case to avoid leaving it with garbage in it.
   3193 *
   3194 *      Also note that the Firmware will return its cached copy of the VPD
   3195 *      Revision ID, not the actual Revision ID as written in the Serial
   3196 *      EEPROM.  This is only an issue if a new VPD has been written and the
   3197 *      Firmware/Chip haven't yet gone through a RESET sequence.  So it's best
   3198 *      to defer calling this routine till after a FW_RESET_CMD has been issued
   3199 *      if the Host Driver will be performing a full adapter initialization.
   3200 */
   3201int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
   3202{
   3203	u32 vpdrev_param;
   3204	int ret;
   3205
   3206	vpdrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
   3207			FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_VPDREV));
   3208	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
   3209			      1, &vpdrev_param, vers);
   3210	if (ret)
   3211		*vers = 0;
   3212	return ret;
   3213}
   3214
   3215/**
   3216 *      t4_get_scfg_version - return the Serial Configuration version
   3217 *      @adapter: the adapter
   3218 *      @vers: where to place the version
   3219 *
   3220 *      Reads the Serial Configuration Version via the Firmware interface
   3221 *      (thus this can only be called once we're ready to issue Firmware
   3222 *      commands).  The format of the Serial Configuration version is
   3223 *      adapter specific.  Returns 0 on success, an error on failure.
   3224 *
   3225 *      Note that early versions of the Firmware didn't include the ability
   3226 *      to retrieve the Serial Configuration version, so we zero-out the
   3227 *      return-value parameter in that case to avoid leaving it with
   3228 *      garbage in it.
   3229 *
   3230 *      Also note that the Firmware will return its cached copy of the Serial
   3231 *      Initialization Revision ID, not the actual Revision ID as written in
   3232 *      the Serial EEPROM.  This is only an issue if a new VPD has been written
   3233 *      and the Firmware/Chip haven't yet gone through a RESET sequence.  So
   3234 *      it's best to defer calling this routine till after a FW_RESET_CMD has
   3235 *      been issued if the Host Driver will be performing a full adapter
   3236 *      initialization.
   3237 */
   3238int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
   3239{
   3240	u32 scfgrev_param;
   3241	int ret;
   3242
   3243	scfgrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
   3244			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_SCFGREV));
   3245	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
   3246			      1, &scfgrev_param, vers);
   3247	if (ret)
   3248		*vers = 0;
   3249	return ret;
   3250}
   3251
   3252/**
   3253 *      t4_get_version_info - extract various chip/firmware version information
   3254 *      @adapter: the adapter
   3255 *
   3256 *      Reads various chip/firmware version numbers and stores them into the
    3257 *      adapter's Adapter Parameters structure.  If any of the reads fails,
   3258 *      the first failure will be returned, but all of the version numbers
   3259 *      will be read.
   3260 */
   3261int t4_get_version_info(struct adapter *adapter)
   3262{
   3263	int ret = 0;
   3264
   3265	#define FIRST_RET(__getvinfo) \
   3266	do { \
   3267		int __ret = __getvinfo; \
   3268		if (__ret && !ret) \
   3269			ret = __ret; \
   3270	} while (0)
   3271
   3272	FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
   3273	FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
   3274	FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
   3275	FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
   3276	FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
   3277	FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
   3278
   3279	#undef FIRST_RET
   3280	return ret;
   3281}
   3282
   3283/**
   3284 *      t4_dump_version_info - dump all of the adapter configuration IDs
   3285 *      @adapter: the adapter
   3286 *
   3287 *      Dumps all of the various bits of adapter configuration version/revision
    3288 *      ID information.  This is typically called at some point after
   3289 *      t4_get_version_info() has been called.
   3290 */
   3291void t4_dump_version_info(struct adapter *adapter)
   3292{
   3293	/* Device information */
   3294	dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
   3295		 adapter->params.vpd.id,
   3296		 CHELSIO_CHIP_RELEASE(adapter->params.chip));
   3297	dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
   3298		 adapter->params.vpd.sn, adapter->params.vpd.pn);
   3299
   3300	/* Firmware Version */
   3301	if (!adapter->params.fw_vers)
   3302		dev_warn(adapter->pdev_dev, "No firmware loaded\n");
   3303	else
   3304		dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
   3305			 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
   3306			 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
   3307			 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
   3308			 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
   3309
   3310	/* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
   3311	 * Firmware, so dev_info() is more appropriate here.)
   3312	 */
   3313	if (!adapter->params.bs_vers)
   3314		dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
   3315	else
   3316		dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
   3317			 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
   3318			 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
   3319			 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
   3320			 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
   3321
   3322	/* TP Microcode Version */
   3323	if (!adapter->params.tp_vers)
   3324		dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
   3325	else
   3326		dev_info(adapter->pdev_dev,
   3327			 "TP Microcode version: %u.%u.%u.%u\n",
   3328			 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
   3329			 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
   3330			 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
   3331			 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
   3332
   3333	/* Expansion ROM version */
   3334	if (!adapter->params.er_vers)
   3335		dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
   3336	else
   3337		dev_info(adapter->pdev_dev,
   3338			 "Expansion ROM version: %u.%u.%u.%u\n",
   3339			 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
   3340			 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
   3341			 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
   3342			 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
   3343
   3344	/* Serial Configuration version */
   3345	dev_info(adapter->pdev_dev, "Serial Configuration version: %#x\n",
   3346		 adapter->params.scfg_vers);
   3347
   3348	/* VPD Version */
   3349	dev_info(adapter->pdev_dev, "VPD version: %#x\n",
   3350		 adapter->params.vpd_vers);
   3351}
   3352
   3353/**
   3354 *	t4_check_fw_version - check if the FW is supported with this driver
   3355 *	@adap: the adapter
   3356 *
   3357 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
   3358 *	if there's exact match, a negative error if the version could not be
    3359 *	if there's an exact match, or a negative error if the version could not
    3360 *	be read or there's a major version mismatch.
   3361int t4_check_fw_version(struct adapter *adap)
   3362{
   3363	int i, ret, major, minor, micro;
   3364	int exp_major, exp_minor, exp_micro;
   3365	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
   3366
   3367	ret = t4_get_fw_version(adap, &adap->params.fw_vers);
   3368	/* Try multiple times before returning error */
   3369	for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
   3370		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
   3371
   3372	if (ret)
   3373		return ret;
   3374
   3375	major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
   3376	minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
   3377	micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
   3378
   3379	switch (chip_version) {
   3380	case CHELSIO_T4:
   3381		exp_major = T4FW_MIN_VERSION_MAJOR;
   3382		exp_minor = T4FW_MIN_VERSION_MINOR;
   3383		exp_micro = T4FW_MIN_VERSION_MICRO;
   3384		break;
   3385	case CHELSIO_T5:
   3386		exp_major = T5FW_MIN_VERSION_MAJOR;
   3387		exp_minor = T5FW_MIN_VERSION_MINOR;
   3388		exp_micro = T5FW_MIN_VERSION_MICRO;
   3389		break;
   3390	case CHELSIO_T6:
   3391		exp_major = T6FW_MIN_VERSION_MAJOR;
   3392		exp_minor = T6FW_MIN_VERSION_MINOR;
   3393		exp_micro = T6FW_MIN_VERSION_MICRO;
   3394		break;
   3395	default:
   3396		dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
   3397			adap->chip);
   3398		return -EINVAL;
   3399	}
   3400
   3401	if (major < exp_major || (major == exp_major && minor < exp_minor) ||
   3402	    (major == exp_major && minor == exp_minor && micro < exp_micro)) {
   3403		dev_err(adap->pdev_dev,
   3404			"Card has firmware version %u.%u.%u, minimum "
   3405			"supported firmware is %u.%u.%u.\n", major, minor,
   3406			micro, exp_major, exp_minor, exp_micro);
   3407		return -EFAULT;
   3408	}
   3409	return 0;
   3410}
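
/*
 * Example of the minimum-version check above (numbers illustrative): with a
 * card running firmware 1.14.4 and a driver minimum of 1.15.0, the major
 * numbers match but 14 < 15, so the mismatch is logged and -EFAULT is
 * returned; a card at 1.15.0 or newer passes.
 */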
   3411
   3412/* Is the given firmware API compatible with the one the driver was compiled
   3413 * with?
   3414 */
   3415static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
   3416{
   3417
   3418	/* short circuit if it's the exact same firmware version */
   3419	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
   3420		return 1;
   3421
   3422#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
   3423	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
   3424	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
   3425		return 1;
   3426#undef SAME_INTF
   3427
   3428	return 0;
   3429}
   3430
   3431/* The firmware in the filesystem is usable, but should it be installed?
   3432 * This routine explains itself in detail if it indicates the filesystem
   3433 * firmware should be installed.
   3434 */
   3435static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
   3436				int k, int c)
   3437{
   3438	const char *reason;
   3439
   3440	if (!card_fw_usable) {
   3441		reason = "incompatible or unusable";
   3442		goto install;
   3443	}
   3444
   3445	if (k > c) {
   3446		reason = "older than the version supported with this driver";
   3447		goto install;
   3448	}
   3449
   3450	return 0;
   3451
   3452install:
   3453	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
   3454		"installing firmware %u.%u.%u.%u on card.\n",
   3455		FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
   3456		FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
   3457		FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
   3458		FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
   3459
   3460	return 1;
   3461}
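
/*
 * Note: @k and @c are the filesystem and on-card firmware versions as plain
 * 32-bit words (already converted from big-endian by the caller).  The bare
 * "k > c" comparison above appears to rely on the major/minor/micro/build
 * fields being packed from most- to least-significant byte, as the
 * FW_HDR_FW_VER_*_G() extractions used for logging suggest.
 */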
   3462
   3463int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
   3464	       const u8 *fw_data, unsigned int fw_size,
   3465	       struct fw_hdr *card_fw, enum dev_state state,
   3466	       int *reset)
   3467{
   3468	int ret, card_fw_usable, fs_fw_usable;
   3469	const struct fw_hdr *fs_fw;
   3470	const struct fw_hdr *drv_fw;
   3471
   3472	drv_fw = &fw_info->fw_hdr;
   3473
   3474	/* Read the header of the firmware on the card */
   3475	ret = t4_read_flash(adap, FLASH_FW_START,
   3476			    sizeof(*card_fw) / sizeof(uint32_t),
   3477			    (uint32_t *)card_fw, 1);
   3478	if (ret == 0) {
   3479		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
   3480	} else {
   3481		dev_err(adap->pdev_dev,
   3482			"Unable to read card's firmware header: %d\n", ret);
   3483		card_fw_usable = 0;
   3484	}
   3485
   3486	if (fw_data != NULL) {
   3487		fs_fw = (const void *)fw_data;
   3488		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
   3489	} else {
   3490		fs_fw = NULL;
   3491		fs_fw_usable = 0;
   3492	}
   3493
   3494	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
   3495	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
   3496		/* Common case: the firmware on the card is an exact match and
   3497		 * the filesystem one is an exact match too, or the filesystem
   3498		 * one is absent/incompatible.
   3499		 */
   3500	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
   3501		   should_install_fs_fw(adap, card_fw_usable,
   3502					be32_to_cpu(fs_fw->fw_ver),
   3503					be32_to_cpu(card_fw->fw_ver))) {
   3504		ret = t4_fw_upgrade(adap, adap->mbox, fw_data,
   3505				    fw_size, 0);
   3506		if (ret != 0) {
   3507			dev_err(adap->pdev_dev,
   3508				"failed to install firmware: %d\n", ret);
   3509			goto bye;
   3510		}
   3511
   3512		/* Installed successfully, update the cached header too. */
   3513		*card_fw = *fs_fw;
   3514		card_fw_usable = 1;
   3515		*reset = 0;	/* already reset as part of load_fw */
   3516	}
   3517
   3518	if (!card_fw_usable) {
   3519		uint32_t d, c, k;
   3520
   3521		d = be32_to_cpu(drv_fw->fw_ver);
   3522		c = be32_to_cpu(card_fw->fw_ver);
   3523		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
   3524
   3525		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
   3526			"chip state %d, "
   3527			"driver compiled with %d.%d.%d.%d, "
   3528			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
   3529			state,
   3530			FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
   3531			FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
   3532			FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
   3533			FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
   3534			FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
   3535			FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
   3536		ret = -EINVAL;
   3537		goto bye;
   3538	}
   3539
   3540	/* We're using whatever's on the card and it's known to be good. */
   3541	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
   3542	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
   3543
   3544bye:
   3545	return ret;
   3546}
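
        /* Hedged usage sketch, not part of this file: a probe-time caller
         * might hand t4_prep_fw() an image fetched with request_firmware().
         * The fw_info table, its fw_mod_name field, and the state/reset
         * values from FW_HELLO handling are assumptions here, not shown in
         * this file:
         *
         *	const struct firmware *fw = NULL;
         *	struct fw_hdr card_fw;
         *	int reset = 1;
         *
         *	request_firmware(&fw, fw_info->fw_mod_name, adap->pdev_dev);
         *	ret = t4_prep_fw(adap, fw_info, fw ? fw->data : NULL,
         *			 fw ? fw->size : 0, &card_fw, state, &reset);
         *	release_firmware(fw);
         */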
   3547
   3548/**
   3549 *	t4_flash_erase_sectors - erase a range of flash sectors
   3550 *	@adapter: the adapter
   3551 *	@start: the first sector to erase
   3552 *	@end: the last sector to erase
   3553 *
   3554 *	Erases the sectors in the given inclusive range.
   3555 */
   3556static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
   3557{
   3558	int ret = 0;
   3559
   3560	if (end >= adapter->params.sf_nsec)
   3561		return -EINVAL;
   3562
   3563	while (start <= end) {
   3564		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
   3565		    (ret = sf1_write(adapter, 4, 0, 1,
   3566				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
   3567		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
   3568			dev_err(adapter->pdev_dev,
   3569				"erase of flash sector %d failed, error %d\n",
   3570				start, ret);
   3571			break;
   3572		}
   3573		start++;
   3574	}
   3575	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
   3576	return ret;
   3577}
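
        /* Illustrative note, not part of the driver: callers erase whole
         * sectors covering the region they are about to rewrite.  With a
         * hypothetical 64KB sector size, a 300KB firmware image would need
         *
         *	nsec = DIV_ROUND_UP(300 * 1024, 64 * 1024);	(5 sectors)
         *	t4_flash_erase_sectors(adap, FLASH_FW_START_SEC,
         *			       FLASH_FW_START_SEC + nsec - 1);
         *
         * which matches how t4_load_fw() below sizes its erase range.
         */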
   3578
   3579/**
   3580 *	t4_flash_cfg_addr - return the address of the flash configuration file
   3581 *	@adapter: the adapter
   3582 *
   3583 *	Return the address within the flash where the Firmware Configuration
   3584 *	File is stored.
   3585 */
   3586unsigned int t4_flash_cfg_addr(struct adapter *adapter)
   3587{
   3588	if (adapter->params.sf_size == 0x100000)
   3589		return FLASH_FPGA_CFG_START;
   3590	else
   3591		return FLASH_CFG_START;
   3592}
   3593
   3594/* Return TRUE if the specified firmware matches the adapter.  I.e. T4
   3595 * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
   3596 * and emit an error message for mismatched firmware to save our caller the
   3597 * effort ...
   3598 */
   3599static bool t4_fw_matches_chip(const struct adapter *adap,
   3600			       const struct fw_hdr *hdr)
   3601{
   3602	/* The expression below will return FALSE for any unsupported adapter
   3603	 * which will keep us "honest" in the future ...
   3604	 */
   3605	if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
   3606	    (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
   3607	    (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
   3608		return true;
   3609
   3610	dev_err(adap->pdev_dev,
   3611		"FW image (%d) is not suitable for this adapter (%d)\n",
   3612		hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
   3613	return false;
   3614}
   3615
   3616/**
   3617 *	t4_load_fw - download firmware
   3618 *	@adap: the adapter
   3619 *	@fw_data: the firmware image to write
   3620 *	@size: image size
   3621 *
   3622 *	Write the supplied firmware image to the card's serial flash.
   3623 */
   3624int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
   3625{
   3626	u32 csum;
   3627	int ret, addr;
   3628	unsigned int i;
   3629	u8 first_page[SF_PAGE_SIZE];
   3630	const __be32 *p = (const __be32 *)fw_data;
   3631	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
   3632	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
   3633	unsigned int fw_start_sec = FLASH_FW_START_SEC;
   3634	unsigned int fw_size = FLASH_FW_MAX_SIZE;
   3635	unsigned int fw_start = FLASH_FW_START;
   3636
   3637	if (!size) {
   3638		dev_err(adap->pdev_dev, "FW image has no data\n");
   3639		return -EINVAL;
   3640	}
   3641	if (size & 511) {
   3642		dev_err(adap->pdev_dev,
   3643			"FW image size not multiple of 512 bytes\n");
   3644		return -EINVAL;
   3645	}
   3646	if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
   3647		dev_err(adap->pdev_dev,
   3648			"FW image size differs from size in FW header\n");
   3649		return -EINVAL;
   3650	}
   3651	if (size > fw_size) {
   3652		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
   3653			fw_size);
   3654		return -EFBIG;
   3655	}
   3656	if (!t4_fw_matches_chip(adap, hdr))
   3657		return -EINVAL;
   3658
   3659	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
   3660		csum += be32_to_cpu(p[i]);
   3661
   3662	if (csum != 0xffffffff) {
   3663		dev_err(adap->pdev_dev,
   3664			"corrupted firmware image, checksum %#x\n", csum);
   3665		return -EINVAL;
   3666	}
   3667
   3668	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
   3669	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
   3670	if (ret)
   3671		goto out;
   3672
   3673	/*
   3674	 * We write the correct version at the end so the driver can see a bad
   3675	 * version if the FW write fails.  Start by writing a copy of the
   3676	 * first page with a bad version.
   3677	 */
   3678	memcpy(first_page, fw_data, SF_PAGE_SIZE);
   3679	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
   3680	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true);
   3681	if (ret)
   3682		goto out;
   3683
   3684	addr = fw_start;
   3685	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
   3686		addr += SF_PAGE_SIZE;
   3687		fw_data += SF_PAGE_SIZE;
   3688		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true);
   3689		if (ret)
   3690			goto out;
   3691	}
   3692
   3693	ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
   3694			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver,
   3695			     true);
   3696out:
   3697	if (ret)
   3698		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
   3699			ret);
   3700	else
   3701		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
   3702	return ret;
   3703}
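
        /* Illustrative sketch, not part of the driver: the checksum test in
         * t4_load_fw() accepts an image only when its big-endian 32-bit words
         * sum to 0xffffffff modulo 2^32.  A tool preparing an image could pick
         * one reserved word ('slot' below is hypothetical) to make the file
         * pass:
         *
         *	u32 sum = 0;
         *	for (i = 0; i < size / 4; i++)
         *		sum += be32_to_cpu(p[i]);
         *	p[slot] = cpu_to_be32(0xffffffff -
         *			      (sum - be32_to_cpu(p[slot])));
         */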
   3704
   3705/**
   3706 *	t4_phy_fw_ver - return current PHY firmware version
   3707 *	@adap: the adapter
   3708 *	@phy_fw_ver: return value buffer for PHY firmware version
   3709 *
   3710 *	Returns the current version of external PHY firmware on the
   3711 *	adapter.
   3712 */
   3713int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
   3714{
   3715	u32 param, val;
   3716	int ret;
   3717
   3718	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
   3719		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
   3720		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
   3721		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
   3722	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
   3723			      &param, &val);
   3724	if (ret)
   3725		return ret;
   3726	*phy_fw_ver = val;
   3727	return 0;
   3728}
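
        /* Hedged usage sketch, not part of this file: reporting the external
         * PHY firmware version, e.g. while assembling an ethtool firmware
         * version string:
         *
         *	int phy_ver;
         *
         *	if (t4_phy_fw_ver(adap, &phy_ver) == 0)
         *		dev_info(adap->pdev_dev,
         *			 "PHY firmware version %#x\n", phy_ver);
         */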
   3729
   3730/**
   3731 *	t4_load_phy_fw - download port PHY firmware
   3732 *	@adap: the adapter
   3733 *	@win: the PCI-E Memory Window index to use for t4_memory_rw()
   3734 *	@phy_fw_version: function to check PHY firmware versions
   3735 *	@phy_fw_data: the PHY firmware image to write
   3736 *	@phy_fw_size: image size
   3737 *
   3738 *	Transfer the specified PHY firmware to the adapter.  If a non-NULL
   3739 *	@phy_fw_version is supplied, then it will be used to determine if
   3740 *	it's necessary to perform the transfer by comparing the version
   3741 *	of any existing adapter PHY firmware with that of the passed in
   3742 *	PHY firmware image.
   3743 *
   3744 *	A negative error number will be returned if an error occurs.  If
   3745 *	version number support is available and there's no need to upgrade
   3746 *	the firmware, 0 will be returned.  If firmware is successfully
   3747 *	transferred to the adapter, 1 will be returned.
   3748 *
   3749 *	NOTE: some adapters only have local RAM to store the PHY firmware.  As
   3750 *	a result, a RESET of the adapter would cause that RAM to lose its
   3751 *	contents.  Thus, loading PHY firmware on such adapters must happen
   3752 *	after any FW_RESET_CMDs ...
   3753 */
   3754int t4_load_phy_fw(struct adapter *adap, int win,
   3755		   int (*phy_fw_version)(const u8 *, size_t),
   3756		   const u8 *phy_fw_data, size_t phy_fw_size)
   3757{
   3758	int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
   3759	unsigned long mtype = 0, maddr = 0;
   3760	u32 param, val;
   3761	int ret;
   3762
   3763	/* If we have version number support, then check to see if the adapter
   3764	 * already has up-to-date PHY firmware loaded.
   3765	 */
   3766	if (phy_fw_version) {
   3767		new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
   3768		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
   3769		if (ret < 0)
   3770			return ret;
   3771
   3772		if (cur_phy_fw_ver >= new_phy_fw_vers) {
   3773			CH_WARN(adap, "PHY Firmware already up-to-date, "
   3774				"version %#x\n", cur_phy_fw_ver);
   3775			return 0;
   3776		}
   3777	}
   3778
   3779	/* Ask the firmware where it wants us to copy the PHY firmware image.
   3780	 * The size of the file requires a special version of the READ command
   3781	 * which will pass the file size via the values field in PARAMS_CMD and
    3782	 * retrieve the firmware's return value in that same values
    3783	 * buffer.
   3784	 */
   3785	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
   3786		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
   3787		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
   3788		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
   3789	val = phy_fw_size;
   3790	ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
   3791				 &param, &val, 1, true);
   3792	if (ret < 0)
   3793		return ret;
   3794	mtype = val >> 8;
   3795	maddr = (val & 0xff) << 16;
   3796
   3797	/* Copy the supplied PHY Firmware image to the adapter memory location
   3798	 * allocated by the adapter firmware.
   3799	 */
   3800	spin_lock_bh(&adap->win0_lock);
   3801	ret = t4_memory_rw(adap, win, mtype, maddr,
   3802			   phy_fw_size, (__be32 *)phy_fw_data,
   3803			   T4_MEMORY_WRITE);
   3804	spin_unlock_bh(&adap->win0_lock);
   3805	if (ret)
   3806		return ret;
   3807
   3808	/* Tell the firmware that the PHY firmware image has been written to
   3809	 * RAM and it can now start copying it over to the PHYs.  The chip
   3810	 * firmware will RESET the affected PHYs as part of this operation
   3811	 * leaving them running the new PHY firmware image.
   3812	 */
   3813	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
   3814		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
   3815		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
   3816		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
   3817	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
   3818				    &param, &val, 30000);
   3819
   3820	/* If we have version number support, then check to see that the new
   3821	 * firmware got loaded properly.
   3822	 */
   3823	if (phy_fw_version) {
   3824		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
   3825		if (ret < 0)
   3826			return ret;
   3827
   3828		if (cur_phy_fw_ver != new_phy_fw_vers) {
   3829			CH_WARN(adap, "PHY Firmware did not update: "
   3830				"version on adapter %#x, "
   3831				"version flashed %#x\n",
   3832				cur_phy_fw_ver, new_phy_fw_vers);
   3833			return -ENXIO;
   3834		}
   3835	}
   3836
   3837	return 1;
   3838}
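
        /* Illustrative note, not part of the driver: the PHYFW_DOWNLOAD reply
         * decoded above carries the target memory type in the upper bits of
         * 'val' and the start address, in 64KB units, in the low byte.  A
         * hypothetical reply of val = 0x0204 decodes as
         *
         *	mtype = val >> 8;		mtype = 0x02
         *	maddr = (val & 0xff) << 16;	maddr = 0x40000
         */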
   3839
   3840/**
   3841 *	t4_fwcache - firmware cache operation
   3842 *	@adap: the adapter
   3843 *	@op  : the operation (flush or flush and invalidate)
   3844 */
   3845int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
   3846{
   3847	struct fw_params_cmd c;
   3848
   3849	memset(&c, 0, sizeof(c));
   3850	c.op_to_vfn =
   3851		cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
   3852			    FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
   3853			    FW_PARAMS_CMD_PFN_V(adap->pf) |
   3854			    FW_PARAMS_CMD_VFN_V(0));
   3855	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
   3856	c.param[0].mnem =
   3857		cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
   3858			    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
   3859	c.param[0].val = cpu_to_be32(op);
   3860
   3861	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
   3862}
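
        /* Hedged usage sketch, not part of this file: before reading adapter
         * memory behind the firmware's back, a caller might request a flush
         * and invalidate, assuming the FWCACHE enumerators from t4fw_api.h:
         *
         *	ret = t4_fwcache(adap, FW_PARAMS_PARAM_DEV_FWCACHE_FLUSHINV);
         *	if (ret)
         *		dev_warn(adap->pdev_dev,
         *			 "FW cache flush failed: %d\n", ret);
         */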
   3863
   3864void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
   3865			unsigned int *pif_req_wrptr,
   3866			unsigned int *pif_rsp_wrptr)
   3867{
   3868	int i, j;
   3869	u32 cfg, val, req, rsp;
   3870
   3871	cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
   3872	if (cfg & LADBGEN_F)
   3873		t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
   3874
   3875	val = t4_read_reg(adap, CIM_DEBUGSTS_A);
   3876	req = POLADBGWRPTR_G(val);
   3877	rsp = PILADBGWRPTR_G(val);
   3878	if (pif_req_wrptr)
   3879		*pif_req_wrptr = req;
   3880	if (pif_rsp_wrptr)
   3881		*pif_rsp_wrptr = rsp;
   3882
   3883	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
   3884		for (j = 0; j < 6; j++) {
   3885			t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
   3886				     PILADBGRDPTR_V(rsp));
   3887			*pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
   3888			*pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
   3889			req++;
   3890			rsp++;
   3891		}
   3892		req = (req + 2) & POLADBGRDPTR_M;
   3893		rsp = (rsp + 2) & PILADBGRDPTR_M;
   3894	}
   3895	t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
   3896}
   3897
   3898void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
   3899{
   3900	u32 cfg;
   3901	int i, j, idx;
   3902
   3903	cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
   3904	if (cfg & LADBGEN_F)
   3905		t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
   3906
   3907	for (i = 0; i < CIM_MALA_SIZE; i++) {
   3908		for (j = 0; j < 5; j++) {
   3909			idx = 8 * i + j;
   3910			t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
   3911				     PILADBGRDPTR_V(idx));
   3912			*ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
   3913			*ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
   3914		}
   3915	}
   3916	t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
   3917}
   3918
   3919void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
   3920{
   3921	unsigned int i, j;
   3922
   3923	for (i = 0; i < 8; i++) {
   3924		u32 *p = la_buf + i;
   3925
   3926		t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
   3927		j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
   3928		t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
   3929		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
   3930			*p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
   3931	}
   3932}
   3933
   3934/* The ADVERT_MASK is used to mask out all of the Advertised Firmware Port
   3935 * Capabilities which we control with separate controls -- see, for instance,
   3936 * Pause Frames and Forward Error Correction.  In order to determine what the
   3937 * full set of Advertised Port Capabilities are, the base Advertised Port
   3938 * Capabilities (masked by ADVERT_MASK) must be combined with the Advertised
   3939 * Port Capabilities associated with those other controls.  See
   3940 * t4_link_acaps() for how this is done.
   3941 */
   3942#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
   3943		     FW_PORT_CAP32_ANEG)
   3944
   3945/**
   3946 *	fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
   3947 *	@caps16: a 16-bit Port Capabilities value
   3948 *
   3949 *	Returns the equivalent 32-bit Port Capabilities value.
   3950 */
   3951static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
   3952{
   3953	fw_port_cap32_t caps32 = 0;
   3954
   3955	#define CAP16_TO_CAP32(__cap) \
   3956		do { \
   3957			if (caps16 & FW_PORT_CAP_##__cap) \
   3958				caps32 |= FW_PORT_CAP32_##__cap; \
   3959		} while (0)
   3960
   3961	CAP16_TO_CAP32(SPEED_100M);
   3962	CAP16_TO_CAP32(SPEED_1G);
   3963	CAP16_TO_CAP32(SPEED_25G);
   3964	CAP16_TO_CAP32(SPEED_10G);
   3965	CAP16_TO_CAP32(SPEED_40G);
   3966	CAP16_TO_CAP32(SPEED_100G);
   3967	CAP16_TO_CAP32(FC_RX);
   3968	CAP16_TO_CAP32(FC_TX);
   3969	CAP16_TO_CAP32(ANEG);
   3970	CAP16_TO_CAP32(FORCE_PAUSE);
   3971	CAP16_TO_CAP32(MDIAUTO);
   3972	CAP16_TO_CAP32(MDISTRAIGHT);
   3973	CAP16_TO_CAP32(FEC_RS);
   3974	CAP16_TO_CAP32(FEC_BASER_RS);
   3975	CAP16_TO_CAP32(802_3_PAUSE);
   3976	CAP16_TO_CAP32(802_3_ASM_DIR);
   3977
   3978	#undef CAP16_TO_CAP32
   3979
   3980	return caps32;
   3981}
   3982
   3983/**
   3984 *	fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
   3985 *	@caps32: a 32-bit Port Capabilities value
   3986 *
   3987 *	Returns the equivalent 16-bit Port Capabilities value.  Note that
   3988 *	not all 32-bit Port Capabilities can be represented in the 16-bit
   3989 *	Port Capabilities and some fields/values may not make it.
   3990 */
   3991static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
   3992{
   3993	fw_port_cap16_t caps16 = 0;
   3994
   3995	#define CAP32_TO_CAP16(__cap) \
   3996		do { \
   3997			if (caps32 & FW_PORT_CAP32_##__cap) \
   3998				caps16 |= FW_PORT_CAP_##__cap; \
   3999		} while (0)
   4000
   4001	CAP32_TO_CAP16(SPEED_100M);
   4002	CAP32_TO_CAP16(SPEED_1G);
   4003	CAP32_TO_CAP16(SPEED_10G);
   4004	CAP32_TO_CAP16(SPEED_25G);
   4005	CAP32_TO_CAP16(SPEED_40G);
   4006	CAP32_TO_CAP16(SPEED_100G);
   4007	CAP32_TO_CAP16(FC_RX);
   4008	CAP32_TO_CAP16(FC_TX);
   4009	CAP32_TO_CAP16(802_3_PAUSE);
   4010	CAP32_TO_CAP16(802_3_ASM_DIR);
   4011	CAP32_TO_CAP16(ANEG);
   4012	CAP32_TO_CAP16(FORCE_PAUSE);
   4013	CAP32_TO_CAP16(MDIAUTO);
   4014	CAP32_TO_CAP16(MDISTRAIGHT);
   4015	CAP32_TO_CAP16(FEC_RS);
   4016	CAP32_TO_CAP16(FEC_BASER_RS);
   4017
   4018	#undef CAP32_TO_CAP16
   4019
   4020	return caps16;
   4021}
   4022
   4023/* Translate Firmware Port Capabilities Pause specification to Common Code */
   4024static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
   4025{
   4026	enum cc_pause cc_pause = 0;
   4027
   4028	if (fw_pause & FW_PORT_CAP32_FC_RX)
   4029		cc_pause |= PAUSE_RX;
   4030	if (fw_pause & FW_PORT_CAP32_FC_TX)
   4031		cc_pause |= PAUSE_TX;
   4032
   4033	return cc_pause;
   4034}
   4035
   4036/* Translate Common Code Pause specification into Firmware Port Capabilities */
   4037static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
   4038{
   4039	/* Translate orthogonal RX/TX Pause Controls for L1 Configure
   4040	 * commands, etc.
   4041	 */
   4042	fw_port_cap32_t fw_pause = 0;
   4043
   4044	if (cc_pause & PAUSE_RX)
   4045		fw_pause |= FW_PORT_CAP32_FC_RX;
   4046	if (cc_pause & PAUSE_TX)
   4047		fw_pause |= FW_PORT_CAP32_FC_TX;
   4048	if (!(cc_pause & PAUSE_AUTONEG))
   4049		fw_pause |= FW_PORT_CAP32_FORCE_PAUSE;
   4050
   4051	/* Translate orthogonal Pause controls into IEEE 802.3 Pause,
   4052	 * Asymmetrical Pause for use in reporting to upper layer OS code, etc.
   4053	 * Note that these bits are ignored in L1 Configure commands.
   4054	 */
   4055	if (cc_pause & PAUSE_RX) {
   4056		if (cc_pause & PAUSE_TX)
   4057			fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
   4058		else
   4059			fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR |
   4060				    FW_PORT_CAP32_802_3_PAUSE;
   4061	} else if (cc_pause & PAUSE_TX) {
   4062		fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
   4063	}
   4064
   4065	return fw_pause;
   4066}
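
        /* Worked example, illustrative only: requesting PAUSE_RX | PAUSE_TX
         * with PAUSE_AUTONEG clear makes the translation above return
         *
         *	FW_PORT_CAP32_FC_RX | FW_PORT_CAP32_FC_TX |
         *	FW_PORT_CAP32_FORCE_PAUSE | FW_PORT_CAP32_802_3_PAUSE
         *
         * i.e. symmetric Pause with no 802.3 ASM_DIR bit set.
         */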
   4067
   4068/* Translate Firmware Forward Error Correction specification to Common Code */
   4069static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
   4070{
   4071	enum cc_fec cc_fec = 0;
   4072
   4073	if (fw_fec & FW_PORT_CAP32_FEC_RS)
   4074		cc_fec |= FEC_RS;
   4075	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
   4076		cc_fec |= FEC_BASER_RS;
   4077
   4078	return cc_fec;
   4079}
   4080
   4081/* Translate Common Code Forward Error Correction specification to Firmware */
   4082static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
   4083{
   4084	fw_port_cap32_t fw_fec = 0;
   4085
   4086	if (cc_fec & FEC_RS)
   4087		fw_fec |= FW_PORT_CAP32_FEC_RS;
   4088	if (cc_fec & FEC_BASER_RS)
   4089		fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
   4090
   4091	return fw_fec;
   4092}
   4093
   4094/**
   4095 *	t4_link_acaps - compute Link Advertised Port Capabilities
   4096 *	@adapter: the adapter
   4097 *	@port: the Port ID
   4098 *	@lc: the Port's Link Configuration
   4099 *
   4100 *	Synthesize the Advertised Port Capabilities we'll be using based on
   4101 *	the base Advertised Port Capabilities (which have been filtered by
   4102 *	ADVERT_MASK) plus the individual controls for things like Pause
   4103 *	Frames, Forward Error Correction, MDI, etc.
   4104 */
   4105fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
   4106			      struct link_config *lc)
   4107{
   4108	fw_port_cap32_t fw_fc, fw_fec, acaps;
   4109	unsigned int fw_mdi;
   4110	char cc_fec;
   4111
   4112	fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
   4113
   4114	/* Convert driver coding of Pause Frame Flow Control settings into the
   4115	 * Firmware's API.
   4116	 */
   4117	fw_fc = cc_to_fwcap_pause(lc->requested_fc);
   4118
   4119	/* Convert Common Code Forward Error Control settings into the
   4120	 * Firmware's API.  If the current Requested FEC has "Automatic"
   4121	 * (IEEE 802.3) specified, then we use whatever the Firmware
   4122	 * sent us as part of its IEEE 802.3-based interpretation of
   4123	 * the Transceiver Module EPROM FEC parameters.  Otherwise we
   4124	 * use whatever is in the current Requested FEC settings.
   4125	 */
   4126	if (lc->requested_fec & FEC_AUTO)
   4127		cc_fec = fwcap_to_cc_fec(lc->def_acaps);
   4128	else
   4129		cc_fec = lc->requested_fec;
   4130	fw_fec = cc_to_fwcap_fec(cc_fec);
   4131
   4132	/* Figure out what our Requested Port Capabilities are going to be.
   4133	 * Note parallel structure in t4_handle_get_port_info() and
   4134	 * init_link_config().
   4135	 */
   4136	if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
   4137		acaps = lc->acaps | fw_fc | fw_fec;
   4138		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
   4139		lc->fec = cc_fec;
   4140	} else if (lc->autoneg == AUTONEG_DISABLE) {
   4141		acaps = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
   4142		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
   4143		lc->fec = cc_fec;
   4144	} else {
   4145		acaps = lc->acaps | fw_fc | fw_fec | fw_mdi;
   4146	}
   4147
   4148	/* Some Requested Port Capabilities are trivially wrong if they exceed
   4149	 * the Physical Port Capabilities.  We can check that here and provide
   4150	 * moderately useful feedback in the system log.
   4151	 *
   4152	 * Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
   4153	 * we need to exclude this from this check in order to maintain
   4154	 * compatibility ...
   4155	 */
   4156	if ((acaps & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
   4157		dev_err(adapter->pdev_dev, "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
   4158			acaps, lc->pcaps);
   4159		return -EINVAL;
   4160	}
   4161
   4162	return acaps;
   4163}
   4164
   4165/**
   4166 *	t4_link_l1cfg_core - apply link configuration to MAC/PHY
   4167 *	@adapter: the adapter
   4168 *	@mbox: the Firmware Mailbox to use
   4169 *	@port: the Port ID
   4170 *	@lc: the Port's Link Configuration
   4171 *	@sleep_ok: if true we may sleep while awaiting command completion
   4172 *	@timeout: time to wait for command to finish before timing out
   4173 *		(negative implies @sleep_ok=false)
   4174 *
   4175 *	Set up a port's MAC and PHY according to a desired link configuration.
   4176 *	- If the PHY can auto-negotiate first decide what to advertise, then
   4177 *	  enable/disable auto-negotiation as desired, and reset.
   4178 *	- If the PHY does not auto-negotiate just reset it.
   4179 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
   4180 *	  otherwise do it later based on the outcome of auto-negotiation.
   4181 */
   4182int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
   4183		       unsigned int port, struct link_config *lc,
   4184		       u8 sleep_ok, int timeout)
   4185{
   4186	unsigned int fw_caps = adapter->params.fw_caps_support;
   4187	struct fw_port_cmd cmd;
   4188	fw_port_cap32_t rcap;
   4189	int ret;
   4190
   4191	if (!(lc->pcaps & FW_PORT_CAP32_ANEG) &&
   4192	    lc->autoneg == AUTONEG_ENABLE) {
   4193		return -EINVAL;
   4194	}
   4195
   4196	/* Compute our Requested Port Capabilities and send that on to the
   4197	 * Firmware.
   4198	 */
   4199	rcap = t4_link_acaps(adapter, port, lc);
   4200	memset(&cmd, 0, sizeof(cmd));
   4201	cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
   4202				       FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
   4203				       FW_PORT_CMD_PORTID_V(port));
   4204	cmd.action_to_len16 =
   4205		cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
   4206						 ? FW_PORT_ACTION_L1_CFG
   4207						 : FW_PORT_ACTION_L1_CFG32) |
   4208						 FW_LEN16(cmd));
   4209	if (fw_caps == FW_CAPS16)
   4210		cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
   4211	else
   4212		cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
   4213
   4214	ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL,
   4215				      sleep_ok, timeout);
   4216
   4217	/* Unfortunately, even if the Requested Port Capabilities "fit" within
   4218	 * the Physical Port Capabilities, some combinations of features may
   4219	 * still not be legal.  For example, 40Gb/s and Reed-Solomon Forward
   4220	 * Error Correction.  So if the Firmware rejects the L1 Configure
   4221	 * request, flag that here.
   4222	 */
   4223	if (ret) {
   4224		dev_err(adapter->pdev_dev,
   4225			"Requested Port Capabilities %#x rejected, error %d\n",
   4226			rcap, -ret);
   4227		return ret;
   4228	}
   4229	return 0;
   4230}
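
        /* Hedged usage sketch, not part of this file: after updating a port's
         * requested settings (e.g. from an ethtool handler), the new
         * configuration is pushed to the firmware roughly as below; 'pi' is a
         * hypothetical per-port structure and the 5000ms timeout is only an
         * illustrative value:
         *
         *	struct link_config *lc = &pi->link_cfg;
         *
         *	lc->autoneg = AUTONEG_ENABLE;
         *	lc->requested_fc = PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG;
         *	ret = t4_link_l1cfg_core(adap, adap->mbox, pi->port_id, lc,
         *				 true, 5000);
         */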
   4231
   4232/**
   4233 *	t4_restart_aneg - restart autonegotiation
   4234 *	@adap: the adapter
   4235 *	@mbox: mbox to use for the FW command
   4236 *	@port: the port id
   4237 *
   4238 *	Restarts autonegotiation for the selected port.
   4239 */
   4240int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
   4241{
   4242	unsigned int fw_caps = adap->params.fw_caps_support;
   4243	struct fw_port_cmd c;
   4244
   4245	memset(&c, 0, sizeof(c));
   4246	c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
   4247				     FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
   4248				     FW_PORT_CMD_PORTID_V(port));
   4249	c.action_to_len16 =
   4250		cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
   4251						 ? FW_PORT_ACTION_L1_CFG
   4252						 : FW_PORT_ACTION_L1_CFG32) |
   4253			    FW_LEN16(c));
   4254	if (fw_caps == FW_CAPS16)
   4255		c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
   4256	else
   4257		c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG);
   4258	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
   4259}
   4260
   4261typedef void (*int_handler_t)(struct adapter *adap);
   4262
   4263struct intr_info {
   4264	unsigned int mask;       /* bits to check in interrupt status */
   4265	const char *msg;         /* message to print or NULL */
   4266	short stat_idx;          /* stat counter to increment or -1 */
   4267	unsigned short fatal;    /* whether the condition reported is fatal */
   4268	int_handler_t int_handler; /* platform-specific int handler */
   4269};
   4270
   4271/**
   4272 *	t4_handle_intr_status - table driven interrupt handler
   4273 *	@adapter: the adapter that generated the interrupt
   4274 *	@reg: the interrupt status register to process
   4275 *	@acts: table of interrupt actions
   4276 *
   4277 *	A table driven interrupt handler that applies a set of masks to an
   4278 *	interrupt status word and performs the corresponding actions if the
   4279 *	interrupts described by the mask have occurred.  The actions include
   4280 *	optionally emitting a warning or alert message.  The table is terminated
   4281 *	by an entry specifying mask 0.  Returns the number of fatal interrupt
   4282 *	conditions.
   4283 */
   4284static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
   4285				 const struct intr_info *acts)
   4286{
   4287	int fatal = 0;
   4288	unsigned int mask = 0;
   4289	unsigned int status = t4_read_reg(adapter, reg);
   4290
   4291	for ( ; acts->mask; ++acts) {
   4292		if (!(status & acts->mask))
   4293			continue;
   4294		if (acts->fatal) {
   4295			fatal++;
   4296			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
   4297				  status & acts->mask);
   4298		} else if (acts->msg && printk_ratelimit())
   4299			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
   4300				 status & acts->mask);
   4301		if (acts->int_handler)
   4302			acts->int_handler(adapter);
   4303		mask |= acts->mask;
   4304	}
   4305	status &= mask;
   4306	if (status)                           /* clear processed interrupts */
   4307		t4_write_reg(adapter, reg, status);
   4308	return fatal;
   4309}
   4310
   4311/*
   4312 * Interrupt handler for the PCIE module.
   4313 */
   4314static void pcie_intr_handler(struct adapter *adapter)
   4315{
   4316	static const struct intr_info sysbus_intr_info[] = {
   4317		{ RNPP_F, "RXNP array parity error", -1, 1 },
   4318		{ RPCP_F, "RXPC array parity error", -1, 1 },
   4319		{ RCIP_F, "RXCIF array parity error", -1, 1 },
   4320		{ RCCP_F, "Rx completions control array parity error", -1, 1 },
   4321		{ RFTP_F, "RXFT array parity error", -1, 1 },
   4322		{ 0 }
   4323	};
   4324	static const struct intr_info pcie_port_intr_info[] = {
   4325		{ TPCP_F, "TXPC array parity error", -1, 1 },
   4326		{ TNPP_F, "TXNP array parity error", -1, 1 },
   4327		{ TFTP_F, "TXFT array parity error", -1, 1 },
   4328		{ TCAP_F, "TXCA array parity error", -1, 1 },
   4329		{ TCIP_F, "TXCIF array parity error", -1, 1 },
   4330		{ RCAP_F, "RXCA array parity error", -1, 1 },
   4331		{ OTDD_F, "outbound request TLP discarded", -1, 1 },
   4332		{ RDPE_F, "Rx data parity error", -1, 1 },
   4333		{ TDUE_F, "Tx uncorrectable data error", -1, 1 },
   4334		{ 0 }
   4335	};
   4336	static const struct intr_info pcie_intr_info[] = {
   4337		{ MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
   4338		{ MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
   4339		{ MSIDATAPERR_F, "MSI data parity error", -1, 1 },
   4340		{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
   4341		{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
   4342		{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
   4343		{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
   4344		{ PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
   4345		{ PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
   4346		{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
   4347		{ CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
   4348		{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
   4349		{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
   4350		{ DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
   4351		{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
   4352		{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
   4353		{ HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
   4354		{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
   4355		{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
   4356		{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
   4357		{ FIDPERR_F, "PCI FID parity error", -1, 1 },
   4358		{ INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
   4359		{ MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
   4360		{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
   4361		{ RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
   4362		{ RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
   4363		{ RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
   4364		{ PCIESINT_F, "PCI core secondary fault", -1, 1 },
   4365		{ PCIEPINT_F, "PCI core primary fault", -1, 1 },
   4366		{ UNXSPLCPLERR_F, "PCI unexpected split completion error",
   4367		  -1, 0 },
   4368		{ 0 }
   4369	};
   4370
   4371	static struct intr_info t5_pcie_intr_info[] = {
   4372		{ MSTGRPPERR_F, "Master Response Read Queue parity error",
   4373		  -1, 1 },
   4374		{ MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
   4375		{ MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
   4376		{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
   4377		{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
   4378		{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
   4379		{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
   4380		{ PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
   4381		  -1, 1 },
   4382		{ PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
   4383		  -1, 1 },
   4384		{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
   4385		{ MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
   4386		{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
   4387		{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
   4388		{ DREQWRPERR_F, "PCI DMA channel write request parity error",
   4389		  -1, 1 },
   4390		{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
   4391		{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
    4392		{ HREQWRPERR_F, "PCI HMA channel write request parity error", -1, 1 },
   4393		{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
   4394		{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
   4395		{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
   4396		{ FIDPERR_F, "PCI FID parity error", -1, 1 },
    4397		{ VFIDPERR_F, "PCI VFID parity error", -1, 1 },
   4398		{ MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
   4399		{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
   4400		{ IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
   4401		  -1, 1 },
   4402		{ IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
   4403		  -1, 1 },
   4404		{ RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
   4405		{ IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
   4406		{ TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
   4407		{ READRSPERR_F, "Outbound read error", -1, 0 },
   4408		{ 0 }
   4409	};
   4410
   4411	int fat;
   4412
   4413	if (is_t4(adapter->params.chip))
   4414		fat = t4_handle_intr_status(adapter,
   4415				PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
   4416				sysbus_intr_info) +
   4417			t4_handle_intr_status(adapter,
   4418					PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
   4419					pcie_port_intr_info) +
   4420			t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
   4421					      pcie_intr_info);
   4422	else
   4423		fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
   4424					    t5_pcie_intr_info);
   4425
   4426	if (fat)
   4427		t4_fatal_err(adapter);
   4428}
   4429
   4430/*
   4431 * TP interrupt handler.
   4432 */
   4433static void tp_intr_handler(struct adapter *adapter)
   4434{
   4435	static const struct intr_info tp_intr_info[] = {
   4436		{ 0x3fffffff, "TP parity error", -1, 1 },
   4437		{ FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
   4438		{ 0 }
   4439	};
   4440
   4441	if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
   4442		t4_fatal_err(adapter);
   4443}
   4444
   4445/*
   4446 * SGE interrupt handler.
   4447 */
   4448static void sge_intr_handler(struct adapter *adapter)
   4449{
   4450	u32 v = 0, perr;
   4451	u32 err;
   4452
   4453	static const struct intr_info sge_intr_info[] = {
   4454		{ ERR_CPL_EXCEED_IQE_SIZE_F,
   4455		  "SGE received CPL exceeding IQE size", -1, 1 },
   4456		{ ERR_INVALID_CIDX_INC_F,
   4457		  "SGE GTS CIDX increment too large", -1, 0 },
   4458		{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
   4459		{ DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
   4460		{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
   4461		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
   4462		{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
   4463		  0 },
   4464		{ ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
   4465		  0 },
   4466		{ ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
   4467		  0 },
   4468		{ ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
   4469		  0 },
   4470		{ ERR_ING_CTXT_PRIO_F,
   4471		  "SGE too many priority ingress contexts", -1, 0 },
   4472		{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
   4473		{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
   4474		{ 0 }
   4475	};
   4476
   4477	static struct intr_info t4t5_sge_intr_info[] = {
   4478		{ ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
   4479		{ DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
   4480		{ ERR_EGR_CTXT_PRIO_F,
   4481		  "SGE too many priority egress contexts", -1, 0 },
   4482		{ 0 }
   4483	};
   4484
   4485	perr = t4_read_reg(adapter, SGE_INT_CAUSE1_A);
   4486	if (perr) {
   4487		v |= perr;
   4488		dev_alert(adapter->pdev_dev, "SGE Cause1 Parity Error %#x\n",
   4489			  perr);
   4490	}
   4491
   4492	perr = t4_read_reg(adapter, SGE_INT_CAUSE2_A);
   4493	if (perr) {
   4494		v |= perr;
   4495		dev_alert(adapter->pdev_dev, "SGE Cause2 Parity Error %#x\n",
   4496			  perr);
   4497	}
   4498
   4499	if (CHELSIO_CHIP_VERSION(adapter->params.chip) >= CHELSIO_T5) {
   4500		perr = t4_read_reg(adapter, SGE_INT_CAUSE5_A);
   4501		/* Parity error (CRC) for err_T_RxCRC is trivial, ignore it */
   4502		perr &= ~ERR_T_RXCRC_F;
   4503		if (perr) {
   4504			v |= perr;
   4505			dev_alert(adapter->pdev_dev,
   4506				  "SGE Cause5 Parity Error %#x\n", perr);
   4507		}
   4508	}
   4509
   4510	v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
   4511	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
   4512		v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
   4513					   t4t5_sge_intr_info);
   4514
   4515	err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
   4516	if (err & ERROR_QID_VALID_F) {
   4517		dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
   4518			ERROR_QID_G(err));
   4519		if (err & UNCAPTURED_ERROR_F)
   4520			dev_err(adapter->pdev_dev,
   4521				"SGE UNCAPTURED_ERROR set (clearing)\n");
   4522		t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
   4523			     UNCAPTURED_ERROR_F);
   4524	}
   4525
   4526	if (v != 0)
   4527		t4_fatal_err(adapter);
   4528}
   4529
   4530#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
   4531		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
   4532#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
   4533		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
   4534
   4535/*
   4536 * CIM interrupt handler.
   4537 */
   4538static void cim_intr_handler(struct adapter *adapter)
   4539{
   4540	static const struct intr_info cim_intr_info[] = {
   4541		{ PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
   4542		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
   4543		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
   4544		{ MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
   4545		{ MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
   4546		{ TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
   4547		{ TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
   4548		{ TIMER0INT_F, "CIM TIMER0 interrupt", -1, 1 },
   4549		{ 0 }
   4550	};
   4551	static const struct intr_info cim_upintr_info[] = {
   4552		{ RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
   4553		{ ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
   4554		{ ILLWRINT_F, "CIM illegal write", -1, 1 },
   4555		{ ILLRDINT_F, "CIM illegal read", -1, 1 },
   4556		{ ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
   4557		{ ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
   4558		{ SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
   4559		{ SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
   4560		{ BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
   4561		{ SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
   4562		{ SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
   4563		{ BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
   4564		{ SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
   4565		{ SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
   4566		{ BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
   4567		{ BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
   4568		{ SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
   4569		{ SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
   4570		{ BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
   4571		{ BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
   4572		{ SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
   4573		{ SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
   4574		{ BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
   4575		{ BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
   4576		{ REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
   4577		{ RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
   4578		{ TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
   4579		{ TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
   4580		{ 0 }
   4581	};
   4582
   4583	u32 val, fw_err;
   4584	int fat;
   4585
   4586	fw_err = t4_read_reg(adapter, PCIE_FW_A);
   4587	if (fw_err & PCIE_FW_ERR_F)
   4588		t4_report_fw_error(adapter);
   4589
   4590	/* When the Firmware detects an internal error which normally
   4591	 * wouldn't raise a Host Interrupt, it forces a CIM Timer0 interrupt
   4592	 * in order to make sure the Host sees the Firmware Crash.  So
   4593	 * if we have a Timer0 interrupt and don't see a Firmware Crash,
   4594	 * ignore the Timer0 interrupt.
   4595	 */
   4596
   4597	val = t4_read_reg(adapter, CIM_HOST_INT_CAUSE_A);
   4598	if (val & TIMER0INT_F)
   4599		if (!(fw_err & PCIE_FW_ERR_F) ||
   4600		    (PCIE_FW_EVAL_G(fw_err) != PCIE_FW_EVAL_CRASH))
   4601			t4_write_reg(adapter, CIM_HOST_INT_CAUSE_A,
   4602				     TIMER0INT_F);
   4603
   4604	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
   4605				    cim_intr_info) +
   4606	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
   4607				    cim_upintr_info);
   4608	if (fat)
   4609		t4_fatal_err(adapter);
   4610}
   4611
   4612/*
   4613 * ULP RX interrupt handler.
   4614 */
   4615static void ulprx_intr_handler(struct adapter *adapter)
   4616{
   4617	static const struct intr_info ulprx_intr_info[] = {
   4618		{ 0x1800000, "ULPRX context error", -1, 1 },
   4619		{ 0x7fffff, "ULPRX parity error", -1, 1 },
   4620		{ 0 }
   4621	};
   4622
   4623	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
   4624		t4_fatal_err(adapter);
   4625}
   4626
   4627/*
   4628 * ULP TX interrupt handler.
   4629 */
   4630static void ulptx_intr_handler(struct adapter *adapter)
   4631{
   4632	static const struct intr_info ulptx_intr_info[] = {
   4633		{ PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
   4634		  0 },
   4635		{ PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
   4636		  0 },
   4637		{ PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
   4638		  0 },
   4639		{ PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
   4640		  0 },
   4641		{ 0xfffffff, "ULPTX parity error", -1, 1 },
   4642		{ 0 }
   4643	};
   4644
   4645	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
   4646		t4_fatal_err(adapter);
   4647}
   4648
   4649/*
   4650 * PM TX interrupt handler.
   4651 */
   4652static void pmtx_intr_handler(struct adapter *adapter)
   4653{
   4654	static const struct intr_info pmtx_intr_info[] = {
   4655		{ PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
   4656		{ PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
   4657		{ PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
   4658		{ ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
   4659		{ PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
   4660		{ OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
   4661		{ DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
   4662		  -1, 1 },
   4663		{ ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
   4664		{ PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
   4665		{ 0 }
   4666	};
   4667
   4668	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
   4669		t4_fatal_err(adapter);
   4670}
   4671
   4672/*
   4673 * PM RX interrupt handler.
   4674 */
   4675static void pmrx_intr_handler(struct adapter *adapter)
   4676{
   4677	static const struct intr_info pmrx_intr_info[] = {
   4678		{ ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
   4679		{ PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
   4680		{ OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
   4681		{ DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
   4682		  -1, 1 },
   4683		{ IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
   4684		{ PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
   4685		{ 0 }
   4686	};
   4687
   4688	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
   4689		t4_fatal_err(adapter);
   4690}
   4691
   4692/*
   4693 * CPL switch interrupt handler.
   4694 */
   4695static void cplsw_intr_handler(struct adapter *adapter)
   4696{
   4697	static const struct intr_info cplsw_intr_info[] = {
   4698		{ CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
   4699		{ CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
   4700		{ TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
   4701		{ SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
   4702		{ CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
   4703		{ ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
   4704		{ 0 }
   4705	};
   4706
   4707	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
   4708		t4_fatal_err(adapter);
   4709}
   4710
   4711/*
   4712 * LE interrupt handler.
   4713 */
   4714static void le_intr_handler(struct adapter *adap)
   4715{
   4716	enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
   4717	static const struct intr_info le_intr_info[] = {
   4718		{ LIPMISS_F, "LE LIP miss", -1, 0 },
   4719		{ LIP0_F, "LE 0 LIP error", -1, 0 },
   4720		{ PARITYERR_F, "LE parity error", -1, 1 },
   4721		{ UNKNOWNCMD_F, "LE unknown command", -1, 1 },
   4722		{ REQQPARERR_F, "LE request queue parity error", -1, 1 },
   4723		{ 0 }
   4724	};
   4725
   4726	static struct intr_info t6_le_intr_info[] = {
   4727		{ T6_LIPMISS_F, "LE LIP miss", -1, 0 },
   4728		{ T6_LIP0_F, "LE 0 LIP error", -1, 0 },
   4729		{ CMDTIDERR_F, "LE cmd tid error", -1, 1 },
   4730		{ TCAMINTPERR_F, "LE parity error", -1, 1 },
   4731		{ T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
   4732		{ SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
   4733		{ HASHTBLMEMCRCERR_F, "LE hash table mem crc error", -1, 0 },
   4734		{ 0 }
   4735	};
   4736
   4737	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
   4738				  (chip <= CHELSIO_T5) ?
   4739				  le_intr_info : t6_le_intr_info))
   4740		t4_fatal_err(adap);
   4741}
   4742
   4743/*
   4744 * MPS interrupt handler.
   4745 */
   4746static void mps_intr_handler(struct adapter *adapter)
   4747{
   4748	static const struct intr_info mps_rx_intr_info[] = {
   4749		{ 0xffffff, "MPS Rx parity error", -1, 1 },
   4750		{ 0 }
   4751	};
   4752	static const struct intr_info mps_tx_intr_info[] = {
   4753		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
   4754		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
   4755		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
   4756		  -1, 1 },
   4757		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
   4758		  -1, 1 },
   4759		{ BUBBLE_F, "MPS Tx underflow", -1, 1 },
   4760		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
   4761		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
   4762		{ 0 }
   4763	};
   4764	static const struct intr_info t6_mps_tx_intr_info[] = {
   4765		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
   4766		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
   4767		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
   4768		  -1, 1 },
   4769		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
   4770		  -1, 1 },
   4771		/* MPS Tx Bubble is normal for T6 */
   4772		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
   4773		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
   4774		{ 0 }
   4775	};
   4776	static const struct intr_info mps_trc_intr_info[] = {
   4777		{ FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
   4778		{ PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
   4779		  -1, 1 },
   4780		{ MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
   4781		{ 0 }
   4782	};
   4783	static const struct intr_info mps_stat_sram_intr_info[] = {
   4784		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
   4785		{ 0 }
   4786	};
   4787	static const struct intr_info mps_stat_tx_intr_info[] = {
   4788		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
   4789		{ 0 }
   4790	};
   4791	static const struct intr_info mps_stat_rx_intr_info[] = {
   4792		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
   4793		{ 0 }
   4794	};
   4795	static const struct intr_info mps_cls_intr_info[] = {
   4796		{ MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
   4797		{ MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
   4798		{ HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
   4799		{ 0 }
   4800	};
   4801
   4802	int fat;
   4803
   4804	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
   4805				    mps_rx_intr_info) +
   4806	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
   4807				    is_t6(adapter->params.chip)
   4808				    ? t6_mps_tx_intr_info
   4809				    : mps_tx_intr_info) +
   4810	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
   4811				    mps_trc_intr_info) +
   4812	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
   4813				    mps_stat_sram_intr_info) +
   4814	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
   4815				    mps_stat_tx_intr_info) +
   4816	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
   4817				    mps_stat_rx_intr_info) +
   4818	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
   4819				    mps_cls_intr_info);
   4820
   4821	t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
   4822	t4_read_reg(adapter, MPS_INT_CAUSE_A);                    /* flush */
   4823	if (fat)
   4824		t4_fatal_err(adapter);
   4825}
   4826
   4827#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
   4828		      ECC_UE_INT_CAUSE_F)
   4829
   4830/*
   4831 * EDC/MC interrupt handler.
   4832 */
   4833static void mem_intr_handler(struct adapter *adapter, int idx)
   4834{
   4835	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
   4836
   4837	unsigned int addr, cnt_addr, v;
   4838
   4839	if (idx <= MEM_EDC1) {
   4840		addr = EDC_REG(EDC_INT_CAUSE_A, idx);
   4841		cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
   4842	} else if (idx == MEM_MC) {
   4843		if (is_t4(adapter->params.chip)) {
   4844			addr = MC_INT_CAUSE_A;
   4845			cnt_addr = MC_ECC_STATUS_A;
   4846		} else {
   4847			addr = MC_P_INT_CAUSE_A;
   4848			cnt_addr = MC_P_ECC_STATUS_A;
   4849		}
   4850	} else {
   4851		addr = MC_REG(MC_P_INT_CAUSE_A, 1);
   4852		cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
   4853	}
   4854
   4855	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
   4856	if (v & PERR_INT_CAUSE_F)
   4857		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
   4858			  name[idx]);
   4859	if (v & ECC_CE_INT_CAUSE_F) {
   4860		u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
   4861
   4862		t4_edc_err_read(adapter, idx);
   4863
   4864		t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
   4865		if (printk_ratelimit())
   4866			dev_warn(adapter->pdev_dev,
   4867				 "%u %s correctable ECC data error%s\n",
   4868				 cnt, name[idx], cnt > 1 ? "s" : "");
   4869	}
   4870	if (v & ECC_UE_INT_CAUSE_F)
   4871		dev_alert(adapter->pdev_dev,
   4872			  "%s uncorrectable ECC data error\n", name[idx]);
   4873
   4874	t4_write_reg(adapter, addr, v);
   4875	if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
   4876		t4_fatal_err(adapter);
   4877}
   4878
   4879/*
   4880 * MA interrupt handler.
   4881 */
   4882static void ma_intr_handler(struct adapter *adap)
   4883{
   4884	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
   4885
   4886	if (status & MEM_PERR_INT_CAUSE_F) {
   4887		dev_alert(adap->pdev_dev,
   4888			  "MA parity error, parity status %#x\n",
   4889			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
   4890		if (is_t5(adap->params.chip))
   4891			dev_alert(adap->pdev_dev,
   4892				  "MA parity error, parity status %#x\n",
   4893				  t4_read_reg(adap,
   4894					      MA_PARITY_ERROR_STATUS2_A));
   4895	}
   4896	if (status & MEM_WRAP_INT_CAUSE_F) {
   4897		v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
   4898		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
   4899			  "client %u to address %#x\n",
   4900			  MEM_WRAP_CLIENT_NUM_G(v),
   4901			  MEM_WRAP_ADDRESS_G(v) << 4);
   4902	}
   4903	t4_write_reg(adap, MA_INT_CAUSE_A, status);
   4904	t4_fatal_err(adap);
   4905}
   4906
   4907/*
   4908 * SMB interrupt handler.
   4909 */
   4910static void smb_intr_handler(struct adapter *adap)
   4911{
   4912	static const struct intr_info smb_intr_info[] = {
   4913		{ MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
   4914		{ MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
   4915		{ SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
   4916		{ 0 }
   4917	};
   4918
   4919	if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
   4920		t4_fatal_err(adap);
   4921}
   4922
   4923/*
   4924 * NC-SI interrupt handler.
   4925 */
   4926static void ncsi_intr_handler(struct adapter *adap)
   4927{
   4928	static const struct intr_info ncsi_intr_info[] = {
   4929		{ CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
   4930		{ MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
   4931		{ TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
   4932		{ RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
   4933		{ 0 }
   4934	};
   4935
   4936	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
   4937		t4_fatal_err(adap);
   4938}
   4939
   4940/*
   4941 * XGMAC interrupt handler.
   4942 */
   4943static void xgmac_intr_handler(struct adapter *adap, int port)
   4944{
   4945	u32 v, int_cause_reg;
   4946
   4947	if (is_t4(adap->params.chip))
   4948		int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
   4949	else
   4950		int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
   4951
   4952	v = t4_read_reg(adap, int_cause_reg);
   4953
   4954	v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
   4955	if (!v)
   4956		return;
   4957
   4958	if (v & TXFIFO_PRTY_ERR_F)
   4959		dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
   4960			  port);
   4961	if (v & RXFIFO_PRTY_ERR_F)
   4962		dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
   4963			  port);
    4964	t4_write_reg(adap, int_cause_reg, v);	/* clear the handled interrupts */
   4965	t4_fatal_err(adap);
   4966}
   4967
   4968/*
   4969 * PL interrupt handler.
   4970 */
   4971static void pl_intr_handler(struct adapter *adap)
   4972{
   4973	static const struct intr_info pl_intr_info[] = {
   4974		{ FATALPERR_F, "T4 fatal parity error", -1, 1 },
   4975		{ PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
   4976		{ 0 }
   4977	};
   4978
   4979	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
   4980		t4_fatal_err(adap);
   4981}
   4982
   4983#define PF_INTR_MASK (PFSW_F)
   4984#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
   4985		EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
   4986		CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
   4987
   4988/**
   4989 *	t4_slow_intr_handler - control path interrupt handler
   4990 *	@adapter: the adapter
   4991 *
   4992 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
   4993 *	The designation 'slow' is because it involves register reads, while
   4994 *	data interrupts typically don't involve any MMIOs.
   4995 */
   4996int t4_slow_intr_handler(struct adapter *adapter)
   4997{
   4998	/* There are rare cases where a PL_INT_CAUSE bit may end up getting
   4999	 * set when the corresponding PL_INT_ENABLE bit isn't set.  It's
   5000	 * easiest just to mask that case here.
   5001	 */
   5002	u32 raw_cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
   5003	u32 enable = t4_read_reg(adapter, PL_INT_ENABLE_A);
   5004	u32 cause = raw_cause & enable;
   5005
   5006	if (!(cause & GLBL_INTR_MASK))
   5007		return 0;
   5008	if (cause & CIM_F)
   5009		cim_intr_handler(adapter);
   5010	if (cause & MPS_F)
   5011		mps_intr_handler(adapter);
   5012	if (cause & NCSI_F)
   5013		ncsi_intr_handler(adapter);
   5014	if (cause & PL_F)
   5015		pl_intr_handler(adapter);
   5016	if (cause & SMB_F)
   5017		smb_intr_handler(adapter);
   5018	if (cause & XGMAC0_F)
   5019		xgmac_intr_handler(adapter, 0);
   5020	if (cause & XGMAC1_F)
   5021		xgmac_intr_handler(adapter, 1);
   5022	if (cause & XGMAC_KR0_F)
   5023		xgmac_intr_handler(adapter, 2);
   5024	if (cause & XGMAC_KR1_F)
   5025		xgmac_intr_handler(adapter, 3);
   5026	if (cause & PCIE_F)
   5027		pcie_intr_handler(adapter);
   5028	if (cause & MC_F)
   5029		mem_intr_handler(adapter, MEM_MC);
   5030	if (is_t5(adapter->params.chip) && (cause & MC1_F))
   5031		mem_intr_handler(adapter, MEM_MC1);
   5032	if (cause & EDC0_F)
   5033		mem_intr_handler(adapter, MEM_EDC0);
   5034	if (cause & EDC1_F)
   5035		mem_intr_handler(adapter, MEM_EDC1);
   5036	if (cause & LE_F)
   5037		le_intr_handler(adapter);
   5038	if (cause & TP_F)
   5039		tp_intr_handler(adapter);
   5040	if (cause & MA_F)
   5041		ma_intr_handler(adapter);
   5042	if (cause & PM_TX_F)
   5043		pmtx_intr_handler(adapter);
   5044	if (cause & PM_RX_F)
   5045		pmrx_intr_handler(adapter);
   5046	if (cause & ULP_RX_F)
   5047		ulprx_intr_handler(adapter);
   5048	if (cause & CPL_SWITCH_F)
   5049		cplsw_intr_handler(adapter);
   5050	if (cause & SGE_F)
   5051		sge_intr_handler(adapter);
   5052	if (cause & ULP_TX_F)
   5053		ulptx_intr_handler(adapter);
   5054
   5055	/* Clear the interrupts just processed for which we are the master. */
   5056	t4_write_reg(adapter, PL_INT_CAUSE_A, raw_cause & GLBL_INTR_MASK);
   5057	(void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
   5058	return 1;
   5059}
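
/* Usage sketch (hypothetical, not part of the driver): a minimal example of
 * calling t4_slow_intr_handler() from a non-data interrupt path.  The helper
 * name below is an illustration only.
 */
static inline bool example_service_nondata_intr(struct adapter *adap)
{
	/* t4_slow_intr_handler() returns 1 if any enabled global cause bit
	 * was serviced and cleared, 0 if there was nothing for us to do.
	 */
	return t4_slow_intr_handler(adap) != 0;
}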
   5060
   5061/**
   5062 *	t4_intr_enable - enable interrupts
   5063 *	@adapter: the adapter whose interrupts should be enabled
   5064 *
   5065 *	Enable PF-specific interrupts for the calling function and the top-level
   5066 *	interrupt concentrator for global interrupts.  Interrupts are already
   5067 *	enabled at each module,	here we just enable the roots of the interrupt
   5068 *	hierarchies.
   5069 *
   5070 *	Note: this function should be called only when the driver manages
    5071 *	non-PF-specific interrupts from the various HW modules.  Only one PCI
   5072 *	function at a time should be doing this.
   5073 */
   5074void t4_intr_enable(struct adapter *adapter)
   5075{
   5076	u32 val = 0;
   5077	u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
   5078	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
   5079			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
   5080
   5081	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
   5082		val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
   5083	t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
   5084		     ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
   5085		     ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
   5086		     ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
   5087		     ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
   5088		     ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
   5089		     DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
   5090	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
   5091	t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
   5092}
   5093
   5094/**
   5095 *	t4_intr_disable - disable interrupts
   5096 *	@adapter: the adapter whose interrupts should be disabled
   5097 *
   5098 *	Disable interrupts.  We only disable the top-level interrupt
   5099 *	concentrators.  The caller must be a PCI function managing global
   5100 *	interrupts.
   5101 */
   5102void t4_intr_disable(struct adapter *adapter)
   5103{
   5104	u32 whoami, pf;
   5105
   5106	if (pci_channel_offline(adapter->pdev))
   5107		return;
   5108
   5109	whoami = t4_read_reg(adapter, PL_WHOAMI_A);
   5110	pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
   5111			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
   5112
   5113	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
   5114	t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
   5115}
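
/* Usage sketch (hypothetical): the intended pairing of t4_intr_disable() and
 * t4_intr_enable() around a section that reprograms per-module interrupt
 * enables.  Only the PCI function that owns the global interrupt hierarchy
 * should do this; the helper name is illustrative.
 */
static inline void example_requiesce_global_intrs(struct adapter *adap)
{
	t4_intr_disable(adap);		/* mask the top-level concentrators */
	/* ... reprogram per-module interrupt enables here ... */
	t4_intr_enable(adap);		/* re-arm the PF and global roots */
}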
   5116
   5117unsigned int t4_chip_rss_size(struct adapter *adap)
   5118{
   5119	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
   5120		return RSS_NENTRIES;
   5121	else
   5122		return T6_RSS_NENTRIES;
   5123}
   5124
   5125/**
   5126 *	t4_config_rss_range - configure a portion of the RSS mapping table
   5127 *	@adapter: the adapter
   5128 *	@mbox: mbox to use for the FW command
   5129 *	@viid: virtual interface whose RSS subtable is to be written
   5130 *	@start: start entry in the table to write
   5131 *	@n: how many table entries to write
   5132 *	@rspq: values for the response queue lookup table
   5133 *	@nrspq: number of values in @rspq
   5134 *
   5135 *	Programs the selected part of the VI's RSS mapping table with the
   5136 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
   5137 *	until the full table range is populated.
   5138 *
   5139 *	The caller must ensure the values in @rspq are in the range allowed for
   5140 *	@viid.
   5141 */
   5142int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
   5143			int start, int n, const u16 *rspq, unsigned int nrspq)
   5144{
   5145	int ret;
   5146	const u16 *rsp = rspq;
   5147	const u16 *rsp_end = rspq + nrspq;
   5148	struct fw_rss_ind_tbl_cmd cmd;
   5149
   5150	memset(&cmd, 0, sizeof(cmd));
   5151	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
   5152			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
   5153			       FW_RSS_IND_TBL_CMD_VIID_V(viid));
   5154	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
   5155
   5156	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
   5157	while (n > 0) {
   5158		int nq = min(n, 32);
   5159		__be32 *qp = &cmd.iq0_to_iq2;
   5160
   5161		cmd.niqid = cpu_to_be16(nq);
   5162		cmd.startidx = cpu_to_be16(start);
   5163
   5164		start += nq;
   5165		n -= nq;
   5166
   5167		while (nq > 0) {
   5168			unsigned int v;
   5169
   5170			v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
   5171			if (++rsp >= rsp_end)
   5172				rsp = rspq;
   5173			v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
   5174			if (++rsp >= rsp_end)
   5175				rsp = rspq;
   5176			v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
   5177			if (++rsp >= rsp_end)
   5178				rsp = rspq;
   5179
   5180			*qp++ = cpu_to_be32(v);
   5181			nq -= 3;
   5182		}
   5183
   5184		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
   5185		if (ret)
   5186			return ret;
   5187	}
   5188	return 0;
   5189}
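
/* Usage sketch (hypothetical): spreading a small set of response queues over
 * a VI's RSS slice.  Because @nrspq may be smaller than @n, the four queue
 * IDs below repeat until the 128-entry range is filled.  The queue IDs and
 * the slice size are illustrative assumptions.
 */
static inline int example_fill_rss_slice(struct adapter *adap,
					 unsigned int viid)
{
	static const u16 rspq[] = { 0, 1, 2, 3 };	/* assumed IQ ids */

	return t4_config_rss_range(adap, adap->mbox, viid, 0, 128,
				   rspq, ARRAY_SIZE(rspq));
}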
   5190
   5191/**
   5192 *	t4_config_glbl_rss - configure the global RSS mode
   5193 *	@adapter: the adapter
   5194 *	@mbox: mbox to use for the FW command
   5195 *	@mode: global RSS mode
   5196 *	@flags: mode-specific flags
   5197 *
   5198 *	Sets the global RSS mode.
   5199 */
   5200int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
   5201		       unsigned int flags)
   5202{
   5203	struct fw_rss_glb_config_cmd c;
   5204
   5205	memset(&c, 0, sizeof(c));
   5206	c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
   5207				    FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
   5208	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
   5209	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
   5210		c.u.manual.mode_pkd =
   5211			cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
   5212	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
   5213		c.u.basicvirtual.mode_pkd =
   5214			cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
   5215		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
   5216	} else
   5217		return -EINVAL;
   5218	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
   5219}
   5220
   5221/**
   5222 *	t4_config_vi_rss - configure per VI RSS settings
   5223 *	@adapter: the adapter
   5224 *	@mbox: mbox to use for the FW command
   5225 *	@viid: the VI id
   5226 *	@flags: RSS flags
   5227 *	@defq: id of the default RSS queue for the VI.
   5228 *
   5229 *	Configures VI-specific RSS properties.
   5230 */
   5231int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
   5232		     unsigned int flags, unsigned int defq)
   5233{
   5234	struct fw_rss_vi_config_cmd c;
   5235
   5236	memset(&c, 0, sizeof(c));
   5237	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
   5238				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
   5239				   FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
   5240	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
   5241	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
   5242					FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
   5243	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
   5244}
   5245
   5246/* Read an RSS table row */
   5247static int rd_rss_row(struct adapter *adap, int row, u32 *val)
   5248{
   5249	t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
   5250	return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
   5251				   5, 0, val);
   5252}
   5253
   5254/**
   5255 *	t4_read_rss - read the contents of the RSS mapping table
   5256 *	@adapter: the adapter
   5257 *	@map: holds the contents of the RSS mapping table
   5258 *
   5259 *	Reads the contents of the RSS hash->queue mapping table.
   5260 */
   5261int t4_read_rss(struct adapter *adapter, u16 *map)
   5262{
   5263	int i, ret, nentries;
   5264	u32 val;
   5265
   5266	nentries = t4_chip_rss_size(adapter);
   5267	for (i = 0; i < nentries / 2; ++i) {
   5268		ret = rd_rss_row(adapter, i, &val);
   5269		if (ret)
   5270			return ret;
   5271		*map++ = LKPTBLQUEUE0_G(val);
   5272		*map++ = LKPTBLQUEUE1_G(val);
   5273	}
   5274	return 0;
   5275}
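
/* Usage sketch (hypothetical): the caller must provide one u16 per hash
 * bucket, i.e. t4_chip_rss_size() entries; the wrapper below only adds that
 * size check and is an illustration, not driver code.
 */
static inline int example_read_rss_table(struct adapter *adap, u16 *map,
					 unsigned int map_entries)
{
	if (map_entries < t4_chip_rss_size(adap))
		return -EINVAL;
	return t4_read_rss(adap, map);
}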
   5276
   5277static unsigned int t4_use_ldst(struct adapter *adap)
   5278{
   5279	return (adap->flags & CXGB4_FW_OK) && !adap->use_bd;
   5280}
   5281
   5282/**
   5283 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
   5284 * @adap: the adapter
   5285 * @cmd: TP fw ldst address space type
   5286 * @vals: where the indirect register values are stored/written
   5287 * @nregs: how many indirect registers to read/write
   5288 * @start_index: index of first indirect register to read/write
   5289 * @rw: Read (1) or Write (0)
   5290 * @sleep_ok: if true we may sleep while awaiting command completion
   5291 *
   5292 * Access TP indirect registers through LDST
   5293 */
   5294static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
   5295			    unsigned int nregs, unsigned int start_index,
   5296			    unsigned int rw, bool sleep_ok)
   5297{
   5298	int ret = 0;
   5299	unsigned int i;
   5300	struct fw_ldst_cmd c;
   5301
   5302	for (i = 0; i < nregs; i++) {
   5303		memset(&c, 0, sizeof(c));
   5304		c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
   5305						FW_CMD_REQUEST_F |
   5306						(rw ? FW_CMD_READ_F :
   5307						      FW_CMD_WRITE_F) |
   5308						FW_LDST_CMD_ADDRSPACE_V(cmd));
   5309		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
   5310
   5311		c.u.addrval.addr = cpu_to_be32(start_index + i);
   5312		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
   5313		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
   5314				      sleep_ok);
   5315		if (ret)
   5316			return ret;
   5317
   5318		if (rw)
   5319			vals[i] = be32_to_cpu(c.u.addrval.val);
   5320	}
   5321	return 0;
   5322}
   5323
   5324/**
   5325 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
   5326 * @adap: the adapter
   5327 * @reg_addr: Address Register
   5328 * @reg_data: Data register
   5329 * @buff: where the indirect register values are stored/written
   5330 * @nregs: how many indirect registers to read/write
   5331 * @start_index: index of first indirect register to read/write
   5332 * @rw: READ(1) or WRITE(0)
   5333 * @sleep_ok: if true we may sleep while awaiting command completion
   5334 *
    5335 * Read/Write TP indirect registers through LDST if possible;
    5336 * otherwise fall back to backdoor register access.
   5337 **/
   5338static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
   5339			      u32 *buff, u32 nregs, u32 start_index, int rw,
   5340			      bool sleep_ok)
   5341{
   5342	int rc = -EINVAL;
   5343	int cmd;
   5344
   5345	switch (reg_addr) {
   5346	case TP_PIO_ADDR_A:
   5347		cmd = FW_LDST_ADDRSPC_TP_PIO;
   5348		break;
   5349	case TP_TM_PIO_ADDR_A:
   5350		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
   5351		break;
   5352	case TP_MIB_INDEX_A:
   5353		cmd = FW_LDST_ADDRSPC_TP_MIB;
   5354		break;
   5355	default:
   5356		goto indirect_access;
   5357	}
   5358
   5359	if (t4_use_ldst(adap))
   5360		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
   5361				      sleep_ok);
   5362
   5363indirect_access:
   5364
   5365	if (rc) {
   5366		if (rw)
   5367			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
   5368					 start_index);
   5369		else
   5370			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
   5371					  start_index);
   5372	}
   5373}
   5374
   5375/**
   5376 * t4_tp_pio_read - Read TP PIO registers
   5377 * @adap: the adapter
   5378 * @buff: where the indirect register values are written
   5379 * @nregs: how many indirect registers to read
   5380 * @start_index: index of first indirect register to read
   5381 * @sleep_ok: if true we may sleep while awaiting command completion
   5382 *
   5383 * Read TP PIO Registers
   5384 **/
   5385void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
   5386		    u32 start_index, bool sleep_ok)
   5387{
   5388	t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
   5389			  start_index, 1, sleep_ok);
   5390}
   5391
   5392/**
   5393 * t4_tp_pio_write - Write TP PIO registers
   5394 * @adap: the adapter
   5395 * @buff: where the indirect register values are stored
   5396 * @nregs: how many indirect registers to write
   5397 * @start_index: index of first indirect register to write
   5398 * @sleep_ok: if true we may sleep while awaiting command completion
   5399 *
   5400 * Write TP PIO Registers
   5401 **/
   5402static void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
   5403			    u32 start_index, bool sleep_ok)
   5404{
   5405	t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
   5406			  start_index, 0, sleep_ok);
   5407}
   5408
   5409/**
   5410 * t4_tp_tm_pio_read - Read TP TM PIO registers
   5411 * @adap: the adapter
   5412 * @buff: where the indirect register values are written
   5413 * @nregs: how many indirect registers to read
   5414 * @start_index: index of first indirect register to read
   5415 * @sleep_ok: if true we may sleep while awaiting command completion
   5416 *
   5417 * Read TP TM PIO Registers
   5418 **/
   5419void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
   5420		       u32 start_index, bool sleep_ok)
   5421{
   5422	t4_tp_indirect_rw(adap, TP_TM_PIO_ADDR_A, TP_TM_PIO_DATA_A, buff,
   5423			  nregs, start_index, 1, sleep_ok);
   5424}
   5425
   5426/**
   5427 * t4_tp_mib_read - Read TP MIB registers
   5428 * @adap: the adapter
   5429 * @buff: where the indirect register values are written
   5430 * @nregs: how many indirect registers to read
   5431 * @start_index: index of first indirect register to read
   5432 * @sleep_ok: if true we may sleep while awaiting command completion
   5433 *
   5434 * Read TP MIB Registers
   5435 **/
   5436void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
   5437		    bool sleep_ok)
   5438{
   5439	t4_tp_indirect_rw(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, buff, nregs,
   5440			  start_index, 1, sleep_ok);
   5441}
   5442
   5443/**
   5444 *	t4_read_rss_key - read the global RSS key
   5445 *	@adap: the adapter
   5446 *	@key: 10-entry array holding the 320-bit RSS key
   5447 *      @sleep_ok: if true we may sleep while awaiting command completion
   5448 *
   5449 *	Reads the global 320-bit RSS key.
   5450 */
   5451void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
   5452{
   5453	t4_tp_pio_read(adap, key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
   5454}
   5455
   5456/**
   5457 *	t4_write_rss_key - program one of the RSS keys
   5458 *	@adap: the adapter
   5459 *	@key: 10-entry array holding the 320-bit RSS key
   5460 *	@idx: which RSS key to write
   5461 *      @sleep_ok: if true we may sleep while awaiting command completion
   5462 *
   5463 *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
   5464 *	0..15 the corresponding entry in the RSS key table is written,
   5465 *	otherwise the global RSS key is written.
   5466 */
   5467void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
   5468		      bool sleep_ok)
   5469{
   5470	u8 rss_key_addr_cnt = 16;
   5471	u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
   5472
    5473	/* T6 and later: KeyMode 3 (per-VF and per-VF scramble) allows access
    5474	 * to key addresses 16-63 by using KeyWrAddrX as the upper two bits
    5475	 * (index[5:4]) of the key table index.
   5476	 */
   5477	if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
   5478	    (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
   5479		rss_key_addr_cnt = 32;
   5480
   5481	t4_tp_pio_write(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
   5482
   5483	if (idx >= 0 && idx < rss_key_addr_cnt) {
   5484		if (rss_key_addr_cnt > 16)
   5485			t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
   5486				     KEYWRADDRX_V(idx >> 4) |
   5487				     T6_VFWRADDR_V(idx) | KEYWREN_F);
   5488		else
   5489			t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
   5490				     KEYWRADDR_V(idx) | KEYWREN_F);
   5491	}
   5492}
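
/* Usage sketch (hypothetical): programming RSS key slot 20 on a T6 part in
 * KeyMode 3.  With KEYEXTEND set, t4_write_rss_key() derives the latch
 * address from @idx as KEYWRADDRX = 20 >> 4 = 1 plus T6_VFWRADDR = 20.  The
 * all-zero key below is an illustrative placeholder.
 */
static inline void example_write_rss_key_slot20(struct adapter *adap)
{
	u32 key[10] = { 0 };		/* 320-bit key */

	t4_write_rss_key(adap, key, 20, true);
}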
   5493
   5494/**
   5495 *	t4_read_rss_pf_config - read PF RSS Configuration Table
   5496 *	@adapter: the adapter
   5497 *	@index: the entry in the PF RSS table to read
   5498 *	@valp: where to store the returned value
   5499 *      @sleep_ok: if true we may sleep while awaiting command completion
   5500 *
   5501 *	Reads the PF RSS Configuration Table at the specified index and returns
   5502 *	the value found there.
   5503 */
   5504void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
   5505			   u32 *valp, bool sleep_ok)
   5506{
   5507	t4_tp_pio_read(adapter, valp, 1, TP_RSS_PF0_CONFIG_A + index, sleep_ok);
   5508}
   5509
   5510/**
   5511 *	t4_read_rss_vf_config - read VF RSS Configuration Table
   5512 *	@adapter: the adapter
   5513 *	@index: the entry in the VF RSS table to read
   5514 *	@vfl: where to store the returned VFL
   5515 *	@vfh: where to store the returned VFH
   5516 *      @sleep_ok: if true we may sleep while awaiting command completion
   5517 *
   5518 *	Reads the VF RSS Configuration Table at the specified index and returns
   5519 *	the (VFL, VFH) values found there.
   5520 */
   5521void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
   5522			   u32 *vfl, u32 *vfh, bool sleep_ok)
   5523{
   5524	u32 vrt, mask, data;
   5525
   5526	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
   5527		mask = VFWRADDR_V(VFWRADDR_M);
   5528		data = VFWRADDR_V(index);
   5529	} else {
    5530		mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
    5531		data = T6_VFWRADDR_V(index);
   5532	}
   5533
   5534	/* Request that the index'th VF Table values be read into VFL/VFH.
   5535	 */
   5536	vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
   5537	vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
   5538	vrt |= data | VFRDEN_F;
   5539	t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
   5540
   5541	/* Grab the VFL/VFH values ...
   5542	 */
   5543	t4_tp_pio_read(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, sleep_ok);
   5544	t4_tp_pio_read(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, sleep_ok);
   5545}
   5546
   5547/**
   5548 *	t4_read_rss_pf_map - read PF RSS Map
   5549 *	@adapter: the adapter
   5550 *      @sleep_ok: if true we may sleep while awaiting command completion
   5551 *
   5552 *	Reads the PF RSS Map register and returns its value.
   5553 */
   5554u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
   5555{
   5556	u32 pfmap;
   5557
   5558	t4_tp_pio_read(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, sleep_ok);
   5559	return pfmap;
   5560}
   5561
   5562/**
   5563 *	t4_read_rss_pf_mask - read PF RSS Mask
   5564 *	@adapter: the adapter
   5565 *      @sleep_ok: if true we may sleep while awaiting command completion
   5566 *
   5567 *	Reads the PF RSS Mask register and returns its value.
   5568 */
   5569u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
   5570{
   5571	u32 pfmask;
   5572
   5573	t4_tp_pio_read(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, sleep_ok);
   5574	return pfmask;
   5575}
   5576
   5577/**
   5578 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
   5579 *	@adap: the adapter
   5580 *	@v4: holds the TCP/IP counter values
   5581 *	@v6: holds the TCP/IPv6 counter values
   5582 *      @sleep_ok: if true we may sleep while awaiting command completion
   5583 *
   5584 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
   5585 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
   5586 */
   5587void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
   5588			 struct tp_tcp_stats *v6, bool sleep_ok)
   5589{
   5590	u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
   5591
   5592#define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
   5593#define STAT(x)     val[STAT_IDX(x)]
   5594#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
   5595
   5596	if (v4) {
   5597		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
   5598			       TP_MIB_TCP_OUT_RST_A, sleep_ok);
   5599		v4->tcp_out_rsts = STAT(OUT_RST);
   5600		v4->tcp_in_segs  = STAT64(IN_SEG);
   5601		v4->tcp_out_segs = STAT64(OUT_SEG);
   5602		v4->tcp_retrans_segs = STAT64(RXT_SEG);
   5603	}
   5604	if (v6) {
   5605		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
   5606			       TP_MIB_TCP_V6OUT_RST_A, sleep_ok);
   5607		v6->tcp_out_rsts = STAT(OUT_RST);
   5608		v6->tcp_in_segs  = STAT64(IN_SEG);
   5609		v6->tcp_out_segs = STAT64(OUT_SEG);
   5610		v6->tcp_retrans_segs = STAT64(RXT_SEG);
   5611	}
   5612#undef STAT64
   5613#undef STAT
   5614#undef STAT_IDX
   5615}
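
/* Usage sketch (hypothetical): reading only the TCP/IPv4 MIB block.  Passing
 * %NULL for @v6 skips the IPv6 counters, as documented above; the helper is
 * an illustration only.
 */
static inline u64 example_tcp4_retrans_segs(struct adapter *adap)
{
	struct tp_tcp_stats v4;

	t4_tp_get_tcp_stats(adap, &v4, NULL, true);
	return v4.tcp_retrans_segs;
}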
   5616
   5617/**
   5618 *	t4_tp_get_err_stats - read TP's error MIB counters
   5619 *	@adap: the adapter
   5620 *	@st: holds the counter values
   5621 *      @sleep_ok: if true we may sleep while awaiting command completion
   5622 *
   5623 *	Returns the values of TP's error counters.
   5624 */
   5625void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
   5626			 bool sleep_ok)
   5627{
   5628	int nchan = adap->params.arch.nchan;
   5629
   5630	t4_tp_mib_read(adap, st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A,
   5631		       sleep_ok);
   5632	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A,
   5633		       sleep_ok);
   5634	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A,
   5635		       sleep_ok);
   5636	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
   5637		       TP_MIB_TNL_CNG_DROP_0_A, sleep_ok);
   5638	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
   5639		       TP_MIB_OFD_CHN_DROP_0_A, sleep_ok);
   5640	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A,
   5641		       sleep_ok);
   5642	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
   5643		       TP_MIB_OFD_VLN_DROP_0_A, sleep_ok);
   5644	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
   5645		       TP_MIB_TCP_V6IN_ERR_0_A, sleep_ok);
   5646	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A,
   5647		       sleep_ok);
   5648}
   5649
   5650/**
   5651 *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
   5652 *	@adap: the adapter
   5653 *	@st: holds the counter values
   5654 *      @sleep_ok: if true we may sleep while awaiting command completion
   5655 *
   5656 *	Returns the values of TP's CPL counters.
   5657 */
   5658void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
   5659			 bool sleep_ok)
   5660{
   5661	int nchan = adap->params.arch.nchan;
   5662
   5663	t4_tp_mib_read(adap, st->req, nchan, TP_MIB_CPL_IN_REQ_0_A, sleep_ok);
   5664
   5665	t4_tp_mib_read(adap, st->rsp, nchan, TP_MIB_CPL_OUT_RSP_0_A, sleep_ok);
   5666}
   5667
   5668/**
   5669 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
   5670 *	@adap: the adapter
   5671 *	@st: holds the counter values
   5672 *      @sleep_ok: if true we may sleep while awaiting command completion
   5673 *
   5674 *	Returns the values of TP's RDMA counters.
   5675 */
   5676void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
   5677			  bool sleep_ok)
   5678{
   5679	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, TP_MIB_RQE_DFR_PKT_A,
   5680		       sleep_ok);
   5681}
   5682
   5683/**
   5684 *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
   5685 *	@adap: the adapter
   5686 *	@idx: the port index
   5687 *	@st: holds the counter values
   5688 *      @sleep_ok: if true we may sleep while awaiting command completion
   5689 *
   5690 *	Returns the values of TP's FCoE counters for the selected port.
   5691 */
   5692void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
   5693		       struct tp_fcoe_stats *st, bool sleep_ok)
   5694{
   5695	u32 val[2];
   5696
   5697	t4_tp_mib_read(adap, &st->frames_ddp, 1, TP_MIB_FCOE_DDP_0_A + idx,
   5698		       sleep_ok);
   5699
   5700	t4_tp_mib_read(adap, &st->frames_drop, 1,
   5701		       TP_MIB_FCOE_DROP_0_A + idx, sleep_ok);
   5702
   5703	t4_tp_mib_read(adap, val, 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx,
   5704		       sleep_ok);
   5705
   5706	st->octets_ddp = ((u64)val[0] << 32) | val[1];
   5707}
   5708
   5709/**
   5710 *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
   5711 *	@adap: the adapter
   5712 *	@st: holds the counter values
   5713 *      @sleep_ok: if true we may sleep while awaiting command completion
   5714 *
   5715 *	Returns the values of TP's counters for non-TCP directly-placed packets.
   5716 */
   5717void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
   5718		      bool sleep_ok)
   5719{
   5720	u32 val[4];
   5721
   5722	t4_tp_mib_read(adap, val, 4, TP_MIB_USM_PKTS_A, sleep_ok);
   5723	st->frames = val[0];
   5724	st->drops = val[1];
   5725	st->octets = ((u64)val[2] << 32) | val[3];
   5726}
   5727
   5728/**
   5729 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
   5730 *	@adap: the adapter
   5731 *	@mtus: where to store the MTU values
   5732 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
   5733 *
   5734 *	Reads the HW path MTU table.
   5735 */
   5736void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
   5737{
   5738	u32 v;
   5739	int i;
   5740
   5741	for (i = 0; i < NMTUS; ++i) {
   5742		t4_write_reg(adap, TP_MTU_TABLE_A,
   5743			     MTUINDEX_V(0xff) | MTUVALUE_V(i));
   5744		v = t4_read_reg(adap, TP_MTU_TABLE_A);
   5745		mtus[i] = MTUVALUE_G(v);
   5746		if (mtu_log)
   5747			mtu_log[i] = MTUWIDTH_G(v);
   5748	}
   5749}
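
/* Usage sketch (hypothetical): @mtus must hold NMTUS entries, and @mtu_log
 * may be %NULL when only the MTU values are needed.
 */
static inline void example_read_mtus(struct adapter *adap, u16 *mtus)
{
	t4_read_mtu_tbl(adap, mtus, NULL);	/* skip the base-2 log column */
}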
   5750
   5751/**
   5752 *	t4_read_cong_tbl - reads the congestion control table
   5753 *	@adap: the adapter
   5754 *	@incr: where to store the alpha values
   5755 *
   5756 *	Reads the additive increments programmed into the HW congestion
   5757 *	control table.
   5758 */
   5759void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
   5760{
   5761	unsigned int mtu, w;
   5762
   5763	for (mtu = 0; mtu < NMTUS; ++mtu)
   5764		for (w = 0; w < NCCTRL_WIN; ++w) {
   5765			t4_write_reg(adap, TP_CCTRL_TABLE_A,
   5766				     ROWINDEX_V(0xffff) | (mtu << 5) | w);
   5767			incr[mtu][w] = (u16)t4_read_reg(adap,
   5768						TP_CCTRL_TABLE_A) & 0x1fff;
   5769		}
   5770}
   5771
   5772/**
   5773 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
   5774 *	@adap: the adapter
   5775 *	@addr: the indirect TP register address
   5776 *	@mask: specifies the field within the register to modify
   5777 *	@val: new value for the field
   5778 *
   5779 *	Sets a field of an indirect TP register to the given value.
   5780 */
   5781void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
   5782			    unsigned int mask, unsigned int val)
   5783{
   5784	t4_write_reg(adap, TP_PIO_ADDR_A, addr);
   5785	val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
   5786	t4_write_reg(adap, TP_PIO_DATA_A, val);
   5787}
   5788
   5789/**
   5790 *	init_cong_ctrl - initialize congestion control parameters
   5791 *	@a: the alpha values for congestion control
   5792 *	@b: the beta values for congestion control
   5793 *
   5794 *	Initialize the congestion control parameters.
   5795 */
   5796static void init_cong_ctrl(unsigned short *a, unsigned short *b)
   5797{
   5798	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
   5799	a[9] = 2;
   5800	a[10] = 3;
   5801	a[11] = 4;
   5802	a[12] = 5;
   5803	a[13] = 6;
   5804	a[14] = 7;
   5805	a[15] = 8;
   5806	a[16] = 9;
   5807	a[17] = 10;
   5808	a[18] = 14;
   5809	a[19] = 17;
   5810	a[20] = 21;
   5811	a[21] = 25;
   5812	a[22] = 30;
   5813	a[23] = 35;
   5814	a[24] = 45;
   5815	a[25] = 60;
   5816	a[26] = 80;
   5817	a[27] = 100;
   5818	a[28] = 200;
   5819	a[29] = 300;
   5820	a[30] = 400;
   5821	a[31] = 500;
   5822
   5823	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
   5824	b[9] = b[10] = 1;
   5825	b[11] = b[12] = 2;
   5826	b[13] = b[14] = b[15] = b[16] = 3;
   5827	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
   5828	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
   5829	b[28] = b[29] = 6;
   5830	b[30] = b[31] = 7;
   5831}
   5832
   5833/* The minimum additive increment value for the congestion control table */
   5834#define CC_MIN_INCR 2U
   5835
   5836/**
   5837 *	t4_load_mtus - write the MTU and congestion control HW tables
   5838 *	@adap: the adapter
   5839 *	@mtus: the values for the MTU table
   5840 *	@alpha: the values for the congestion control alpha parameter
   5841 *	@beta: the values for the congestion control beta parameter
   5842 *
   5843 *	Write the HW MTU table with the supplied MTUs and the high-speed
   5844 *	congestion control table with the supplied alpha, beta, and MTUs.
   5845 *	We write the two tables together because the additive increments
   5846 *	depend on the MTUs.
   5847 */
   5848void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
   5849		  const unsigned short *alpha, const unsigned short *beta)
   5850{
   5851	static const unsigned int avg_pkts[NCCTRL_WIN] = {
   5852		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
   5853		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
   5854		28672, 40960, 57344, 81920, 114688, 163840, 229376
   5855	};
   5856
   5857	unsigned int i, w;
   5858
   5859	for (i = 0; i < NMTUS; ++i) {
   5860		unsigned int mtu = mtus[i];
   5861		unsigned int log2 = fls(mtu);
   5862
   5863		if (!(mtu & ((1 << log2) >> 2)))     /* round */
   5864			log2--;
   5865		t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
   5866			     MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
   5867
   5868		for (w = 0; w < NCCTRL_WIN; ++w) {
   5869			unsigned int inc;
   5870
   5871			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
   5872				  CC_MIN_INCR);
   5873
   5874			t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
   5875				     (w << 16) | (beta[w] << 13) | inc);
   5876		}
   5877	}
   5878}
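
/* Worked example (illustrative): for mtu = 1500, fls(1500) = 11 and the
 * rounding test 1500 & ((1 << 11) >> 2) = 1500 & 512 is zero, so log2 is
 * decremented to 10 and MTUWIDTH is written as 10.  For a congestion window
 * w with alpha[w] = 2 and avg_pkts[w] = 20 the additive increment becomes
 *
 *	inc = max((1500 - 40) * 2 / 20, CC_MIN_INCR) = max(146, 2) = 146
 */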
   5879
   5880/* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
   5881 * clocks.  The formula is
   5882 *
   5883 * bytes/s = bytes256 * 256 * ClkFreq / 4096
   5884 *
   5885 * which is equivalent to
   5886 *
   5887 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
   5888 */
   5889static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
   5890{
   5891	u64 v = bytes256 * adap->params.vpd.cclk;
   5892
   5893	return v * 62 + v / 2;
   5894}
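
/* Worked example (illustrative), assuming adap->params.vpd.cclk holds the
 * core clock in kHz (clocks per millisecond): with cclk = 500000 (500 MHz)
 * and bytes256 = 4, v = 4 * 500000 = 2,000,000 and
 *
 *	bytes/s = v * 62 + v / 2 = 125,000,000	(i.e. 62.5 * v, ~125 MB/s)
 */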
   5895
   5896/**
   5897 *	t4_get_chan_txrate - get the current per channel Tx rates
   5898 *	@adap: the adapter
   5899 *	@nic_rate: rates for NIC traffic
   5900 *	@ofld_rate: rates for offloaded traffic
   5901 *
   5902 *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
   5903 *	for each channel.
   5904 */
   5905void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
   5906{
   5907	u32 v;
   5908
   5909	v = t4_read_reg(adap, TP_TX_TRATE_A);
   5910	nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
   5911	nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
   5912	if (adap->params.arch.nchan == NCHAN) {
   5913		nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
   5914		nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
   5915	}
   5916
   5917	v = t4_read_reg(adap, TP_TX_ORATE_A);
   5918	ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
   5919	ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
   5920	if (adap->params.arch.nchan == NCHAN) {
   5921		ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
   5922		ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
   5923	}
   5924}
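
/* Usage sketch (hypothetical): the output arrays must hold NCHAN entries;
 * on 2-channel parts only entries 0 and 1 are written.
 */
static inline void example_read_chan_txrates(struct adapter *adap)
{
	u64 nic[NCHAN], ofld[NCHAN];

	t4_get_chan_txrate(adap, nic, ofld);
}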
   5925
   5926/**
   5927 *	t4_set_trace_filter - configure one of the tracing filters
   5928 *	@adap: the adapter
   5929 *	@tp: the desired trace filter parameters
   5930 *	@idx: which filter to configure
   5931 *	@enable: whether to enable or disable the filter
   5932 *
   5933 *	Configures one of the tracing filters available in HW.  If @enable is
    5934 *	%0 @tp is not examined and may be %NULL.  The user is responsible for
    5935 *	setting the single/multiple trace mode by writing to the MPS_TRC_CFG_A register.
   5936 */
   5937int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
   5938			int idx, int enable)
   5939{
   5940	int i, ofst = idx * 4;
   5941	u32 data_reg, mask_reg, cfg;
   5942
   5943	if (!enable) {
   5944		t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
   5945		return 0;
   5946	}
   5947
   5948	cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
   5949	if (cfg & TRCMULTIFILTER_F) {
   5950		/* If multiple tracers are enabled, then maximum
   5951		 * capture size is 2.5KB (FIFO size of a single channel)
   5952		 * minus 2 flits for CPL_TRACE_PKT header.
   5953		 */
   5954		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
   5955			return -EINVAL;
   5956	} else {
    5957		/* If multiple tracers are disabled, a maximum packet capture
    5958		 * size of 9600 bytes is recommended to avoid deadlocks.  In this
    5959		 * mode, only trace0 can be enabled and running.
   5960		 */
   5961		if (tp->snap_len > 9600 || idx)
   5962			return -EINVAL;
   5963	}
   5964
   5965	if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
   5966	    tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
   5967	    tp->min_len > TFMINPKTSIZE_M)
   5968		return -EINVAL;
   5969
   5970	/* stop the tracer we'll be changing */
   5971	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
   5972
   5973	idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
   5974	data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
   5975	mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
   5976
   5977	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
   5978		t4_write_reg(adap, data_reg, tp->data[i]);
   5979		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
   5980	}
   5981	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
   5982		     TFCAPTUREMAX_V(tp->snap_len) |
   5983		     TFMINPKTSIZE_V(tp->min_len));
   5984	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
   5985		     TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
   5986		     (is_t4(adap->params.chip) ?
   5987		     TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
   5988		     T5_TFPORT_V(tp->port) | T5_TFEN_F |
   5989		     T5_TFINVERTMATCH_V(tp->invert)));
   5990
   5991	return 0;
   5992}
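
/* Usage sketch (hypothetical): enabling trace filter 0 with an all-zero
 * match (every packet on port 0 matches) and the maximum recommended
 * single-tracer capture size.  All values are illustrative assumptions.
 */
static inline int example_enable_trace0(struct adapter *adap)
{
	struct trace_params tp;

	memset(&tp, 0, sizeof(tp));
	tp.snap_len = 9600;	/* max recommended in single-tracer mode */
	tp.port = 0;
	return t4_set_trace_filter(adap, &tp, 0, 1);
}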
   5993
   5994/**
   5995 *	t4_get_trace_filter - query one of the tracing filters
   5996 *	@adap: the adapter
   5997 *	@tp: the current trace filter parameters
   5998 *	@idx: which trace filter to query
   5999 *	@enabled: non-zero if the filter is enabled
   6000 *
   6001 *	Returns the current settings of one of the HW tracing filters.
   6002 */
   6003void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
   6004			 int *enabled)
   6005{
   6006	u32 ctla, ctlb;
   6007	int i, ofst = idx * 4;
   6008	u32 data_reg, mask_reg;
   6009
   6010	ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
   6011	ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
   6012
   6013	if (is_t4(adap->params.chip)) {
   6014		*enabled = !!(ctla & TFEN_F);
   6015		tp->port =  TFPORT_G(ctla);
   6016		tp->invert = !!(ctla & TFINVERTMATCH_F);
   6017	} else {
   6018		*enabled = !!(ctla & T5_TFEN_F);
   6019		tp->port = T5_TFPORT_G(ctla);
   6020		tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
   6021	}
   6022	tp->snap_len = TFCAPTUREMAX_G(ctlb);
   6023	tp->min_len = TFMINPKTSIZE_G(ctlb);
   6024	tp->skip_ofst = TFOFFSET_G(ctla);
   6025	tp->skip_len = TFLENGTH_G(ctla);
   6026
   6027	ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
   6028	data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
   6029	mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
   6030
   6031	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
   6032		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
   6033		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
   6034	}
   6035}
   6036
   6037/**
   6038 *	t4_pmtx_get_stats - returns the HW stats from PMTX
   6039 *	@adap: the adapter
   6040 *	@cnt: where to store the count statistics
   6041 *	@cycles: where to store the cycle statistics
   6042 *
   6043 *	Returns performance statistics from PMTX.
   6044 */
   6045void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
   6046{
   6047	int i;
   6048	u32 data[2];
   6049
   6050	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
   6051		t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
   6052		cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
   6053		if (is_t4(adap->params.chip)) {
   6054			cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
   6055		} else {
   6056			t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
   6057					 PM_TX_DBG_DATA_A, data, 2,
   6058					 PM_TX_DBG_STAT_MSB_A);
   6059			cycles[i] = (((u64)data[0] << 32) | data[1]);
   6060		}
   6061	}
   6062}
   6063
   6064/**
   6065 *	t4_pmrx_get_stats - returns the HW stats from PMRX
   6066 *	@adap: the adapter
   6067 *	@cnt: where to store the count statistics
   6068 *	@cycles: where to store the cycle statistics
   6069 *
   6070 *	Returns performance statistics from PMRX.
   6071 */
   6072void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
   6073{
   6074	int i;
   6075	u32 data[2];
   6076
   6077	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
   6078		t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
   6079		cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
   6080		if (is_t4(adap->params.chip)) {
   6081			cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
   6082		} else {
   6083			t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
   6084					 PM_RX_DBG_DATA_A, data, 2,
   6085					 PM_RX_DBG_STAT_MSB_A);
   6086			cycles[i] = (((u64)data[0] << 32) | data[1]);
   6087		}
   6088	}
   6089}
   6090
   6091/**
   6092 *	compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
   6093 *	@adapter: the adapter
   6094 *	@pidx: the port index
   6095 *
   6096 *	Computes and returns a bitmap indicating which MPS buffer groups are
   6097 *	associated with the given Port.  Bit i is set if buffer group i is
   6098 *	used by the Port.
   6099 */
   6100static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
   6101					      int pidx)
   6102{
   6103	unsigned int chip_version, nports;
   6104
   6105	chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
   6106	nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
   6107
   6108	switch (chip_version) {
   6109	case CHELSIO_T4:
   6110	case CHELSIO_T5:
   6111		switch (nports) {
   6112		case 1: return 0xf;
   6113		case 2: return 3 << (2 * pidx);
   6114		case 4: return 1 << pidx;
   6115		}
   6116		break;
   6117
   6118	case CHELSIO_T6:
   6119		switch (nports) {
   6120		case 2: return 1 << (2 * pidx);
   6121		}
   6122		break;
   6123	}
   6124
   6125	dev_err(adapter->pdev_dev, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
   6126		chip_version, nports);
   6127
   6128	return 0;
   6129}
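
/* Worked example (illustrative): on a 2-port T4/T5 the map is
 * 3 << (2 * pidx), so port 0 owns buffer groups {0,1} (0x3) and port 1 owns
 * {2,3} (0xc).  On a 2-port T6 the map is 1 << (2 * pidx), i.e. 0x1 and 0x4.
 */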
   6130
   6131/**
   6132 *	t4_get_mps_bg_map - return the buffer groups associated with a port
   6133 *	@adapter: the adapter
   6134 *	@pidx: the port index
   6135 *
   6136 *	Returns a bitmap indicating which MPS buffer groups are associated
   6137 *	with the given Port.  Bit i is set if buffer group i is used by the
   6138 *	Port.
   6139 */
   6140unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
   6141{
   6142	u8 *mps_bg_map;
   6143	unsigned int nports;
   6144
   6145	nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
   6146	if (pidx >= nports) {
   6147		CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n",
   6148			pidx, nports);
   6149		return 0;
   6150	}
   6151
   6152	/* If we've already retrieved/computed this, just return the result.
   6153	 */
   6154	mps_bg_map = adapter->params.mps_bg_map;
   6155	if (mps_bg_map[pidx])
   6156		return mps_bg_map[pidx];
   6157
   6158	/* Newer Firmware can tell us what the MPS Buffer Group Map is.
   6159	 * If we're talking to such Firmware, let it tell us.  If the new
    6160	 * API isn't supported, fall back to the old hardcoded way.  The value
    6161	 * obtained from Firmware is encoded in the following format:
   6162	 *
   6163	 * val = (( MPSBGMAP[Port 3] << 24 ) |
   6164	 *        ( MPSBGMAP[Port 2] << 16 ) |
   6165	 *        ( MPSBGMAP[Port 1] <<  8 ) |
   6166	 *        ( MPSBGMAP[Port 0] <<  0 ))
   6167	 */
   6168	if (adapter->flags & CXGB4_FW_OK) {
   6169		u32 param, val;
   6170		int ret;
   6171
   6172		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
   6173			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_MPSBGMAP));
   6174		ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
   6175					 0, 1, &param, &val);
   6176		if (!ret) {
   6177			int p;
   6178
   6179			/* Store the BG Map for all of the Ports in order to
   6180			 * avoid more calls to the Firmware in the future.
   6181			 */
   6182			for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
   6183				mps_bg_map[p] = val & 0xff;
   6184
   6185			return mps_bg_map[pidx];
   6186		}
   6187	}
   6188
   6189	/* Either we're not talking to the Firmware or we're dealing with
   6190	 * older Firmware which doesn't support the new API to get the MPS
   6191	 * Buffer Group Map.  Fall back to computing it ourselves.
   6192	 */
   6193	mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
   6194	return mps_bg_map[pidx];
   6195}
   6196
   6197/**
   6198 *      t4_get_tp_e2c_map - return the E2C channel map associated with a port
   6199 *      @adapter: the adapter
   6200 *      @pidx: the port index
   6201 */
   6202static unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
   6203{
   6204	unsigned int nports;
   6205	u32 param, val = 0;
   6206	int ret;
   6207
   6208	nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
   6209	if (pidx >= nports) {
   6210		CH_WARN(adapter, "TP E2C Channel Port Index %d >= Nports %d\n",
   6211			pidx, nports);
   6212		return 0;
   6213	}
   6214
   6215	/* FW version >= 1.16.44.0 can determine E2C channel map using
   6216	 * FW_PARAMS_PARAM_DEV_TPCHMAP API.
   6217	 */
   6218	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
   6219		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPCHMAP));
   6220	ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
   6221				 0, 1, &param, &val);
   6222	if (!ret)
   6223		return (val >> (8 * pidx)) & 0xff;
   6224
   6225	return 0;
   6226}
   6227
   6228/**
   6229 *	t4_get_tp_ch_map - return TP ingress channels associated with a port
   6230 *	@adap: the adapter
   6231 *	@pidx: the port index
   6232 *
   6233 *	Returns a bitmap indicating which TP Ingress Channels are associated
   6234 *	with a given Port.  Bit i is set if TP Ingress Channel i is used by
   6235 *	the Port.
   6236 */
   6237unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx)
   6238{
   6239	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
   6240	unsigned int nports = 1 << NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
   6241
   6242	if (pidx >= nports) {
   6243		dev_warn(adap->pdev_dev, "TP Port Index %d >= Nports %d\n",
   6244			 pidx, nports);
   6245		return 0;
   6246	}
   6247
   6248	switch (chip_version) {
   6249	case CHELSIO_T4:
   6250	case CHELSIO_T5:
    6251		/* Note that these happen to be the same values as the MPS
   6252		 * Buffer Group Map for these Chips.  But we replicate the code
   6253		 * here because they're really separate concepts.
   6254		 */
   6255		switch (nports) {
   6256		case 1: return 0xf;
   6257		case 2: return 3 << (2 * pidx);
   6258		case 4: return 1 << pidx;
   6259		}
   6260		break;
   6261
   6262	case CHELSIO_T6:
   6263		switch (nports) {
   6264		case 1:
   6265		case 2: return 1 << pidx;
   6266		}
   6267		break;
   6268	}
   6269
   6270	dev_err(adap->pdev_dev, "Need TP Channel Map for Chip %0x, Nports %d\n",
   6271		chip_version, nports);
   6272	return 0;
   6273}
   6274
   6275/**
   6276 *      t4_get_port_type_description - return Port Type string description
   6277 *      @port_type: firmware Port Type enumeration
   6278 */
   6279const char *t4_get_port_type_description(enum fw_port_type port_type)
   6280{
   6281	static const char *const port_type_description[] = {
   6282		"Fiber_XFI",
   6283		"Fiber_XAUI",
   6284		"BT_SGMII",
   6285		"BT_XFI",
   6286		"BT_XAUI",
   6287		"KX4",
   6288		"CX4",
   6289		"KX",
   6290		"KR",
   6291		"SFP",
   6292		"BP_AP",
   6293		"BP4_AP",
   6294		"QSFP_10G",
   6295		"QSA",
   6296		"QSFP",
   6297		"BP40_BA",
   6298		"KR4_100G",
   6299		"CR4_QSFP",
   6300		"CR_QSFP",
   6301		"CR2_QSFP",
   6302		"SFP28",
   6303		"KR_SFP28",
   6304		"KR_XLAUI"
   6305	};
   6306
   6307	if (port_type < ARRAY_SIZE(port_type_description))
   6308		return port_type_description[port_type];
   6309	return "UNKNOWN";
   6310}
   6311
   6312/**
   6313 *      t4_get_port_stats_offset - collect port stats relative to a previous
   6314 *                                 snapshot
   6315 *      @adap: The adapter
   6316 *      @idx: The port
   6317 *      @stats: Current stats to fill
   6318 *      @offset: Previous stats snapshot
   6319 */
   6320void t4_get_port_stats_offset(struct adapter *adap, int idx,
   6321			      struct port_stats *stats,
   6322			      struct port_stats *offset)
   6323{
   6324	u64 *s, *o;
   6325	int i;
   6326
   6327	t4_get_port_stats(adap, idx, stats);
   6328	for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
   6329			i < (sizeof(struct port_stats) / sizeof(u64));
   6330			i++, s++, o++)
   6331		*s -= *o;
   6332}
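
/* Usage sketch (hypothetical): the snapshot/delta pattern this helper is
 * meant for.  A caller keeps a baseline captured earlier and receives the
 * counters accumulated since then; the names are illustrative.
 */
static inline void example_port_stats_since(struct adapter *adap, int port,
					    struct port_stats *baseline,
					    struct port_stats *delta)
{
	/* *delta = current HW counters - *baseline */
	t4_get_port_stats_offset(adap, port, delta, baseline);
}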
   6333
   6334/**
   6335 *	t4_get_port_stats - collect port statistics
   6336 *	@adap: the adapter
   6337 *	@idx: the port index
   6338 *	@p: the stats structure to fill
   6339 *
   6340 *	Collect statistics related to the given port from HW.
   6341 */
   6342void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
   6343{
   6344	u32 bgmap = t4_get_mps_bg_map(adap, idx);
   6345	u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);
   6346
   6347#define GET_STAT(name) \
   6348	t4_read_reg64(adap, \
   6349	(is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
   6350	T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
   6351#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
   6352
   6353	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
   6354	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
   6355	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
   6356	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
   6357	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
   6358	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
   6359	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
   6360	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
   6361	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
   6362	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
   6363	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
   6364	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
   6365	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
   6366	p->tx_drop             = GET_STAT(TX_PORT_DROP);
   6367	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
   6368	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
   6369	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
   6370	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
   6371	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
   6372	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
   6373	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
   6374	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
   6375	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
   6376
   6377	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
   6378		if (stat_ctl & COUNTPAUSESTATTX_F)
   6379			p->tx_frames_64 -= p->tx_pause;
   6380		if (stat_ctl & COUNTPAUSEMCTX_F)
   6381			p->tx_mcast_frames -= p->tx_pause;
   6382	}
   6383	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
   6384	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
   6385	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
   6386	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
   6387	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
   6388	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
   6389	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
   6390	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
   6391	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
   6392	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
   6393	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
   6394	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
   6395	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
   6396	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
   6397	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
   6398	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
   6399	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
   6400	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
   6401	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
   6402	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
   6403	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
   6404	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
   6405	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
   6406	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
   6407	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
   6408	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
   6409	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
   6410
   6411	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
   6412		if (stat_ctl & COUNTPAUSESTATRX_F)
   6413			p->rx_frames_64 -= p->rx_pause;
   6414		if (stat_ctl & COUNTPAUSEMCRX_F)
   6415			p->rx_mcast_frames -= p->rx_pause;
   6416	}
   6417
   6418	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
   6419	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
   6420	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
   6421	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
   6422	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
   6423	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
   6424	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
   6425	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
   6426
   6427#undef GET_STAT
   6428#undef GET_STAT_COM
   6429}
   6430
   6431/**
   6432 *	t4_get_lb_stats - collect loopback port statistics
   6433 *	@adap: the adapter
   6434 *	@idx: the loopback port index
   6435 *	@p: the stats structure to fill
   6436 *
   6437 *	Return HW statistics for the given loopback port.
   6438 */
   6439void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
   6440{
   6441	u32 bgmap = t4_get_mps_bg_map(adap, idx);
   6442
   6443#define GET_STAT(name) \
   6444	t4_read_reg64(adap, \
   6445	(is_t4(adap->params.chip) ? \
   6446	PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
   6447	T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
   6448#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
   6449
   6450	p->octets           = GET_STAT(BYTES);
   6451	p->frames           = GET_STAT(FRAMES);
   6452	p->bcast_frames     = GET_STAT(BCAST);
   6453	p->mcast_frames     = GET_STAT(MCAST);
   6454	p->ucast_frames     = GET_STAT(UCAST);
   6455	p->error_frames     = GET_STAT(ERROR);
   6456
   6457	p->frames_64        = GET_STAT(64B);
   6458	p->frames_65_127    = GET_STAT(65B_127B);
   6459	p->frames_128_255   = GET_STAT(128B_255B);
   6460	p->frames_256_511   = GET_STAT(256B_511B);
   6461	p->frames_512_1023  = GET_STAT(512B_1023B);
   6462	p->frames_1024_1518 = GET_STAT(1024B_1518B);
   6463	p->frames_1519_max  = GET_STAT(1519B_MAX);
   6464	p->drop             = GET_STAT(DROP_FRAMES);
   6465
   6466	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
   6467	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
   6468	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
   6469	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
   6470	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
   6471	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
   6472	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
   6473	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
   6474
   6475#undef GET_STAT
   6476#undef GET_STAT_COM
   6477}
   6478
    6479/**    t4_mk_filtdelwr - create a delete filter WR
   6480 *     @ftid: the filter ID
   6481 *     @wr: the filter work request to populate
   6482 *     @qid: ingress queue to receive the delete notification
   6483 *
   6484 *     Creates a filter work request to delete the supplied filter.  If @qid is
   6485 *     negative the delete notification is suppressed.
   6486 */
   6487void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
   6488{
   6489	memset(wr, 0, sizeof(*wr));
   6490	wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
   6491	wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
   6492	wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
   6493				    FW_FILTER_WR_NOREPLY_V(qid < 0));
   6494	wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
   6495	if (qid >= 0)
   6496		wr->rx_chan_rx_rpl_iq =
   6497			cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
   6498}
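
/* Usage sketch (hypothetical): building a delete-filter work request that
 * suppresses the completion notification by passing a negative @qid.  How
 * the WR is then queued to the hardware is outside the scope of this sketch.
 */
static inline void example_build_filter_delete(struct fw_filter_wr *wr,
					       unsigned int ftid)
{
	t4_mk_filtdelwr(ftid, wr, -1);	/* qid < 0 => FW_FILTER_WR_NOREPLY */
}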
   6499
   6500#define INIT_CMD(var, cmd, rd_wr) do { \
   6501	(var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
   6502					FW_CMD_REQUEST_F | \
   6503					FW_CMD_##rd_wr##_F); \
   6504	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
   6505} while (0)
   6506
   6507int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
   6508			  u32 addr, u32 val)
   6509{
   6510	u32 ldst_addrspace;
   6511	struct fw_ldst_cmd c;
   6512
   6513	memset(&c, 0, sizeof(c));
   6514	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
   6515	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
   6516					FW_CMD_REQUEST_F |
   6517					FW_CMD_WRITE_F |
   6518					ldst_addrspace);
   6519	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
   6520	c.u.addrval.addr = cpu_to_be32(addr);
   6521	c.u.addrval.val = cpu_to_be32(val);
   6522
   6523	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
   6524}
   6525
   6526/**
   6527 *	t4_mdio_rd - read a PHY register through MDIO
   6528 *	@adap: the adapter
   6529 *	@mbox: mailbox to use for the FW command
   6530 *	@phy_addr: the PHY address
   6531 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
   6532 *	@reg: the register to read
   6533 *	@valp: where to store the value
   6534 *
   6535 *	Issues a FW command through the given mailbox to read a PHY register.
   6536 */
   6537int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
   6538	       unsigned int mmd, unsigned int reg, u16 *valp)
   6539{
   6540	int ret;
   6541	u32 ldst_addrspace;
   6542	struct fw_ldst_cmd c;
   6543
   6544	memset(&c, 0, sizeof(c));
   6545	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
   6546	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
   6547					FW_CMD_REQUEST_F | FW_CMD_READ_F |
   6548					ldst_addrspace);
   6549	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
   6550	c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
   6551					 FW_LDST_CMD_MMD_V(mmd));
   6552	c.u.mdio.raddr = cpu_to_be16(reg);
   6553
   6554	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
   6555	if (ret == 0)
   6556		*valp = be16_to_cpu(c.u.mdio.rval);
   6557	return ret;
   6558}
   6559
   6560/**
   6561 *	t4_mdio_wr - write a PHY register through MDIO
   6562 *	@adap: the adapter
   6563 *	@mbox: mailbox to use for the FW command
   6564 *	@phy_addr: the PHY address
   6565 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
   6566 *	@reg: the register to write
   6567 *	@val: value to write
   6568 *
   6569 *	Issues a FW command through the given mailbox to write a PHY register.
   6570 */
   6571int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
   6572	       unsigned int mmd, unsigned int reg, u16 val)
   6573{
   6574	u32 ldst_addrspace;
   6575	struct fw_ldst_cmd c;
   6576
   6577	memset(&c, 0, sizeof(c));
   6578	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
   6579	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
   6580					FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
   6581					ldst_addrspace);
   6582	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
   6583	c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
   6584					 FW_LDST_CMD_MMD_V(mmd));
   6585	c.u.mdio.raddr = cpu_to_be16(reg);
   6586	c.u.mdio.rval = cpu_to_be16(val);
   6587
   6588	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
   6589}
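
/*
 * Illustrative usage sketch (not part of the driver): a read-modify-write
 * of a PHY register through the mailbox MDIO helpers above.  The PHY
 * address (0), MMD (1) and register number (0x1) are arbitrary examples.
 *
 *	u16 val;
 *	int ret;
 *
 *	ret = t4_mdio_rd(adap, adap->mbox, 0, 1, 0x1, &val);
 *	if (ret == 0)
 *		ret = t4_mdio_wr(adap, adap->mbox, 0, 1, 0x1, val | 0x1);
 */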
   6590
   6591/**
   6592 *	t4_sge_decode_idma_state - decode the idma state
   6593 *	@adapter: the adapter
   6594 *	@state: the state idma is stuck in
   6595 */
   6596void t4_sge_decode_idma_state(struct adapter *adapter, int state)
   6597{
   6598	static const char * const t4_decode[] = {
   6599		"IDMA_IDLE",
   6600		"IDMA_PUSH_MORE_CPL_FIFO",
   6601		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
   6602		"Not used",
   6603		"IDMA_PHYSADDR_SEND_PCIEHDR",
   6604		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
   6605		"IDMA_PHYSADDR_SEND_PAYLOAD",
   6606		"IDMA_SEND_FIFO_TO_IMSG",
   6607		"IDMA_FL_REQ_DATA_FL_PREP",
   6608		"IDMA_FL_REQ_DATA_FL",
   6609		"IDMA_FL_DROP",
   6610		"IDMA_FL_H_REQ_HEADER_FL",
   6611		"IDMA_FL_H_SEND_PCIEHDR",
   6612		"IDMA_FL_H_PUSH_CPL_FIFO",
   6613		"IDMA_FL_H_SEND_CPL",
   6614		"IDMA_FL_H_SEND_IP_HDR_FIRST",
   6615		"IDMA_FL_H_SEND_IP_HDR",
   6616		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
   6617		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
   6618		"IDMA_FL_H_SEND_IP_HDR_PADDING",
   6619		"IDMA_FL_D_SEND_PCIEHDR",
   6620		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
   6621		"IDMA_FL_D_REQ_NEXT_DATA_FL",
   6622		"IDMA_FL_SEND_PCIEHDR",
   6623		"IDMA_FL_PUSH_CPL_FIFO",
   6624		"IDMA_FL_SEND_CPL",
   6625		"IDMA_FL_SEND_PAYLOAD_FIRST",
   6626		"IDMA_FL_SEND_PAYLOAD",
   6627		"IDMA_FL_REQ_NEXT_DATA_FL",
   6628		"IDMA_FL_SEND_NEXT_PCIEHDR",
   6629		"IDMA_FL_SEND_PADDING",
   6630		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
   6631		"IDMA_FL_SEND_FIFO_TO_IMSG",
   6632		"IDMA_FL_REQ_DATAFL_DONE",
   6633		"IDMA_FL_REQ_HEADERFL_DONE",
   6634	};
   6635	static const char * const t5_decode[] = {
   6636		"IDMA_IDLE",
   6637		"IDMA_ALMOST_IDLE",
   6638		"IDMA_PUSH_MORE_CPL_FIFO",
   6639		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
   6640		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
   6641		"IDMA_PHYSADDR_SEND_PCIEHDR",
   6642		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
   6643		"IDMA_PHYSADDR_SEND_PAYLOAD",
   6644		"IDMA_SEND_FIFO_TO_IMSG",
   6645		"IDMA_FL_REQ_DATA_FL",
   6646		"IDMA_FL_DROP",
   6647		"IDMA_FL_DROP_SEND_INC",
   6648		"IDMA_FL_H_REQ_HEADER_FL",
   6649		"IDMA_FL_H_SEND_PCIEHDR",
   6650		"IDMA_FL_H_PUSH_CPL_FIFO",
   6651		"IDMA_FL_H_SEND_CPL",
   6652		"IDMA_FL_H_SEND_IP_HDR_FIRST",
   6653		"IDMA_FL_H_SEND_IP_HDR",
   6654		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
   6655		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
   6656		"IDMA_FL_H_SEND_IP_HDR_PADDING",
   6657		"IDMA_FL_D_SEND_PCIEHDR",
   6658		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
   6659		"IDMA_FL_D_REQ_NEXT_DATA_FL",
   6660		"IDMA_FL_SEND_PCIEHDR",
   6661		"IDMA_FL_PUSH_CPL_FIFO",
   6662		"IDMA_FL_SEND_CPL",
   6663		"IDMA_FL_SEND_PAYLOAD_FIRST",
   6664		"IDMA_FL_SEND_PAYLOAD",
   6665		"IDMA_FL_REQ_NEXT_DATA_FL",
   6666		"IDMA_FL_SEND_NEXT_PCIEHDR",
   6667		"IDMA_FL_SEND_PADDING",
   6668		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
   6669	};
   6670	static const char * const t6_decode[] = {
   6671		"IDMA_IDLE",
   6672		"IDMA_PUSH_MORE_CPL_FIFO",
   6673		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
   6674		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
   6675		"IDMA_PHYSADDR_SEND_PCIEHDR",
   6676		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
   6677		"IDMA_PHYSADDR_SEND_PAYLOAD",
   6678		"IDMA_FL_REQ_DATA_FL",
   6679		"IDMA_FL_DROP",
   6680		"IDMA_FL_DROP_SEND_INC",
   6681		"IDMA_FL_H_REQ_HEADER_FL",
   6682		"IDMA_FL_H_SEND_PCIEHDR",
   6683		"IDMA_FL_H_PUSH_CPL_FIFO",
   6684		"IDMA_FL_H_SEND_CPL",
   6685		"IDMA_FL_H_SEND_IP_HDR_FIRST",
   6686		"IDMA_FL_H_SEND_IP_HDR",
   6687		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
   6688		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
   6689		"IDMA_FL_H_SEND_IP_HDR_PADDING",
   6690		"IDMA_FL_D_SEND_PCIEHDR",
   6691		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
   6692		"IDMA_FL_D_REQ_NEXT_DATA_FL",
   6693		"IDMA_FL_SEND_PCIEHDR",
   6694		"IDMA_FL_PUSH_CPL_FIFO",
   6695		"IDMA_FL_SEND_CPL",
   6696		"IDMA_FL_SEND_PAYLOAD_FIRST",
   6697		"IDMA_FL_SEND_PAYLOAD",
   6698		"IDMA_FL_REQ_NEXT_DATA_FL",
   6699		"IDMA_FL_SEND_NEXT_PCIEHDR",
   6700		"IDMA_FL_SEND_PADDING",
   6701		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
   6702	};
   6703	static const u32 sge_regs[] = {
   6704		SGE_DEBUG_DATA_LOW_INDEX_2_A,
   6705		SGE_DEBUG_DATA_LOW_INDEX_3_A,
   6706		SGE_DEBUG_DATA_HIGH_INDEX_10_A,
   6707	};
   6708	const char **sge_idma_decode;
   6709	int sge_idma_decode_nstates;
   6710	int i;
   6711	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
   6712
   6713	/* Select the right set of decode strings to dump depending on the
   6714	 * adapter chip type.
   6715	 */
   6716	switch (chip_version) {
   6717	case CHELSIO_T4:
   6718		sge_idma_decode = (const char **)t4_decode;
   6719		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
   6720		break;
   6721
   6722	case CHELSIO_T5:
   6723		sge_idma_decode = (const char **)t5_decode;
   6724		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
   6725		break;
   6726
   6727	case CHELSIO_T6:
   6728		sge_idma_decode = (const char **)t6_decode;
   6729		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
   6730		break;
   6731
   6732	default:
   6733		dev_err(adapter->pdev_dev,
   6734			"Unsupported chip version %d\n", chip_version);
   6735		return;
   6736	}
   6737
   6746	if (state < sge_idma_decode_nstates)
   6747		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
   6748	else
   6749		CH_WARN(adapter, "idma state %d unknown\n", state);
   6750
   6751	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
   6752		CH_WARN(adapter, "SGE register %#x value %#x\n",
   6753			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
   6754}
   6755
   6756/**
   6757 *      t4_sge_ctxt_flush - flush the SGE context cache
   6758 *      @adap: the adapter
   6759 *      @mbox: mailbox to use for the FW command
   6760 *      @ctxt_type: Egress or Ingress
   6761 *
   6762 *      Issues a FW command through the given mailbox to flush the
   6763 *      SGE context cache.
   6764 */
   6765int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
   6766{
   6767	int ret;
   6768	u32 ldst_addrspace;
   6769	struct fw_ldst_cmd c;
   6770
   6771	memset(&c, 0, sizeof(c));
   6772	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
   6773						 FW_LDST_ADDRSPC_SGE_EGRC :
   6774						 FW_LDST_ADDRSPC_SGE_INGC);
   6775	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
   6776					FW_CMD_REQUEST_F | FW_CMD_READ_F |
   6777					ldst_addrspace);
   6778	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
   6779	c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
   6780
   6781	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
   6782	return ret;
   6783}
   6784
   6785/**
   6786 *	t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values
   6787 *	@adap: the adapter
   6788 *	@ndbqtimers: size of the provided SGE Doorbell Queue Timer table
   6789 *	@dbqtimers: SGE Doorbell Queue Timer table
   6790 *
   6791 *	Reads the SGE Doorbell Queue Timer values into the provided table.
   6792 *	Returns 0 on success (Firmware and Hardware support this feature),
   6793 *	an error on failure.
   6794 */
   6795int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
   6796			  u16 *dbqtimers)
   6797{
   6798	int ret, dbqtimerix;
   6799
   6800	ret = 0;
   6801	dbqtimerix = 0;
   6802	while (dbqtimerix < ndbqtimers) {
   6803		int nparams, param;
   6804		u32 params[7], vals[7];
   6805
   6806		nparams = ndbqtimers - dbqtimerix;
   6807		if (nparams > ARRAY_SIZE(params))
   6808			nparams = ARRAY_SIZE(params);
   6809
   6810		for (param = 0; param < nparams; param++)
   6811			params[param] =
   6812			  (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
   6813			   FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMER) |
   6814			   FW_PARAMS_PARAM_Y_V(dbqtimerix + param));
   6815		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
   6816				      nparams, params, vals);
   6817		if (ret)
   6818			break;
   6819
   6820		for (param = 0; param < nparams; param++)
   6821			dbqtimers[dbqtimerix++] = vals[param];
   6822	}
   6823	return ret;
   6824}
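
/*
 * Illustrative usage sketch (not part of the driver): reading an 8-entry
 * Doorbell Queue Timer table.  A non-zero return indicates that the
 * running firmware/hardware doesn't support the feature or that the query
 * failed.
 *
 *	u16 dbqtimers[8];
 *	int ret;
 *
 *	ret = t4_read_sge_dbqtimers(adap, ARRAY_SIZE(dbqtimers), dbqtimers);
 */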
   6825
   6826/**
   6827 *	t4_fw_hello - establish communication with FW
   6828 *	@adap: the adapter
   6829 *	@mbox: mailbox to use for the FW command
   6830 *	@evt_mbox: mailbox to receive async FW events
   6831 *	@master: specifies the caller's willingness to be the device master
   6832 *	@state: returns the current device state (if non-NULL)
   6833 *
   6834 *	Issues a command to establish communication with FW.  Returns either
   6835 *	an error (negative integer) or the mailbox of the Master PF.
   6836 */
   6837int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
   6838		enum dev_master master, enum dev_state *state)
   6839{
   6840	int ret;
   6841	struct fw_hello_cmd c;
   6842	u32 v;
   6843	unsigned int master_mbox;
   6844	int retries = FW_CMD_HELLO_RETRIES;
   6845
   6846retry:
   6847	memset(&c, 0, sizeof(c));
   6848	INIT_CMD(c, HELLO, WRITE);
   6849	c.err_to_clearinit = cpu_to_be32(
   6850		FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
   6851		FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
   6852		FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
   6853					mbox : FW_HELLO_CMD_MBMASTER_M) |
   6854		FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
   6855		FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
   6856		FW_HELLO_CMD_CLEARINIT_F);
   6857
   6858	/*
   6859	 * Issue the HELLO command to the firmware.  If it's not successful
   6860	 * but indicates that we got a "busy" or "timeout" condition, retry
   6861	 * the HELLO until we exhaust our retry limit.  If we do exceed our
   6862	 * retry limit, check to see if the firmware left us any error
   6863	 * information and report that if so.
   6864	 */
   6865	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
   6866	if (ret < 0) {
   6867		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
   6868			goto retry;
   6869		if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
   6870			t4_report_fw_error(adap);
   6871		return ret;
   6872	}
   6873
   6874	v = be32_to_cpu(c.err_to_clearinit);
   6875	master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
   6876	if (state) {
   6877		if (v & FW_HELLO_CMD_ERR_F)
   6878			*state = DEV_STATE_ERR;
   6879		else if (v & FW_HELLO_CMD_INIT_F)
   6880			*state = DEV_STATE_INIT;
   6881		else
   6882			*state = DEV_STATE_UNINIT;
   6883	}
   6884
   6885	/*
   6886	 * If we're not the Master PF then we need to wait around for the
   6887	 * Master PF Driver to finish setting up the adapter.
   6888	 *
   6889	 * Note that we also do this wait if we're a non-Master-capable PF and
   6890	 * there is no current Master PF; a Master PF may show up momentarily
   6891	 * and we wouldn't want to fail pointlessly.  (This can happen when an
   6892	 * OS loads lots of different drivers rapidly at the same time).  In
   6893	 * this case, the Master PF returned by the firmware will be
   6894	 * PCIE_FW_MASTER_M so the test below will work ...
   6895	 */
   6896	if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
   6897	    master_mbox != mbox) {
   6898		int waiting = FW_CMD_HELLO_TIMEOUT;
   6899
   6900		/*
   6901		 * Wait for the firmware to either indicate an error or
   6902		 * initialized state.  If we see either of these we bail out
   6903		 * and report the issue to the caller.  If we exhaust the
   6904		 * "hello timeout" and we haven't exhausted our retries, try
   6905		 * again.  Otherwise bail with a timeout error.
   6906		 */
   6907		for (;;) {
   6908			u32 pcie_fw;
   6909
   6910			msleep(50);
   6911			waiting -= 50;
   6912
   6913			/*
   6914			 * If neither Error nor Initialized are indicated
   6915			 * by the firmware keep waiting till we exhaust our
   6916			 * timeout ... and then retry if we haven't exhausted
   6917			 * our retries ...
   6918			 */
   6919			pcie_fw = t4_read_reg(adap, PCIE_FW_A);
   6920			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
   6921				if (waiting <= 0) {
   6922					if (retries-- > 0)
   6923						goto retry;
   6924
   6925					return -ETIMEDOUT;
   6926				}
   6927				continue;
   6928			}
   6929
   6930			/*
   6931			 * We either have an Error or Initialized condition;
   6932			 * report errors preferentially.
   6933			 */
   6934			if (state) {
   6935				if (pcie_fw & PCIE_FW_ERR_F)
   6936					*state = DEV_STATE_ERR;
   6937				else if (pcie_fw & PCIE_FW_INIT_F)
   6938					*state = DEV_STATE_INIT;
   6939			}
   6940
   6941			/*
   6942			 * If we arrived before a Master PF was selected and
   6943			 * there's now a valid Master PF, grab its identity
   6944			 * for our caller.
   6945			 */
   6946			if (master_mbox == PCIE_FW_MASTER_M &&
   6947			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
   6948				master_mbox = PCIE_FW_MASTER_G(pcie_fw);
   6949			break;
   6950		}
   6951	}
   6952
   6953	return master_mbox;
   6954}
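
/*
 * Illustrative usage sketch (not part of the driver): a typical probe-time
 * exchange.  MASTER_MAY is assumed here to be the enum dev_master value
 * meaning "willing, but not insisting, to be master".  The non-negative
 * return value is the Master PF's mailbox, so comparing it against our own
 * mailbox tells us whether this PF is the master.
 *
 *	enum dev_state state;
 *	int master_mbox;
 *
 *	master_mbox = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY,
 *				  &state);
 *	if (master_mbox < 0)
 *		return master_mbox;
 *
 * master_mbox == adap->mbox then means this PF is the Master PF.
 */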
   6955
   6956/**
   6957 *	t4_fw_bye - end communication with FW
   6958 *	@adap: the adapter
   6959 *	@mbox: mailbox to use for the FW command
   6960 *
   6961 *	Issues a command to terminate communication with FW.
   6962 */
   6963int t4_fw_bye(struct adapter *adap, unsigned int mbox)
   6964{
   6965	struct fw_bye_cmd c;
   6966
   6967	memset(&c, 0, sizeof(c));
   6968	INIT_CMD(c, BYE, WRITE);
   6969	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
   6970}
   6971
   6972/**
   6973 *	t4_early_init - ask FW to initialize the device
   6974 *	@adap: the adapter
   6975 *	@mbox: mailbox to use for the FW command
   6976 *
   6977 *	Issues a command to FW to partially initialize the device.  This
   6978 *	performs initialization that generally doesn't depend on user input.
   6979 */
   6980int t4_early_init(struct adapter *adap, unsigned int mbox)
   6981{
   6982	struct fw_initialize_cmd c;
   6983
   6984	memset(&c, 0, sizeof(c));
   6985	INIT_CMD(c, INITIALIZE, WRITE);
   6986	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
   6987}
   6988
   6989/**
   6990 *	t4_fw_reset - issue a reset to FW
   6991 *	@adap: the adapter
   6992 *	@mbox: mailbox to use for the FW command
   6993 *	@reset: specifies the type of reset to perform
   6994 *
   6995 *	Issues a reset command of the specified type to FW.
   6996 */
   6997int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
   6998{
   6999	struct fw_reset_cmd c;
   7000
   7001	memset(&c, 0, sizeof(c));
   7002	INIT_CMD(c, RESET, WRITE);
   7003	c.val = cpu_to_be32(reset);
   7004	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
   7005}
   7006
   7007/**
   7008 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
   7009 *	@adap: the adapter
   7010 *	@mbox: mailbox to use for the FW RESET command (if desired)
   7011 *	@force: force uP into RESET even if FW RESET command fails
   7012 *
   7013 *	Issues a RESET command to firmware (if desired) with a HALT indication
   7014 *	and then puts the microprocessor into RESET state.  The RESET command
   7015 *	will only be issued if a legitimate mailbox is provided (mbox <=
   7016 *	PCIE_FW_MASTER_M).
   7017 *
   7018 *	This is generally used in order for the host to safely manipulate the
   7019 *	adapter without fear of conflicting with whatever the firmware might
   7020 *	be doing.  The only way out of this state is to RESTART the firmware
   7021 *	...
   7022 */
   7023static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
   7024{
   7025	int ret = 0;
   7026
   7027	/*
   7028	 * If a legitimate mailbox is provided, issue a RESET command
   7029	 * with a HALT indication.
   7030	 */
   7031	if (mbox <= PCIE_FW_MASTER_M) {
   7032		struct fw_reset_cmd c;
   7033
   7034		memset(&c, 0, sizeof(c));
   7035		INIT_CMD(c, RESET, WRITE);
   7036		c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
   7037		c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
   7038		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
   7039	}
   7040
   7041	/*
   7042	 * Normally we won't complete the operation if the firmware RESET
   7043	 * command fails but if our caller insists we'll go ahead and put the
   7044	 * uP into RESET.  This can be useful if the firmware is hung or even
   7045	 * missing ...  We'll have to take the risk of putting the uP into
   7046	 * RESET without the cooperation of firmware in that case.
   7047	 *
   7048	 * We also force the firmware's HALT flag to be on in case we bypassed
   7049	 * the firmware RESET command above or we're dealing with old firmware
   7050	 * which doesn't have the HALT capability.  This will serve as a flag
   7051	 * for the incoming firmware to know that it's coming out of a HALT
   7052	 * rather than a RESET ... if it's new enough to understand that ...
   7053	 */
   7054	if (ret == 0 || force) {
   7055		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
   7056		t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
   7057				 PCIE_FW_HALT_F);
   7058	}
   7059
   7060	/*
   7061	 * And we always return the result of the firmware RESET command
   7062	 * even when we force the uP into RESET ...
   7063	 */
   7064	return ret;
   7065}
   7066
   7067/**
   7068 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
   7069 *	@adap: the adapter
   7070 *	@mbox: mailbox to use for the FW command
   7071 *	@reset: if we want to do a RESET to restart things
   7072 *
   7073 *	Restart firmware previously halted by t4_fw_halt().  On successful
   7074 *	return the previous PF Master remains as the new PF Master and there
   7075 *	is no need to issue a new HELLO command, etc.
   7076 *
   7077 *	We do this in two ways:
   7078 *
   7079 *	 1. If we're dealing with newer firmware we'll simply want to take
   7080 *	    the chip's microprocessor out of RESET.  This will cause the
   7081 *	    firmware to start up from its start vector.  And then we'll loop
   7082 *	    until the firmware indicates it's started again (PCIE_FW.HALT
   7083 *	    reset to 0) or we timeout.
   7084 *
   7085 *	 2. If we're dealing with older firmware then we'll need to RESET
   7086 *	    the chip since older firmware won't recognize the PCIE_FW.HALT
   7087 *	    flag and automatically RESET itself on startup.
   7088 */
   7089static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
   7090{
   7091	if (reset) {
   7092		/*
   7093		 * Since we're directing the RESET instead of the firmware
   7094		 * doing it automatically, we need to clear the PCIE_FW.HALT
   7095		 * bit.
   7096		 */
   7097		t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
   7098
   7099		/*
   7100		 * If we've been given a valid mailbox, first try to get the
   7101		 * firmware to do the RESET.  If that works, great and we can
   7102		 * return success.  Otherwise, if we haven't been given a
   7103		 * valid mailbox or the RESET command failed, fall back to
   7104		 * hitting the chip with a hammer.
   7105		 */
   7106		if (mbox <= PCIE_FW_MASTER_M) {
   7107			t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
   7108			msleep(100);
   7109			if (t4_fw_reset(adap, mbox,
   7110					PIORST_F | PIORSTMODE_F) == 0)
   7111				return 0;
   7112		}
   7113
   7114		t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
   7115		msleep(2000);
   7116	} else {
   7117		int ms;
   7118
   7119		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
   7120		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
   7121			if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
   7122				return 0;
   7123			msleep(100);
   7124			ms += 100;
   7125		}
   7126		return -ETIMEDOUT;
   7127	}
   7128	return 0;
   7129}
   7130
   7131/**
   7132 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
   7133 *	@adap: the adapter
   7134 *	@mbox: mailbox to use for the FW RESET command (if desired)
   7135 *	@fw_data: the firmware image to write
   7136 *	@size: image size
   7137 *	@force: force upgrade even if firmware doesn't cooperate
   7138 *
   7139 *	Perform all of the steps necessary for upgrading an adapter's
   7140 *	firmware image.  Normally this requires the cooperation of the
   7141 *	existing firmware in order to halt all existing activities
   7142 *	but if an invalid mailbox token is passed in we skip that step
   7143 *	(though we'll still put the adapter microprocessor into RESET in
   7144 *	that case).
   7145 *
   7146 *	On successful return the new firmware will have been loaded and
   7147 *	the adapter will have been fully RESET losing all previous setup
   7148 *	state.  On unsuccessful return the adapter may be completely hosed ...
   7149 *	positive errno indicates that the adapter is ~probably~ intact, a
   7150 *	negative errno indicates that things are looking bad ...
   7151 */
   7152int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
   7153		  const u8 *fw_data, unsigned int size, int force)
   7154{
   7155	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
   7156	int reset, ret;
   7157
   7158	if (!t4_fw_matches_chip(adap, fw_hdr))
   7159		return -EINVAL;
   7160
   7161	/* Disable the CXGB4_FW_OK flag so that mbox commands with the CXGB4_FW_OK
   7162	 * flag set won't be sent while we are flashing FW.
   7163	 */
   7164	adap->flags &= ~CXGB4_FW_OK;
   7165
   7166	ret = t4_fw_halt(adap, mbox, force);
   7167	if (ret < 0 && !force)
   7168		goto out;
   7169
   7170	ret = t4_load_fw(adap, fw_data, size);
   7171	if (ret < 0)
   7172		goto out;
   7173
   7174	/*
   7175	 * If there was a Firmware Configuration File stored in FLASH,
   7176	 * there's a good chance that it won't be compatible with the new
   7177	 * Firmware.  In order to prevent difficult to diagnose adapter
   7178	 * initialization issues, we clear out the Firmware Configuration File
   7179	 * portion of the FLASH.  The user will need to re-FLASH a new
   7180	 * Firmware Configuration File which is compatible with the new
   7181	 * Firmware if that's desired.
   7182	 */
   7183	(void)t4_load_cfg(adap, NULL, 0);
   7184
   7185	/*
   7186	 * Older versions of the firmware don't understand the new
   7187	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
   7188	 * restart.  So for newly loaded older firmware we'll have to do the
   7189	 * RESET for it so it starts up on a clean slate.  We can tell if
   7190	 * the newly loaded firmware will handle this right by checking
   7191	 * its header flags to see if it advertises the capability.
   7192	 */
   7193	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
   7194	ret = t4_fw_restart(adap, mbox, reset);
   7195
   7196	/* Grab potentially new Firmware Device Log parameters so we can see
   7197	 * how healthy the new Firmware is.  It's okay to contact the new
   7198	 * Firmware for these parameters even though, as far as it's
   7199	 * concerned, we've never said "HELLO" to it ...
   7200	 */
   7201	(void)t4_init_devlog_params(adap);
   7202out:
   7203	adap->flags |= CXGB4_FW_OK;
   7204	return ret;
   7205}
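
/*
 * Illustrative usage sketch (not part of the driver): flashing an image
 * obtained from the firmware loader.  "fw" is a hypothetical
 * "const struct firmware *" returned by request_firmware(); force is left
 * at 0 so the upgrade is abandoned if the running firmware refuses to
 * halt.
 *
 *	ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size, 0);
 */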
   7206
   7207/**
   7208 *	t4_fl_pkt_align - return the fl packet alignment
   7209 *	@adap: the adapter
   7210 *
   7211 *	T4 has a single field to specify the packing and padding boundary.
   7212 *	T5 onwards has separate fields for these, and hence the alignment for
   7213 *	the next packet offset is the maximum of the two.
   7214 *
   7215 */
   7216int t4_fl_pkt_align(struct adapter *adap)
   7217{
   7218	u32 sge_control, sge_control2;
   7219	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
   7220
   7221	sge_control = t4_read_reg(adap, SGE_CONTROL_A);
   7222
   7223	/* T4 uses a single control field to specify both the PCIe Padding and
   7224	 * Packing Boundary.  T5 introduced the ability to specify these
   7225	 * separately.  The actual Ingress Packet Data alignment boundary
   7226	 * within Packed Buffer Mode is the maximum of these two
   7227	 * specifications.  (Note that it makes no real practical sense to
   7228	 * have the Padding Boundary be larger than the Packing Boundary but you
   7229	 * could set the chip up that way and, in fact, legacy T4 code would
   7230	 * end up doing this because it would initialize the Padding Boundary and
   7231	 * leave the Packing Boundary initialized to 0 (16 bytes).)
   7232	 * Padding Boundary values on T6 start from 8B,
   7233	 * whereas it is 32B for T4 and T5.
   7234	 */
   7235	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
   7236		ingpad_shift = INGPADBOUNDARY_SHIFT_X;
   7237	else
   7238		ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
   7239
   7240	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
   7241
   7242	fl_align = ingpadboundary;
   7243	if (!is_t4(adap->params.chip)) {
   7244		/* T5 has a weird interpretation of one of the PCIe Packing
   7245		 * Boundary values.  No idea why ...
   7246		 */
   7247		sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
   7248		ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
   7249		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
   7250			ingpackboundary = 16;
   7251		else
   7252			ingpackboundary = 1 << (ingpackboundary +
   7253						INGPACKBOUNDARY_SHIFT_X);
   7254
   7255		fl_align = max(ingpadboundary, ingpackboundary);
   7256	}
   7257	return fl_align;
   7258}
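
/*
 * Worked example (illustrative): on a T5/T6 adapter programmed with a
 * 32-byte Ingress Padding Boundary and a 64-byte Ingress Packing Boundary,
 * t4_fl_pkt_align() returns max(32, 64) = 64, so free-list packet data is
 * placed on 64-byte boundaries.  On T4 there is only the padding field and
 * its value is returned directly.
 */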
   7259
   7260/**
   7261 *	t4_fixup_host_params - fix up host-dependent parameters
   7262 *	@adap: the adapter
   7263 *	@page_size: the host's Base Page Size
   7264 *	@cache_line_size: the host's Cache Line Size
   7265 *
   7266 *	Various registers in T4 contain values which are dependent on the
   7267 *	host's Base Page and Cache Line Sizes.  This function will fix all of
   7268 *	those registers with the appropriate values as passed in ...
   7269 */
   7270int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
   7271			 unsigned int cache_line_size)
   7272{
   7273	unsigned int page_shift = fls(page_size) - 1;
   7274	unsigned int sge_hps = page_shift - 10;
   7275	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
   7276	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
   7277	unsigned int fl_align_log = fls(fl_align) - 1;
   7278
   7279	t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
   7280		     HOSTPAGESIZEPF0_V(sge_hps) |
   7281		     HOSTPAGESIZEPF1_V(sge_hps) |
   7282		     HOSTPAGESIZEPF2_V(sge_hps) |
   7283		     HOSTPAGESIZEPF3_V(sge_hps) |
   7284		     HOSTPAGESIZEPF4_V(sge_hps) |
   7285		     HOSTPAGESIZEPF5_V(sge_hps) |
   7286		     HOSTPAGESIZEPF6_V(sge_hps) |
   7287		     HOSTPAGESIZEPF7_V(sge_hps));
   7288
   7289	if (is_t4(adap->params.chip)) {
   7290		t4_set_reg_field(adap, SGE_CONTROL_A,
   7291				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
   7292				 EGRSTATUSPAGESIZE_F,
   7293				 INGPADBOUNDARY_V(fl_align_log -
   7294						  INGPADBOUNDARY_SHIFT_X) |
   7295				 EGRSTATUSPAGESIZE_V(stat_len != 64));
   7296	} else {
   7297		unsigned int pack_align;
   7298		unsigned int ingpad, ingpack;
   7299
   7300		/* T5 introduced the separation of the Free List Padding and
   7301		 * Packing Boundaries.  Thus, we can select a smaller Padding
   7302		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
   7303		 * Bandwidth, and use a Packing Boundary which is large enough
   7304		 * to avoid false sharing between CPUs, etc.
   7305		 *
   7306		 * For the PCI Link, the smaller the Padding Boundary the
   7307		 * better.  For the Memory Controller, a smaller Padding
   7308		 * Boundary is better until we cross under the Memory Line
   7309		 * Size (the minimum unit of transfer to/from Memory).  If we
   7310		 * have a Padding Boundary which is smaller than the Memory
   7311		 * Line Size, that'll involve a Read-Modify-Write cycle on the
   7312		 * Memory Controller which is never good.
   7313		 */
   7314
   7315		/* We want the Packing Boundary to be based on the Cache Line
   7316		 * Size in order to help avoid False Sharing performance
   7317		 * issues between CPUs, etc.  We also want the Packing
   7318		 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
   7319		 * get best performance when the Packing Boundary is a
   7320		 * multiple of the Maximum Payload Size.
   7321		 */
   7322		pack_align = fl_align;
   7323		if (pci_is_pcie(adap->pdev)) {
   7324			unsigned int mps, mps_log;
   7325			u16 devctl;
   7326
   7327			/* The PCIe Device Control Maximum Payload Size field
   7328			 * [bits 7:5] encodes sizes as powers of 2 starting at
   7329			 * 128 bytes.
   7330			 */
   7331			pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL,
   7332						  &devctl);
   7333			mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
   7334			mps = 1 << mps_log;
   7335			if (mps > pack_align)
   7336				pack_align = mps;
   7337		}
   7338
   7339		/* N.B. T5/T6 have a crazy special interpretation of the "0"
   7340		 * value for the Packing Boundary.  This corresponds to 16
   7341		 * bytes instead of the expected 32 bytes.  So if we want 32
   7342		 * bytes, the best we can really do is 64 bytes ...
   7343		 */
   7344		if (pack_align <= 16) {
   7345			ingpack = INGPACKBOUNDARY_16B_X;
   7346			fl_align = 16;
   7347		} else if (pack_align == 32) {
   7348			ingpack = INGPACKBOUNDARY_64B_X;
   7349			fl_align = 64;
   7350		} else {
   7351			unsigned int pack_align_log = fls(pack_align) - 1;
   7352
   7353			ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
   7354			fl_align = pack_align;
   7355		}
   7356
   7357		/* Use the smallest Ingress Padding which isn't smaller than
   7358		 * the Memory Controller Read/Write Size.  We'll take that as
   7359		 * being 8 bytes since we don't know of any system with a
   7360		 * wider Memory Controller Bus Width.
   7361		 */
   7362		if (is_t5(adap->params.chip))
   7363			ingpad = INGPADBOUNDARY_32B_X;
   7364		else
   7365			ingpad = T6_INGPADBOUNDARY_8B_X;
   7366
   7367		t4_set_reg_field(adap, SGE_CONTROL_A,
   7368				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
   7369				 EGRSTATUSPAGESIZE_F,
   7370				 INGPADBOUNDARY_V(ingpad) |
   7371				 EGRSTATUSPAGESIZE_V(stat_len != 64));
   7372		t4_set_reg_field(adap, SGE_CONTROL2_A,
   7373				 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
   7374				 INGPACKBOUNDARY_V(ingpack));
   7375	}
   7376	/*
   7377	 * Adjust various SGE Free List Host Buffer Sizes.
   7378	 *
   7379	 * This is something of a crock since we're using fixed indices into
   7380	 * the array which are also known by the sge.c code and the T4
   7381	 * Firmware Configuration File.  We need to come up with a much better
   7382	 * approach to managing this array.  For now, the first four entries
   7383	 * are:
   7384	 *
   7385	 *   0: Host Page Size
   7386	 *   1: 64KB
   7387	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
   7388	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
   7389	 *
   7390	 * For the single-MTU buffers in unpacked mode we need to include
   7391	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
   7392	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
   7393	 * Padding boundary.  All of these are accommodated in the Factory
   7394	 * Default Firmware Configuration File but we need to adjust it for
   7395	 * this host's cache line size.
   7396	 */
   7397	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
   7398	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
   7399		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
   7400		     & ~(fl_align-1));
   7401	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
   7402		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
   7403		     & ~(fl_align-1));
   7404
   7405	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
   7406
   7407	return 0;
   7408}
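
/*
 * Worked example (illustrative): with a 4KB host page size, page_shift is
 * fls(4096) - 1 = 12 and sge_hps = 12 - 10 = 2, i.e. the per-PF host page
 * size fields encode the page size as 1KB << n.  A 64-byte cache line
 * yields stat_len = 64 and a 64-byte free-list alignment.
 */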
   7409
   7410/**
   7411 *	t4_fw_initialize - ask FW to initialize the device
   7412 *	@adap: the adapter
   7413 *	@mbox: mailbox to use for the FW command
   7414 *
   7415 *	Issues a command to FW to partially initialize the device.  This
   7416 *	performs initialization that generally doesn't depend on user input.
   7417 */
   7418int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
   7419{
   7420	struct fw_initialize_cmd c;
   7421
   7422	memset(&c, 0, sizeof(c));
   7423	INIT_CMD(c, INITIALIZE, WRITE);
   7424	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
   7425}
   7426
   7427/**
   7428 *	t4_query_params_rw - query FW or device parameters
   7429 *	@adap: the adapter
   7430 *	@mbox: mailbox to use for the FW command
   7431 *	@pf: the PF
   7432 *	@vf: the VF
   7433 *	@nparams: the number of parameters
   7434 *	@params: the parameter names
   7435 *	@val: the parameter values
   7436 *	@rw: Write and read flag
   7437 *	@sleep_ok: if true, we may sleep awaiting mbox cmd completion
   7438 *
   7439 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
   7440 *	queried at once.
   7441 */
   7442int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
   7443		       unsigned int vf, unsigned int nparams, const u32 *params,
   7444		       u32 *val, int rw, bool sleep_ok)
   7445{
   7446	int i, ret;
   7447	struct fw_params_cmd c;
   7448	__be32 *p = &c.param[0].mnem;
   7449
   7450	if (nparams > 7)
   7451		return -EINVAL;
   7452
   7453	memset(&c, 0, sizeof(c));
   7454	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
   7455				  FW_CMD_REQUEST_F | FW_CMD_READ_F |
   7456				  FW_PARAMS_CMD_PFN_V(pf) |
   7457				  FW_PARAMS_CMD_VFN_V(vf));
   7458	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
   7459
   7460	for (i = 0; i < nparams; i++) {
   7461		*p++ = cpu_to_be32(*params++);
   7462		if (rw)
   7463			*p = cpu_to_be32(*(val + i));
   7464		p++;
   7465	}
   7466
   7467	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
   7468	if (ret == 0)
   7469		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
   7470			*val++ = be32_to_cpu(*p);
   7471	return ret;
   7472}
   7473
   7474int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
   7475		    unsigned int vf, unsigned int nparams, const u32 *params,
   7476		    u32 *val)
   7477{
   7478	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
   7479				  true);
   7480}
   7481
   7482int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
   7483		       unsigned int vf, unsigned int nparams, const u32 *params,
   7484		       u32 *val)
   7485{
   7486	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
   7487				  false);
   7488}
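
/*
 * Illustrative usage sketch (not part of the driver): querying a single
 * device parameter, here the first SGE Doorbell Queue Timer value (the
 * same parameter t4_read_sge_dbqtimers() above queries in bulk):
 *
 *	u32 param, val;
 *	int ret;
 *
 *	param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
 *		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMER) |
 *		FW_PARAMS_PARAM_Y_V(0);
 *	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
 */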
   7489
   7490/**
   7491 *      t4_set_params_timeout - sets FW or device parameters
   7492 *      @adap: the adapter
   7493 *      @mbox: mailbox to use for the FW command
   7494 *      @pf: the PF
   7495 *      @vf: the VF
   7496 *      @nparams: the number of parameters
   7497 *      @params: the parameter names
   7498 *      @val: the parameter values
   7499 *      @timeout: the timeout time
   7500 *
   7501 *      Sets the value of FW or device parameters.  Up to 7 parameters can be
   7502 *      specified at once.
   7503 */
   7504int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
   7505			  unsigned int pf, unsigned int vf,
   7506			  unsigned int nparams, const u32 *params,
   7507			  const u32 *val, int timeout)
   7508{
   7509	struct fw_params_cmd c;
   7510	__be32 *p = &c.param[0].mnem;
   7511
   7512	if (nparams > 7)
   7513		return -EINVAL;
   7514
   7515	memset(&c, 0, sizeof(c));
   7516	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
   7517				  FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
   7518				  FW_PARAMS_CMD_PFN_V(pf) |
   7519				  FW_PARAMS_CMD_VFN_V(vf));
   7520	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
   7521
   7522	while (nparams--) {
   7523		*p++ = cpu_to_be32(*params++);
   7524		*p++ = cpu_to_be32(*val++);
   7525	}
   7526
   7527	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
   7528}
   7529
   7530/**
   7531 *	t4_set_params - sets FW or device parameters
   7532 *	@adap: the adapter
   7533 *	@mbox: mailbox to use for the FW command
   7534 *	@pf: the PF
   7535 *	@vf: the VF
   7536 *	@nparams: the number of parameters
   7537 *	@params: the parameter names
   7538 *	@val: the parameter values
   7539 *
   7540 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
   7541 *	specified at once.
   7542 */
   7543int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
   7544		  unsigned int vf, unsigned int nparams, const u32 *params,
   7545		  const u32 *val)
   7546{
   7547	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
   7548				     FW_CMD_MAX_TIMEOUT);
   7549}
   7550
   7551/**
   7552 *	t4_cfg_pfvf - configure PF/VF resource limits
   7553 *	@adap: the adapter
   7554 *	@mbox: mailbox to use for the FW command
   7555 *	@pf: the PF being configured
   7556 *	@vf: the VF being configured
   7557 *	@txq: the max number of egress queues
   7558 *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
   7559 *	@rxqi: the max number of interrupt-capable ingress queues
   7560 *	@rxq: the max number of interruptless ingress queues
   7561 *	@tc: the PCI traffic class
   7562 *	@vi: the max number of virtual interfaces
   7563 *	@cmask: the channel access rights mask for the PF/VF
   7564 *	@pmask: the port access rights mask for the PF/VF
   7565 *	@nexact: the maximum number of exact MPS filters
   7566 *	@rcaps: read capabilities
   7567 *	@wxcaps: write/execute capabilities
   7568 *
   7569 *	Configures resource limits and capabilities for a physical or virtual
   7570 *	function.
   7571 */
   7572int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
   7573		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
   7574		unsigned int rxqi, unsigned int rxq, unsigned int tc,
   7575		unsigned int vi, unsigned int cmask, unsigned int pmask,
   7576		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
   7577{
   7578	struct fw_pfvf_cmd c;
   7579
   7580	memset(&c, 0, sizeof(c));
   7581	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
   7582				  FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
   7583				  FW_PFVF_CMD_VFN_V(vf));
   7584	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
   7585	c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
   7586				     FW_PFVF_CMD_NIQ_V(rxq));
   7587	c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
   7588				    FW_PFVF_CMD_PMASK_V(pmask) |
   7589				    FW_PFVF_CMD_NEQ_V(txq));
   7590	c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
   7591				      FW_PFVF_CMD_NVI_V(vi) |
   7592				      FW_PFVF_CMD_NEXACTF_V(nexact));
   7593	c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
   7594					FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
   7595					FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
   7596	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
   7597}
   7598
   7599/**
   7600 *	t4_alloc_vi - allocate a virtual interface
   7601 *	@adap: the adapter
   7602 *	@mbox: mailbox to use for the FW command
   7603 *	@port: physical port associated with the VI
   7604 *	@pf: the PF owning the VI
   7605 *	@vf: the VF owning the VI
   7606 *	@nmac: number of MAC addresses needed (1 to 5)
   7607 *	@mac: the MAC addresses of the VI
   7608 *	@rss_size: size of RSS table slice associated with this VI
   7609 *	@vivld: the destination to store the VI Valid value.
   7610 *	@vin: the destination to store the VIN value.
   7611 *
   7612 *	Allocates a virtual interface for the given physical port.  If @mac is
   7613 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
   7614 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
   7615 *	stored consecutively so the space needed is @nmac * 6 bytes.
   7616 *	Returns a negative error number or the non-negative VI id.
   7617 */
   7618int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
   7619		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
   7620		unsigned int *rss_size, u8 *vivld, u8 *vin)
   7621{
   7622	int ret;
   7623	struct fw_vi_cmd c;
   7624
   7625	memset(&c, 0, sizeof(c));
   7626	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
   7627				  FW_CMD_WRITE_F | FW_CMD_EXEC_F |
   7628				  FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
   7629	c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
   7630	c.portid_pkd = FW_VI_CMD_PORTID_V(port);
   7631	c.nmac = nmac - 1;
   7632
   7633	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
   7634	if (ret)
   7635		return ret;
   7636
   7637	if (mac) {
   7638		memcpy(mac, c.mac, sizeof(c.mac));
   7639		switch (nmac) {
   7640		case 5:
   7641			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
   7642			fallthrough;
   7643		case 4:
   7644			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
   7645			fallthrough;
   7646		case 3:
   7647			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
   7648			fallthrough;
   7649		case 2:
   7650			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
   7651		}
   7652	}
   7653	if (rss_size)
   7654		*rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
   7655
   7656	if (vivld)
   7657		*vivld = FW_VI_CMD_VFVLD_G(be32_to_cpu(c.alloc_to_len16));
   7658
   7659	if (vin)
   7660		*vin = FW_VI_CMD_VIN_G(be32_to_cpu(c.alloc_to_len16));
   7661
   7662	return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
   7663}
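
/*
 * Illustrative usage sketch (not part of the driver): allocating a VI with
 * one MAC address on physical port 0 and reading back the RSS slice size.
 * A non-negative return value is the VI id used by the FW_VI_* helpers
 * below.
 *
 *	u8 mac[6];
 *	unsigned int rss_size;
 *	int viid;
 *
 *	viid = t4_alloc_vi(adap, adap->mbox, 0, adap->pf, 0, 1, mac,
 *			   &rss_size, NULL, NULL);
 */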
   7664
   7665/**
   7666 *	t4_free_vi - free a virtual interface
   7667 *	@adap: the adapter
   7668 *	@mbox: mailbox to use for the FW command
   7669 *	@pf: the PF owning the VI
   7670 *	@vf: the VF owning the VI
   7671 *	@viid: virtual interface identifier
   7672 *
   7673 *	Free a previously allocated virtual interface.
   7674 */
   7675int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
   7676	       unsigned int vf, unsigned int viid)
   7677{
   7678	struct fw_vi_cmd c;
   7679
   7680	memset(&c, 0, sizeof(c));
   7681	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
   7682				  FW_CMD_REQUEST_F |
   7683				  FW_CMD_EXEC_F |
   7684				  FW_VI_CMD_PFN_V(pf) |
   7685				  FW_VI_CMD_VFN_V(vf));
   7686	c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
   7687	c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
   7688
   7689	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
   7690}
   7691
   7692/**
   7693 *	t4_set_rxmode - set Rx properties of a virtual interface
   7694 *	@adap: the adapter
   7695 *	@mbox: mailbox to use for the FW command
   7696 *	@viid: the VI id
   7697 *	@viid_mirror: the mirror VI id
   7698 *	@mtu: the new MTU or -1
   7699 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
   7700 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
   7701 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
   7702 *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
   7703 *	@sleep_ok: if true we may sleep while awaiting command completion
   7704 *
   7705 *	Sets Rx properties of a virtual interface.
   7706 */
   7707int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
   7708		  unsigned int viid_mirror, int mtu, int promisc, int all_multi,
   7709		  int bcast, int vlanex, bool sleep_ok)
   7710{
   7711	struct fw_vi_rxmode_cmd c, c_mirror;
   7712	int ret;
   7713
   7714	/* convert to FW values */
   7715	if (mtu < 0)
   7716		mtu = FW_RXMODE_MTU_NO_CHG;
   7717	if (promisc < 0)
   7718		promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
   7719	if (all_multi < 0)
   7720		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
   7721	if (bcast < 0)
   7722		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
   7723	if (vlanex < 0)
   7724		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
   7725
   7726	memset(&c, 0, sizeof(c));
   7727	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
   7728				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
   7729				   FW_VI_RXMODE_CMD_VIID_V(viid));
   7730	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
   7731	c.mtu_to_vlanexen =
   7732		cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
   7733			    FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
   7734			    FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
   7735			    FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
   7736			    FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
   7737
   7738	if (viid_mirror) {
   7739		memcpy(&c_mirror, &c, sizeof(c_mirror));
   7740		c_mirror.op_to_viid =
   7741			cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
   7742				    FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
   7743				    FW_VI_RXMODE_CMD_VIID_V(viid_mirror));
   7744	}
   7745
   7746	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
   7747	if (ret)
   7748		return ret;
   7749
   7750	if (viid_mirror)
   7751		ret = t4_wr_mbox_meat(adap, mbox, &c_mirror, sizeof(c_mirror),
   7752				      NULL, sleep_ok);
   7753
   7754	return ret;
   7755}
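
/*
 * Illustrative usage sketch (not part of the driver): enabling promiscuous
 * and all-multicast reception on a (hypothetical) VI "viid" with no mirror
 * VI, while leaving the MTU, broadcast and VLAN-extraction settings
 * unchanged (-1 means "no change"):
 *
 *	ret = t4_set_rxmode(adap, adap->mbox, viid, 0, -1, 1, 1, -1, -1, true);
 */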
   7756
   7757/**
   7758 *      t4_free_encap_mac_filt - frees MPS entry at given index
   7759 *      @adap: the adapter
   7760 *      @viid: the VI id
   7761 *      @idx: index of MPS entry to be freed
   7762 *      @sleep_ok: call is allowed to sleep
   7763 *
   7764 *      Frees the MPS entry at supplied index
   7765 *
   7766 *      Returns a negative error number or zero on success
   7767 */
   7768int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
   7769			   int idx, bool sleep_ok)
   7770{
   7771	struct fw_vi_mac_exact *p;
   7772	struct fw_vi_mac_cmd c;
   7773	int ret = 0;
   7774	u32 exact;
   7775
   7776	memset(&c, 0, sizeof(c));
   7777	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
   7778				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
   7779				   FW_CMD_EXEC_V(0) |
   7780				   FW_VI_MAC_CMD_VIID_V(viid));
   7781	exact = FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC);
   7782	c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
   7783					  exact |
   7784					  FW_CMD_LEN16_V(1));
   7785	p = c.u.exact;
   7786	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
   7787				      FW_VI_MAC_CMD_IDX_V(idx));
   7788	eth_zero_addr(p->macaddr);
   7789	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
   7790	return ret;
   7791}
   7792
   7793/**
   7794 *	t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
   7795 *	@adap: the adapter
   7796 *	@viid: the VI id
   7797 *	@addr: the MAC address
   7798 *	@mask: the mask
   7799 *	@idx: index of the entry in mps tcam
   7800 *	@lookup_type: MAC address for inner (1) or outer (0) header
   7801 *	@port_id: the port index
   7802 *	@sleep_ok: call is allowed to sleep
   7803 *
   7804 *	Removes the mac entry at the specified index using raw mac interface.
   7805 *
   7806 *	Returns a negative error number on failure.
   7807 */
   7808int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
   7809			 const u8 *addr, const u8 *mask, unsigned int idx,
   7810			 u8 lookup_type, u8 port_id, bool sleep_ok)
   7811{
   7812	struct fw_vi_mac_cmd c;
   7813	struct fw_vi_mac_raw *p = &c.u.raw;
   7814	u32 val;
   7815
   7816	memset(&c, 0, sizeof(c));
   7817	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
   7818				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
   7819				   FW_CMD_EXEC_V(0) |
   7820				   FW_VI_MAC_CMD_VIID_V(viid));
   7821	val = FW_CMD_LEN16_V(1) |
   7822	      FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
   7823	c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
   7824					  FW_CMD_LEN16_V(val));
   7825
   7826	p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
   7827				     FW_VI_MAC_ID_BASED_FREE);
   7828
   7829	/* Lookup Type. Outer header: 0, Inner header: 1 */
   7830	p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
   7831				   DATAPORTNUM_V(port_id));
   7832	/* Lookup mask and port mask */
   7833	p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
   7834				    DATAPORTNUM_V(DATAPORTNUM_M));
   7835
   7836	/* Copy the address and the mask */
   7837	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
   7838	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
   7839
   7840	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
   7841}
   7842
   7843/**
   7844 *      t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
   7845 *      @adap: the adapter
   7846 *      @viid: the VI id
   7847 *      @addr: the MAC address
   7848 *      @mask: the mask
   7849 *      @vni: the VNI id for the tunnel protocol
   7850 *      @vni_mask: mask for the VNI id
   7851 *      @dip_hit: to enable DIP match for the MPS entry
   7852 *      @lookup_type: MAC address for inner (1) or outer (0) header
   7853 *      @sleep_ok: call is allowed to sleep
   7854 *
   7855 *      Allocates an MPS entry with specified MAC address and VNI value.
   7856 *
   7857 *      Returns a negative error number or the allocated index for this mac.
   7858 */
   7859int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
   7860			    const u8 *addr, const u8 *mask, unsigned int vni,
   7861			    unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
   7862			    bool sleep_ok)
   7863{
   7864	struct fw_vi_mac_cmd c;
   7865	struct fw_vi_mac_vni *p = c.u.exact_vni;
   7866	int ret = 0;
   7867	u32 val;
   7868
   7869	memset(&c, 0, sizeof(c));
   7870	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
   7871				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
   7872				   FW_VI_MAC_CMD_VIID_V(viid));
   7873	val = FW_CMD_LEN16_V(1) |
   7874	      FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC_VNI);
   7875	c.freemacs_to_len16 = cpu_to_be32(val);
   7876	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
   7877				      FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
   7878	memcpy(p->macaddr, addr, sizeof(p->macaddr));
   7879	memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));
   7880
   7881	p->lookup_type_to_vni =
   7882		cpu_to_be32(FW_VI_MAC_CMD_VNI_V(vni) |
   7883			    FW_VI_MAC_CMD_DIP_HIT_V(dip_hit) |
   7884			    FW_VI_MAC_CMD_LOOKUP_TYPE_V(lookup_type));
   7885	p->vni_mask_pkd = cpu_to_be32(FW_VI_MAC_CMD_VNI_MASK_V(vni_mask));
   7886	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
   7887	if (ret == 0)
   7888		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
   7889	return ret;
   7890}
   7891
   7892/**
   7893 *	t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
   7894 *	@adap: the adapter
   7895 *	@viid: the VI id
   7896 *	@addr: the MAC address
   7897 *	@mask: the mask
   7898 *	@idx: index at which to add this entry
   7899 *	@lookup_type: MAC address for inner (1) or outer (0) header
   7900 *	@port_id: the port index
   7901 *	@sleep_ok: call is allowed to sleep
   7902 *
   7903 *	Adds the mac entry at the specified index using raw mac interface.
   7904 *
   7905 *	Returns a negative error number or the allocated index for this mac.
   7906 */
   7907int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
   7908			  const u8 *addr, const u8 *mask, unsigned int idx,
   7909			  u8 lookup_type, u8 port_id, bool sleep_ok)
   7910{
   7911	int ret = 0;
   7912	struct fw_vi_mac_cmd c;
   7913	struct fw_vi_mac_raw *p = &c.u.raw;
   7914	u32 val;
   7915
   7916	memset(&c, 0, sizeof(c));
   7917	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
   7918				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
   7919				   FW_VI_MAC_CMD_VIID_V(viid));
   7920	val = FW_CMD_LEN16_V(1) |
   7921	      FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
   7922	c.freemacs_to_len16 = cpu_to_be32(val);
   7923
   7924	/* Specify the index at which to add this entry */
   7925	p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));
   7926
   7927	/* Lookup Type. Outer header: 0, Inner header: 1 */
   7928	p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
   7929				   DATAPORTNUM_V(port_id));
   7930	/* Lookup mask and port mask */
   7931	p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
   7932				    DATAPORTNUM_V(DATAPORTNUM_M));
   7933
   7934	/* Copy the address and the mask */
   7935	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
   7936	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
   7937
   7938	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
   7939	if (ret == 0) {
   7940		ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
   7941		if (ret != idx)
   7942			ret = -ENOMEM;
   7943	}
   7944
   7945	return ret;
   7946}
   7947
   7948/**
   7949 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
   7950 *	@adap: the adapter
   7951 *	@mbox: mailbox to use for the FW command
   7952 *	@viid: the VI id
   7953 *	@free: if true any existing filters for this VI id are first removed
   7954 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
   7955 *	@addr: the MAC address(es)
   7956 *	@idx: where to store the index of each allocated filter
   7957 *	@hash: pointer to hash address filter bitmap
   7958 *	@sleep_ok: call is allowed to sleep
   7959 *
   7960 *	Allocates an exact-match filter for each of the supplied addresses and
   7961 *	sets it to the corresponding address.  If @idx is not %NULL it should
   7962 *	have at least @naddr entries, each of which will be set to the index of
   7963 *	the filter allocated for the corresponding MAC address.  If a filter
    7964 *	could not be allocated for an address, its index is set to 0xffff.
   7965 *	If @hash is not %NULL addresses that fail to allocate an exact filter
   7966 *	are hashed and update the hash filter bitmap pointed at by @hash.
   7967 *
   7968 *	Returns a negative error number or the number of filters allocated.
   7969 */
   7970int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
   7971		      unsigned int viid, bool free, unsigned int naddr,
   7972		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
   7973{
   7974	int offset, ret = 0;
   7975	struct fw_vi_mac_cmd c;
   7976	unsigned int nfilters = 0;
   7977	unsigned int max_naddr = adap->params.arch.mps_tcam_size;
   7978	unsigned int rem = naddr;
   7979
   7980	if (naddr > max_naddr)
   7981		return -EINVAL;
   7982
   7983	for (offset = 0; offset < naddr ; /**/) {
   7984		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
   7985					 rem : ARRAY_SIZE(c.u.exact));
   7986		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
   7987						     u.exact[fw_naddr]), 16);
   7988		struct fw_vi_mac_exact *p;
   7989		int i;
   7990
   7991		memset(&c, 0, sizeof(c));
   7992		c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
   7993					   FW_CMD_REQUEST_F |
   7994					   FW_CMD_WRITE_F |
   7995					   FW_CMD_EXEC_V(free) |
   7996					   FW_VI_MAC_CMD_VIID_V(viid));
   7997		c.freemacs_to_len16 =
   7998			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
   7999				    FW_CMD_LEN16_V(len16));
   8000
   8001		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
   8002			p->valid_to_idx =
   8003				cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
   8004					    FW_VI_MAC_CMD_IDX_V(
   8005						    FW_VI_MAC_ADD_MAC));
   8006			memcpy(p->macaddr, addr[offset + i],
   8007			       sizeof(p->macaddr));
   8008		}
   8009
   8010		/* It's okay if we run out of space in our MAC address arena.
   8011		 * Some of the addresses we submit may get stored so we need
   8012		 * to run through the reply to see what the results were ...
   8013		 */
   8014		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
   8015		if (ret && ret != -FW_ENOMEM)
   8016			break;
   8017
   8018		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
   8019			u16 index = FW_VI_MAC_CMD_IDX_G(
   8020					be16_to_cpu(p->valid_to_idx));
   8021
   8022			if (idx)
   8023				idx[offset + i] = (index >= max_naddr ?
   8024						   0xffff : index);
   8025			if (index < max_naddr)
   8026				nfilters++;
   8027			else if (hash)
   8028				*hash |= (1ULL <<
   8029					  hash_mac_addr(addr[offset + i]));
   8030		}
   8031
   8032		free = false;
   8033		offset += fw_naddr;
   8034		rem -= fw_naddr;
   8035	}
   8036
   8037	if (ret == 0 || ret == -FW_ENOMEM)
   8038		ret = nfilters;
   8039	return ret;
   8040}
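
/* Illustrative usage sketch (assumption, not part of the original driver):
 * program two unicast addresses in one call, letting any overflow fall back
 * to the inexact-match hash.  ucast0/ucast1 and viid are placeholders.
 *
 *	const u8 *maclist[2] = { ucast0, ucast1 };
 *	u16 filt_idx[2];
 *	u64 mhash = 0;
 *	int nalloc;
 *
 *	nalloc = t4_alloc_mac_filt(adap, adap->mbox, viid, true,
 *				   2, maclist, filt_idx, &mhash, true);
 *	if (nalloc < 0)
 *		return nalloc;
 *
 * Any address that could not get an exact-match filter has filt_idx[i] ==
 * 0xffff and its bit set in mhash, which can then be programmed with
 * t4_set_addr_hash() below.
 */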
   8041
   8042/**
   8043 *	t4_free_mac_filt - frees exact-match filters of given MAC addresses
   8044 *	@adap: the adapter
   8045 *	@mbox: mailbox to use for the FW command
   8046 *	@viid: the VI id
    8047 *	@naddr: the number of MAC addresses whose filters should be freed (up to 7)
   8048 *	@addr: the MAC address(es)
   8049 *	@sleep_ok: call is allowed to sleep
   8050 *
   8051 *	Frees the exact-match filter for each of the supplied addresses
   8052 *
   8053 *	Returns a negative error number or the number of filters freed.
   8054 */
   8055int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
   8056		     unsigned int viid, unsigned int naddr,
   8057		     const u8 **addr, bool sleep_ok)
   8058{
   8059	int offset, ret = 0;
   8060	struct fw_vi_mac_cmd c;
   8061	unsigned int nfilters = 0;
   8062	unsigned int max_naddr = is_t4(adap->params.chip) ?
   8063				       NUM_MPS_CLS_SRAM_L_INSTANCES :
   8064				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
   8065	unsigned int rem = naddr;
   8066
   8067	if (naddr > max_naddr)
   8068		return -EINVAL;
   8069
   8070	for (offset = 0; offset < (int)naddr ; /**/) {
   8071		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
   8072					 ? rem
   8073					 : ARRAY_SIZE(c.u.exact));
   8074		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
   8075						     u.exact[fw_naddr]), 16);
   8076		struct fw_vi_mac_exact *p;
   8077		int i;
   8078
   8079		memset(&c, 0, sizeof(c));
   8080		c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
   8081				     FW_CMD_REQUEST_F |
   8082				     FW_CMD_WRITE_F |
   8083				     FW_CMD_EXEC_V(0) |
   8084				     FW_VI_MAC_CMD_VIID_V(viid));
   8085		c.freemacs_to_len16 =
   8086				cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
   8087					    FW_CMD_LEN16_V(len16));
   8088
   8089		for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
   8090			p->valid_to_idx = cpu_to_be16(
   8091				FW_VI_MAC_CMD_VALID_F |
   8092				FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
   8093			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
   8094		}
   8095
   8096		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
   8097		if (ret)
   8098			break;
   8099
   8100		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
   8101			u16 index = FW_VI_MAC_CMD_IDX_G(
   8102						be16_to_cpu(p->valid_to_idx));
   8103
   8104			if (index < max_naddr)
   8105				nfilters++;
   8106		}
   8107
   8108		offset += fw_naddr;
   8109		rem -= fw_naddr;
   8110	}
   8111
   8112	if (ret == 0)
   8113		ret = nfilters;
   8114	return ret;
   8115}
   8116
   8117/**
   8118 *	t4_change_mac - modifies the exact-match filter for a MAC address
   8119 *	@adap: the adapter
   8120 *	@mbox: mailbox to use for the FW command
   8121 *	@viid: the VI id
   8122 *	@idx: index of existing filter for old value of MAC address, or -1
   8123 *	@addr: the new MAC address value
   8124 *	@persist: whether a new MAC allocation should be persistent
   8125 *	@smt_idx: the destination to store the new SMT index.
   8126 *
   8127 *	Modifies an exact-match filter and sets it to the new MAC address.
   8128 *	Note that in general it is not possible to modify the value of a given
    8129 *	filter, so the generic way to modify an address filter is to free the one
   8130 *	being used by the old address value and allocate a new filter for the
   8131 *	new address value.  @idx can be -1 if the address is a new addition.
   8132 *
   8133 *	Returns a negative error number or the index of the filter with the new
   8134 *	MAC value.
   8135 */
   8136int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
   8137		  int idx, const u8 *addr, bool persist, u8 *smt_idx)
   8138{
   8139	int ret, mode;
   8140	struct fw_vi_mac_cmd c;
   8141	struct fw_vi_mac_exact *p = c.u.exact;
   8142	unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
   8143
   8144	if (idx < 0)                             /* new allocation */
   8145		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
   8146	mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
   8147
   8148	memset(&c, 0, sizeof(c));
   8149	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
   8150				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
   8151				   FW_VI_MAC_CMD_VIID_V(viid));
   8152	c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
   8153	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
   8154				      FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
   8155				      FW_VI_MAC_CMD_IDX_V(idx));
   8156	memcpy(p->macaddr, addr, sizeof(p->macaddr));
   8157
   8158	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
   8159	if (ret == 0) {
   8160		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
   8161		if (ret >= max_mac_addr)
   8162			ret = -ENOMEM;
   8163		if (smt_idx) {
   8164			if (adap->params.viid_smt_extn_support) {
   8165				*smt_idx = FW_VI_MAC_CMD_SMTID_G
   8166						    (be32_to_cpu(c.op_to_viid));
   8167			} else {
   8168				/* In T4/T5, SMT contains 256 SMAC entries
   8169				 * organized in 128 rows of 2 entries each.
   8170				 * In T6, SMT contains 256 SMAC entries in
   8171				 * 256 rows.
   8172				 */
   8173				if (CHELSIO_CHIP_VERSION(adap->params.chip) <=
   8174								     CHELSIO_T5)
   8175					*smt_idx = (viid & FW_VIID_VIN_M) << 1;
   8176				else
   8177					*smt_idx = (viid & FW_VIID_VIN_M);
   8178			}
   8179		}
   8180	}
   8181	return ret;
   8182}
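
/* Illustrative usage sketch (assumption): program a VI's primary unicast
 * MAC address, letting the firmware pick the TCAM index on the first call.
 * pi and dev are placeholders for the caller's port_info and net_device.
 *
 *	u8 smt_idx;
 *	int idx;
 *
 *	idx = t4_change_mac(adap, adap->mbox, pi->viid, -1,
 *			    dev->dev_addr, true, &smt_idx);
 *	if (idx < 0)
 *		return idx;
 *
 * The returned idx is remembered and passed back (instead of -1) on the
 * next address change so the same exact-match filter is rewritten rather
 * than a second one being allocated.
 */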
   8183
   8184/**
   8185 *	t4_set_addr_hash - program the MAC inexact-match hash filter
   8186 *	@adap: the adapter
   8187 *	@mbox: mailbox to use for the FW command
   8188 *	@viid: the VI id
   8189 *	@ucast: whether the hash filter should also match unicast addresses
   8190 *	@vec: the value to be written to the hash filter
   8191 *	@sleep_ok: call is allowed to sleep
   8192 *
   8193 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
   8194 */
   8195int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
   8196		     bool ucast, u64 vec, bool sleep_ok)
   8197{
   8198	struct fw_vi_mac_cmd c;
   8199
   8200	memset(&c, 0, sizeof(c));
   8201	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
   8202				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
   8203				   FW_VI_ENABLE_CMD_VIID_V(viid));
   8204	c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
   8205					  FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
   8206					  FW_CMD_LEN16_V(1));
   8207	c.u.hash.hashvec = cpu_to_be64(vec);
   8208	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
   8209}
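
/* Illustrative usage sketch (assumption): build the 64-bit hash vector for
 * a device's multicast list and program it.  dev and viid are placeholders;
 * hash_mac_addr() is the same helper used by t4_alloc_mac_filt() above.
 *
 *	struct netdev_hw_addr *ha;
 *	u64 vec = 0;
 *	int ret;
 *
 *	netdev_for_each_mc_addr(ha, dev)
 *		vec |= 1ULL << hash_mac_addr(ha->addr);
 *
 *	ret = t4_set_addr_hash(adap, adap->mbox, viid, false, vec, true);
 */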
   8210
   8211/**
   8212 *      t4_enable_vi_params - enable/disable a virtual interface
   8213 *      @adap: the adapter
   8214 *      @mbox: mailbox to use for the FW command
   8215 *      @viid: the VI id
   8216 *      @rx_en: 1=enable Rx, 0=disable Rx
   8217 *      @tx_en: 1=enable Tx, 0=disable Tx
   8218 *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
   8219 *
   8220 *      Enables/disables a virtual interface.  Note that setting DCB Enable
   8221 *      only makes sense when enabling a Virtual Interface ...
   8222 */
   8223int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
   8224			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
   8225{
   8226	struct fw_vi_enable_cmd c;
   8227
   8228	memset(&c, 0, sizeof(c));
   8229	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
   8230				   FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
   8231				   FW_VI_ENABLE_CMD_VIID_V(viid));
   8232	c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
   8233				     FW_VI_ENABLE_CMD_EEN_V(tx_en) |
   8234				     FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
   8235				     FW_LEN16(c));
   8236	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
   8237}
   8238
   8239/**
   8240 *	t4_enable_vi - enable/disable a virtual interface
   8241 *	@adap: the adapter
   8242 *	@mbox: mailbox to use for the FW command
   8243 *	@viid: the VI id
   8244 *	@rx_en: 1=enable Rx, 0=disable Rx
   8245 *	@tx_en: 1=enable Tx, 0=disable Tx
   8246 *
   8247 *	Enables/disables a virtual interface.
   8248 */
   8249int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
   8250		 bool rx_en, bool tx_en)
   8251{
   8252	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
   8253}
   8254
   8255/**
   8256 *	t4_enable_pi_params - enable/disable a Port's Virtual Interface
   8257 *      @adap: the adapter
   8258 *      @mbox: mailbox to use for the FW command
   8259 *      @pi: the Port Information structure
   8260 *      @rx_en: 1=enable Rx, 0=disable Rx
   8261 *      @tx_en: 1=enable Tx, 0=disable Tx
   8262 *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
   8263 *
   8264 *      Enables/disables a Port's Virtual Interface.  Note that setting DCB
   8265 *	Enable only makes sense when enabling a Virtual Interface ...
   8266 *	If the Virtual Interface enable/disable operation is successful,
   8267 *	we notify the OS-specific code of a potential Link Status change
   8268 *	via the OS Contract API t4_os_link_changed().
   8269 */
   8270int t4_enable_pi_params(struct adapter *adap, unsigned int mbox,
   8271			struct port_info *pi,
   8272			bool rx_en, bool tx_en, bool dcb_en)
   8273{
   8274	int ret = t4_enable_vi_params(adap, mbox, pi->viid,
   8275				      rx_en, tx_en, dcb_en);
   8276	if (ret)
   8277		return ret;
   8278	t4_os_link_changed(adap, pi->port_id,
   8279			   rx_en && tx_en && pi->link_cfg.link_ok);
   8280	return 0;
   8281}
   8282
   8283/**
   8284 *	t4_identify_port - identify a VI's port by blinking its LED
   8285 *	@adap: the adapter
   8286 *	@mbox: mailbox to use for the FW command
   8287 *	@viid: the VI id
   8288 *	@nblinks: how many times to blink LED at 2.5 Hz
   8289 *
   8290 *	Identifies a VI's port by blinking its LED.
   8291 */
   8292int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
   8293		     unsigned int nblinks)
   8294{
   8295	struct fw_vi_enable_cmd c;
   8296
   8297	memset(&c, 0, sizeof(c));
   8298	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
   8299				   FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
   8300				   FW_VI_ENABLE_CMD_VIID_V(viid));
   8301	c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
   8302	c.blinkdur = cpu_to_be16(nblinks);
   8303	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
   8304}
   8305
   8306/**
   8307 *	t4_iq_stop - stop an ingress queue and its FLs
   8308 *	@adap: the adapter
   8309 *	@mbox: mailbox to use for the FW command
   8310 *	@pf: the PF owning the queues
   8311 *	@vf: the VF owning the queues
   8312 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
   8313 *	@iqid: ingress queue id
   8314 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
   8315 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
   8316 *
   8317 *	Stops an ingress queue and its associated FLs, if any.  This causes
   8318 *	any current or future data/messages destined for these queues to be
   8319 *	tossed.
   8320 */
   8321int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
   8322	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
   8323	       unsigned int fl0id, unsigned int fl1id)
   8324{
   8325	struct fw_iq_cmd c;
   8326
   8327	memset(&c, 0, sizeof(c));
   8328	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
   8329				  FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
   8330				  FW_IQ_CMD_VFN_V(vf));
   8331	c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
   8332	c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
   8333	c.iqid = cpu_to_be16(iqid);
   8334	c.fl0id = cpu_to_be16(fl0id);
   8335	c.fl1id = cpu_to_be16(fl1id);
   8336	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
   8337}
   8338
   8339/**
   8340 *	t4_iq_free - free an ingress queue and its FLs
   8341 *	@adap: the adapter
   8342 *	@mbox: mailbox to use for the FW command
   8343 *	@pf: the PF owning the queues
   8344 *	@vf: the VF owning the queues
   8345 *	@iqtype: the ingress queue type
   8346 *	@iqid: ingress queue id
   8347 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
   8348 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
   8349 *
   8350 *	Frees an ingress queue and its associated FLs, if any.
   8351 */
   8352int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
   8353	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
   8354	       unsigned int fl0id, unsigned int fl1id)
   8355{
   8356	struct fw_iq_cmd c;
   8357
   8358	memset(&c, 0, sizeof(c));
   8359	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
   8360				  FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
   8361				  FW_IQ_CMD_VFN_V(vf));
   8362	c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
   8363	c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
   8364	c.iqid = cpu_to_be16(iqid);
   8365	c.fl0id = cpu_to_be16(fl0id);
   8366	c.fl1id = cpu_to_be16(fl1id);
   8367	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
   8368}
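
/* Illustrative usage sketch (assumption): tear down a response queue that
 * has a single Free List attached.  rspq_id and fl_id are placeholders for
 * the firmware queue IDs returned when the queues were created; 0xffff
 * marks the absent FL1.
 *
 *	int ret;
 *
 *	ret = t4_iq_free(adap, adap->mbox, adap->pf, 0,
 *			 FW_IQ_TYPE_FL_INT_CAP, rspq_id, fl_id, 0xffff);
 *	if (ret)
 *		dev_err(adap->pdev_dev, "failed to free iq %u: %d\n",
 *			rspq_id, ret);
 */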
   8369
   8370/**
   8371 *	t4_eth_eq_free - free an Ethernet egress queue
   8372 *	@adap: the adapter
   8373 *	@mbox: mailbox to use for the FW command
   8374 *	@pf: the PF owning the queue
   8375 *	@vf: the VF owning the queue
   8376 *	@eqid: egress queue id
   8377 *
   8378 *	Frees an Ethernet egress queue.
   8379 */
   8380int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
   8381		   unsigned int vf, unsigned int eqid)
   8382{
   8383	struct fw_eq_eth_cmd c;
   8384
   8385	memset(&c, 0, sizeof(c));
   8386	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
   8387				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
   8388				  FW_EQ_ETH_CMD_PFN_V(pf) |
   8389				  FW_EQ_ETH_CMD_VFN_V(vf));
   8390	c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
   8391	c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
   8392	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
   8393}
   8394
   8395/**
   8396 *	t4_ctrl_eq_free - free a control egress queue
   8397 *	@adap: the adapter
   8398 *	@mbox: mailbox to use for the FW command
   8399 *	@pf: the PF owning the queue
   8400 *	@vf: the VF owning the queue
   8401 *	@eqid: egress queue id
   8402 *
   8403 *	Frees a control egress queue.
   8404 */
   8405int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
   8406		    unsigned int vf, unsigned int eqid)
   8407{
   8408	struct fw_eq_ctrl_cmd c;
   8409
   8410	memset(&c, 0, sizeof(c));
   8411	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
   8412				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
   8413				  FW_EQ_CTRL_CMD_PFN_V(pf) |
   8414				  FW_EQ_CTRL_CMD_VFN_V(vf));
   8415	c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
   8416	c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
   8417	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
   8418}
   8419
   8420/**
   8421 *	t4_ofld_eq_free - free an offload egress queue
   8422 *	@adap: the adapter
   8423 *	@mbox: mailbox to use for the FW command
   8424 *	@pf: the PF owning the queue
   8425 *	@vf: the VF owning the queue
   8426 *	@eqid: egress queue id
   8427 *
    8428 *	Frees an offload egress queue.
   8429 */
   8430int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
   8431		    unsigned int vf, unsigned int eqid)
   8432{
   8433	struct fw_eq_ofld_cmd c;
   8434
   8435	memset(&c, 0, sizeof(c));
   8436	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
   8437				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
   8438				  FW_EQ_OFLD_CMD_PFN_V(pf) |
   8439				  FW_EQ_OFLD_CMD_VFN_V(vf));
   8440	c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
   8441	c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
   8442	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
   8443}
   8444
   8445/**
   8446 *	t4_link_down_rc_str - return a string for a Link Down Reason Code
   8447 *	@link_down_rc: Link Down Reason Code
   8448 *
   8449 *	Returns a string representation of the Link Down Reason Code.
   8450 */
   8451static const char *t4_link_down_rc_str(unsigned char link_down_rc)
   8452{
   8453	static const char * const reason[] = {
   8454		"Link Down",
   8455		"Remote Fault",
   8456		"Auto-negotiation Failure",
   8457		"Reserved",
   8458		"Insufficient Airflow",
   8459		"Unable To Determine Reason",
   8460		"No RX Signal Detected",
   8461		"Reserved",
   8462	};
   8463
   8464	if (link_down_rc >= ARRAY_SIZE(reason))
   8465		return "Bad Reason Code";
   8466
   8467	return reason[link_down_rc];
   8468}
   8469
   8470/* Return the highest speed set in the port capabilities, in Mb/s. */
   8471static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
   8472{
   8473	#define TEST_SPEED_RETURN(__caps_speed, __speed) \
   8474		do { \
   8475			if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
   8476				return __speed; \
   8477		} while (0)
   8478
   8479	TEST_SPEED_RETURN(400G, 400000);
   8480	TEST_SPEED_RETURN(200G, 200000);
   8481	TEST_SPEED_RETURN(100G, 100000);
   8482	TEST_SPEED_RETURN(50G,   50000);
   8483	TEST_SPEED_RETURN(40G,   40000);
   8484	TEST_SPEED_RETURN(25G,   25000);
   8485	TEST_SPEED_RETURN(10G,   10000);
   8486	TEST_SPEED_RETURN(1G,     1000);
   8487	TEST_SPEED_RETURN(100M,    100);
   8488
   8489	#undef TEST_SPEED_RETURN
   8490
   8491	return 0;
   8492}
   8493
   8494/**
   8495 *	fwcap_to_fwspeed - return highest speed in Port Capabilities
   8496 *	@acaps: advertised Port Capabilities
   8497 *
   8498 *	Get the highest speed for the port from the advertised Port
   8499 *	Capabilities.  It will be either the highest speed from the list of
    8500 *	speeds or whatever speed the user has set using ethtool.
   8501 */
   8502static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
   8503{
   8504	#define TEST_SPEED_RETURN(__caps_speed) \
   8505		do { \
   8506			if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
   8507				return FW_PORT_CAP32_SPEED_##__caps_speed; \
   8508		} while (0)
   8509
   8510	TEST_SPEED_RETURN(400G);
   8511	TEST_SPEED_RETURN(200G);
   8512	TEST_SPEED_RETURN(100G);
   8513	TEST_SPEED_RETURN(50G);
   8514	TEST_SPEED_RETURN(40G);
   8515	TEST_SPEED_RETURN(25G);
   8516	TEST_SPEED_RETURN(10G);
   8517	TEST_SPEED_RETURN(1G);
   8518	TEST_SPEED_RETURN(100M);
   8519
   8520	#undef TEST_SPEED_RETURN
   8521
   8522	return 0;
   8523}
   8524
   8525/**
   8526 *	lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
   8527 *	@lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
   8528 *
   8529 *	Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
   8530 *	32-bit Port Capabilities value.
   8531 */
   8532static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
   8533{
   8534	fw_port_cap32_t linkattr = 0;
   8535
   8536	/* Unfortunately the format of the Link Status in the old
   8537	 * 16-bit Port Information message isn't the same as the
   8538	 * 16-bit Port Capabilities bitfield used everywhere else ...
   8539	 */
   8540	if (lstatus & FW_PORT_CMD_RXPAUSE_F)
   8541		linkattr |= FW_PORT_CAP32_FC_RX;
   8542	if (lstatus & FW_PORT_CMD_TXPAUSE_F)
   8543		linkattr |= FW_PORT_CAP32_FC_TX;
   8544	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
   8545		linkattr |= FW_PORT_CAP32_SPEED_100M;
   8546	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
   8547		linkattr |= FW_PORT_CAP32_SPEED_1G;
   8548	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
   8549		linkattr |= FW_PORT_CAP32_SPEED_10G;
   8550	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
   8551		linkattr |= FW_PORT_CAP32_SPEED_25G;
   8552	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
   8553		linkattr |= FW_PORT_CAP32_SPEED_40G;
   8554	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
   8555		linkattr |= FW_PORT_CAP32_SPEED_100G;
   8556
   8557	return linkattr;
   8558}
   8559
   8560/**
   8561 *	t4_handle_get_port_info - process a FW reply message
   8562 *	@pi: the port info
   8563 *	@rpl: start of the FW message
   8564 *
   8565 *	Processes a GET_PORT_INFO FW reply message.
   8566 */
   8567void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
   8568{
   8569	const struct fw_port_cmd *cmd = (const void *)rpl;
   8570	fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
   8571	struct link_config *lc = &pi->link_cfg;
   8572	struct adapter *adapter = pi->adapter;
   8573	unsigned int speed, fc, fec, adv_fc;
   8574	enum fw_port_module_type mod_type;
   8575	int action, link_ok, linkdnrc;
   8576	enum fw_port_type port_type;
   8577
   8578	/* Extract the various fields from the Port Information message.
   8579	 */
   8580	action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
   8581	switch (action) {
   8582	case FW_PORT_ACTION_GET_PORT_INFO: {
   8583		u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
   8584
   8585		link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0;
   8586		linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus);
   8587		port_type = FW_PORT_CMD_PTYPE_G(lstatus);
   8588		mod_type = FW_PORT_CMD_MODTYPE_G(lstatus);
   8589		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
   8590		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
   8591		lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));
   8592		linkattr = lstatus_to_fwcap(lstatus);
   8593		break;
   8594	}
   8595
   8596	case FW_PORT_ACTION_GET_PORT_INFO32: {
   8597		u32 lstatus32;
   8598
   8599		lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
   8600		link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0;
   8601		linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32);
   8602		port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
   8603		mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32);
   8604		pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
   8605		acaps = be32_to_cpu(cmd->u.info32.acaps32);
   8606		lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
   8607		linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
   8608		break;
   8609	}
   8610
   8611	default:
   8612		dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n",
   8613			be32_to_cpu(cmd->action_to_len16));
   8614		return;
   8615	}
   8616
   8617	fec = fwcap_to_cc_fec(acaps);
   8618	adv_fc = fwcap_to_cc_pause(acaps);
   8619	fc = fwcap_to_cc_pause(linkattr);
   8620	speed = fwcap_to_speed(linkattr);
   8621
   8622	/* Reset state for communicating new Transceiver Module status and
   8623	 * whether the OS-dependent layer wants us to redo the current
   8624	 * "sticky" L1 Configure Link Parameters.
   8625	 */
   8626	lc->new_module = false;
   8627	lc->redo_l1cfg = false;
   8628
   8629	if (mod_type != pi->mod_type) {
   8630		/* With the newer SFP28 and QSFP28 Transceiver Module Types,
   8631		 * various fundamental Port Capabilities which used to be
   8632		 * immutable can now change radically.  We can now have
   8633		 * Speeds, Auto-Negotiation, Forward Error Correction, etc.
   8634		 * all change based on what Transceiver Module is inserted.
   8635		 * So we need to record the Physical "Port" Capabilities on
   8636		 * every Transceiver Module change.
   8637		 */
   8638		lc->pcaps = pcaps;
   8639
   8640		/* When a new Transceiver Module is inserted, the Firmware
    8641		 * will examine its i2c EEPROM to determine its type and
    8642		 * general operating parameters including things like Forward
    8643		 * Error Correction, etc.  Various IEEE 802.3 standards dictate
    8644		 * how to interpret these i2c values to determine default
    8645		 * "automatic" settings.  We record these for future use when
   8646		 * the user explicitly requests these standards-based values.
   8647		 */
   8648		lc->def_acaps = acaps;
   8649
   8650		/* Some versions of the early T6 Firmware "cheated" when
   8651		 * handling different Transceiver Modules by changing the
    8652		 * underlying Port Type reported to the Host Drivers.  As
   8653		 * such we need to capture whatever Port Type the Firmware
   8654		 * sends us and record it in case it's different from what we
   8655		 * were told earlier.  Unfortunately, since Firmware is
   8656		 * forever, we'll need to keep this code here forever, but in
   8657		 * later T6 Firmware it should just be an assignment of the
   8658		 * same value already recorded.
   8659		 */
   8660		pi->port_type = port_type;
   8661
   8662		/* Record new Module Type information.
   8663		 */
   8664		pi->mod_type = mod_type;
   8665
   8666		/* Let the OS-dependent layer know if we have a new
   8667		 * Transceiver Module inserted.
   8668		 */
   8669		lc->new_module = t4_is_inserted_mod_type(mod_type);
   8670
   8671		t4_os_portmod_changed(adapter, pi->port_id);
   8672	}
   8673
   8674	if (link_ok != lc->link_ok || speed != lc->speed ||
   8675	    fc != lc->fc || adv_fc != lc->advertised_fc ||
   8676	    fec != lc->fec) {
   8677		/* something changed */
   8678		if (!link_ok && lc->link_ok) {
   8679			lc->link_down_rc = linkdnrc;
   8680			dev_warn_ratelimited(adapter->pdev_dev,
   8681					     "Port %d link down, reason: %s\n",
   8682					     pi->tx_chan,
   8683					     t4_link_down_rc_str(linkdnrc));
   8684		}
   8685		lc->link_ok = link_ok;
   8686		lc->speed = speed;
   8687		lc->advertised_fc = adv_fc;
   8688		lc->fc = fc;
   8689		lc->fec = fec;
   8690
   8691		lc->lpacaps = lpacaps;
   8692		lc->acaps = acaps & ADVERT_MASK;
   8693
   8694		/* If we're not physically capable of Auto-Negotiation, note
   8695		 * this as Auto-Negotiation disabled.  Otherwise, we track
   8696		 * what Auto-Negotiation settings we have.  Note parallel
   8697		 * structure in t4_link_l1cfg_core() and init_link_config().
   8698		 */
   8699		if (!(lc->acaps & FW_PORT_CAP32_ANEG)) {
   8700			lc->autoneg = AUTONEG_DISABLE;
   8701		} else if (lc->acaps & FW_PORT_CAP32_ANEG) {
   8702			lc->autoneg = AUTONEG_ENABLE;
   8703		} else {
    8704			/* When Autoneg is disabled, the user needs to set
    8705			 * a single speed.
    8706			 * Similar to cxgb4_ethtool.c: set_link_ksettings()
   8707			 */
   8708			lc->acaps = 0;
   8709			lc->speed_caps = fwcap_to_fwspeed(acaps);
   8710			lc->autoneg = AUTONEG_DISABLE;
   8711		}
   8712
   8713		t4_os_link_changed(adapter, pi->port_id, link_ok);
   8714	}
   8715
   8716	/* If we have a new Transceiver Module and the OS-dependent code has
   8717	 * told us that it wants us to redo whatever "sticky" L1 Configuration
   8718	 * Link Parameters are set, do that now.
   8719	 */
   8720	if (lc->new_module && lc->redo_l1cfg) {
   8721		struct link_config old_lc;
   8722		int ret;
   8723
   8724		/* Save the current L1 Configuration and restore it if an
   8725		 * error occurs.  We probably should fix the l1_cfg*()
   8726		 * routines not to change the link_config when an error
   8727		 * occurs ...
   8728		 */
   8729		old_lc = *lc;
   8730		ret = t4_link_l1cfg_ns(adapter, adapter->mbox, pi->lport, lc);
   8731		if (ret) {
   8732			*lc = old_lc;
   8733			dev_warn(adapter->pdev_dev,
   8734				 "Attempt to update new Transceiver Module settings failed\n");
   8735		}
   8736	}
   8737	lc->new_module = false;
   8738	lc->redo_l1cfg = false;
   8739}
   8740
   8741/**
   8742 *	t4_update_port_info - retrieve and update port information if changed
   8743 *	@pi: the port_info
   8744 *
   8745 *	We issue a Get Port Information Command to the Firmware and, if
   8746 *	successful, we check to see if anything is different from what we
   8747 *	last recorded and update things accordingly.
   8748 */
   8749int t4_update_port_info(struct port_info *pi)
   8750{
   8751	unsigned int fw_caps = pi->adapter->params.fw_caps_support;
   8752	struct fw_port_cmd port_cmd;
   8753	int ret;
   8754
   8755	memset(&port_cmd, 0, sizeof(port_cmd));
   8756	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
   8757					    FW_CMD_REQUEST_F | FW_CMD_READ_F |
   8758					    FW_PORT_CMD_PORTID_V(pi->tx_chan));
   8759	port_cmd.action_to_len16 = cpu_to_be32(
   8760		FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
   8761				     ? FW_PORT_ACTION_GET_PORT_INFO
   8762				     : FW_PORT_ACTION_GET_PORT_INFO32) |
   8763		FW_LEN16(port_cmd));
   8764	ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
   8765			 &port_cmd, sizeof(port_cmd), &port_cmd);
   8766	if (ret)
   8767		return ret;
   8768
   8769	t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
   8770	return 0;
   8771}
   8772
   8773/**
   8774 *	t4_get_link_params - retrieve basic link parameters for given port
   8775 *	@pi: the port
   8776 *	@link_okp: value return pointer for link up/down
   8777 *	@speedp: value return pointer for speed (Mb/s)
   8778 *	@mtup: value return pointer for mtu
   8779 *
   8780 *	Retrieves basic link parameters for a port: link up/down, speed (Mb/s),
   8781 *	and MTU for a specified port.  A negative error is returned on
   8782 *	failure; 0 on success.
   8783 */
   8784int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
   8785		       unsigned int *speedp, unsigned int *mtup)
   8786{
   8787	unsigned int fw_caps = pi->adapter->params.fw_caps_support;
   8788	unsigned int action, link_ok, mtu;
   8789	struct fw_port_cmd port_cmd;
   8790	fw_port_cap32_t linkattr;
   8791	int ret;
   8792
   8793	memset(&port_cmd, 0, sizeof(port_cmd));
   8794	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
   8795					    FW_CMD_REQUEST_F | FW_CMD_READ_F |
   8796					    FW_PORT_CMD_PORTID_V(pi->tx_chan));
   8797	action = (fw_caps == FW_CAPS16
   8798		  ? FW_PORT_ACTION_GET_PORT_INFO
   8799		  : FW_PORT_ACTION_GET_PORT_INFO32);
   8800	port_cmd.action_to_len16 = cpu_to_be32(
   8801		FW_PORT_CMD_ACTION_V(action) |
   8802		FW_LEN16(port_cmd));
   8803	ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
   8804			 &port_cmd, sizeof(port_cmd), &port_cmd);
   8805	if (ret)
   8806		return ret;
   8807
   8808	if (action == FW_PORT_ACTION_GET_PORT_INFO) {
   8809		u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype);
   8810
   8811		link_ok = !!(lstatus & FW_PORT_CMD_LSTATUS_F);
   8812		linkattr = lstatus_to_fwcap(lstatus);
   8813		mtu = be16_to_cpu(port_cmd.u.info.mtu);
   8814	} else {
   8815		u32 lstatus32 =
   8816			   be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32);
   8817
   8818		link_ok = !!(lstatus32 & FW_PORT_CMD_LSTATUS32_F);
   8819		linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32);
   8820		mtu = FW_PORT_CMD_MTU32_G(
   8821			be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
   8822	}
   8823
   8824	if (link_okp)
   8825		*link_okp = link_ok;
   8826	if (speedp)
   8827		*speedp = fwcap_to_speed(linkattr);
   8828	if (mtup)
   8829		*mtup = mtu;
   8830
   8831	return 0;
   8832}
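
/* Illustrative usage sketch (assumption): poll the basic link state of a
 * port without going through the full t4_handle_get_port_info() path.
 *
 *	unsigned int link_ok, speed_mbps, mtu;
 *	int ret;
 *
 *	ret = t4_get_link_params(pi, &link_ok, &speed_mbps, &mtu);
 *	if (ret)
 *		return ret;
 *	if (link_ok)
 *		dev_info(pi->adapter->pdev_dev,
 *			 "port %d up at %u Mb/s, MTU %u\n",
 *			 pi->port_id, speed_mbps, mtu);
 */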
   8833
   8834/**
   8835 *      t4_handle_fw_rpl - process a FW reply message
   8836 *      @adap: the adapter
   8837 *      @rpl: start of the FW message
   8838 *
   8839 *      Processes a FW message, such as link state change messages.
   8840 */
   8841int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
   8842{
   8843	u8 opcode = *(const u8 *)rpl;
   8844
   8845	/* This might be a port command ... this simplifies the following
   8846	 * conditionals ...  We can get away with pre-dereferencing
   8847	 * action_to_len16 because it's in the first 16 bytes and all messages
   8848	 * will be at least that long.
   8849	 */
   8850	const struct fw_port_cmd *p = (const void *)rpl;
   8851	unsigned int action =
   8852		FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
   8853
   8854	if (opcode == FW_PORT_CMD &&
   8855	    (action == FW_PORT_ACTION_GET_PORT_INFO ||
   8856	     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
   8857		int i;
   8858		int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
   8859		struct port_info *pi = NULL;
   8860
   8861		for_each_port(adap, i) {
   8862			pi = adap2pinfo(adap, i);
   8863			if (pi->tx_chan == chan)
   8864				break;
   8865		}
   8866
   8867		t4_handle_get_port_info(pi, rpl);
   8868	} else {
   8869		dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n",
   8870			 opcode);
   8871		return -EINVAL;
   8872	}
   8873	return 0;
   8874}
   8875
   8876static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
   8877{
   8878	u16 val;
   8879
   8880	if (pci_is_pcie(adapter->pdev)) {
   8881		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
   8882		p->speed = val & PCI_EXP_LNKSTA_CLS;
   8883		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
   8884	}
   8885}
   8886
   8887/**
   8888 *	init_link_config - initialize a link's SW state
   8889 *	@lc: pointer to structure holding the link state
   8890 *	@pcaps: link Port Capabilities
   8891 *	@acaps: link current Advertised Port Capabilities
   8892 *
   8893 *	Initializes the SW state maintained for each link, including the link's
   8894 *	capabilities and default speed/flow-control/autonegotiation settings.
   8895 */
   8896static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
   8897			     fw_port_cap32_t acaps)
   8898{
   8899	lc->pcaps = pcaps;
   8900	lc->def_acaps = acaps;
   8901	lc->lpacaps = 0;
   8902	lc->speed_caps = 0;
   8903	lc->speed = 0;
   8904	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
   8905
   8906	/* For Forward Error Control, we default to whatever the Firmware
   8907	 * tells us the Link is currently advertising.
   8908	 */
   8909	lc->requested_fec = FEC_AUTO;
   8910	lc->fec = fwcap_to_cc_fec(lc->def_acaps);
   8911
    8912	/* If the Port is capable of Auto-Negotiation, initialize it as
   8913	 * "enabled" and copy over all of the Physical Port Capabilities
   8914	 * to the Advertised Port Capabilities.  Otherwise mark it as
   8915	 * Auto-Negotiate disabled and select the highest supported speed
   8916	 * for the link.  Note parallel structure in t4_link_l1cfg_core()
   8917	 * and t4_handle_get_port_info().
   8918	 */
   8919	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
   8920		lc->acaps = lc->pcaps & ADVERT_MASK;
   8921		lc->autoneg = AUTONEG_ENABLE;
   8922		lc->requested_fc |= PAUSE_AUTONEG;
   8923	} else {
   8924		lc->acaps = 0;
   8925		lc->autoneg = AUTONEG_DISABLE;
   8926		lc->speed_caps = fwcap_to_fwspeed(acaps);
   8927	}
   8928}
   8929
   8930#define CIM_PF_NOACCESS 0xeeeeeeee
   8931
   8932int t4_wait_dev_ready(void __iomem *regs)
   8933{
   8934	u32 whoami;
   8935
   8936	whoami = readl(regs + PL_WHOAMI_A);
   8937	if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
   8938		return 0;
   8939
   8940	msleep(500);
   8941	whoami = readl(regs + PL_WHOAMI_A);
   8942	return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
   8943}
   8944
   8945struct flash_desc {
   8946	u32 vendor_and_model_id;
   8947	u32 size_mb;
   8948};
   8949
   8950static int t4_get_flash_params(struct adapter *adap)
   8951{
   8952	/* Table for non-Numonix supported flash parts.  Numonix parts are left
   8953	 * to the preexisting code.  All flash parts have 64KB sectors.
   8954	 */
   8955	static struct flash_desc supported_flash[] = {
   8956		{ 0x150201, 4 << 20 },       /* Spansion 4MB S25FL032P */
   8957	};
   8958
   8959	unsigned int part, manufacturer;
   8960	unsigned int density, size = 0;
   8961	u32 flashid = 0;
   8962	int ret;
   8963
   8964	/* Issue a Read ID Command to the Flash part.  We decode supported
   8965	 * Flash parts and their sizes from this.  There's a newer Query
   8966	 * Command which can retrieve detailed geometry information but many
   8967	 * Flash parts don't support it.
   8968	 */
   8969
   8970	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
   8971	if (!ret)
   8972		ret = sf1_read(adap, 3, 0, 1, &flashid);
   8973	t4_write_reg(adap, SF_OP_A, 0);                    /* unlock SF */
   8974	if (ret)
   8975		return ret;
   8976
   8977	/* Check to see if it's one of our non-standard supported Flash parts.
   8978	 */
   8979	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
   8980		if (supported_flash[part].vendor_and_model_id == flashid) {
   8981			adap->params.sf_size = supported_flash[part].size_mb;
   8982			adap->params.sf_nsec =
   8983				adap->params.sf_size / SF_SEC_SIZE;
   8984			goto found;
   8985		}
   8986
   8987	/* Decode Flash part size.  The code below looks repetitive with
   8988	 * common encodings, but that's not guaranteed in the JEDEC
   8989	 * specification for the Read JEDEC ID command.  The only thing that
   8990	 * we're guaranteed by the JEDEC specification is where the
   8991	 * Manufacturer ID is in the returned result.  After that each
   8992	 * Manufacturer ~could~ encode things completely differently.
   8993	 * Note, all Flash parts must have 64KB sectors.
   8994	 */
   8995	manufacturer = flashid & 0xff;
   8996	switch (manufacturer) {
   8997	case 0x20: { /* Micron/Numonix */
   8998		/* This Density -> Size decoding table is taken from Micron
   8999		 * Data Sheets.
   9000		 */
   9001		density = (flashid >> 16) & 0xff;
   9002		switch (density) {
   9003		case 0x14: /* 1MB */
   9004			size = 1 << 20;
   9005			break;
   9006		case 0x15: /* 2MB */
   9007			size = 1 << 21;
   9008			break;
   9009		case 0x16: /* 4MB */
   9010			size = 1 << 22;
   9011			break;
   9012		case 0x17: /* 8MB */
   9013			size = 1 << 23;
   9014			break;
   9015		case 0x18: /* 16MB */
   9016			size = 1 << 24;
   9017			break;
   9018		case 0x19: /* 32MB */
   9019			size = 1 << 25;
   9020			break;
   9021		case 0x20: /* 64MB */
   9022			size = 1 << 26;
   9023			break;
   9024		case 0x21: /* 128MB */
   9025			size = 1 << 27;
   9026			break;
   9027		case 0x22: /* 256MB */
   9028			size = 1 << 28;
   9029			break;
   9030		}
   9031		break;
   9032	}
   9033	case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
   9034		/* This Density -> Size decoding table is taken from ISSI
   9035		 * Data Sheets.
   9036		 */
   9037		density = (flashid >> 16) & 0xff;
   9038		switch (density) {
   9039		case 0x16: /* 32 MB */
   9040			size = 1 << 25;
   9041			break;
   9042		case 0x17: /* 64MB */
   9043			size = 1 << 26;
   9044			break;
   9045		}
   9046		break;
   9047	}
   9048	case 0xc2: { /* Macronix */
   9049		/* This Density -> Size decoding table is taken from Macronix
   9050		 * Data Sheets.
   9051		 */
   9052		density = (flashid >> 16) & 0xff;
   9053		switch (density) {
   9054		case 0x17: /* 8MB */
   9055			size = 1 << 23;
   9056			break;
   9057		case 0x18: /* 16MB */
   9058			size = 1 << 24;
   9059			break;
   9060		}
   9061		break;
   9062	}
   9063	case 0xef: { /* Winbond */
   9064		/* This Density -> Size decoding table is taken from Winbond
   9065		 * Data Sheets.
   9066		 */
   9067		density = (flashid >> 16) & 0xff;
   9068		switch (density) {
   9069		case 0x17: /* 8MB */
   9070			size = 1 << 23;
   9071			break;
   9072		case 0x18: /* 16MB */
   9073			size = 1 << 24;
   9074			break;
   9075		}
   9076		break;
   9077	}
   9078	}
   9079
   9080	/* If we didn't recognize the FLASH part, that's no real issue: the
   9081	 * Hardware/Software contract says that Hardware will _*ALWAYS*_
   9082	 * use a FLASH part which is at least 4MB in size and has 64KB
   9083	 * sectors.  The unrecognized FLASH part is likely to be much larger
   9084	 * than 4MB, but that's all we really need.
   9085	 */
   9086	if (size == 0) {
   9087		dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
   9088			 flashid);
   9089		size = 1 << 22;
   9090	}
   9091
   9092	/* Store decoded Flash size and fall through into vetting code. */
   9093	adap->params.sf_size = size;
   9094	adap->params.sf_nsec = size / SF_SEC_SIZE;
   9095
   9096found:
   9097	if (adap->params.sf_size < FLASH_MIN_SIZE)
   9098		dev_warn(adap->pdev_dev, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
   9099			 flashid, adap->params.sf_size, FLASH_MIN_SIZE);
   9100	return 0;
   9101}
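
/* Worked example of the Read ID decode above (illustrative values): a
 * hypothetical 16MB Winbond part returning the ID bytes ef 40 18 shows up
 * here as flashid = 0x1840ef, so
 *
 *	manufacturer = flashid & 0xff;		   -> 0xef  (Winbond)
 *	density      = (flashid >> 16) & 0xff;	   -> 0x18  (16MB)
 *	size         = 1 << 24;			   -> 16MB
 *	adap->params.sf_nsec = size / SF_SEC_SIZE; -> 256 with the 64KB
 *						      sectors noted above
 *
 * An unrecognized ID simply falls back to the 4MB assumption above.
 */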
   9102
   9103/**
   9104 *	t4_prep_adapter - prepare SW and HW for operation
   9105 *	@adapter: the adapter
   9106 *
   9107 *	Initialize adapter SW state for the various HW modules, set initial
   9108 *	values for some adapter tunables, take PHYs out of reset, and
   9109 *	initialize the MDIO interface.
   9110 */
   9111int t4_prep_adapter(struct adapter *adapter)
   9112{
   9113	int ret, ver;
   9114	uint16_t device_id;
   9115	u32 pl_rev;
   9116
   9117	get_pci_mode(adapter, &adapter->params.pci);
   9118	pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
   9119
   9120	ret = t4_get_flash_params(adapter);
   9121	if (ret < 0) {
   9122		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
   9123		return ret;
   9124	}
   9125
   9126	/* Retrieve adapter's device ID
   9127	 */
   9128	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
   9129	ver = device_id >> 12;
   9130	adapter->params.chip = 0;
   9131	switch (ver) {
   9132	case CHELSIO_T4:
   9133		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
   9134		adapter->params.arch.sge_fl_db = DBPRIO_F;
   9135		adapter->params.arch.mps_tcam_size =
   9136				 NUM_MPS_CLS_SRAM_L_INSTANCES;
   9137		adapter->params.arch.mps_rplc_size = 128;
   9138		adapter->params.arch.nchan = NCHAN;
   9139		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
   9140		adapter->params.arch.vfcount = 128;
   9141		/* Congestion map is for 4 channels so that
    9142		 * MPS can have 4 priorities per port.
   9143		 */
   9144		adapter->params.arch.cng_ch_bits_log = 2;
   9145		break;
   9146	case CHELSIO_T5:
   9147		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
   9148		adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
   9149		adapter->params.arch.mps_tcam_size =
   9150				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
   9151		adapter->params.arch.mps_rplc_size = 128;
   9152		adapter->params.arch.nchan = NCHAN;
   9153		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
   9154		adapter->params.arch.vfcount = 128;
   9155		adapter->params.arch.cng_ch_bits_log = 2;
   9156		break;
   9157	case CHELSIO_T6:
   9158		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
   9159		adapter->params.arch.sge_fl_db = 0;
   9160		adapter->params.arch.mps_tcam_size =
   9161				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
   9162		adapter->params.arch.mps_rplc_size = 256;
   9163		adapter->params.arch.nchan = 2;
   9164		adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
   9165		adapter->params.arch.vfcount = 256;
   9166		/* Congestion map will be for 2 channels so that
    9167		 * MPS can have 8 priorities per port.
   9168		 */
   9169		adapter->params.arch.cng_ch_bits_log = 3;
   9170		break;
   9171	default:
   9172		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
   9173			device_id);
   9174		return -EINVAL;
   9175	}
   9176
   9177	adapter->params.cim_la_size = CIMLA_SIZE;
   9178	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
   9179
   9180	/*
   9181	 * Default port for debugging in case we can't reach FW.
   9182	 */
   9183	adapter->params.nports = 1;
   9184	adapter->params.portvec = 1;
   9185	adapter->params.vpd.cclk = 50000;
   9186
   9187	/* Set PCIe completion timeout to 4 seconds. */
   9188	pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2,
   9189					   PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
   9190	return 0;
   9191}
   9192
   9193/**
   9194 *	t4_shutdown_adapter - shut down adapter, host & wire
   9195 *	@adapter: the adapter
   9196 *
   9197 *	Perform an emergency shutdown of the adapter and stop it from
   9198 *	continuing any further communication on the ports or DMA to the
   9199 *	host.  This is typically used when the adapter and/or firmware
   9200 *	have crashed and we want to prevent any further accidental
   9201 *	communication with the rest of the world.  This will also force
   9202 *	the port Link Status to go down -- if register writes work --
   9203 *	which should help our peers figure out that we're down.
   9204 */
   9205int t4_shutdown_adapter(struct adapter *adapter)
   9206{
   9207	int port;
   9208
   9209	t4_intr_disable(adapter);
   9210	t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
   9211	for_each_port(adapter, port) {
   9212		u32 a_port_cfg = is_t4(adapter->params.chip) ?
   9213				       PORT_REG(port, XGMAC_PORT_CFG_A) :
   9214				       T5_PORT_REG(port, MAC_PORT_CFG_A);
   9215
   9216		t4_write_reg(adapter, a_port_cfg,
   9217			     t4_read_reg(adapter, a_port_cfg)
   9218			     & ~SIGNAL_DET_V(1));
   9219	}
   9220	t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);
   9221
   9222	return 0;
   9223}
   9224
   9225/**
   9226 *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
   9227 *	@adapter: the adapter
   9228 *	@qid: the Queue ID
   9229 *	@qtype: the Ingress or Egress type for @qid
   9230 *	@user: true if this request is for a user mode queue
   9231 *	@pbar2_qoffset: BAR2 Queue Offset
   9232 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
   9233 *
   9234 *	Returns the BAR2 SGE Queue Registers information associated with the
   9235 *	indicated Absolute Queue ID.  These are passed back in return value
   9236 *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
   9237 *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
   9238 *
   9239 *	This may return an error which indicates that BAR2 SGE Queue
   9240 *	registers aren't available.  If an error is not returned, then the
   9241 *	following values are returned:
   9242 *
   9243 *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
   9244 *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
   9245 *
   9246 *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
   9247 *	require the "Inferred Queue ID" ability may be used.  E.g. the
   9248 *	Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
    9249 *	then these "Inferred Queue ID" registers may not be used.
   9250 */
   9251int t4_bar2_sge_qregs(struct adapter *adapter,
   9252		      unsigned int qid,
   9253		      enum t4_bar2_qtype qtype,
   9254		      int user,
   9255		      u64 *pbar2_qoffset,
   9256		      unsigned int *pbar2_qid)
   9257{
   9258	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
   9259	u64 bar2_page_offset, bar2_qoffset;
   9260	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
   9261
   9262	/* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
   9263	if (!user && is_t4(adapter->params.chip))
   9264		return -EINVAL;
   9265
   9266	/* Get our SGE Page Size parameters.
   9267	 */
   9268	page_shift = adapter->params.sge.hps + 10;
   9269	page_size = 1 << page_shift;
   9270
   9271	/* Get the right Queues per Page parameters for our Queue.
   9272	 */
   9273	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
   9274		     ? adapter->params.sge.eq_qpp
   9275		     : adapter->params.sge.iq_qpp);
   9276	qpp_mask = (1 << qpp_shift) - 1;
   9277
   9278	/*  Calculate the basics of the BAR2 SGE Queue register area:
   9279	 *  o The BAR2 page the Queue registers will be in.
   9280	 *  o The BAR2 Queue ID.
   9281	 *  o The BAR2 Queue ID Offset into the BAR2 page.
   9282	 */
   9283	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
   9284	bar2_qid = qid & qpp_mask;
   9285	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
   9286
   9287	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
   9288	 * hardware will infer the Absolute Queue ID simply from the writes to
   9289	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
   9290	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
   9291	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
   9292	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
   9293	 * from the BAR2 Page and BAR2 Queue ID.
   9294	 *
    9295	 * One important consequence of this is that some BAR2 SGE registers
   9296	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
   9297	 * there.  But other registers synthesize the SGE Queue ID purely
   9298	 * from the writes to the registers -- the Write Combined Doorbell
   9299	 * Buffer is a good example.  These BAR2 SGE Registers are only
   9300	 * available for those BAR2 SGE Register areas where the SGE Absolute
   9301	 * Queue ID can be inferred from simple writes.
   9302	 */
   9303	bar2_qoffset = bar2_page_offset;
   9304	bar2_qinferred = (bar2_qid_offset < page_size);
   9305	if (bar2_qinferred) {
   9306		bar2_qoffset += bar2_qid_offset;
   9307		bar2_qid = 0;
   9308	}
   9309
   9310	*pbar2_qoffset = bar2_qoffset;
   9311	*pbar2_qid = bar2_qid;
   9312	return 0;
   9313}
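
/* Worked example for the calculation above (illustrative values): with a
 * 4KB SGE Host Page Size (page_shift = 12), 8 Egress Queues per Page
 * (qpp_shift = 3) and, assuming the usual 128-byte SGE_UDB_SIZE, an Egress
 * Queue with Absolute Queue ID 21 maps to
 *
 *	bar2_page_offset = (21 >> 3) << 12 = 0x2000
 *	bar2_qid         = 21 & 7          = 5
 *	bar2_qid_offset  = 5 * 128         = 640
 *
 * Since 640 < 4096 the Queue ID can be inferred, so the function returns
 * *pbar2_qoffset = 0x2280 and *pbar2_qid = 0.
 */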
   9314
   9315/**
   9316 *	t4_init_devlog_params - initialize adapter->params.devlog
   9317 *	@adap: the adapter
   9318 *
   9319 *	Initialize various fields of the adapter's Firmware Device Log
   9320 *	Parameters structure.
   9321 */
   9322int t4_init_devlog_params(struct adapter *adap)
   9323{
   9324	struct devlog_params *dparams = &adap->params.devlog;
   9325	u32 pf_dparams;
   9326	unsigned int devlog_meminfo;
   9327	struct fw_devlog_cmd devlog_cmd;
   9328	int ret;
   9329
   9330	/* If we're dealing with newer firmware, the Device Log Parameters
   9331	 * are stored in a designated register which allows us to access the
   9332	 * Device Log even if we can't talk to the firmware.
   9333	 */
   9334	pf_dparams =
   9335		t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
   9336	if (pf_dparams) {
   9337		unsigned int nentries, nentries128;
   9338
   9339		dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
   9340		dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
   9341
   9342		nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
   9343		nentries = (nentries128 + 1) * 128;
   9344		dparams->size = nentries * sizeof(struct fw_devlog_e);
   9345
   9346		return 0;
   9347	}
   9348
    9349	/* Otherwise, ask the firmware for its Device Log Parameters.
   9350	 */
   9351	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
   9352	devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
   9353					     FW_CMD_REQUEST_F | FW_CMD_READ_F);
   9354	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
   9355	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
   9356			 &devlog_cmd);
   9357	if (ret)
   9358		return ret;
   9359
   9360	devlog_meminfo =
   9361		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
   9362	dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
   9363	dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
   9364	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
   9365
   9366	return 0;
   9367}
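
/* Worked example for the register-based path above (illustrative values):
 * if PCIE_FW_PF_DEVLOG reads back with MEMTYPE = 0, ADDR16 = 0x100 and
 * NENTRIES128 = 7, the Device Log lives at byte offset 0x100 << 4 = 0x1000
 * of memory type 0 and holds (7 + 1) * 128 = 1024 entries, i.e.
 * 1024 * sizeof(struct fw_devlog_e) bytes.
 */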
   9368
   9369/**
   9370 *	t4_init_sge_params - initialize adap->params.sge
   9371 *	@adapter: the adapter
   9372 *
   9373 *	Initialize various fields of the adapter's SGE Parameters structure.
   9374 */
   9375int t4_init_sge_params(struct adapter *adapter)
   9376{
   9377	struct sge_params *sge_params = &adapter->params.sge;
   9378	u32 hps, qpp;
   9379	unsigned int s_hps, s_qpp;
   9380
   9381	/* Extract the SGE Page Size for our PF.
   9382	 */
   9383	hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
   9384	s_hps = (HOSTPAGESIZEPF0_S +
   9385		 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
   9386	sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
   9387
    9388	/* Extract the SGE Egress and Ingress Queues Per Page for our PF.
   9389	 */
   9390	s_qpp = (QUEUESPERPAGEPF0_S +
   9391		(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
   9392	qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
   9393	sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
   9394	qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
   9395	sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
   9396
   9397	return 0;
   9398}
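
/* Worked example for the per-PF field extraction above (illustrative
 * values): the HOSTPAGESIZEPF fields are laid out one per PF at a fixed
 * stride, so for PF 4 the shift is
 *
 *	s_hps = HOSTPAGESIZEPF0_S +
 *		(HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * 4;
 *
 * and if the extracted field value is 2, the SGE Host Page Size used by
 * t4_bar2_sge_qregs() above is 1 << (2 + 10) = 4KB.
 */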
   9399
   9400/**
   9401 *      t4_init_tp_params - initialize adap->params.tp
   9402 *      @adap: the adapter
   9403 *      @sleep_ok: if true we may sleep while awaiting command completion
   9404 *
   9405 *      Initialize various fields of the adapter's TP Parameters structure.
   9406 */
   9407int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
   9408{
   9409	u32 param, val, v;
   9410	int chan, ret;
   9411
   9412
   9413	v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
   9414	adap->params.tp.tre = TIMERRESOLUTION_G(v);
   9415	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
   9416
   9417	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
   9418	for (chan = 0; chan < NCHAN; chan++)
   9419		adap->params.tp.tx_modq[chan] = chan;
   9420
   9421	/* Cache the adapter's Compressed Filter Mode/Mask and global Ingress
   9422	 * Configuration.
   9423	 */
   9424	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
   9425		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FILTER) |
   9426		 FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_FILTER_MODE_MASK));
   9427
   9428	/* Read current value */
   9429	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
   9430			      &param, &val);
   9431	if (ret == 0) {
   9432		dev_info(adap->pdev_dev,
   9433			 "Current filter mode/mask 0x%x:0x%x\n",
   9434			 FW_PARAMS_PARAM_FILTER_MODE_G(val),
   9435			 FW_PARAMS_PARAM_FILTER_MASK_G(val));
   9436		adap->params.tp.vlan_pri_map =
   9437			FW_PARAMS_PARAM_FILTER_MODE_G(val);
   9438		adap->params.tp.filter_mask =
   9439			FW_PARAMS_PARAM_FILTER_MASK_G(val);
   9440	} else {
   9441		dev_info(adap->pdev_dev,
   9442			 "Failed to read filter mode/mask via fw api, using indirect-reg-read\n");
   9443
    9444		/* In case of an older fw (which doesn't expose the
    9445		 * FW_PARAM_DEV_FILTER_MODE_MASK api) combined with a newer
    9446		 * driver (which uses the fw api), fall back to the older
    9447		 * method of reading the filter mode from the indirect register
    9448		 */
   9449		t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
   9450			       TP_VLAN_PRI_MAP_A, sleep_ok);
   9451
    9452		/* With the older-fw and newer-driver combination we might run
    9453		 * into an issue when the user wants to use the hash filter
    9454		 * region but filter_mask is zero; in that case filter_mask
    9455		 * validation isn't possible. To avoid that we set filter_mask
    9456		 * equal to the filter mode, which behaves exactly like the
    9457		 * older way of skipping the filter mask validation.
    9458		 */
   9459		adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
   9460	}
   9461
   9462	t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
   9463		       TP_INGRESS_CONFIG_A, sleep_ok);
   9464
   9465	/* For T6, cache the adapter's compressed error vector
   9466	 * and passing outer header info for encapsulated packets.
   9467	 */
   9468	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
   9469		v = t4_read_reg(adap, TP_OUT_CONFIG_A);
   9470		adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0;
   9471	}
   9472
   9473	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
   9474	 * shift positions of several elements of the Compressed Filter Tuple
   9475	 * for this adapter which we need frequently ...
   9476	 */
   9477	adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, FCOE_F);
   9478	adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
   9479	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
   9480	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
   9481	adap->params.tp.tos_shift = t4_filter_field_shift(adap, TOS_F);
   9482	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
   9483							       PROTOCOL_F);
   9484	adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
   9485								ETHERTYPE_F);
   9486	adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
   9487							       MACMATCH_F);
   9488	adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
   9489								MPSHITTYPE_F);
   9490	adap->params.tp.frag_shift = t4_filter_field_shift(adap,
   9491							   FRAGMENTATION_F);
   9492
   9493	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
   9494	 * represents the presence of an Outer VLAN instead of a VNIC ID.
   9495	 */
   9496	if ((adap->params.tp.ingress_config & VNIC_F) == 0)
   9497		adap->params.tp.vnic_shift = -1;
   9498
   9499	v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
   9500	adap->params.tp.hash_filter_mask = v;
   9501	v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
   9502	adap->params.tp.hash_filter_mask |= ((u64)v << 32);
   9503	return 0;
   9504}
   9505
   9506/**
   9507 *      t4_filter_field_shift - calculate filter field shift
   9508 *      @adap: the adapter
   9509 *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
   9510 *
   9511 *      Return the shift position of a filter field within the Compressed
   9512 *      Filter Tuple.  The filter field is specified via its selection bit
    9513 *      within TP_VLAN_PRI_MAP (filter mode), e.g. VLAN_F.
   9514 */
   9515int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
   9516{
   9517	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
   9518	unsigned int sel;
   9519	int field_shift;
   9520
   9521	if ((filter_mode & filter_sel) == 0)
   9522		return -1;
   9523
   9524	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
   9525		switch (filter_mode & sel) {
   9526		case FCOE_F:
   9527			field_shift += FT_FCOE_W;
   9528			break;
   9529		case PORT_F:
   9530			field_shift += FT_PORT_W;
   9531			break;
   9532		case VNIC_ID_F:
   9533			field_shift += FT_VNIC_ID_W;
   9534			break;
   9535		case VLAN_F:
   9536			field_shift += FT_VLAN_W;
   9537			break;
   9538		case TOS_F:
   9539			field_shift += FT_TOS_W;
   9540			break;
   9541		case PROTOCOL_F:
   9542			field_shift += FT_PROTOCOL_W;
   9543			break;
   9544		case ETHERTYPE_F:
   9545			field_shift += FT_ETHERTYPE_W;
   9546			break;
   9547		case MACMATCH_F:
   9548			field_shift += FT_MACMATCH_W;
   9549			break;
   9550		case MPSHITTYPE_F:
   9551			field_shift += FT_MPSHITTYPE_W;
   9552			break;
   9553		case FRAGMENTATION_F:
   9554			field_shift += FT_FRAGMENTATION_W;
   9555			break;
   9556		}
   9557	}
   9558	return field_shift;
   9559}
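
/* Usage sketch (illustrative only): callers cache these shifts and use them
 * to place field values inside a Compressed Filter Tuple, skipping any field
 * whose shift came back as -1 because it isn't part of the current filter
 * mode.  Roughly as the filter code does:
 *
 *	struct tp_params *tp = &adap->params.tp;
 *	u64 ntuple = 0;
 *
 *	if (tp->port_shift >= 0)
 *		ntuple |= (u64)iport << tp->port_shift;
 *	if (tp->ethertype_shift >= 0)
 *		ntuple |= (u64)ethtype << tp->ethertype_shift;
 */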
   9560
   9561int t4_init_rss_mode(struct adapter *adap, int mbox)
   9562{
   9563	int i, ret;
   9564	struct fw_rss_vi_config_cmd rvc;
   9565
   9566	memset(&rvc, 0, sizeof(rvc));
   9567
   9568	for_each_port(adap, i) {
   9569		struct port_info *p = adap2pinfo(adap, i);
   9570
   9571		rvc.op_to_viid =
   9572			cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
   9573				    FW_CMD_REQUEST_F | FW_CMD_READ_F |
   9574				    FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
   9575		rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
   9576		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
   9577		if (ret)
   9578			return ret;
   9579		p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
   9580	}
   9581	return 0;
   9582}
   9583
   9584/**
   9585 *	t4_init_portinfo - allocate a virtual interface and initialize port_info
   9586 *	@pi: the port_info
   9587 *	@mbox: mailbox to use for the FW command
   9588 *	@port: physical port associated with the VI
   9589 *	@pf: the PF owning the VI
   9590 *	@vf: the VF owning the VI
   9591 *	@mac: the MAC address of the VI
   9592 *
   9593 *	Allocates a virtual interface for the given physical port.  If @mac is
   9594 *	not %NULL it contains the MAC address of the VI as assigned by FW.
   9595 *	@mac should be large enough to hold an Ethernet address.
   9596 *	Returns < 0 on error.
   9597 */
   9598int t4_init_portinfo(struct port_info *pi, int mbox,
   9599		     int port, int pf, int vf, u8 mac[])
   9600{
   9601	struct adapter *adapter = pi->adapter;
   9602	unsigned int fw_caps = adapter->params.fw_caps_support;
   9603	struct fw_port_cmd cmd;
   9604	unsigned int rss_size;
   9605	enum fw_port_type port_type;
   9606	int mdio_addr;
   9607	fw_port_cap32_t pcaps, acaps;
   9608	u8 vivld = 0, vin = 0;
   9609	int ret;
   9610
   9611	/* If we haven't yet determined whether we're talking to Firmware
   9612	 * which knows the new 32-bit Port Capabilities, it's time to find
   9613	 * out now.  This will also tell new Firmware to send us Port Status
   9614	 * Updates using the new 32-bit Port Capabilities version of the
   9615	 * Port Information message.
   9616	 */
   9617	if (fw_caps == FW_CAPS_UNKNOWN) {
   9618		u32 param, val;
   9619
   9620		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
   9621			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
   9622		val = 1;
   9623		ret = t4_set_params(adapter, mbox, pf, vf, 1, &param, &val);
   9624		fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
   9625		adapter->params.fw_caps_support = fw_caps;
   9626	}
   9627
   9628	memset(&cmd, 0, sizeof(cmd));
   9629	cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
   9630				       FW_CMD_REQUEST_F | FW_CMD_READ_F |
   9631				       FW_PORT_CMD_PORTID_V(port));
   9632	cmd.action_to_len16 = cpu_to_be32(
   9633		FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
   9634				     ? FW_PORT_ACTION_GET_PORT_INFO
   9635				     : FW_PORT_ACTION_GET_PORT_INFO32) |
   9636		FW_LEN16(cmd));
   9637	ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
   9638	if (ret)
   9639		return ret;
   9640
   9641	/* Extract the various fields from the Port Information message.
   9642	 */
   9643	if (fw_caps == FW_CAPS16) {
   9644		u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);
   9645
   9646		port_type = FW_PORT_CMD_PTYPE_G(lstatus);
   9647		mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F)
   9648			     ? FW_PORT_CMD_MDIOADDR_G(lstatus)
   9649			     : -1);
   9650		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
   9651		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
   9652	} else {
   9653		u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
   9654
   9655		port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
   9656		mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F)
   9657			     ? FW_PORT_CMD_MDIOADDR32_G(lstatus32)
   9658			     : -1);
   9659		pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
   9660		acaps = be32_to_cpu(cmd.u.info32.acaps32);
   9661	}
   9662
   9663	ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size,
   9664			  &vivld, &vin);
   9665	if (ret < 0)
   9666		return ret;
   9667
   9668	pi->viid = ret;
   9669	pi->tx_chan = port;
   9670	pi->lport = port;
   9671	pi->rss_size = rss_size;
   9672	pi->rx_cchan = t4_get_tp_e2c_map(pi->adapter, port);
   9673
   9674	/* If fw supports returning the VIN as part of FW_VI_CMD,
   9675	 * save the returned values.
   9676	 */
   9677	if (adapter->params.viid_smt_extn_support) {
   9678		pi->vivld = vivld;
   9679		pi->vin = vin;
   9680	} else {
   9681		/* Retrieve the values from VIID */
   9682		pi->vivld = FW_VIID_VIVLD_G(pi->viid);
   9683		pi->vin =  FW_VIID_VIN_G(pi->viid);
   9684	}
   9685
   9686	pi->port_type = port_type;
   9687	pi->mdio_addr = mdio_addr;
   9688	pi->mod_type = FW_PORT_MOD_TYPE_NA;
   9689
   9690	init_link_config(&pi->link_cfg, pcaps, acaps);
   9691	return 0;
   9692}
   9693
   9694int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
   9695{
   9696	u8 addr[6];
   9697	int ret, i, j = 0;
   9698
   9699	for_each_port(adap, i) {
   9700		struct port_info *pi = adap2pinfo(adap, i);
   9701
   9702		while ((adap->params.portvec & (1 << j)) == 0)
   9703			j++;
   9704
   9705		ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
   9706		if (ret)
   9707			return ret;
   9708
   9709		eth_hw_addr_set(adap->port[i], addr);
   9710		j++;
   9711	}
   9712	return 0;
   9713}
   9714
   9715int t4_init_port_mirror(struct port_info *pi, u8 mbox, u8 port, u8 pf, u8 vf,
   9716			u16 *mirror_viid)
   9717{
   9718	int ret;
   9719
   9720	ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL,
   9721			  NULL, NULL);
   9722	if (ret < 0)
   9723		return ret;
   9724
   9725	if (mirror_viid)
   9726		*mirror_viid = ret;
   9727
   9728	return 0;
   9729}
   9730
   9731/**
   9732 *	t4_read_cimq_cfg - read CIM queue configuration
   9733 *	@adap: the adapter
   9734 *	@base: holds the queue base addresses in bytes
   9735 *	@size: holds the queue sizes in bytes
   9736 *	@thres: holds the queue full thresholds in bytes
   9737 *
   9738 *	Returns the current configuration of the CIM queues, starting with
   9739 *	the IBQs, then the OBQs.
   9740 */
   9741void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
   9742{
   9743	unsigned int i, v;
   9744	int cim_num_obq = is_t4(adap->params.chip) ?
   9745				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
   9746
   9747	for (i = 0; i < CIM_NUM_IBQ; i++) {
   9748		t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
   9749			     QUENUMSELECT_V(i));
   9750		v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
   9751		/* value is in 256-byte units */
   9752		*base++ = CIMQBASE_G(v) * 256;
   9753		*size++ = CIMQSIZE_G(v) * 256;
   9754		*thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
   9755	}
   9756	for (i = 0; i < cim_num_obq; i++) {
   9757		t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
   9758			     QUENUMSELECT_V(i));
   9759		v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
   9760		/* value is in 256-byte units */
   9761		*base++ = CIMQBASE_G(v) * 256;
   9762		*size++ = CIMQSIZE_G(v) * 256;
   9763	}
   9764}
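
/* Caller sketch (illustrative only): the output arrays need one slot per IBQ
 * followed by one per OBQ; @thres is only filled for the IBQs.  Sizing for
 * the larger T5+ OBQ count is sufficient for all chips:
 *
 *	u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
 *	u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
 *	u16 thres[CIM_NUM_IBQ];
 *
 *	t4_read_cimq_cfg(adap, base, size, thres);
 */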
   9765
   9766/**
   9767 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
   9768 *	@adap: the adapter
   9769 *	@qid: the queue index
   9770 *	@data: where to store the queue contents
   9771 *	@n: capacity of @data in 32-bit words
   9772 *
   9773 *	Reads the contents of the selected CIM queue starting at address 0 up
   9774 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
   9775 *	error and the number of 32-bit words actually read on success.
   9776 */
   9777int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
   9778{
   9779	int i, err, attempts;
   9780	unsigned int addr;
   9781	const unsigned int nwords = CIM_IBQ_SIZE * 4;
   9782
   9783	if (qid > 5 || (n & 3))
   9784		return -EINVAL;
   9785
   9786	addr = qid * nwords;
   9787	if (n > nwords)
   9788		n = nwords;
   9789
   9790	/* It might take 3-10ms before the IBQ debug read access is allowed.
    9791	 * Wait for up to 1 second, polling with a 1 usec delay.
   9792	 */
   9793	attempts = 1000000;
   9794
   9795	for (i = 0; i < n; i++, addr++) {
   9796		t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
   9797			     IBQDBGEN_F);
   9798		err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
   9799				      attempts, 1);
   9800		if (err)
   9801			return err;
   9802		*data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
   9803	}
   9804	t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
   9805	return i;
   9806}
   9807
   9808/**
   9809 *	t4_read_cim_obq - read the contents of a CIM outbound queue
   9810 *	@adap: the adapter
   9811 *	@qid: the queue index
   9812 *	@data: where to store the queue contents
   9813 *	@n: capacity of @data in 32-bit words
   9814 *
   9815 *	Reads the contents of the selected CIM queue starting at address 0 up
   9816 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
   9817 *	error and the number of 32-bit words actually read on success.
   9818 */
   9819int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
   9820{
   9821	int i, err;
   9822	unsigned int addr, v, nwords;
   9823	int cim_num_obq = is_t4(adap->params.chip) ?
   9824				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
   9825
   9826	if ((qid > (cim_num_obq - 1)) || (n & 3))
   9827		return -EINVAL;
   9828
   9829	t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
   9830		     QUENUMSELECT_V(qid));
   9831	v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
   9832
    9833	addr = CIMQBASE_G(v) * 64;    /* multiple of 256 -> multiple of 4 */
   9834	nwords = CIMQSIZE_G(v) * 64;  /* same */
   9835	if (n > nwords)
   9836		n = nwords;
   9837
   9838	for (i = 0; i < n; i++, addr++) {
   9839		t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
   9840			     OBQDBGEN_F);
   9841		err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
   9842				      2, 1);
   9843		if (err)
   9844			return err;
   9845		*data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
   9846	}
   9847	t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
   9848	return i;
   9849}
   9850
   9851/**
   9852 *	t4_cim_read - read a block from CIM internal address space
   9853 *	@adap: the adapter
   9854 *	@addr: the start address within the CIM address space
   9855 *	@n: number of words to read
   9856 *	@valp: where to store the result
   9857 *
    9858 *	Reads a block of 4-byte words from the CIM internal address space.
   9859 */
   9860int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
   9861		unsigned int *valp)
   9862{
   9863	int ret = 0;
   9864
   9865	if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
   9866		return -EBUSY;
   9867
   9868	for ( ; !ret && n--; addr += 4) {
   9869		t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
   9870		ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
   9871				      0, 5, 2);
   9872		if (!ret)
   9873			*valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
   9874	}
   9875	return ret;
   9876}
   9877
   9878/**
   9879 *	t4_cim_write - write a block into CIM internal address space
   9880 *	@adap: the adapter
   9881 *	@addr: the start address within the CIM address space
   9882 *	@n: number of words to write
   9883 *	@valp: set of values to write
   9884 *
    9885 *	Writes a block of 4-byte words into the CIM internal address space.
   9886 */
   9887int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
   9888		 const unsigned int *valp)
   9889{
   9890	int ret = 0;
   9891
   9892	if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
   9893		return -EBUSY;
   9894
   9895	for ( ; !ret && n--; addr += 4) {
   9896		t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
   9897		t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
   9898		ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
   9899				      0, 5, 2);
   9900	}
   9901	return ret;
   9902}
   9903
   9904static int t4_cim_write1(struct adapter *adap, unsigned int addr,
   9905			 unsigned int val)
   9906{
   9907	return t4_cim_write(adap, addr, 1, &val);
   9908}
   9909
   9910/**
   9911 *	t4_cim_read_la - read CIM LA capture buffer
   9912 *	@adap: the adapter
   9913 *	@la_buf: where to store the LA data
   9914 *	@wrptr: the HW write pointer within the capture buffer
   9915 *
   9916 *	Reads the contents of the CIM LA buffer with the most recent entry at
   9917 *	the end	of the returned data and with the entry at @wrptr first.
   9918 *	We try to leave the LA in the running state we find it in.
   9919 */
   9920int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
   9921{
   9922	int i, ret;
   9923	unsigned int cfg, val, idx;
   9924
   9925	ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
   9926	if (ret)
   9927		return ret;
   9928
   9929	if (cfg & UPDBGLAEN_F) {	/* LA is running, freeze it */
   9930		ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
   9931		if (ret)
   9932			return ret;
   9933	}
   9934
   9935	ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
   9936	if (ret)
   9937		goto restart;
   9938
   9939	idx = UPDBGLAWRPTR_G(val);
   9940	if (wrptr)
   9941		*wrptr = idx;
   9942
   9943	for (i = 0; i < adap->params.cim_la_size; i++) {
   9944		ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
   9945				    UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
   9946		if (ret)
   9947			break;
   9948		ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
   9949		if (ret)
   9950			break;
   9951		if (val & UPDBGLARDEN_F) {
   9952			ret = -ETIMEDOUT;
   9953			break;
   9954		}
   9955		ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
   9956		if (ret)
   9957			break;
   9958
    9959		/* Bits 0-3 of UpDbgLaRdPtr can range from 0000 to 1001 to
   9960		 * identify the 32-bit portion of the full 312-bit data
   9961		 */
   9962		if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
   9963			idx = (idx & 0xff0) + 0x10;
   9964		else
   9965			idx++;
   9966		/* address can't exceed 0xfff */
   9967		idx &= UPDBGLARDPTR_M;
   9968	}
   9969restart:
   9970	if (cfg & UPDBGLAEN_F) {
   9971		int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
   9972				      cfg & ~UPDBGLARDEN_F);
   9973		if (!ret)
   9974			ret = r;
   9975	}
   9976	return ret;
   9977}
   9978
   9979/**
   9980 *	t4_tp_read_la - read TP LA capture buffer
   9981 *	@adap: the adapter
   9982 *	@la_buf: where to store the LA data
   9983 *	@wrptr: the HW write pointer within the capture buffer
   9984 *
   9985 *	Reads the contents of the TP LA buffer with the most recent entry at
   9986 *	the end	of the returned data and with the entry at @wrptr first.
   9987 *	We leave the LA in the running state we find it in.
   9988 */
   9989void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
   9990{
   9991	bool last_incomplete;
   9992	unsigned int i, cfg, val, idx;
   9993
   9994	cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
   9995	if (cfg & DBGLAENABLE_F)			/* freeze LA */
   9996		t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
   9997			     adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
   9998
   9999	val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
  10000	idx = DBGLAWPTR_G(val);
  10001	last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
  10002	if (last_incomplete)
  10003		idx = (idx + 1) & DBGLARPTR_M;
  10004	if (wrptr)
  10005		*wrptr = idx;
  10006
  10007	val &= 0xffff;
  10008	val &= ~DBGLARPTR_V(DBGLARPTR_M);
  10009	val |= adap->params.tp.la_mask;
  10010
  10011	for (i = 0; i < TPLA_SIZE; i++) {
  10012		t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
  10013		la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
  10014		idx = (idx + 1) & DBGLARPTR_M;
  10015	}
  10016
  10017	/* Wipe out last entry if it isn't valid */
  10018	if (last_incomplete)
  10019		la_buf[TPLA_SIZE - 1] = ~0ULL;
  10020
  10021	if (cfg & DBGLAENABLE_F)                    /* restore running state */
  10022		t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
  10023			     cfg | adap->params.tp.la_mask);
  10024}
  10025
  10026/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
  10027 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
  10028 * state for more than the Warning Threshold then we'll issue a warning about
  10029 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
   10030 * appears to be hung every Warning Repeat seconds until the situation clears.
  10031 * If the situation clears, we'll note that as well.
  10032 */
  10033#define SGE_IDMA_WARN_THRESH 1
  10034#define SGE_IDMA_WARN_REPEAT 300
  10035
  10036/**
  10037 *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
  10038 *	@adapter: the adapter
  10039 *	@idma: the adapter IDMA Monitor state
  10040 *
  10041 *	Initialize the state of an SGE Ingress DMA Monitor.
  10042 */
  10043void t4_idma_monitor_init(struct adapter *adapter,
  10044			  struct sge_idma_monitor_state *idma)
  10045{
  10046	/* Initialize the state variables for detecting an SGE Ingress DMA
  10047	 * hang.  The SGE has internal counters which count up on each clock
  10048	 * tick whenever the SGE finds its Ingress DMA State Engines in the
  10049	 * same state they were on the previous clock tick.  The clock used is
  10050	 * the Core Clock so we have a limit on the maximum "time" they can
  10051	 * record; typically a very small number of seconds.  For instance,
  10052	 * with a 600MHz Core Clock, we can only count up to a bit more than
  10053	 * 7s.  So we'll synthesize a larger counter in order to not run the
  10054	 * risk of having the "timers" overflow and give us the flexibility to
  10055	 * maintain a Hung SGE State Machine of our own which operates across
  10056	 * a longer time frame.
  10057	 */
  10058	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
  10059	idma->idma_stalled[0] = 0;
  10060	idma->idma_stalled[1] = 0;
  10061}
  10062
  10063/**
  10064 *	t4_idma_monitor - monitor SGE Ingress DMA state
  10065 *	@adapter: the adapter
  10066 *	@idma: the adapter IDMA Monitor state
  10067 *	@hz: number of ticks/second
  10068 *	@ticks: number of ticks since the last IDMA Monitor call
  10069 */
  10070void t4_idma_monitor(struct adapter *adapter,
  10071		     struct sge_idma_monitor_state *idma,
  10072		     int hz, int ticks)
  10073{
  10074	int i, idma_same_state_cnt[2];
  10075
  10076	 /* Read the SGE Debug Ingress DMA Same State Count registers.  These
  10077	  * are counters inside the SGE which count up on each clock when the
  10078	  * SGE finds its Ingress DMA State Engines in the same states they
  10079	  * were in the previous clock.  The counters will peg out at
  10080	  * 0xffffffff without wrapping around so once they pass the 1s
  10081	  * threshold they'll stay above that till the IDMA state changes.
  10082	  */
  10083	t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
  10084	idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
  10085	idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
  10086
  10087	for (i = 0; i < 2; i++) {
  10088		u32 debug0, debug11;
  10089
  10090		/* If the Ingress DMA Same State Counter ("timer") is less
  10091		 * than 1s, then we can reset our synthesized Stall Timer and
  10092		 * continue.  If we have previously emitted warnings about a
  10093		 * potential stalled Ingress Queue, issue a note indicating
  10094		 * that the Ingress Queue has resumed forward progress.
  10095		 */
  10096		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
  10097			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
  10098				dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
  10099					 "resumed after %d seconds\n",
  10100					 i, idma->idma_qid[i],
  10101					 idma->idma_stalled[i] / hz);
  10102			idma->idma_stalled[i] = 0;
  10103			continue;
  10104		}
  10105
  10106		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
  10107		 * domain.  The first time we get here it'll be because we
  10108		 * passed the 1s Threshold; each additional time it'll be
  10109		 * because the RX Timer Callback is being fired on its regular
  10110		 * schedule.
  10111		 *
  10112		 * If the stall is below our Potential Hung Ingress Queue
  10113		 * Warning Threshold, continue.
  10114		 */
  10115		if (idma->idma_stalled[i] == 0) {
  10116			idma->idma_stalled[i] = hz;
  10117			idma->idma_warn[i] = 0;
  10118		} else {
  10119			idma->idma_stalled[i] += ticks;
  10120			idma->idma_warn[i] -= ticks;
  10121		}
  10122
  10123		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
  10124			continue;
  10125
  10126		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
  10127		 */
  10128		if (idma->idma_warn[i] > 0)
  10129			continue;
  10130		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
  10131
  10132		/* Read and save the SGE IDMA State and Queue ID information.
  10133		 * We do this every time in case it changes across time ...
  10134		 * can't be too careful ...
  10135		 */
  10136		t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
  10137		debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
  10138		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
  10139
  10140		t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
  10141		debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
  10142		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
  10143
  10144		dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
  10145			 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
  10146			 i, idma->idma_qid[i], idma->idma_state[i],
  10147			 idma->idma_stalled[i] / hz,
  10148			 debug0, debug11);
  10149		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
  10150	}
  10151}
  10152
  10153/**
  10154 *	t4_load_cfg - download config file
  10155 *	@adap: the adapter
  10156 *	@cfg_data: the cfg text file to write
  10157 *	@size: text file size
  10158 *
  10159 *	Write the supplied config text file to the card's serial flash.
  10160 */
  10161int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
  10162{
  10163	int ret, i, n, cfg_addr;
  10164	unsigned int addr;
  10165	unsigned int flash_cfg_start_sec;
  10166	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
  10167
  10168	cfg_addr = t4_flash_cfg_addr(adap);
  10169	if (cfg_addr < 0)
  10170		return cfg_addr;
  10171
  10172	addr = cfg_addr;
  10173	flash_cfg_start_sec = addr / SF_SEC_SIZE;
  10174
  10175	if (size > FLASH_CFG_MAX_SIZE) {
  10176		dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
  10177			FLASH_CFG_MAX_SIZE);
  10178		return -EFBIG;
  10179	}
  10180
  10181	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
  10182			 sf_sec_size);
  10183	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
  10184				     flash_cfg_start_sec + i - 1);
  10185	/* If size == 0 then we're simply erasing the FLASH sectors associated
  10186	 * with the on-adapter Firmware Configuration File.
  10187	 */
  10188	if (ret || size == 0)
  10189		goto out;
  10190
  10191	/* this will write to the flash up to SF_PAGE_SIZE at a time */
  10192	for (i = 0; i < size; i += SF_PAGE_SIZE) {
  10193		if ((size - i) <  SF_PAGE_SIZE)
  10194			n = size - i;
  10195		else
  10196			n = SF_PAGE_SIZE;
  10197		ret = t4_write_flash(adap, addr, n, cfg_data, true);
  10198		if (ret)
  10199			goto out;
  10200
  10201		addr += SF_PAGE_SIZE;
  10202		cfg_data += SF_PAGE_SIZE;
  10203	}
  10204
  10205out:
  10206	if (ret)
  10207		dev_err(adap->pdev_dev, "config file %s failed %d\n",
  10208			(size == 0 ? "clear" : "download"), ret);
  10209	return ret;
  10210}
  10211
  10212/**
  10213 *	t4_set_vf_mac_acl - Set MAC address for the specified VF
  10214 *	@adapter: The adapter
  10215 *	@vf: one of the VFs instantiated by the specified PF
  10216 *	@naddr: the number of MAC addresses
  10217 *	@addr: the MAC address(es) to be set to the specified VF
  10218 */
  10219int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
  10220		      unsigned int naddr, u8 *addr)
  10221{
  10222	struct fw_acl_mac_cmd cmd;
  10223
  10224	memset(&cmd, 0, sizeof(cmd));
  10225	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
  10226				    FW_CMD_REQUEST_F |
  10227				    FW_CMD_WRITE_F |
  10228				    FW_ACL_MAC_CMD_PFN_V(adapter->pf) |
  10229				    FW_ACL_MAC_CMD_VFN_V(vf));
  10230
  10231	/* Note: Do not enable the ACL */
  10232	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
  10233	cmd.nmac = naddr;
  10234
  10235	switch (adapter->pf) {
  10236	case 3:
  10237		memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
  10238		break;
  10239	case 2:
  10240		memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
  10241		break;
  10242	case 1:
  10243		memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
  10244		break;
  10245	case 0:
  10246		memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
  10247		break;
  10248	}
  10249
  10250	return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
  10251}
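
/* Usage sketch (illustrative only): the PF management code typically calls
 * this from the ndo_set_vf_mac path with a single address; note that the VF
 * number handed to the firmware is 1-based in that path:
 *
 *	ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
 */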
  10252
  10253/**
  10254 * t4_read_pace_tbl - read the pace table
  10255 * @adap: the adapter
  10256 * @pace_vals: holds the returned values
  10257 *
  10258 * Returns the values of TP's pace table in microseconds.
  10259 */
  10260void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
  10261{
  10262	unsigned int i, v;
  10263
  10264	for (i = 0; i < NTX_SCHED; i++) {
  10265		t4_write_reg(adap, TP_PACE_TABLE_A, 0xffff0000 + i);
  10266		v = t4_read_reg(adap, TP_PACE_TABLE_A);
  10267		pace_vals[i] = dack_ticks_to_usec(adap, v);
  10268	}
  10269}
  10270
  10271/**
  10272 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
  10273 * @adap: the adapter
  10274 * @sched: the scheduler index
  10275 * @kbps: the byte rate in Kbps
  10276 * @ipg: the interpacket delay in tenths of nanoseconds
  10277 * @sleep_ok: if true we may sleep while awaiting command completion
  10278 *
  10279 * Return the current configuration of a HW Tx scheduler.
  10280 */
  10281void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
  10282		     unsigned int *kbps, unsigned int *ipg, bool sleep_ok)
  10283{
  10284	unsigned int v, addr, bpt, cpt;
  10285
  10286	if (kbps) {
  10287		addr = TP_TX_MOD_Q1_Q0_RATE_LIMIT_A - sched / 2;
  10288		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
  10289		if (sched & 1)
  10290			v >>= 16;
  10291		bpt = (v >> 8) & 0xff;
  10292		cpt = v & 0xff;
  10293		if (!cpt) {
  10294			*kbps = 0;	/* scheduler disabled */
  10295		} else {
  10296			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
  10297			*kbps = (v * bpt) / 125;
  10298		}
  10299	}
  10300	if (ipg) {
  10301		addr = TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A - sched / 2;
  10302		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
  10303		if (sched & 1)
  10304			v >>= 16;
  10305		v &= 0xffff;
  10306		*ipg = (10000 * v) / core_ticks_per_usec(adap);
  10307	}
  10308}
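
/* Worked example (hypothetical numbers): with a 500 MHz core clock
 * (vpd.cclk == 500000 kHz), cpt == 8 and bpt == 16, the scheduler emits
 * 16 bytes every 8 core ticks:
 *
 *	ticks/s = 500000 * 1000 / 8  = 62,500,000
 *	bytes/s = 62,500,000 * 16    = 1,000,000,000
 *	kbps    = bytes/s * 8 / 1000 = bytes/s / 125 = 8,000,000
 *
 * which is why the code above divides by 125 to convert bytes/s to Kbps.
 */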
  10309
  10310/* t4_sge_ctxt_rd - read an SGE context through FW
  10311 * @adap: the adapter
  10312 * @mbox: mailbox to use for the FW command
  10313 * @cid: the context id
  10314 * @ctype: the context type
  10315 * @data: where to store the context data
  10316 *
  10317 * Issues a FW command through the given mailbox to read an SGE context.
  10318 */
  10319int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
  10320		   enum ctxt_type ctype, u32 *data)
  10321{
  10322	struct fw_ldst_cmd c;
  10323	int ret;
  10324
  10325	if (ctype == CTXT_FLM)
  10326		ret = FW_LDST_ADDRSPC_SGE_FLMC;
  10327	else
  10328		ret = FW_LDST_ADDRSPC_SGE_CONMC;
  10329
  10330	memset(&c, 0, sizeof(c));
  10331	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
  10332					FW_CMD_REQUEST_F | FW_CMD_READ_F |
  10333					FW_LDST_CMD_ADDRSPACE_V(ret));
  10334	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
  10335	c.u.idctxt.physid = cpu_to_be32(cid);
  10336
  10337	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
  10338	if (ret == 0) {
  10339		data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
  10340		data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
  10341		data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
  10342		data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
  10343		data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
  10344		data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
  10345	}
  10346	return ret;
  10347}
  10348
  10349/**
  10350 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
  10351 * @adap: the adapter
  10352 * @cid: the context id
  10353 * @ctype: the context type
  10354 * @data: where to store the context data
  10355 *
  10356 * Reads an SGE context directly, bypassing FW.  This is only for
  10357 * debugging when FW is unavailable.
  10358 */
  10359int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
  10360		      enum ctxt_type ctype, u32 *data)
  10361{
  10362	int i, ret;
  10363
  10364	t4_write_reg(adap, SGE_CTXT_CMD_A, CTXTQID_V(cid) | CTXTTYPE_V(ctype));
  10365	ret = t4_wait_op_done(adap, SGE_CTXT_CMD_A, BUSY_F, 0, 3, 1);
  10366	if (!ret)
  10367		for (i = SGE_CTXT_DATA0_A; i <= SGE_CTXT_DATA5_A; i += 4)
  10368			*data++ = t4_read_reg(adap, i);
  10369	return ret;
  10370}
  10371
  10372int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
  10373		    u8 rateunit, u8 ratemode, u8 channel, u8 class,
  10374		    u32 minrate, u32 maxrate, u16 weight, u16 pktsize,
  10375		    u16 burstsize)
  10376{
  10377	struct fw_sched_cmd cmd;
  10378
  10379	memset(&cmd, 0, sizeof(cmd));
  10380	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) |
  10381				      FW_CMD_REQUEST_F |
  10382				      FW_CMD_WRITE_F);
  10383	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
  10384
  10385	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
  10386	cmd.u.params.type = type;
  10387	cmd.u.params.level = level;
  10388	cmd.u.params.mode = mode;
  10389	cmd.u.params.ch = channel;
  10390	cmd.u.params.cl = class;
  10391	cmd.u.params.unit = rateunit;
  10392	cmd.u.params.rate = ratemode;
  10393	cmd.u.params.min = cpu_to_be32(minrate);
  10394	cmd.u.params.max = cpu_to_be32(maxrate);
  10395	cmd.u.params.weight = cpu_to_be16(weight);
  10396	cmd.u.params.pktsize = cpu_to_be16(pktsize);
  10397	cmd.u.params.burstsize = cpu_to_be16(burstsize);
  10398
  10399	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
  10400			       NULL, 1);
  10401}
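
/* Usage sketch (illustrative only): the scheduler code uses this to program
 * a class-level rate limit.  The enum values below come from t4fw_api.h; the
 * channel, class and rate numbers are made up for illustration:
 *
 *	ret = t4_sched_params(adap, FW_SCHED_TYPE_PKTSCHED,
 *			      FW_SCHED_PARAMS_LEVEL_CL_RL,
 *			      FW_SCHED_PARAMS_MODE_FLOW,
 *			      FW_SCHED_PARAMS_UNIT_BITRATE,
 *			      FW_SCHED_PARAMS_RATE_ABS,
 *			      0, 0,		(channel, class)
 *			      0, 100000,	(min, max rate in Kbps)
 *			      0, 0, 0);		(weight, pktsize, burstsize)
 */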
  10402
  10403/**
  10404 *	t4_i2c_rd - read I2C data from adapter
  10405 *	@adap: the adapter
  10406 *	@mbox: mailbox to use for the FW command
  10407 *	@port: Port number if per-port device; <0 if not
  10408 *	@devid: per-port device ID or absolute device ID
  10409 *	@offset: byte offset into device I2C space
  10410 *	@len: byte length of I2C space data
  10411 *	@buf: buffer in which to return I2C data
  10412 *
  10413 *	Reads the I2C data from the indicated device and location.
  10414 */
  10415int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
  10416	      unsigned int devid, unsigned int offset,
  10417	      unsigned int len, u8 *buf)
  10418{
  10419	struct fw_ldst_cmd ldst_cmd, ldst_rpl;
  10420	unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
  10421	int ret = 0;
  10422
  10423	if (len > I2C_PAGE_SIZE)
  10424		return -EINVAL;
  10425
   10426	/* Don't allow reads that span multiple pages */
  10427	if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
  10428		return -EINVAL;
  10429
  10430	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
  10431	ldst_cmd.op_to_addrspace =
  10432		cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
  10433			    FW_CMD_REQUEST_F |
  10434			    FW_CMD_READ_F |
  10435			    FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_I2C));
  10436	ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
  10437	ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
  10438	ldst_cmd.u.i2c.did = devid;
  10439
  10440	while (len > 0) {
  10441		unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
  10442
  10443		ldst_cmd.u.i2c.boffset = offset;
  10444		ldst_cmd.u.i2c.blen = i2c_len;
  10445
  10446		ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
  10447				 &ldst_rpl);
  10448		if (ret)
  10449			break;
  10450
  10451		memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
  10452		offset += i2c_len;
  10453		buf += i2c_len;
  10454		len -= i2c_len;
  10455	}
  10456
  10457	return ret;
  10458}
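
/* Usage sketch (illustrative only): the ethtool module-EEPROM code reads
 * transceiver data this way.  0xa0 is the conventional SFP/QSFP EEPROM i2c
 * address; the offset and length here are made-up values:
 *
 *	u8 id;
 *
 *	ret = t4_i2c_rd(adap, adap->mbox, pi->port_id, 0xa0, 0, 1, &id);
 */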
  10459
  10460/**
  10461 *      t4_set_vlan_acl - Set a VLAN id for the specified VF
  10462 *      @adap: the adapter
  10463 *      @mbox: mailbox to use for the FW command
  10464 *      @vf: one of the VFs instantiated by the specified PF
  10465 *      @vlan: The vlanid to be set
  10466 */
  10467int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
  10468		    u16 vlan)
  10469{
  10470	struct fw_acl_vlan_cmd vlan_cmd;
  10471	unsigned int enable;
  10472
  10473	enable = (vlan ? FW_ACL_VLAN_CMD_EN_F : 0);
  10474	memset(&vlan_cmd, 0, sizeof(vlan_cmd));
  10475	vlan_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
  10476					 FW_CMD_REQUEST_F |
  10477					 FW_CMD_WRITE_F |
  10478					 FW_CMD_EXEC_F |
  10479					 FW_ACL_VLAN_CMD_PFN_V(adap->pf) |
  10480					 FW_ACL_VLAN_CMD_VFN_V(vf));
  10481	vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
   10482	/* Drop all packets that do not match the VLAN id */
  10483	vlan_cmd.dropnovlan_fm = (enable
  10484				  ? (FW_ACL_VLAN_CMD_DROPNOVLAN_F |
  10485				     FW_ACL_VLAN_CMD_FM_F) : 0);
  10486	if (enable != 0) {
  10487		vlan_cmd.nvlan = 1;
  10488		vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
  10489	}
  10490
  10491	return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
  10492}
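
/* Usage sketch (illustrative only): the PF management code calls this from
 * the ndo_set_vf_vlan path; passing vlan == 0 clears the ACL.  As with the
 * MAC ACL above, the VF number handed to the firmware is 1-based there:
 *
 *	ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
 */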
  10493
  10494/**
  10495 *	modify_device_id - Modifies the device ID of the Boot BIOS image
  10496 *	@device_id: the device ID to write.
  10497 *	@boot_data: the boot image to modify.
  10498 *
  10499 *	Write the supplied device ID to the boot BIOS image.
  10500 */
  10501static void modify_device_id(int device_id, u8 *boot_data)
  10502{
  10503	struct cxgb4_pcir_data *pcir_header;
  10504	struct legacy_pci_rom_hdr *header;
  10505	u8 *cur_header = boot_data;
  10506	u16 pcir_offset;
  10507
   10508	/* Loop through all chained images and change the device IDs */
  10509	do {
  10510		header = (struct legacy_pci_rom_hdr *)cur_header;
  10511		pcir_offset = le16_to_cpu(header->pcir_offset);
  10512		pcir_header = (struct cxgb4_pcir_data *)(cur_header +
  10513			      pcir_offset);
  10514
  10515		/**
  10516		 * Only modify the Device ID if code type is Legacy or HP.
  10517		 * 0x00: Okay to modify
  10518		 * 0x01: FCODE. Do not modify
  10519		 * 0x03: Okay to modify
  10520		 * 0x04-0xFF: Do not modify
  10521		 */
  10522		if (pcir_header->code_type == CXGB4_HDR_CODE1) {
  10523			u8 csum = 0;
  10524			int i;
  10525
  10526			/**
   10527			 * Modify Device ID to match current adapter
  10528			 */
  10529			pcir_header->device_id = cpu_to_le16(device_id);
  10530
  10531			/**
  10532			 * Set checksum temporarily to 0.
  10533			 * We will recalculate it later.
  10534			 */
  10535			header->cksum = 0x0;
  10536
  10537			/**
  10538			 * Calculate and update checksum
  10539			 */
  10540			for (i = 0; i < (header->size512 * 512); i++)
  10541				csum += cur_header[i];
  10542
  10543			/**
  10544			 * Invert summed value to create the checksum
  10545			 * Writing new checksum value directly to the boot data
  10546			 */
  10547			cur_header[7] = -csum;
  10548
  10549		} else if (pcir_header->code_type == CXGB4_HDR_CODE2) {
  10550			/**
   10551			 * Modify Device ID to match current adapter
  10552			 */
  10553			pcir_header->device_id = cpu_to_le16(device_id);
  10554		}
  10555
  10556		/**
  10557		 * Move header pointer up to the next image in the ROM.
  10558		 */
  10559		cur_header += header->size512 * 512;
  10560	} while (!(pcir_header->indicator & CXGB4_HDR_INDI));
  10561}
  10562
  10563/**
  10564 *	t4_load_boot - download boot flash
  10565 *	@adap: the adapter
  10566 *	@boot_data: the boot image to write
  10567 *	@boot_addr: offset in flash to write boot_data
  10568 *	@size: image size
  10569 *
  10570 *	Write the supplied boot image to the card's serial flash.
   10571 *	The boot image consists of a 28-byte header followed by the
   10572 *	boot image data itself.
  10573 */
  10574int t4_load_boot(struct adapter *adap, u8 *boot_data,
  10575		 unsigned int boot_addr, unsigned int size)
  10576{
  10577	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
  10578	unsigned int boot_sector = (boot_addr * 1024);
  10579	struct cxgb4_pci_exp_rom_header *header;
  10580	struct cxgb4_pcir_data *pcir_header;
  10581	int pcir_offset;
  10582	unsigned int i;
  10583	u16 device_id;
  10584	int ret, addr;
  10585
  10586	/**
  10587	 * Make sure the boot image does not encroach on the firmware region
  10588	 */
  10589	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
  10590		dev_err(adap->pdev_dev, "boot image encroaching on firmware region\n");
  10591		return -EFBIG;
  10592	}
  10593
  10594	/* Get boot header */
  10595	header = (struct cxgb4_pci_exp_rom_header *)boot_data;
  10596	pcir_offset = le16_to_cpu(header->pcir_offset);
  10597	/* PCIR Data Structure */
  10598	pcir_header = (struct cxgb4_pcir_data *)&boot_data[pcir_offset];
  10599
  10600	/**
  10601	 * Perform some primitive sanity testing to avoid accidentally
  10602	 * writing garbage over the boot sectors.  We ought to check for
  10603	 * more but it's not worth it for now ...
  10604	 */
  10605	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
  10606		dev_err(adap->pdev_dev, "boot image too small/large\n");
  10607		return -EFBIG;
  10608	}
  10609
  10610	if (le16_to_cpu(header->signature) != BOOT_SIGNATURE) {
  10611		dev_err(adap->pdev_dev, "Boot image missing signature\n");
  10612		return -EINVAL;
  10613	}
  10614
  10615	/* Check PCI header signature */
  10616	if (le32_to_cpu(pcir_header->signature) != PCIR_SIGNATURE) {
  10617		dev_err(adap->pdev_dev, "PCI header missing signature\n");
  10618		return -EINVAL;
  10619	}
  10620
   10621	/* Check Vendor ID matches Chelsio ID */
  10622	if (le16_to_cpu(pcir_header->vendor_id) != PCI_VENDOR_ID_CHELSIO) {
  10623		dev_err(adap->pdev_dev, "Vendor ID missing signature\n");
  10624		return -EINVAL;
  10625	}
  10626
  10627	/**
  10628	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
  10629	 * and Boot configuration data sections. These 3 boot sections span
  10630	 * sectors 0 to 7 in flash and live right before the FW image location.
  10631	 */
  10632	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,  sf_sec_size);
  10633	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
  10634				     (boot_sector >> 16) + i - 1);
  10635
  10636	/**
  10637	 * If size == 0 then we're simply erasing the FLASH sectors associated
  10638	 * with the on-adapter option ROM file
  10639	 */
  10640	if (ret || size == 0)
  10641		goto out;
  10642	/* Retrieve adapter's device ID */
  10643	pci_read_config_word(adap->pdev, PCI_DEVICE_ID, &device_id);
   10644	/* We want to deal with PF 0, so strip off the PF 4 indicator */
  10645	device_id = device_id & 0xf0ff;
  10646
  10647	 /* Check PCIE Device ID */
  10648	if (le16_to_cpu(pcir_header->device_id) != device_id) {
  10649		/**
  10650		 * Change the device ID in the Boot BIOS image to match
  10651		 * the Device ID of the current adapter.
  10652		 */
  10653		modify_device_id(device_id, boot_data);
  10654	}
  10655
  10656	/**
  10657	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
  10658	 * we finish copying the rest of the boot image. This will ensure
  10659	 * that the BIOS boot header will only be written if the boot image
  10660	 * was written in full.
  10661	 */
  10662	addr = boot_sector;
  10663	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
  10664		addr += SF_PAGE_SIZE;
  10665		boot_data += SF_PAGE_SIZE;
  10666		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data,
  10667				     false);
  10668		if (ret)
  10669			goto out;
  10670	}
  10671
  10672	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
  10673			     (const u8 *)header, false);
  10674
  10675out:
  10676	if (ret)
  10677		dev_err(adap->pdev_dev, "boot image load failed, error %d\n",
  10678			ret);
  10679	return ret;
  10680}
  10681
  10682/**
  10683 *	t4_flash_bootcfg_addr - return the address of the flash
  10684 *	optionrom configuration
  10685 *	@adapter: the adapter
  10686 *
  10687 *	Return the address within the flash where the OptionROM Configuration
  10688 *	is stored, or an error if the device FLASH is too small to contain
   10689 *	an OptionROM Configuration.
  10690 */
  10691static int t4_flash_bootcfg_addr(struct adapter *adapter)
  10692{
  10693	/**
   10694	 * If the device FLASH isn't large enough to hold the OptionROM
   10695	 * Configuration, return an error.
  10696	 */
  10697	if (adapter->params.sf_size <
  10698	    FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
  10699		return -ENOSPC;
  10700
  10701	return FLASH_BOOTCFG_START;
  10702}
  10703
  10704int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
  10705{
  10706	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
  10707	struct cxgb4_bootcfg_data *header;
  10708	unsigned int flash_cfg_start_sec;
  10709	unsigned int addr, npad;
  10710	int ret, i, n, cfg_addr;
  10711
  10712	cfg_addr = t4_flash_bootcfg_addr(adap);
  10713	if (cfg_addr < 0)
  10714		return cfg_addr;
  10715
  10716	addr = cfg_addr;
  10717	flash_cfg_start_sec = addr / SF_SEC_SIZE;
  10718
  10719	if (size > FLASH_BOOTCFG_MAX_SIZE) {
  10720		dev_err(adap->pdev_dev, "bootcfg file too large, max is %u bytes\n",
  10721			FLASH_BOOTCFG_MAX_SIZE);
  10722		return -EFBIG;
  10723	}
  10724
  10725	header = (struct cxgb4_bootcfg_data *)cfg_data;
  10726	if (le16_to_cpu(header->signature) != BOOT_CFG_SIG) {
  10727		dev_err(adap->pdev_dev, "Wrong bootcfg signature\n");
  10728		ret = -EINVAL;
  10729		goto out;
  10730	}
  10731
  10732	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,
  10733			 sf_sec_size);
  10734	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
  10735				     flash_cfg_start_sec + i - 1);
  10736
  10737	/**
  10738	 * If size == 0 then we're simply erasing the FLASH sectors associated
  10739	 * with the on-adapter OptionROM Configuration File.
  10740	 */
  10741	if (ret || size == 0)
  10742		goto out;
  10743
  10744	/* this will write to the flash up to SF_PAGE_SIZE at a time */
  10745	for (i = 0; i < size; i += SF_PAGE_SIZE) {
  10746		n = min_t(u32, size - i, SF_PAGE_SIZE);
  10747
  10748		ret = t4_write_flash(adap, addr, n, cfg_data, false);
  10749		if (ret)
  10750			goto out;
  10751
  10752		addr += SF_PAGE_SIZE;
  10753		cfg_data += SF_PAGE_SIZE;
  10754	}
  10755
  10756	npad = ((size + 4 - 1) & ~3) - size;
  10757	for (i = 0; i < npad; i++) {
  10758		u8 data = 0;
  10759
  10760		ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
  10761				     false);
  10762		if (ret)
  10763			goto out;
  10764	}
  10765
  10766out:
  10767	if (ret)
  10768		dev_err(adap->pdev_dev, "boot config data %s failed %d\n",
  10769			(size == 0 ? "clear" : "download"), ret);
  10770	return ret;
  10771}