cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bnxt.c (371159B)


      1/* Broadcom NetXtreme-C/E network driver.
      2 *
      3 * Copyright (c) 2014-2016 Broadcom Corporation
      4 * Copyright (c) 2016-2019 Broadcom Limited
      5 *
      6 * This program is free software; you can redistribute it and/or modify
      7 * it under the terms of the GNU General Public License as published by
      8 * the Free Software Foundation.
      9 */
     10
     11#include <linux/module.h>
     12
     13#include <linux/stringify.h>
     14#include <linux/kernel.h>
     15#include <linux/timer.h>
     16#include <linux/errno.h>
     17#include <linux/ioport.h>
     18#include <linux/slab.h>
     19#include <linux/vmalloc.h>
     20#include <linux/interrupt.h>
     21#include <linux/pci.h>
     22#include <linux/netdevice.h>
     23#include <linux/etherdevice.h>
     24#include <linux/skbuff.h>
     25#include <linux/dma-mapping.h>
     26#include <linux/bitops.h>
     27#include <linux/io.h>
     28#include <linux/irq.h>
     29#include <linux/delay.h>
     30#include <asm/byteorder.h>
     31#include <asm/page.h>
     32#include <linux/time.h>
     33#include <linux/mii.h>
     34#include <linux/mdio.h>
     35#include <linux/if.h>
     36#include <linux/if_vlan.h>
     37#include <linux/if_bridge.h>
     38#include <linux/rtc.h>
     39#include <linux/bpf.h>
     40#include <net/gro.h>
     41#include <net/ip.h>
     42#include <net/tcp.h>
     43#include <net/udp.h>
     44#include <net/checksum.h>
     45#include <net/ip6_checksum.h>
     46#include <net/udp_tunnel.h>
     47#include <linux/workqueue.h>
     48#include <linux/prefetch.h>
     49#include <linux/cache.h>
     50#include <linux/log2.h>
     51#include <linux/aer.h>
     52#include <linux/bitmap.h>
     53#include <linux/cpu_rmap.h>
     54#include <linux/cpumask.h>
     55#include <net/pkt_cls.h>
     56#include <linux/hwmon.h>
     57#include <linux/hwmon-sysfs.h>
     58#include <net/page_pool.h>
     59#include <linux/align.h>
     60
     61#include "bnxt_hsi.h"
     62#include "bnxt.h"
     63#include "bnxt_hwrm.h"
     64#include "bnxt_ulp.h"
     65#include "bnxt_sriov.h"
     66#include "bnxt_ethtool.h"
     67#include "bnxt_dcb.h"
     68#include "bnxt_xdp.h"
     69#include "bnxt_ptp.h"
     70#include "bnxt_vfr.h"
     71#include "bnxt_tc.h"
     72#include "bnxt_devlink.h"
     73#include "bnxt_debugfs.h"
     74
     75#define BNXT_TX_TIMEOUT		(5 * HZ)
     76#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
     77				 NETIF_MSG_TX_ERR)
     78
     79MODULE_LICENSE("GPL");
     80MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
     81
     82#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
     83#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
     84#define BNXT_RX_COPY_THRESH 256
     85
     86#define BNXT_TX_PUSH_THRESH 164
     87
     88/* indexed by enum board_idx */
     89static const struct {
     90	char *name;
     91} board_info[] = {
     92	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
     93	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
     94	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
     95	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
     96	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
     97	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
     98	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
     99	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
    100	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
    101	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
    102	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
    103	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
    104	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
    105	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
    106	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
    107	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
    108	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
    109	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
    110	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
    111	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
    112	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
    113	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
    114	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
    115	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
    116	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
    117	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
    118	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
    119	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
    120	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
    121	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
    122	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
    123	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
    124	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
    125	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
    126	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
    127	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
    128	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
    129	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
    130	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
    131	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
    132	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
    133	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
    134	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
    135	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
    136	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
    137};
    138
    139static const struct pci_device_id bnxt_pci_tbl[] = {
    140	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
    141	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
    142	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
    143	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
    144	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
    145	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
    146	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
    147	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
    148	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
    149	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
    150	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
    151	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
    152	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
    153	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
    154	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
    155	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
    156	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
    157	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
    158	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
    159	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
    160	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
    161	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
    162	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
    163	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
    164	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
    165	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
    166	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
    167	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
    168	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
    169	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
    170	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
    171	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
    172	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
    173	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
    174	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
    175	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
    176	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
    177	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
    178	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
    179	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
    180	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
    181	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
    182	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
    183	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
    184	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
    185	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
    186#ifdef CONFIG_BNXT_SRIOV
    187	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
    188	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
    189	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
    190	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
    191	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
    192	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
    193	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
    194	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
    195	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
    196	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
    197	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
    198	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
    199	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
    200	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
    201	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
    202	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
    203	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
    204	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
    205	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
    206	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
    207	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
    208#endif
    209	{ 0 }
    210};
    211
    212MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
    213
    214static const u16 bnxt_vf_req_snif[] = {
    215	HWRM_FUNC_CFG,
    216	HWRM_FUNC_VF_CFG,
    217	HWRM_PORT_PHY_QCFG,
    218	HWRM_CFA_L2_FILTER_ALLOC,
    219};
    220
    221static const u16 bnxt_async_events_arr[] = {
    222	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
    223	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
    224	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
    225	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
    226	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
    227	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
    228	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
    229	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
    230	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
    231	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
    232	ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
    233	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
    234	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
    235	ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
    236	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
    237	ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
    238};
    239
    240static struct workqueue_struct *bnxt_pf_wq;
    241
    242static bool bnxt_vf_pciid(enum board_idx idx)
    243{
    244	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
    245		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
    246		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
    247		idx == NETXTREME_E_P5_VF_HV);
    248}
    249
    250#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
    251#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
    252#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)
    253
    254#define BNXT_CP_DB_IRQ_DIS(db)						\
    255		writel(DB_CP_IRQ_DIS_FLAGS, db)
    256
    257#define BNXT_DB_CQ(db, idx)						\
    258	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
    259
    260#define BNXT_DB_NQ_P5(db, idx)						\
    261	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx),	\
    262		    (db)->doorbell)
    263
    264#define BNXT_DB_CQ_ARM(db, idx)						\
    265	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
    266
    267#define BNXT_DB_NQ_ARM_P5(db, idx)					\
    268	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
    269		    (db)->doorbell)
    270
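        /* Doorbell helpers: on P5 (BCM5750x-class) chips the NQ/CQ doorbells
         * are 64-bit values built from db_key64 and written with
         * bnxt_writeq(); older chips fall back to the 32-bit completion-ring
         * doorbell format (DB_KEY_CP | DB_IDX_VALID | ...) via writel().
         */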
    271static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
    272{
    273	if (bp->flags & BNXT_FLAG_CHIP_P5)
    274		BNXT_DB_NQ_P5(db, idx);
    275	else
    276		BNXT_DB_CQ(db, idx);
    277}
    278
    279static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
    280{
    281	if (bp->flags & BNXT_FLAG_CHIP_P5)
    282		BNXT_DB_NQ_ARM_P5(db, idx);
    283	else
    284		BNXT_DB_CQ_ARM(db, idx);
    285}
    286
    287static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
    288{
    289	if (bp->flags & BNXT_FLAG_CHIP_P5)
    290		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
    291			    RING_CMP(idx), db->doorbell);
    292	else
    293		BNXT_DB_CQ(db, idx);
    294}
    295
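        /* TX length hint written into the BD flags, indexed by packet length
         * in 512-byte units (bnxt_start_xmit() shifts the length right by 9
         * before indexing this table).
         */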
    296const u16 bnxt_lhint_arr[] = {
    297	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
    298	TX_BD_FLAGS_LHINT_512_TO_1023,
    299	TX_BD_FLAGS_LHINT_1024_TO_2047,
    300	TX_BD_FLAGS_LHINT_1024_TO_2047,
    301	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
    302	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
    303	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
    304	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
    305	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
    306	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
    307	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
    308	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
    309	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
    310	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
    311	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
    312	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
    313	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
    314	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
    315	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
    316};
    317
    318static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
    319{
    320	struct metadata_dst *md_dst = skb_metadata_dst(skb);
    321
    322	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
    323		return 0;
    324
    325	return md_dst->u.port_info.port_id;
    326}
    327
    328static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
    329			     u16 prod)
    330{
    331	bnxt_db_write(bp, &txr->tx_db, prod);
    332	txr->kick_pending = 0;
    333}
    334
    335static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
    336					  struct bnxt_tx_ring_info *txr,
    337					  struct netdev_queue *txq)
    338{
    339	netif_tx_stop_queue(txq);
    340
    341	/* netif_tx_stop_queue() must be done before checking
    342	 * tx index in bnxt_tx_avail() below, because in
    343	 * bnxt_tx_int(), we update tx index before checking for
    344	 * netif_tx_queue_stopped().
    345	 */
    346	smp_mb();
    347	if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
    348		netif_tx_wake_queue(txq);
    349		return false;
    350	}
    351
    352	return true;
    353}
    354
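        /* Main transmit routine.  Small packets (length <= tx_push_thresh) on
         * an otherwise empty ring are copied directly into the doorbell
         * "push" buffer; all other packets are DMA-mapped and posted as long
         * TX BDs, with the doorbell kicked at the end unless more packets are
         * pending (netdev_xmit_more()).
         */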
    355static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
    356{
    357	struct bnxt *bp = netdev_priv(dev);
    358	struct tx_bd *txbd;
    359	struct tx_bd_ext *txbd1;
    360	struct netdev_queue *txq;
    361	int i;
    362	dma_addr_t mapping;
    363	unsigned int length, pad = 0;
    364	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
    365	u16 prod, last_frag;
    366	struct pci_dev *pdev = bp->pdev;
    367	struct bnxt_tx_ring_info *txr;
    368	struct bnxt_sw_tx_bd *tx_buf;
    369	__le32 lflags = 0;
    370
    371	i = skb_get_queue_mapping(skb);
    372	if (unlikely(i >= bp->tx_nr_rings)) {
    373		dev_kfree_skb_any(skb);
    374		dev_core_stats_tx_dropped_inc(dev);
    375		return NETDEV_TX_OK;
    376	}
    377
    378	txq = netdev_get_tx_queue(dev, i);
    379	txr = &bp->tx_ring[bp->tx_ring_map[i]];
    380	prod = txr->tx_prod;
    381
    382	free_size = bnxt_tx_avail(bp, txr);
    383	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
    384		/* We must have raced with NAPI cleanup */
    385		if (net_ratelimit() && txr->kick_pending)
    386			netif_warn(bp, tx_err, dev,
    387				   "bnxt: ring busy w/ flush pending!\n");
    388		if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
    389			return NETDEV_TX_BUSY;
    390	}
    391
    392	length = skb->len;
    393	len = skb_headlen(skb);
    394	last_frag = skb_shinfo(skb)->nr_frags;
    395
    396	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
    397
    398	txbd->tx_bd_opaque = prod;
    399
    400	tx_buf = &txr->tx_buf_ring[prod];
    401	tx_buf->skb = skb;
    402	tx_buf->nr_frags = last_frag;
    403
    404	vlan_tag_flags = 0;
    405	cfa_action = bnxt_xmit_get_cfa_action(skb);
    406	if (skb_vlan_tag_present(skb)) {
    407		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
    408				 skb_vlan_tag_get(skb);
     409		/* Currently supports 802.1Q and 802.1AD VLAN offloads;
     410		 * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
     411		 */
    412		if (skb->vlan_proto == htons(ETH_P_8021Q))
    413			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
    414	}
    415
    416	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
    417		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
    418
    419		if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
    420		    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
    421			if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
    422					    &ptp->tx_hdr_off)) {
    423				if (vlan_tag_flags)
    424					ptp->tx_hdr_off += VLAN_HLEN;
    425				lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
    426				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
    427			} else {
    428				atomic_inc(&bp->ptp_cfg->tx_avail);
    429			}
    430		}
    431	}
    432
    433	if (unlikely(skb->no_fcs))
    434		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
    435
    436	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
    437	    !lflags) {
    438		struct tx_push_buffer *tx_push_buf = txr->tx_push;
    439		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
    440		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
    441		void __iomem *db = txr->tx_db.doorbell;
    442		void *pdata = tx_push_buf->data;
    443		u64 *end;
    444		int j, push_len;
    445
    446		/* Set COAL_NOW to be ready quickly for the next push */
    447		tx_push->tx_bd_len_flags_type =
    448			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
    449					TX_BD_TYPE_LONG_TX_BD |
    450					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
    451					TX_BD_FLAGS_COAL_NOW |
    452					TX_BD_FLAGS_PACKET_END |
    453					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));
    454
    455		if (skb->ip_summed == CHECKSUM_PARTIAL)
    456			tx_push1->tx_bd_hsize_lflags =
    457					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
    458		else
    459			tx_push1->tx_bd_hsize_lflags = 0;
    460
    461		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
    462		tx_push1->tx_bd_cfa_action =
    463			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
    464
    465		end = pdata + length;
    466		end = PTR_ALIGN(end, 8) - 1;
    467		*end = 0;
    468
    469		skb_copy_from_linear_data(skb, pdata, len);
    470		pdata += len;
    471		for (j = 0; j < last_frag; j++) {
    472			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
    473			void *fptr;
    474
    475			fptr = skb_frag_address_safe(frag);
    476			if (!fptr)
    477				goto normal_tx;
    478
    479			memcpy(pdata, fptr, skb_frag_size(frag));
    480			pdata += skb_frag_size(frag);
    481		}
    482
    483		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
    484		txbd->tx_bd_haddr = txr->data_mapping;
    485		prod = NEXT_TX(prod);
    486		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
    487		memcpy(txbd, tx_push1, sizeof(*txbd));
    488		prod = NEXT_TX(prod);
    489		tx_push->doorbell =
    490			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
    491		txr->tx_prod = prod;
    492
    493		tx_buf->is_push = 1;
    494		netdev_tx_sent_queue(txq, skb->len);
    495		wmb();	/* Sync is_push and byte queue before pushing data */
    496
    497		push_len = (length + sizeof(*tx_push) + 7) / 8;
    498		if (push_len > 16) {
    499			__iowrite64_copy(db, tx_push_buf, 16);
    500			__iowrite32_copy(db + 4, tx_push_buf + 1,
    501					 (push_len - 16) << 1);
    502		} else {
    503			__iowrite64_copy(db, tx_push_buf, push_len);
    504		}
    505
    506		goto tx_done;
    507	}
    508
    509normal_tx:
    510	if (length < BNXT_MIN_PKT_SIZE) {
    511		pad = BNXT_MIN_PKT_SIZE - length;
    512		if (skb_pad(skb, pad))
    513			/* SKB already freed. */
    514			goto tx_kick_pending;
    515		length = BNXT_MIN_PKT_SIZE;
    516	}
    517
    518	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
    519
    520	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
    521		goto tx_free;
    522
    523	dma_unmap_addr_set(tx_buf, mapping, mapping);
    524	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
    525		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
    526
    527	txbd->tx_bd_haddr = cpu_to_le64(mapping);
    528
    529	prod = NEXT_TX(prod);
    530	txbd1 = (struct tx_bd_ext *)
    531		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
    532
    533	txbd1->tx_bd_hsize_lflags = lflags;
    534	if (skb_is_gso(skb)) {
    535		u32 hdr_len;
    536
    537		if (skb->encapsulation)
    538			hdr_len = skb_inner_network_offset(skb) +
    539				skb_inner_network_header_len(skb) +
    540				inner_tcp_hdrlen(skb);
    541		else
    542			hdr_len = skb_transport_offset(skb) +
    543				tcp_hdrlen(skb);
    544
    545		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
    546					TX_BD_FLAGS_T_IPID |
    547					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
    548		length = skb_shinfo(skb)->gso_size;
    549		txbd1->tx_bd_mss = cpu_to_le32(length);
    550		length += hdr_len;
    551	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
    552		txbd1->tx_bd_hsize_lflags |=
    553			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
    554		txbd1->tx_bd_mss = 0;
    555	}
    556
    557	length >>= 9;
    558	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
    559		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
    560				     skb->len);
    561		i = 0;
    562		goto tx_dma_error;
    563	}
    564	flags |= bnxt_lhint_arr[length];
    565	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
    566
    567	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
    568	txbd1->tx_bd_cfa_action =
    569			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
    570	for (i = 0; i < last_frag; i++) {
    571		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
    572
    573		prod = NEXT_TX(prod);
    574		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
    575
    576		len = skb_frag_size(frag);
    577		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
    578					   DMA_TO_DEVICE);
    579
    580		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
    581			goto tx_dma_error;
    582
    583		tx_buf = &txr->tx_buf_ring[prod];
    584		dma_unmap_addr_set(tx_buf, mapping, mapping);
    585
    586		txbd->tx_bd_haddr = cpu_to_le64(mapping);
    587
    588		flags = len << TX_BD_LEN_SHIFT;
    589		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
    590	}
    591
    592	flags &= ~TX_BD_LEN;
    593	txbd->tx_bd_len_flags_type =
    594		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
    595			    TX_BD_FLAGS_PACKET_END);
    596
    597	netdev_tx_sent_queue(txq, skb->len);
    598
    599	skb_tx_timestamp(skb);
    600
    601	/* Sync BD data before updating doorbell */
    602	wmb();
    603
    604	prod = NEXT_TX(prod);
    605	txr->tx_prod = prod;
    606
    607	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
    608		bnxt_txr_db_kick(bp, txr, prod);
    609	else
    610		txr->kick_pending = 1;
    611
    612tx_done:
    613
    614	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
    615		if (netdev_xmit_more() && !tx_buf->is_push)
    616			bnxt_txr_db_kick(bp, txr, prod);
    617
    618		bnxt_txr_netif_try_stop_queue(bp, txr, txq);
    619	}
    620	return NETDEV_TX_OK;
    621
    622tx_dma_error:
    623	if (BNXT_TX_PTP_IS_SET(lflags))
    624		atomic_inc(&bp->ptp_cfg->tx_avail);
    625
    626	last_frag = i;
    627
    628	/* start back at beginning and unmap skb */
    629	prod = txr->tx_prod;
    630	tx_buf = &txr->tx_buf_ring[prod];
    631	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
    632			 skb_headlen(skb), DMA_TO_DEVICE);
    633	prod = NEXT_TX(prod);
    634
    635	/* unmap remaining mapped pages */
    636	for (i = 0; i < last_frag; i++) {
    637		prod = NEXT_TX(prod);
    638		tx_buf = &txr->tx_buf_ring[prod];
    639		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
    640			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
    641			       DMA_TO_DEVICE);
    642	}
    643
    644tx_free:
    645	dev_kfree_skb_any(skb);
    646tx_kick_pending:
    647	if (txr->kick_pending)
    648		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
    649	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
    650	dev_core_stats_tx_dropped_inc(dev);
    651	return NETDEV_TX_OK;
    652}
    653
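        /* TX completion: walk nr_pkts completed packets, unmap their buffers,
         * free the skbs (unless a PTP timestamp completion is still pending),
         * and re-wake the queue if it was stopped and enough descriptors are
         * free again.
         */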
    654static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
    655{
    656	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
    657	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
    658	u16 cons = txr->tx_cons;
    659	struct pci_dev *pdev = bp->pdev;
    660	int i;
    661	unsigned int tx_bytes = 0;
    662
    663	for (i = 0; i < nr_pkts; i++) {
    664		struct bnxt_sw_tx_bd *tx_buf;
    665		bool compl_deferred = false;
    666		struct sk_buff *skb;
    667		int j, last;
    668
    669		tx_buf = &txr->tx_buf_ring[cons];
    670		cons = NEXT_TX(cons);
    671		skb = tx_buf->skb;
    672		tx_buf->skb = NULL;
    673
    674		if (tx_buf->is_push) {
    675			tx_buf->is_push = 0;
    676			goto next_tx_int;
    677		}
    678
    679		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
    680				 skb_headlen(skb), DMA_TO_DEVICE);
    681		last = tx_buf->nr_frags;
    682
    683		for (j = 0; j < last; j++) {
    684			cons = NEXT_TX(cons);
    685			tx_buf = &txr->tx_buf_ring[cons];
    686			dma_unmap_page(
    687				&pdev->dev,
    688				dma_unmap_addr(tx_buf, mapping),
    689				skb_frag_size(&skb_shinfo(skb)->frags[j]),
    690				DMA_TO_DEVICE);
    691		}
    692		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
    693			if (bp->flags & BNXT_FLAG_CHIP_P5) {
    694				if (!bnxt_get_tx_ts_p5(bp, skb))
    695					compl_deferred = true;
    696				else
    697					atomic_inc(&bp->ptp_cfg->tx_avail);
    698			}
    699		}
    700
    701next_tx_int:
    702		cons = NEXT_TX(cons);
    703
    704		tx_bytes += skb->len;
    705		if (!compl_deferred)
    706			dev_kfree_skb_any(skb);
    707	}
    708
    709	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
    710	txr->tx_cons = cons;
    711
    712	/* Need to make the tx_cons update visible to bnxt_start_xmit()
    713	 * before checking for netif_tx_queue_stopped().  Without the
    714	 * memory barrier, there is a small possibility that bnxt_start_xmit()
    715	 * will miss it and cause the queue to be stopped forever.
    716	 */
    717	smp_mb();
    718
    719	if (unlikely(netif_tx_queue_stopped(txq)) &&
    720	    bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
    721	    READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
    722		netif_tx_wake_queue(txq);
    723}
    724
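        /* RX buffer allocation: page-mode rings take whole pages from the
         * page_pool, while normal rings use smaller frag allocations
         * (napi_alloc_frag()/netdev_alloc_frag()); both are DMA-mapped before
         * being posted to the RX ring.
         */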
    725static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
    726					 struct bnxt_rx_ring_info *rxr,
    727					 gfp_t gfp)
    728{
    729	struct device *dev = &bp->pdev->dev;
    730	struct page *page;
    731
    732	page = page_pool_dev_alloc_pages(rxr->page_pool);
    733	if (!page)
    734		return NULL;
    735
    736	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
    737				      DMA_ATTR_WEAK_ORDERING);
    738	if (dma_mapping_error(dev, *mapping)) {
    739		page_pool_recycle_direct(rxr->page_pool, page);
    740		return NULL;
    741	}
    742	return page;
    743}
    744
    745static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
    746				       gfp_t gfp)
    747{
    748	u8 *data;
    749	struct pci_dev *pdev = bp->pdev;
    750
    751	if (gfp == GFP_ATOMIC)
    752		data = napi_alloc_frag(bp->rx_buf_size);
    753	else
    754		data = netdev_alloc_frag(bp->rx_buf_size);
    755	if (!data)
    756		return NULL;
    757
    758	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
    759					bp->rx_buf_use_size, bp->rx_dir,
    760					DMA_ATTR_WEAK_ORDERING);
    761
    762	if (dma_mapping_error(&pdev->dev, *mapping)) {
    763		skb_free_frag(data);
    764		data = NULL;
    765	}
    766	return data;
    767}
    768
    769int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
    770		       u16 prod, gfp_t gfp)
    771{
    772	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
    773	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
    774	dma_addr_t mapping;
    775
    776	if (BNXT_RX_PAGE_MODE(bp)) {
    777		struct page *page =
    778			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
    779
    780		if (!page)
    781			return -ENOMEM;
    782
    783		mapping += bp->rx_dma_offset;
    784		rx_buf->data = page;
    785		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
    786	} else {
    787		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
    788
    789		if (!data)
    790			return -ENOMEM;
    791
    792		rx_buf->data = data;
    793		rx_buf->data_ptr = data + bp->rx_offset;
    794	}
    795	rx_buf->mapping = mapping;
    796
    797	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
    798	return 0;
    799}
    800
    801void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
    802{
    803	u16 prod = rxr->rx_prod;
    804	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
    805	struct rx_bd *cons_bd, *prod_bd;
    806
    807	prod_rx_buf = &rxr->rx_buf_ring[prod];
    808	cons_rx_buf = &rxr->rx_buf_ring[cons];
    809
    810	prod_rx_buf->data = data;
    811	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
    812
    813	prod_rx_buf->mapping = cons_rx_buf->mapping;
    814
    815	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
    816	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
    817
    818	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
    819}
    820
    821static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
    822{
    823	u16 next, max = rxr->rx_agg_bmap_size;
    824
    825	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
    826	if (next >= max)
    827		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
    828	return next;
    829}
    830
    831static inline int bnxt_alloc_rx_page(struct bnxt *bp,
    832				     struct bnxt_rx_ring_info *rxr,
    833				     u16 prod, gfp_t gfp)
    834{
    835	struct rx_bd *rxbd =
    836		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
    837	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
    838	struct pci_dev *pdev = bp->pdev;
    839	struct page *page;
    840	dma_addr_t mapping;
    841	u16 sw_prod = rxr->rx_sw_agg_prod;
    842	unsigned int offset = 0;
    843
    844	if (BNXT_RX_PAGE_MODE(bp)) {
    845		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
    846
    847		if (!page)
    848			return -ENOMEM;
    849
    850	} else {
    851		if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
    852			page = rxr->rx_page;
    853			if (!page) {
    854				page = alloc_page(gfp);
    855				if (!page)
    856					return -ENOMEM;
    857				rxr->rx_page = page;
    858				rxr->rx_page_offset = 0;
    859			}
    860			offset = rxr->rx_page_offset;
    861			rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
    862			if (rxr->rx_page_offset == PAGE_SIZE)
    863				rxr->rx_page = NULL;
    864			else
    865				get_page(page);
    866		} else {
    867			page = alloc_page(gfp);
    868			if (!page)
    869				return -ENOMEM;
    870		}
    871
    872		mapping = dma_map_page_attrs(&pdev->dev, page, offset,
    873					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
    874					     DMA_ATTR_WEAK_ORDERING);
    875		if (dma_mapping_error(&pdev->dev, mapping)) {
    876			__free_page(page);
    877			return -EIO;
    878		}
    879	}
    880
    881	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
    882		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
    883
    884	__set_bit(sw_prod, rxr->rx_agg_bmap);
    885	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
    886	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
    887
    888	rx_agg_buf->page = page;
    889	rx_agg_buf->offset = offset;
    890	rx_agg_buf->mapping = mapping;
    891	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
    892	rxbd->rx_bd_opaque = sw_prod;
    893	return 0;
    894}
    895
    896static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
    897				       struct bnxt_cp_ring_info *cpr,
    898				       u16 cp_cons, u16 curr)
    899{
    900	struct rx_agg_cmp *agg;
    901
    902	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
    903	agg = (struct rx_agg_cmp *)
    904		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
    905	return agg;
    906}
    907
    908static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
    909					      struct bnxt_rx_ring_info *rxr,
    910					      u16 agg_id, u16 curr)
    911{
    912	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
    913
    914	return &tpa_info->agg_arr[curr];
    915}
    916
    917static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
    918				   u16 start, u32 agg_bufs, bool tpa)
    919{
    920	struct bnxt_napi *bnapi = cpr->bnapi;
    921	struct bnxt *bp = bnapi->bp;
    922	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
    923	u16 prod = rxr->rx_agg_prod;
    924	u16 sw_prod = rxr->rx_sw_agg_prod;
    925	bool p5_tpa = false;
    926	u32 i;
    927
    928	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
    929		p5_tpa = true;
    930
    931	for (i = 0; i < agg_bufs; i++) {
    932		u16 cons;
    933		struct rx_agg_cmp *agg;
    934		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
    935		struct rx_bd *prod_bd;
    936		struct page *page;
    937
    938		if (p5_tpa)
    939			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
    940		else
    941			agg = bnxt_get_agg(bp, cpr, idx, start + i);
    942		cons = agg->rx_agg_cmp_opaque;
    943		__clear_bit(cons, rxr->rx_agg_bmap);
    944
    945		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
    946			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
    947
    948		__set_bit(sw_prod, rxr->rx_agg_bmap);
    949		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
    950		cons_rx_buf = &rxr->rx_agg_ring[cons];
    951
    952		/* It is possible for sw_prod to be equal to cons, so
    953		 * set cons_rx_buf->page to NULL first.
    954		 */
    955		page = cons_rx_buf->page;
    956		cons_rx_buf->page = NULL;
    957		prod_rx_buf->page = page;
    958		prod_rx_buf->offset = cons_rx_buf->offset;
    959
    960		prod_rx_buf->mapping = cons_rx_buf->mapping;
    961
    962		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
    963
    964		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
    965		prod_bd->rx_bd_opaque = sw_prod;
    966
    967		prod = NEXT_RX_AGG(prod);
    968		sw_prod = NEXT_RX_AGG(sw_prod);
    969	}
    970	rxr->rx_agg_prod = prod;
    971	rxr->rx_sw_agg_prod = sw_prod;
    972}
    973
    974static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
    975					      struct bnxt_rx_ring_info *rxr,
    976					      u16 cons, void *data, u8 *data_ptr,
    977					      dma_addr_t dma_addr,
    978					      unsigned int offset_and_len)
    979{
    980	unsigned int len = offset_and_len & 0xffff;
    981	struct page *page = data;
    982	u16 prod = rxr->rx_prod;
    983	struct sk_buff *skb;
    984	int err;
    985
    986	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
    987	if (unlikely(err)) {
    988		bnxt_reuse_rx_data(rxr, cons, data);
    989		return NULL;
    990	}
    991	dma_addr -= bp->rx_dma_offset;
    992	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
    993			     DMA_ATTR_WEAK_ORDERING);
    994	skb = build_skb(page_address(page), BNXT_PAGE_MODE_BUF_SIZE +
    995					    bp->rx_dma_offset);
    996	if (!skb) {
    997		__free_page(page);
    998		return NULL;
    999	}
   1000	skb_mark_for_recycle(skb);
   1001	skb_reserve(skb, bp->rx_dma_offset);
   1002	__skb_put(skb, len);
   1003
   1004	return skb;
   1005}
   1006
   1007static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
   1008					struct bnxt_rx_ring_info *rxr,
   1009					u16 cons, void *data, u8 *data_ptr,
   1010					dma_addr_t dma_addr,
   1011					unsigned int offset_and_len)
   1012{
   1013	unsigned int payload = offset_and_len >> 16;
   1014	unsigned int len = offset_and_len & 0xffff;
   1015	skb_frag_t *frag;
   1016	struct page *page = data;
   1017	u16 prod = rxr->rx_prod;
   1018	struct sk_buff *skb;
   1019	int off, err;
   1020
   1021	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
   1022	if (unlikely(err)) {
   1023		bnxt_reuse_rx_data(rxr, cons, data);
   1024		return NULL;
   1025	}
   1026	dma_addr -= bp->rx_dma_offset;
   1027	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
   1028			     DMA_ATTR_WEAK_ORDERING);
   1029
   1030	if (unlikely(!payload))
   1031		payload = eth_get_headlen(bp->dev, data_ptr, len);
   1032
   1033	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
   1034	if (!skb) {
   1035		__free_page(page);
   1036		return NULL;
   1037	}
   1038
   1039	skb_mark_for_recycle(skb);
   1040	off = (void *)data_ptr - page_address(page);
   1041	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
   1042	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
   1043	       payload + NET_IP_ALIGN);
   1044
   1045	frag = &skb_shinfo(skb)->frags[0];
   1046	skb_frag_size_sub(frag, payload);
   1047	skb_frag_off_add(frag, payload);
   1048	skb->data_len -= payload;
   1049	skb->tail += payload;
   1050
   1051	return skb;
   1052}
   1053
   1054static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
   1055				   struct bnxt_rx_ring_info *rxr, u16 cons,
   1056				   void *data, u8 *data_ptr,
   1057				   dma_addr_t dma_addr,
   1058				   unsigned int offset_and_len)
   1059{
   1060	u16 prod = rxr->rx_prod;
   1061	struct sk_buff *skb;
   1062	int err;
   1063
   1064	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
   1065	if (unlikely(err)) {
   1066		bnxt_reuse_rx_data(rxr, cons, data);
   1067		return NULL;
   1068	}
   1069
   1070	skb = build_skb(data, bp->rx_buf_size);
   1071	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
   1072			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
   1073	if (!skb) {
   1074		skb_free_frag(data);
   1075		return NULL;
   1076	}
   1077
   1078	skb_reserve(skb, bp->rx_offset);
   1079	skb_put(skb, offset_and_len & 0xffff);
   1080	return skb;
   1081}
   1082
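        /* Gather the aggregation-ring pages of a multi-buffer packet into
         * skb/xdp frags, replenishing each consumed agg slot as it goes; on
         * allocation failure the remaining buffers are recycled and 0 is
         * returned.
         */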
   1083static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
   1084			       struct bnxt_cp_ring_info *cpr,
   1085			       struct skb_shared_info *shinfo,
   1086			       u16 idx, u32 agg_bufs, bool tpa,
   1087			       struct xdp_buff *xdp)
   1088{
   1089	struct bnxt_napi *bnapi = cpr->bnapi;
   1090	struct pci_dev *pdev = bp->pdev;
   1091	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
   1092	u16 prod = rxr->rx_agg_prod;
   1093	u32 i, total_frag_len = 0;
   1094	bool p5_tpa = false;
   1095
   1096	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
   1097		p5_tpa = true;
   1098
   1099	for (i = 0; i < agg_bufs; i++) {
   1100		skb_frag_t *frag = &shinfo->frags[i];
   1101		u16 cons, frag_len;
   1102		struct rx_agg_cmp *agg;
   1103		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
   1104		struct page *page;
   1105		dma_addr_t mapping;
   1106
   1107		if (p5_tpa)
   1108			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
   1109		else
   1110			agg = bnxt_get_agg(bp, cpr, idx, i);
   1111		cons = agg->rx_agg_cmp_opaque;
   1112		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
   1113			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
   1114
   1115		cons_rx_buf = &rxr->rx_agg_ring[cons];
   1116		skb_frag_off_set(frag, cons_rx_buf->offset);
   1117		skb_frag_size_set(frag, frag_len);
   1118		__skb_frag_set_page(frag, cons_rx_buf->page);
   1119		shinfo->nr_frags = i + 1;
   1120		__clear_bit(cons, rxr->rx_agg_bmap);
   1121
   1122		/* It is possible for bnxt_alloc_rx_page() to allocate
   1123		 * a sw_prod index that equals the cons index, so we
   1124		 * need to clear the cons entry now.
   1125		 */
   1126		mapping = cons_rx_buf->mapping;
   1127		page = cons_rx_buf->page;
   1128		cons_rx_buf->page = NULL;
   1129
   1130		if (xdp && page_is_pfmemalloc(page))
   1131			xdp_buff_set_frag_pfmemalloc(xdp);
   1132
   1133		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
   1134			unsigned int nr_frags;
   1135
   1136			nr_frags = --shinfo->nr_frags;
   1137			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
   1138			cons_rx_buf->page = page;
   1139
    1140			/* Update prod since some pages may already have
    1141			 * been allocated.
    1142			 */
   1143			rxr->rx_agg_prod = prod;
   1144			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
   1145			return 0;
   1146		}
   1147
   1148		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
   1149				     bp->rx_dir,
   1150				     DMA_ATTR_WEAK_ORDERING);
   1151
   1152		total_frag_len += frag_len;
   1153		prod = NEXT_RX_AGG(prod);
   1154	}
   1155	rxr->rx_agg_prod = prod;
   1156	return total_frag_len;
   1157}
   1158
   1159static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
   1160					     struct bnxt_cp_ring_info *cpr,
   1161					     struct sk_buff *skb, u16 idx,
   1162					     u32 agg_bufs, bool tpa)
   1163{
   1164	struct skb_shared_info *shinfo = skb_shinfo(skb);
   1165	u32 total_frag_len = 0;
   1166
   1167	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
   1168					     agg_bufs, tpa, NULL);
   1169	if (!total_frag_len) {
   1170		dev_kfree_skb(skb);
   1171		return NULL;
   1172	}
   1173
   1174	skb->data_len += total_frag_len;
   1175	skb->len += total_frag_len;
   1176	skb->truesize += PAGE_SIZE * agg_bufs;
   1177	return skb;
   1178}
   1179
   1180static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
   1181				 struct bnxt_cp_ring_info *cpr,
   1182				 struct xdp_buff *xdp, u16 idx,
   1183				 u32 agg_bufs, bool tpa)
   1184{
   1185	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
   1186	u32 total_frag_len = 0;
   1187
   1188	if (!xdp_buff_has_frags(xdp))
   1189		shinfo->nr_frags = 0;
   1190
   1191	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
   1192					     idx, agg_bufs, tpa, xdp);
   1193	if (total_frag_len) {
   1194		xdp_buff_set_frags_flag(xdp);
   1195		shinfo->nr_frags = agg_bufs;
   1196		shinfo->xdp_frags_size = total_frag_len;
   1197	}
   1198	return total_frag_len;
   1199}
   1200
   1201static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
   1202			       u8 agg_bufs, u32 *raw_cons)
   1203{
   1204	u16 last;
   1205	struct rx_agg_cmp *agg;
   1206
   1207	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
   1208	last = RING_CMP(*raw_cons);
   1209	agg = (struct rx_agg_cmp *)
   1210		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
   1211	return RX_AGG_CMP_VALID(agg, *raw_cons);
   1212}
   1213
   1214static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
   1215					    unsigned int len,
   1216					    dma_addr_t mapping)
   1217{
   1218	struct bnxt *bp = bnapi->bp;
   1219	struct pci_dev *pdev = bp->pdev;
   1220	struct sk_buff *skb;
   1221
   1222	skb = napi_alloc_skb(&bnapi->napi, len);
   1223	if (!skb)
   1224		return NULL;
   1225
   1226	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
   1227				bp->rx_dir);
   1228
   1229	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
   1230	       len + NET_IP_ALIGN);
   1231
   1232	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
   1233				   bp->rx_dir);
   1234
   1235	skb_put(skb, len);
   1236	return skb;
   1237}
   1238
   1239static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
   1240			   u32 *raw_cons, void *cmp)
   1241{
   1242	struct rx_cmp *rxcmp = cmp;
   1243	u32 tmp_raw_cons = *raw_cons;
   1244	u8 cmp_type, agg_bufs = 0;
   1245
   1246	cmp_type = RX_CMP_TYPE(rxcmp);
   1247
   1248	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
   1249		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
   1250			    RX_CMP_AGG_BUFS) >>
   1251			   RX_CMP_AGG_BUFS_SHIFT;
   1252	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
   1253		struct rx_tpa_end_cmp *tpa_end = cmp;
   1254
   1255		if (bp->flags & BNXT_FLAG_CHIP_P5)
   1256			return 0;
   1257
   1258		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
   1259	}
   1260
   1261	if (agg_bufs) {
   1262		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
   1263			return -EBUSY;
   1264	}
   1265	*raw_cons = tmp_raw_cons;
   1266	return 0;
   1267}
   1268
   1269static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
   1270{
   1271	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
   1272		return;
   1273
   1274	if (BNXT_PF(bp))
   1275		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
   1276	else
   1277		schedule_delayed_work(&bp->fw_reset_task, delay);
   1278}
   1279
   1280static void bnxt_queue_sp_work(struct bnxt *bp)
   1281{
   1282	if (BNXT_PF(bp))
   1283		queue_work(bnxt_pf_wq, &bp->sp_task);
   1284	else
   1285		schedule_work(&bp->sp_task);
   1286}
   1287
   1288static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
   1289{
   1290	if (!rxr->bnapi->in_reset) {
   1291		rxr->bnapi->in_reset = true;
   1292		if (bp->flags & BNXT_FLAG_CHIP_P5)
   1293			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
   1294		else
   1295			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
   1296		bnxt_queue_sp_work(bp);
   1297	}
   1298	rxr->rx_next_cons = 0xffff;
   1299}
   1300
   1301static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
   1302{
   1303	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
   1304	u16 idx = agg_id & MAX_TPA_P5_MASK;
   1305
   1306	if (test_bit(idx, map->agg_idx_bmap))
   1307		idx = find_first_zero_bit(map->agg_idx_bmap,
   1308					  BNXT_AGG_IDX_BMAP_SIZE);
   1309	__set_bit(idx, map->agg_idx_bmap);
   1310	map->agg_id_tbl[agg_id] = idx;
   1311	return idx;
   1312}
   1313
   1314static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
   1315{
   1316	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
   1317
   1318	__clear_bit(idx, map->agg_idx_bmap);
   1319}
   1320
   1321static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
   1322{
   1323	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
   1324
   1325	return map->agg_id_tbl[agg_id];
   1326}
   1327
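        /* TPA (hardware receive aggregation for LRO/GRO) start: record the
         * aggregation context (cfa_code, RSS hash, metadata, header info) in
         * tpa_info and swap its buffer with the RX ring slot so the slot can
         * be replenished while the aggregation is in flight.
         */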
   1328static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
   1329			   struct rx_tpa_start_cmp *tpa_start,
   1330			   struct rx_tpa_start_cmp_ext *tpa_start1)
   1331{
   1332	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
   1333	struct bnxt_tpa_info *tpa_info;
   1334	u16 cons, prod, agg_id;
   1335	struct rx_bd *prod_bd;
   1336	dma_addr_t mapping;
   1337
   1338	if (bp->flags & BNXT_FLAG_CHIP_P5) {
   1339		agg_id = TPA_START_AGG_ID_P5(tpa_start);
   1340		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
   1341	} else {
   1342		agg_id = TPA_START_AGG_ID(tpa_start);
   1343	}
   1344	cons = tpa_start->rx_tpa_start_cmp_opaque;
   1345	prod = rxr->rx_prod;
   1346	cons_rx_buf = &rxr->rx_buf_ring[cons];
   1347	prod_rx_buf = &rxr->rx_buf_ring[prod];
   1348	tpa_info = &rxr->rx_tpa[agg_id];
   1349
   1350	if (unlikely(cons != rxr->rx_next_cons ||
   1351		     TPA_START_ERROR(tpa_start))) {
   1352		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
   1353			    cons, rxr->rx_next_cons,
   1354			    TPA_START_ERROR_CODE(tpa_start1));
   1355		bnxt_sched_reset(bp, rxr);
   1356		return;
   1357	}
   1358	/* Store cfa_code in tpa_info to use in tpa_end
   1359	 * completion processing.
   1360	 */
   1361	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
   1362	prod_rx_buf->data = tpa_info->data;
   1363	prod_rx_buf->data_ptr = tpa_info->data_ptr;
   1364
   1365	mapping = tpa_info->mapping;
   1366	prod_rx_buf->mapping = mapping;
   1367
   1368	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
   1369
   1370	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
   1371
   1372	tpa_info->data = cons_rx_buf->data;
   1373	tpa_info->data_ptr = cons_rx_buf->data_ptr;
   1374	cons_rx_buf->data = NULL;
   1375	tpa_info->mapping = cons_rx_buf->mapping;
   1376
   1377	tpa_info->len =
   1378		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
   1379				RX_TPA_START_CMP_LEN_SHIFT;
   1380	if (likely(TPA_START_HASH_VALID(tpa_start))) {
   1381		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
   1382
   1383		tpa_info->hash_type = PKT_HASH_TYPE_L4;
   1384		tpa_info->gso_type = SKB_GSO_TCPV4;
   1385		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
   1386		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
   1387			tpa_info->gso_type = SKB_GSO_TCPV6;
   1388		tpa_info->rss_hash =
   1389			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
   1390	} else {
   1391		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
   1392		tpa_info->gso_type = 0;
   1393		netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
   1394	}
   1395	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
   1396	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
   1397	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
   1398	tpa_info->agg_count = 0;
   1399
   1400	rxr->rx_prod = NEXT_RX(prod);
   1401	cons = NEXT_RX(cons);
   1402	rxr->rx_next_cons = NEXT_RX(cons);
   1403	cons_rx_buf = &rxr->rx_buf_ring[cons];
   1404
   1405	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
   1406	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
   1407	cons_rx_buf->data = NULL;
   1408}
   1409
   1410static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
   1411{
   1412	if (agg_bufs)
   1413		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
   1414}
   1415
   1416#ifdef CONFIG_INET
   1417static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
   1418{
   1419	struct udphdr *uh = NULL;
   1420
   1421	if (ip_proto == htons(ETH_P_IP)) {
   1422		struct iphdr *iph = (struct iphdr *)skb->data;
   1423
   1424		if (iph->protocol == IPPROTO_UDP)
   1425			uh = (struct udphdr *)(iph + 1);
   1426	} else {
   1427		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
   1428
   1429		if (iph->nexthdr == IPPROTO_UDP)
   1430			uh = (struct udphdr *)(iph + 1);
   1431	}
   1432	if (uh) {
   1433		if (uh->check)
   1434			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
   1435		else
   1436			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
   1437	}
   1438}
   1439#endif
   1440
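        /* Per-chip GRO fixup helpers: rebuild the network/transport header
         * offsets and the TCP pseudo-header checksum of an aggregated packet
         * so that tcp_gro_complete() can finish GRO processing in software.
         */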
   1441static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
   1442					   int payload_off, int tcp_ts,
   1443					   struct sk_buff *skb)
   1444{
   1445#ifdef CONFIG_INET
   1446	struct tcphdr *th;
   1447	int len, nw_off;
   1448	u16 outer_ip_off, inner_ip_off, inner_mac_off;
   1449	u32 hdr_info = tpa_info->hdr_info;
   1450	bool loopback = false;
   1451
   1452	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
   1453	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
   1454	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
   1455
   1456	/* If the packet is an internal loopback packet, the offsets will
   1457	 * have an extra 4 bytes.
   1458	 */
   1459	if (inner_mac_off == 4) {
   1460		loopback = true;
   1461	} else if (inner_mac_off > 4) {
   1462		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
   1463					    ETH_HLEN - 2));
   1464
    1465		/* We only support inner IPv4/IPv6.  If we don't see the
   1466		 * correct protocol ID, it must be a loopback packet where
   1467		 * the offsets are off by 4.
   1468		 */
   1469		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
   1470			loopback = true;
   1471	}
   1472	if (loopback) {
    1473		/* internal loopback packet, reduce all offsets by 4 */
   1474		inner_ip_off -= 4;
   1475		inner_mac_off -= 4;
   1476		outer_ip_off -= 4;
   1477	}
   1478
   1479	nw_off = inner_ip_off - ETH_HLEN;
   1480	skb_set_network_header(skb, nw_off);
   1481	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
   1482		struct ipv6hdr *iph = ipv6_hdr(skb);
   1483
   1484		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
   1485		len = skb->len - skb_transport_offset(skb);
   1486		th = tcp_hdr(skb);
   1487		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
   1488	} else {
   1489		struct iphdr *iph = ip_hdr(skb);
   1490
   1491		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
   1492		len = skb->len - skb_transport_offset(skb);
   1493		th = tcp_hdr(skb);
   1494		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
   1495	}
   1496
   1497	if (inner_mac_off) { /* tunnel */
   1498		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
   1499					    ETH_HLEN - 2));
   1500
   1501		bnxt_gro_tunnel(skb, proto);
   1502	}
   1503#endif
   1504	return skb;
   1505}
   1506
   1507static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
   1508					   int payload_off, int tcp_ts,
   1509					   struct sk_buff *skb)
   1510{
   1511#ifdef CONFIG_INET
   1512	u16 outer_ip_off, inner_ip_off, inner_mac_off;
   1513	u32 hdr_info = tpa_info->hdr_info;
   1514	int iphdr_len, nw_off;
   1515
   1516	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
   1517	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
   1518	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
   1519
   1520	nw_off = inner_ip_off - ETH_HLEN;
   1521	skb_set_network_header(skb, nw_off);
   1522	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
   1523		     sizeof(struct ipv6hdr) : sizeof(struct iphdr);
   1524	skb_set_transport_header(skb, nw_off + iphdr_len);
   1525
   1526	if (inner_mac_off) { /* tunnel */
   1527		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
   1528					    ETH_HLEN - 2));
   1529
   1530		bnxt_gro_tunnel(skb, proto);
   1531	}
   1532#endif
   1533	return skb;
   1534}
   1535
   1536#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
   1537#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
   1538
   1539static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
   1540					   int payload_off, int tcp_ts,
   1541					   struct sk_buff *skb)
   1542{
   1543#ifdef CONFIG_INET
   1544	struct tcphdr *th;
   1545	int len, nw_off, tcp_opt_len = 0;
   1546
   1547	if (tcp_ts)
   1548		tcp_opt_len = 12;
   1549
   1550	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
   1551		struct iphdr *iph;
   1552
   1553		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
   1554			 ETH_HLEN;
   1555		skb_set_network_header(skb, nw_off);
   1556		iph = ip_hdr(skb);
   1557		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
   1558		len = skb->len - skb_transport_offset(skb);
   1559		th = tcp_hdr(skb);
   1560		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
   1561	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
   1562		struct ipv6hdr *iph;
   1563
   1564		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
   1565			 ETH_HLEN;
   1566		skb_set_network_header(skb, nw_off);
   1567		iph = ipv6_hdr(skb);
   1568		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
   1569		len = skb->len - skb_transport_offset(skb);
   1570		th = tcp_hdr(skb);
   1571		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
   1572	} else {
   1573		dev_kfree_skb_any(skb);
   1574		return NULL;
   1575	}
   1576
   1577	if (nw_off) /* tunnel */
   1578		bnxt_gro_tunnel(skb, skb->protocol);
   1579#endif
   1580	return skb;
   1581}
   1582
   1583static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
   1584					   struct bnxt_tpa_info *tpa_info,
   1585					   struct rx_tpa_end_cmp *tpa_end,
   1586					   struct rx_tpa_end_cmp_ext *tpa_end1,
   1587					   struct sk_buff *skb)
   1588{
   1589#ifdef CONFIG_INET
   1590	int payload_off;
   1591	u16 segs;
   1592
   1593	segs = TPA_END_TPA_SEGS(tpa_end);
   1594	if (segs == 1)
   1595		return skb;
   1596
   1597	NAPI_GRO_CB(skb)->count = segs;
   1598	skb_shinfo(skb)->gso_size =
   1599		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
   1600	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
   1601	if (bp->flags & BNXT_FLAG_CHIP_P5)
   1602		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
   1603	else
   1604		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
   1605	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
   1606	if (likely(skb))
   1607		tcp_gro_complete(skb);
   1608#endif
   1609	return skb;
   1610}
   1611
    1612	/* Given the cfa_code of a received packet, determine which
   1613 * netdev (vf-rep or PF) the packet is destined to.
   1614 */
   1615static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
   1616{
   1617	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
   1618
    1619	/* if the vf-rep dev is NULL, the packet must belong to the PF */
   1620	return dev ? dev : bp->dev;
   1621}
   1622
   1623static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
   1624					   struct bnxt_cp_ring_info *cpr,
   1625					   u32 *raw_cons,
   1626					   struct rx_tpa_end_cmp *tpa_end,
   1627					   struct rx_tpa_end_cmp_ext *tpa_end1,
   1628					   u8 *event)
   1629{
   1630	struct bnxt_napi *bnapi = cpr->bnapi;
   1631	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
   1632	u8 *data_ptr, agg_bufs;
   1633	unsigned int len;
   1634	struct bnxt_tpa_info *tpa_info;
   1635	dma_addr_t mapping;
   1636	struct sk_buff *skb;
   1637	u16 idx = 0, agg_id;
   1638	void *data;
   1639	bool gro;
   1640
   1641	if (unlikely(bnapi->in_reset)) {
   1642		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
   1643
   1644		if (rc < 0)
   1645			return ERR_PTR(-EBUSY);
   1646		return NULL;
   1647	}
   1648
   1649	if (bp->flags & BNXT_FLAG_CHIP_P5) {
   1650		agg_id = TPA_END_AGG_ID_P5(tpa_end);
   1651		agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
   1652		agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
   1653		tpa_info = &rxr->rx_tpa[agg_id];
   1654		if (unlikely(agg_bufs != tpa_info->agg_count)) {
   1655			netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
   1656				    agg_bufs, tpa_info->agg_count);
   1657			agg_bufs = tpa_info->agg_count;
   1658		}
   1659		tpa_info->agg_count = 0;
   1660		*event |= BNXT_AGG_EVENT;
   1661		bnxt_free_agg_idx(rxr, agg_id);
   1662		idx = agg_id;
   1663		gro = !!(bp->flags & BNXT_FLAG_GRO);
   1664	} else {
   1665		agg_id = TPA_END_AGG_ID(tpa_end);
   1666		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
   1667		tpa_info = &rxr->rx_tpa[agg_id];
   1668		idx = RING_CMP(*raw_cons);
   1669		if (agg_bufs) {
   1670			if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
   1671				return ERR_PTR(-EBUSY);
   1672
   1673			*event |= BNXT_AGG_EVENT;
   1674			idx = NEXT_CMP(idx);
   1675		}
   1676		gro = !!TPA_END_GRO(tpa_end);
   1677	}
   1678	data = tpa_info->data;
   1679	data_ptr = tpa_info->data_ptr;
   1680	prefetch(data_ptr);
   1681	len = tpa_info->len;
   1682	mapping = tpa_info->mapping;
   1683
   1684	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
   1685		bnxt_abort_tpa(cpr, idx, agg_bufs);
   1686		if (agg_bufs > MAX_SKB_FRAGS)
   1687			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
   1688				    agg_bufs, (int)MAX_SKB_FRAGS);
   1689		return NULL;
   1690	}
   1691
   1692	if (len <= bp->rx_copy_thresh) {
   1693		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
   1694		if (!skb) {
   1695			bnxt_abort_tpa(cpr, idx, agg_bufs);
   1696			cpr->sw_stats.rx.rx_oom_discards += 1;
   1697			return NULL;
   1698		}
   1699	} else {
   1700		u8 *new_data;
   1701		dma_addr_t new_mapping;
   1702
   1703		new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
   1704		if (!new_data) {
   1705			bnxt_abort_tpa(cpr, idx, agg_bufs);
   1706			cpr->sw_stats.rx.rx_oom_discards += 1;
   1707			return NULL;
   1708		}
   1709
   1710		tpa_info->data = new_data;
   1711		tpa_info->data_ptr = new_data + bp->rx_offset;
   1712		tpa_info->mapping = new_mapping;
   1713
   1714		skb = build_skb(data, bp->rx_buf_size);
   1715		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
   1716				       bp->rx_buf_use_size, bp->rx_dir,
   1717				       DMA_ATTR_WEAK_ORDERING);
   1718
   1719		if (!skb) {
   1720			skb_free_frag(data);
   1721			bnxt_abort_tpa(cpr, idx, agg_bufs);
   1722			cpr->sw_stats.rx.rx_oom_discards += 1;
   1723			return NULL;
   1724		}
   1725		skb_reserve(skb, bp->rx_offset);
   1726		skb_put(skb, len);
   1727	}
   1728
   1729	if (agg_bufs) {
   1730		skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
   1731		if (!skb) {
    1732			/* Page reuse already handled by bnxt_rx_agg_pages_skb(). */
   1733			cpr->sw_stats.rx.rx_oom_discards += 1;
   1734			return NULL;
   1735		}
   1736	}
   1737
   1738	skb->protocol =
   1739		eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
   1740
   1741	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
   1742		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
   1743
   1744	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
   1745	    (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
   1746		__be16 vlan_proto = htons(tpa_info->metadata >>
   1747					  RX_CMP_FLAGS2_METADATA_TPID_SFT);
   1748		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
   1749
   1750		if (eth_type_vlan(vlan_proto)) {
   1751			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
   1752		} else {
   1753			dev_kfree_skb(skb);
   1754			return NULL;
   1755		}
   1756	}
   1757
   1758	skb_checksum_none_assert(skb);
   1759	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
   1760		skb->ip_summed = CHECKSUM_UNNECESSARY;
   1761		skb->csum_level =
   1762			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
   1763	}
   1764
   1765	if (gro)
   1766		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
   1767
   1768	return skb;
   1769}
   1770
   1771static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
   1772			 struct rx_agg_cmp *rx_agg)
   1773{
   1774	u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
   1775	struct bnxt_tpa_info *tpa_info;
   1776
   1777	agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
   1778	tpa_info = &rxr->rx_tpa[agg_id];
   1779	BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
   1780	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
   1781}
   1782
   1783static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
   1784			     struct sk_buff *skb)
   1785{
   1786	if (skb->dev != bp->dev) {
   1787		/* this packet belongs to a vf-rep */
   1788		bnxt_vf_rep_rx(bp, skb);
   1789		return;
   1790	}
   1791	skb_record_rx_queue(skb, bnapi->index);
   1792	napi_gro_receive(&bnapi->napi, skb);
   1793}
   1794
   1795/* returns the following:
   1796 * 1       - 1 packet successfully received
   1797 * 0       - successful TPA_START, packet not completed yet
   1798 * -EBUSY  - completion ring does not have all the agg buffers yet
   1799 * -ENOMEM - packet aborted due to out of memory
   1800 * -EIO    - packet aborted due to hw error indicated in BD
   1801 */
   1802static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
   1803		       u32 *raw_cons, u8 *event)
   1804{
   1805	struct bnxt_napi *bnapi = cpr->bnapi;
   1806	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
   1807	struct net_device *dev = bp->dev;
   1808	struct rx_cmp *rxcmp;
   1809	struct rx_cmp_ext *rxcmp1;
   1810	u32 tmp_raw_cons = *raw_cons;
   1811	u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
   1812	struct bnxt_sw_rx_bd *rx_buf;
   1813	unsigned int len;
   1814	u8 *data_ptr, agg_bufs, cmp_type;
   1815	bool xdp_active = false;
   1816	dma_addr_t dma_addr;
   1817	struct sk_buff *skb;
   1818	struct xdp_buff xdp;
   1819	u32 flags, misc;
   1820	void *data;
   1821	int rc = 0;
   1822
   1823	rxcmp = (struct rx_cmp *)
   1824			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
   1825
   1826	cmp_type = RX_CMP_TYPE(rxcmp);
   1827
   1828	if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
   1829		bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
   1830		goto next_rx_no_prod_no_len;
   1831	}
   1832
   1833	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
   1834	cp_cons = RING_CMP(tmp_raw_cons);
   1835	rxcmp1 = (struct rx_cmp_ext *)
   1836			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
   1837
   1838	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
   1839		return -EBUSY;
   1840
   1841	/* The valid test of the entry must be done first before
   1842	 * reading any further.
   1843	 */
   1844	dma_rmb();
   1845	prod = rxr->rx_prod;
   1846
   1847	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
   1848		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
   1849			       (struct rx_tpa_start_cmp_ext *)rxcmp1);
   1850
   1851		*event |= BNXT_RX_EVENT;
   1852		goto next_rx_no_prod_no_len;
   1853
   1854	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
   1855		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
   1856				   (struct rx_tpa_end_cmp *)rxcmp,
   1857				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
   1858
   1859		if (IS_ERR(skb))
   1860			return -EBUSY;
   1861
   1862		rc = -ENOMEM;
   1863		if (likely(skb)) {
   1864			bnxt_deliver_skb(bp, bnapi, skb);
   1865			rc = 1;
   1866		}
   1867		*event |= BNXT_RX_EVENT;
   1868		goto next_rx_no_prod_no_len;
   1869	}
   1870
   1871	cons = rxcmp->rx_cmp_opaque;
   1872	if (unlikely(cons != rxr->rx_next_cons)) {
   1873		int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
   1874
    1875		/* 0xffff is a forced error, don't print it */
   1876		if (rxr->rx_next_cons != 0xffff)
   1877			netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
   1878				    cons, rxr->rx_next_cons);
   1879		bnxt_sched_reset(bp, rxr);
   1880		if (rc1)
   1881			return rc1;
   1882		goto next_rx_no_prod_no_len;
   1883	}
   1884	rx_buf = &rxr->rx_buf_ring[cons];
   1885	data = rx_buf->data;
   1886	data_ptr = rx_buf->data_ptr;
   1887	prefetch(data_ptr);
   1888
   1889	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
   1890	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
   1891
   1892	if (agg_bufs) {
   1893		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
   1894			return -EBUSY;
   1895
   1896		cp_cons = NEXT_CMP(cp_cons);
   1897		*event |= BNXT_AGG_EVENT;
   1898	}
   1899	*event |= BNXT_RX_EVENT;
   1900
   1901	rx_buf->data = NULL;
   1902	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
   1903		u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
   1904
   1905		bnxt_reuse_rx_data(rxr, cons, data);
   1906		if (agg_bufs)
   1907			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
   1908					       false);
   1909
   1910		rc = -EIO;
   1911		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
   1912			bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
   1913			if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
   1914			    !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
   1915				netdev_warn_once(bp->dev, "RX buffer error %x\n",
   1916						 rx_err);
   1917				bnxt_sched_reset(bp, rxr);
   1918			}
   1919		}
   1920		goto next_rx_no_len;
   1921	}
   1922
   1923	flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
   1924	len = flags >> RX_CMP_LEN_SHIFT;
   1925	dma_addr = rx_buf->mapping;
   1926
   1927	if (bnxt_xdp_attached(bp, rxr)) {
   1928		bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp);
   1929		if (agg_bufs) {
   1930			u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
   1931							     cp_cons, agg_bufs,
   1932							     false);
   1933			if (!frag_len) {
   1934				cpr->sw_stats.rx.rx_oom_discards += 1;
   1935				rc = -ENOMEM;
   1936				goto next_rx;
   1937			}
   1938		}
   1939		xdp_active = true;
   1940	}
   1941
   1942	if (xdp_active) {
   1943		if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) {
   1944			rc = 1;
   1945			goto next_rx;
   1946		}
   1947	}
   1948
   1949	if (len <= bp->rx_copy_thresh) {
   1950		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
   1951		bnxt_reuse_rx_data(rxr, cons, data);
   1952		if (!skb) {
   1953			if (agg_bufs) {
   1954				if (!xdp_active)
   1955					bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
   1956							       agg_bufs, false);
   1957				else
   1958					bnxt_xdp_buff_frags_free(rxr, &xdp);
   1959			}
   1960			cpr->sw_stats.rx.rx_oom_discards += 1;
   1961			rc = -ENOMEM;
   1962			goto next_rx;
   1963		}
   1964	} else {
   1965		u32 payload;
   1966
   1967		if (rx_buf->data_ptr == data_ptr)
   1968			payload = misc & RX_CMP_PAYLOAD_OFFSET;
   1969		else
   1970			payload = 0;
   1971		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
   1972				      payload | len);
   1973		if (!skb) {
   1974			cpr->sw_stats.rx.rx_oom_discards += 1;
   1975			rc = -ENOMEM;
   1976			goto next_rx;
   1977		}
   1978	}
   1979
   1980	if (agg_bufs) {
   1981		if (!xdp_active) {
   1982			skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
   1983			if (!skb) {
   1984				cpr->sw_stats.rx.rx_oom_discards += 1;
   1985				rc = -ENOMEM;
   1986				goto next_rx;
   1987			}
   1988		} else {
   1989			skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
   1990			if (!skb) {
   1991				/* we should be able to free the old skb here */
   1992				bnxt_xdp_buff_frags_free(rxr, &xdp);
   1993				cpr->sw_stats.rx.rx_oom_discards += 1;
   1994				rc = -ENOMEM;
   1995				goto next_rx;
   1996			}
   1997		}
   1998	}
   1999
   2000	if (RX_CMP_HASH_VALID(rxcmp)) {
   2001		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
   2002		enum pkt_hash_types type = PKT_HASH_TYPE_L4;
   2003
   2004		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
   2005		if (hash_type != 1 && hash_type != 3)
   2006			type = PKT_HASH_TYPE_L3;
   2007		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
   2008	}
   2009
   2010	cfa_code = RX_CMP_CFA_CODE(rxcmp1);
   2011	skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
   2012
   2013	if ((rxcmp1->rx_cmp_flags2 &
   2014	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
   2015	    (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
   2016		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
   2017		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
   2018		__be16 vlan_proto = htons(meta_data >>
   2019					  RX_CMP_FLAGS2_METADATA_TPID_SFT);
   2020
   2021		if (eth_type_vlan(vlan_proto)) {
   2022			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
   2023		} else {
   2024			dev_kfree_skb(skb);
   2025			goto next_rx;
   2026		}
   2027	}
   2028
   2029	skb_checksum_none_assert(skb);
   2030	if (RX_CMP_L4_CS_OK(rxcmp1)) {
   2031		if (dev->features & NETIF_F_RXCSUM) {
   2032			skb->ip_summed = CHECKSUM_UNNECESSARY;
   2033			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
   2034		}
   2035	} else {
   2036		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
   2037			if (dev->features & NETIF_F_RXCSUM)
   2038				bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
   2039		}
   2040	}
   2041
   2042	if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
   2043		     RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
   2044		if (bp->flags & BNXT_FLAG_CHIP_P5) {
   2045			u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
   2046			u64 ns, ts;
   2047
   2048			if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
   2049				struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
   2050
   2051				spin_lock_bh(&ptp->ptp_lock);
   2052				ns = timecounter_cyc2time(&ptp->tc, ts);
   2053				spin_unlock_bh(&ptp->ptp_lock);
   2054				memset(skb_hwtstamps(skb), 0,
   2055				       sizeof(*skb_hwtstamps(skb)));
   2056				skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
   2057			}
   2058		}
   2059	}
   2060	bnxt_deliver_skb(bp, bnapi, skb);
   2061	rc = 1;
   2062
   2063next_rx:
   2064	cpr->rx_packets += 1;
   2065	cpr->rx_bytes += len;
   2066
   2067next_rx_no_len:
   2068	rxr->rx_prod = NEXT_RX(prod);
   2069	rxr->rx_next_cons = NEXT_RX(cons);
   2070
   2071next_rx_no_prod_no_len:
   2072	*raw_cons = tmp_raw_cons;
   2073
   2074	return rc;
   2075}
   2076
   2077/* In netpoll mode, if we are using a combined completion ring, we need to
   2078 * discard the rx packets and recycle the buffers.
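 * This is done by flagging the completion with a forced error (a CRC error
 * for normal L2 completions, TPA_END errors for TPA completions) so that
 * bnxt_rx_pkt() takes its error path and recycles the buffers.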
   2079 */
   2080static int bnxt_force_rx_discard(struct bnxt *bp,
   2081				 struct bnxt_cp_ring_info *cpr,
   2082				 u32 *raw_cons, u8 *event)
   2083{
   2084	u32 tmp_raw_cons = *raw_cons;
   2085	struct rx_cmp_ext *rxcmp1;
   2086	struct rx_cmp *rxcmp;
   2087	u16 cp_cons;
   2088	u8 cmp_type;
   2089	int rc;
   2090
   2091	cp_cons = RING_CMP(tmp_raw_cons);
   2092	rxcmp = (struct rx_cmp *)
   2093			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
   2094
   2095	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
   2096	cp_cons = RING_CMP(tmp_raw_cons);
   2097	rxcmp1 = (struct rx_cmp_ext *)
   2098			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
   2099
   2100	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
   2101		return -EBUSY;
   2102
   2103	/* The valid test of the entry must be done first before
   2104	 * reading any further.
   2105	 */
   2106	dma_rmb();
   2107	cmp_type = RX_CMP_TYPE(rxcmp);
   2108	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
   2109		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
   2110			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
   2111	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
   2112		struct rx_tpa_end_cmp_ext *tpa_end1;
   2113
   2114		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
   2115		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
   2116			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
   2117	}
   2118	rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
   2119	if (rc && rc != -EBUSY)
   2120		cpr->sw_stats.rx.rx_netpoll_discards += 1;
   2121	return rc;
   2122}
   2123
   2124u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
   2125{
   2126	struct bnxt_fw_health *fw_health = bp->fw_health;
   2127	u32 reg = fw_health->regs[reg_idx];
   2128	u32 reg_type, reg_off, val = 0;
   2129
   2130	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
   2131	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
   2132	switch (reg_type) {
   2133	case BNXT_FW_HEALTH_REG_TYPE_CFG:
   2134		pci_read_config_dword(bp->pdev, reg_off, &val);
   2135		break;
   2136	case BNXT_FW_HEALTH_REG_TYPE_GRC:
   2137		reg_off = fw_health->mapped_regs[reg_idx];
   2138		fallthrough;
   2139	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
   2140		val = readl(bp->bar0 + reg_off);
   2141		break;
   2142	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
   2143		val = readl(bp->bar1 + reg_off);
   2144		break;
   2145	}
   2146	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
   2147		val &= fw_health->fw_reset_inprog_reg_mask;
   2148	return val;
   2149}
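
/* Minimal usage sketch (not part of the driver): health registers are read
 * through bnxt_fw_health_readl() by index, e.g. sampling the firmware
 * heartbeat counter (BNXT_FW_HEARTBEAT_REG, also read in the error recovery
 * handler below):
 */
static inline u32 bnxt_fw_heartbeat_sample(struct bnxt *bp)
{
	return bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
}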
   2150
   2151static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
   2152{
   2153	int i;
   2154
   2155	for (i = 0; i < bp->rx_nr_rings; i++) {
   2156		u16 grp_idx = bp->rx_ring[i].bnapi->index;
   2157		struct bnxt_ring_grp_info *grp_info;
   2158
   2159		grp_info = &bp->grp_info[grp_idx];
   2160		if (grp_info->agg_fw_ring_id == ring_id)
   2161			return grp_idx;
   2162	}
   2163	return INVALID_HW_RING_ID;
   2164}
   2165
   2166static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
   2167{
   2168	u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
   2169
   2170	switch (err_type) {
   2171	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
   2172		netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
   2173			   BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
   2174		break;
   2175	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
   2176		netdev_warn(bp->dev, "Pause Storm detected!\n");
   2177		break;
   2178	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
   2179		netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
   2180		break;
   2181	default:
   2182		netdev_err(bp->dev, "FW reported unknown error type %u\n",
   2183			   err_type);
   2184		break;
   2185	}
   2186}
   2187
   2188#define BNXT_GET_EVENT_PORT(data)	\
   2189	((data) &			\
   2190	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
   2191
   2192#define BNXT_EVENT_RING_TYPE(data2)	\
   2193	((data2) &			\
   2194	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
   2195
   2196#define BNXT_EVENT_RING_TYPE_RX(data2)	\
   2197	(BNXT_EVENT_RING_TYPE(data2) ==	\
   2198	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
   2199
   2200#define BNXT_EVENT_PHC_EVENT_TYPE(data1)	\
   2201	(((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
   2202	 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
   2203
   2204#define BNXT_EVENT_PHC_RTC_UPDATE(data1)	\
   2205	(((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
   2206	 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
   2207
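/* A PHC RTC update event does not carry the full timestamp: data1 holds only
 * the bits above BNXT_PHC_BITS, and bnxt_async_event_process() below splices
 * them with the lower bits cached in ptp->current_time:
 *   ns = (BNXT_EVENT_PHC_RTC_UPDATE(data1) << BNXT_PHC_BITS) | ptp->current_time
 */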
   2208#define BNXT_PHC_BITS	48
   2209
   2210static int bnxt_async_event_process(struct bnxt *bp,
   2211				    struct hwrm_async_event_cmpl *cmpl)
   2212{
   2213	u16 event_id = le16_to_cpu(cmpl->event_id);
   2214	u32 data1 = le32_to_cpu(cmpl->event_data1);
   2215	u32 data2 = le32_to_cpu(cmpl->event_data2);
   2216
   2217	netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
   2218		   event_id, data1, data2);
   2219
    2220	/* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
   2221	switch (event_id) {
   2222	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
   2223		struct bnxt_link_info *link_info = &bp->link_info;
   2224
   2225		if (BNXT_VF(bp))
   2226			goto async_event_process_exit;
   2227
   2228		/* print unsupported speed warning in forced speed mode only */
   2229		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
   2230		    (data1 & 0x20000)) {
   2231			u16 fw_speed = link_info->force_link_speed;
   2232			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
   2233
   2234			if (speed != SPEED_UNKNOWN)
   2235				netdev_warn(bp->dev, "Link speed %d no longer supported\n",
   2236					    speed);
   2237		}
   2238		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
   2239	}
   2240		fallthrough;
   2241	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
   2242	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
   2243		set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
   2244		fallthrough;
   2245	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
   2246		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
   2247		break;
   2248	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
   2249		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
   2250		break;
   2251	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
   2252		u16 port_id = BNXT_GET_EVENT_PORT(data1);
   2253
   2254		if (BNXT_VF(bp))
   2255			break;
   2256
   2257		if (bp->pf.port_id != port_id)
   2258			break;
   2259
   2260		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
   2261		break;
   2262	}
   2263	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
   2264		if (BNXT_PF(bp))
   2265			goto async_event_process_exit;
   2266		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
   2267		break;
   2268	case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
   2269		char *type_str = "Solicited";
   2270
   2271		if (!bp->fw_health)
   2272			goto async_event_process_exit;
   2273
   2274		bp->fw_reset_timestamp = jiffies;
   2275		bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
   2276		if (!bp->fw_reset_min_dsecs)
   2277			bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
   2278		bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
   2279		if (!bp->fw_reset_max_dsecs)
   2280			bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
   2281		if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
   2282			set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
   2283		} else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
   2284			type_str = "Fatal";
   2285			bp->fw_health->fatalities++;
   2286			set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
   2287		} else if (data2 && BNXT_FW_STATUS_HEALTHY !=
   2288			   EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
   2289			type_str = "Non-fatal";
   2290			bp->fw_health->survivals++;
   2291			set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
   2292		}
   2293		netif_warn(bp, hw, bp->dev,
   2294			   "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
   2295			   type_str, data1, data2,
   2296			   bp->fw_reset_min_dsecs * 100,
   2297			   bp->fw_reset_max_dsecs * 100);
   2298		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
   2299		break;
   2300	}
   2301	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
   2302		struct bnxt_fw_health *fw_health = bp->fw_health;
   2303		char *status_desc = "healthy";
   2304		u32 status;
   2305
   2306		if (!fw_health)
   2307			goto async_event_process_exit;
   2308
   2309		if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
   2310			fw_health->enabled = false;
   2311			netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
   2312			break;
   2313		}
   2314		fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
   2315		fw_health->tmr_multiplier =
   2316			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
   2317				     bp->current_interval * 10);
   2318		fw_health->tmr_counter = fw_health->tmr_multiplier;
   2319		if (!fw_health->enabled)
   2320			fw_health->last_fw_heartbeat =
   2321				bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
   2322		fw_health->last_fw_reset_cnt =
   2323			bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
   2324		status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
   2325		if (status != BNXT_FW_STATUS_HEALTHY)
   2326			status_desc = "unhealthy";
   2327		netif_info(bp, drv, bp->dev,
   2328			   "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
   2329			   fw_health->primary ? "primary" : "backup", status,
   2330			   status_desc, fw_health->last_fw_reset_cnt);
   2331		if (!fw_health->enabled) {
   2332			/* Make sure tmr_counter is set and visible to
   2333			 * bnxt_health_check() before setting enabled to true.
   2334			 */
   2335			smp_wmb();
   2336			fw_health->enabled = true;
   2337		}
   2338		goto async_event_process_exit;
   2339	}
   2340	case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
   2341		netif_notice(bp, hw, bp->dev,
   2342			     "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
   2343			     data1, data2);
   2344		goto async_event_process_exit;
   2345	case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
   2346		struct bnxt_rx_ring_info *rxr;
   2347		u16 grp_idx;
   2348
   2349		if (bp->flags & BNXT_FLAG_CHIP_P5)
   2350			goto async_event_process_exit;
   2351
   2352		netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
   2353			    BNXT_EVENT_RING_TYPE(data2), data1);
   2354		if (!BNXT_EVENT_RING_TYPE_RX(data2))
   2355			goto async_event_process_exit;
   2356
   2357		grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
   2358		if (grp_idx == INVALID_HW_RING_ID) {
   2359			netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
   2360				    data1);
   2361			goto async_event_process_exit;
   2362		}
   2363		rxr = bp->bnapi[grp_idx]->rx_ring;
   2364		bnxt_sched_reset(bp, rxr);
   2365		goto async_event_process_exit;
   2366	}
   2367	case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
   2368		struct bnxt_fw_health *fw_health = bp->fw_health;
   2369
   2370		netif_notice(bp, hw, bp->dev,
   2371			     "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
   2372			     data1, data2);
   2373		if (fw_health) {
   2374			fw_health->echo_req_data1 = data1;
   2375			fw_health->echo_req_data2 = data2;
   2376			set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
   2377			break;
   2378		}
   2379		goto async_event_process_exit;
   2380	}
   2381	case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
   2382		bnxt_ptp_pps_event(bp, data1, data2);
   2383		goto async_event_process_exit;
   2384	}
   2385	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
   2386		bnxt_event_error_report(bp, data1, data2);
   2387		goto async_event_process_exit;
   2388	}
   2389	case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
   2390		switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
   2391		case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
   2392			if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
   2393				struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
   2394				u64 ns;
   2395
   2396				spin_lock_bh(&ptp->ptp_lock);
   2397				bnxt_ptp_update_current_time(bp);
   2398				ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
   2399				       BNXT_PHC_BITS) | ptp->current_time);
   2400				bnxt_ptp_rtc_timecounter_init(ptp, ns);
   2401				spin_unlock_bh(&ptp->ptp_lock);
   2402			}
   2403			break;
   2404		}
   2405		goto async_event_process_exit;
   2406	}
   2407	case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
   2408		u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
   2409
   2410		hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
   2411		goto async_event_process_exit;
   2412	}
   2413	default:
   2414		goto async_event_process_exit;
   2415	}
   2416	bnxt_queue_sp_work(bp);
   2417async_event_process_exit:
   2418	bnxt_ulp_async_events(bp, cmpl);
   2419	return 0;
   2420}
   2421
   2422static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
   2423{
   2424	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
   2425	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
   2426	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
   2427				(struct hwrm_fwd_req_cmpl *)txcmp;
   2428
   2429	switch (cmpl_type) {
   2430	case CMPL_BASE_TYPE_HWRM_DONE:
   2431		seq_id = le16_to_cpu(h_cmpl->sequence_id);
   2432		hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
   2433		break;
   2434
   2435	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
   2436		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
   2437
   2438		if ((vf_id < bp->pf.first_vf_id) ||
   2439		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
   2440			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
   2441				   vf_id);
   2442			return -EINVAL;
   2443		}
   2444
   2445		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
   2446		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
   2447		bnxt_queue_sp_work(bp);
   2448		break;
   2449
   2450	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
   2451		bnxt_async_event_process(bp,
   2452					 (struct hwrm_async_event_cmpl *)txcmp);
   2453		break;
   2454
   2455	default:
   2456		break;
   2457	}
   2458
   2459	return 0;
   2460}
   2461
   2462static irqreturn_t bnxt_msix(int irq, void *dev_instance)
   2463{
   2464	struct bnxt_napi *bnapi = dev_instance;
   2465	struct bnxt *bp = bnapi->bp;
   2466	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
   2467	u32 cons = RING_CMP(cpr->cp_raw_cons);
   2468
   2469	cpr->event_ctr++;
   2470	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
   2471	napi_schedule(&bnapi->napi);
   2472	return IRQ_HANDLED;
   2473}
   2474
   2475static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
   2476{
   2477	u32 raw_cons = cpr->cp_raw_cons;
   2478	u16 cons = RING_CMP(raw_cons);
   2479	struct tx_cmp *txcmp;
   2480
   2481	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
   2482
   2483	return TX_CMP_VALID(txcmp, raw_cons);
   2484}
   2485
   2486static irqreturn_t bnxt_inta(int irq, void *dev_instance)
   2487{
   2488	struct bnxt_napi *bnapi = dev_instance;
   2489	struct bnxt *bp = bnapi->bp;
   2490	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
   2491	u32 cons = RING_CMP(cpr->cp_raw_cons);
   2492	u32 int_status;
   2493
   2494	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
   2495
   2496	if (!bnxt_has_work(bp, cpr)) {
   2497		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
   2498		/* return if erroneous interrupt */
   2499		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
   2500			return IRQ_NONE;
   2501	}
   2502
   2503	/* disable ring IRQ */
   2504	BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
   2505
   2506	/* Return here if interrupt is shared and is disabled. */
   2507	if (unlikely(atomic_read(&bp->intr_sem) != 0))
   2508		return IRQ_HANDLED;
   2509
   2510	napi_schedule(&bnapi->napi);
   2511	return IRQ_HANDLED;
   2512}
   2513
   2514static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
   2515			    int budget)
   2516{
   2517	struct bnxt_napi *bnapi = cpr->bnapi;
   2518	u32 raw_cons = cpr->cp_raw_cons;
   2519	u32 cons;
   2520	int tx_pkts = 0;
   2521	int rx_pkts = 0;
   2522	u8 event = 0;
   2523	struct tx_cmp *txcmp;
   2524
   2525	cpr->has_more_work = 0;
   2526	cpr->had_work_done = 1;
   2527	while (1) {
   2528		int rc;
   2529
   2530		cons = RING_CMP(raw_cons);
   2531		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
   2532
   2533		if (!TX_CMP_VALID(txcmp, raw_cons))
   2534			break;
   2535
   2536		/* The valid test of the entry must be done first before
   2537		 * reading any further.
   2538		 */
   2539		dma_rmb();
   2540		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
   2541			tx_pkts++;
   2542			/* return full budget so NAPI will complete. */
   2543			if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
   2544				rx_pkts = budget;
   2545				raw_cons = NEXT_RAW_CMP(raw_cons);
   2546				if (budget)
   2547					cpr->has_more_work = 1;
   2548				break;
   2549			}
   2550		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
   2551			if (likely(budget))
   2552				rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
   2553			else
   2554				rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
   2555							   &event);
   2556			if (likely(rc >= 0))
   2557				rx_pkts += rc;
   2558			/* Increment rx_pkts when rc is -ENOMEM to count towards
   2559			 * the NAPI budget.  Otherwise, we may potentially loop
   2560			 * here forever if we consistently cannot allocate
   2561			 * buffers.
   2562			 */
   2563			else if (rc == -ENOMEM && budget)
   2564				rx_pkts++;
   2565			else if (rc == -EBUSY)	/* partial completion */
   2566				break;
   2567		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
   2568				     CMPL_BASE_TYPE_HWRM_DONE) ||
   2569				    (TX_CMP_TYPE(txcmp) ==
   2570				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
   2571				    (TX_CMP_TYPE(txcmp) ==
   2572				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
   2573			bnxt_hwrm_handler(bp, txcmp);
   2574		}
   2575		raw_cons = NEXT_RAW_CMP(raw_cons);
   2576
   2577		if (rx_pkts && rx_pkts == budget) {
   2578			cpr->has_more_work = 1;
   2579			break;
   2580		}
   2581	}
   2582
   2583	if (event & BNXT_REDIRECT_EVENT)
   2584		xdp_do_flush();
   2585
   2586	if (event & BNXT_TX_EVENT) {
   2587		struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
   2588		u16 prod = txr->tx_prod;
   2589
   2590		/* Sync BD data before updating doorbell */
   2591		wmb();
   2592
   2593		bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
   2594	}
   2595
   2596	cpr->cp_raw_cons = raw_cons;
   2597	bnapi->tx_pkts += tx_pkts;
   2598	bnapi->events |= event;
   2599	return rx_pkts;
   2600}
   2601
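/* Flush the state accumulated by __bnxt_poll_work(): run the deferred TX
 * completion handler for any completed TX packets, then ring the RX and RX
 * aggregation doorbells for whichever producers were advanced, as flagged
 * by BNXT_RX_EVENT / BNXT_AGG_EVENT.
 */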
   2602static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
   2603{
   2604	if (bnapi->tx_pkts) {
   2605		bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
   2606		bnapi->tx_pkts = 0;
   2607	}
   2608
   2609	if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
   2610		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
   2611
   2612		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
   2613	}
   2614	if (bnapi->events & BNXT_AGG_EVENT) {
   2615		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
   2616
   2617		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
   2618	}
   2619	bnapi->events = 0;
   2620}
   2621
   2622static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
   2623			  int budget)
   2624{
   2625	struct bnxt_napi *bnapi = cpr->bnapi;
   2626	int rx_pkts;
   2627
   2628	rx_pkts = __bnxt_poll_work(bp, cpr, budget);
   2629
   2630	/* ACK completion ring before freeing tx ring and producing new
   2631	 * buffers in rx/agg rings to prevent overflowing the completion
   2632	 * ring.
   2633	 */
   2634	bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
   2635
   2636	__bnxt_poll_work_done(bp, bnapi);
   2637	return rx_pkts;
   2638}
   2639
   2640static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
   2641{
   2642	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
   2643	struct bnxt *bp = bnapi->bp;
   2644	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
   2645	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
   2646	struct tx_cmp *txcmp;
   2647	struct rx_cmp_ext *rxcmp1;
   2648	u32 cp_cons, tmp_raw_cons;
   2649	u32 raw_cons = cpr->cp_raw_cons;
   2650	u32 rx_pkts = 0;
   2651	u8 event = 0;
   2652
   2653	while (1) {
   2654		int rc;
   2655
   2656		cp_cons = RING_CMP(raw_cons);
   2657		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
   2658
   2659		if (!TX_CMP_VALID(txcmp, raw_cons))
   2660			break;
   2661
   2662		/* The valid test of the entry must be done first before
   2663		 * reading any further.
   2664		 */
   2665		dma_rmb();
   2666		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
   2667			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
   2668			cp_cons = RING_CMP(tmp_raw_cons);
   2669			rxcmp1 = (struct rx_cmp_ext *)
   2670			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
   2671
   2672			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
   2673				break;
   2674
   2675			/* force an error to recycle the buffer */
   2676			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
   2677				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
   2678
   2679			rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
   2680			if (likely(rc == -EIO) && budget)
   2681				rx_pkts++;
   2682			else if (rc == -EBUSY)	/* partial completion */
   2683				break;
   2684		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
   2685				    CMPL_BASE_TYPE_HWRM_DONE)) {
   2686			bnxt_hwrm_handler(bp, txcmp);
   2687		} else {
   2688			netdev_err(bp->dev,
   2689				   "Invalid completion received on special ring\n");
   2690		}
   2691		raw_cons = NEXT_RAW_CMP(raw_cons);
   2692
   2693		if (rx_pkts == budget)
   2694			break;
   2695	}
   2696
   2697	cpr->cp_raw_cons = raw_cons;
   2698	BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
   2699	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
   2700
   2701	if (event & BNXT_AGG_EVENT)
   2702		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
   2703
   2704	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
   2705		napi_complete_done(napi, rx_pkts);
   2706		BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
   2707	}
   2708	return rx_pkts;
   2709}
   2710
   2711static int bnxt_poll(struct napi_struct *napi, int budget)
   2712{
   2713	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
   2714	struct bnxt *bp = bnapi->bp;
   2715	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
   2716	int work_done = 0;
   2717
   2718	if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
   2719		napi_complete(napi);
   2720		return 0;
   2721	}
   2722	while (1) {
   2723		work_done += bnxt_poll_work(bp, cpr, budget - work_done);
   2724
   2725		if (work_done >= budget) {
   2726			if (!budget)
   2727				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
   2728			break;
   2729		}
   2730
   2731		if (!bnxt_has_work(bp, cpr)) {
   2732			if (napi_complete_done(napi, work_done))
   2733				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
   2734			break;
   2735		}
   2736	}
   2737	if (bp->flags & BNXT_FLAG_DIM) {
   2738		struct dim_sample dim_sample = {};
   2739
   2740		dim_update_sample(cpr->event_ctr,
   2741				  cpr->rx_packets,
   2742				  cpr->rx_bytes,
   2743				  &dim_sample);
   2744		net_dim(&cpr->dim, dim_sample);
   2745	}
   2746	return work_done;
   2747}
   2748
   2749static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
   2750{
   2751	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
   2752	int i, work_done = 0;
   2753
   2754	for (i = 0; i < 2; i++) {
   2755		struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
   2756
   2757		if (cpr2) {
   2758			work_done += __bnxt_poll_work(bp, cpr2,
   2759						      budget - work_done);
   2760			cpr->has_more_work |= cpr2->has_more_work;
   2761		}
   2762	}
   2763	return work_done;
   2764}
   2765
   2766static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
   2767				 u64 dbr_type)
   2768{
   2769	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
   2770	int i;
   2771
   2772	for (i = 0; i < 2; i++) {
   2773		struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
   2774		struct bnxt_db_info *db;
   2775
   2776		if (cpr2 && cpr2->had_work_done) {
   2777			db = &cpr2->cp_db;
   2778			bnxt_writeq(bp, db->db_key64 | dbr_type |
   2779				    RING_CMP(cpr2->cp_raw_cons), db->doorbell);
   2780			cpr2->had_work_done = 0;
   2781		}
   2782	}
   2783	__bnxt_poll_work_done(bp, bnapi);
   2784}
   2785
   2786static int bnxt_poll_p5(struct napi_struct *napi, int budget)
   2787{
   2788	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
   2789	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
   2790	struct bnxt_cp_ring_info *cpr_rx;
   2791	u32 raw_cons = cpr->cp_raw_cons;
   2792	struct bnxt *bp = bnapi->bp;
   2793	struct nqe_cn *nqcmp;
   2794	int work_done = 0;
   2795	u32 cons;
   2796
   2797	if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
   2798		napi_complete(napi);
   2799		return 0;
   2800	}
   2801	if (cpr->has_more_work) {
   2802		cpr->has_more_work = 0;
   2803		work_done = __bnxt_poll_cqs(bp, bnapi, budget);
   2804	}
   2805	while (1) {
   2806		cons = RING_CMP(raw_cons);
   2807		nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
   2808
   2809		if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
   2810			if (cpr->has_more_work)
   2811				break;
   2812
   2813			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
   2814			cpr->cp_raw_cons = raw_cons;
   2815			if (napi_complete_done(napi, work_done))
   2816				BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
   2817						  cpr->cp_raw_cons);
   2818			goto poll_done;
   2819		}
   2820
   2821		/* The valid test of the entry must be done first before
   2822		 * reading any further.
   2823		 */
   2824		dma_rmb();
   2825
   2826		if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
   2827			u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
   2828			struct bnxt_cp_ring_info *cpr2;
   2829
   2830			/* No more budget for RX work */
   2831			if (budget && work_done >= budget && idx == BNXT_RX_HDL)
   2832				break;
   2833
   2834			cpr2 = cpr->cp_ring_arr[idx];
   2835			work_done += __bnxt_poll_work(bp, cpr2,
   2836						      budget - work_done);
   2837			cpr->has_more_work |= cpr2->has_more_work;
   2838		} else {
   2839			bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
   2840		}
   2841		raw_cons = NEXT_RAW_CMP(raw_cons);
   2842	}
   2843	__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
   2844	if (raw_cons != cpr->cp_raw_cons) {
   2845		cpr->cp_raw_cons = raw_cons;
   2846		BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
   2847	}
   2848poll_done:
   2849	cpr_rx = cpr->cp_ring_arr[BNXT_RX_HDL];
   2850	if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) {
   2851		struct dim_sample dim_sample = {};
   2852
   2853		dim_update_sample(cpr->event_ctr,
   2854				  cpr_rx->rx_packets,
   2855				  cpr_rx->rx_bytes,
   2856				  &dim_sample);
   2857		net_dim(&cpr->dim, dim_sample);
   2858	}
   2859	return work_done;
   2860}
   2861
   2862static void bnxt_free_tx_skbs(struct bnxt *bp)
   2863{
   2864	int i, max_idx;
   2865	struct pci_dev *pdev = bp->pdev;
   2866
   2867	if (!bp->tx_ring)
   2868		return;
   2869
   2870	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
   2871	for (i = 0; i < bp->tx_nr_rings; i++) {
   2872		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
   2873		int j;
   2874
   2875		if (!txr->tx_buf_ring)
   2876			continue;
   2877
   2878		for (j = 0; j < max_idx;) {
   2879			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
   2880			struct sk_buff *skb;
   2881			int k, last;
   2882
   2883			if (i < bp->tx_nr_rings_xdp &&
   2884			    tx_buf->action == XDP_REDIRECT) {
   2885				dma_unmap_single(&pdev->dev,
   2886					dma_unmap_addr(tx_buf, mapping),
   2887					dma_unmap_len(tx_buf, len),
   2888					DMA_TO_DEVICE);
   2889				xdp_return_frame(tx_buf->xdpf);
   2890				tx_buf->action = 0;
   2891				tx_buf->xdpf = NULL;
   2892				j++;
   2893				continue;
   2894			}
   2895
   2896			skb = tx_buf->skb;
   2897			if (!skb) {
   2898				j++;
   2899				continue;
   2900			}
   2901
   2902			tx_buf->skb = NULL;
   2903
   2904			if (tx_buf->is_push) {
   2905				dev_kfree_skb(skb);
   2906				j += 2;
   2907				continue;
   2908			}
   2909
   2910			dma_unmap_single(&pdev->dev,
   2911					 dma_unmap_addr(tx_buf, mapping),
   2912					 skb_headlen(skb),
   2913					 DMA_TO_DEVICE);
   2914
   2915			last = tx_buf->nr_frags;
   2916			j += 2;
   2917			for (k = 0; k < last; k++, j++) {
   2918				int ring_idx = j & bp->tx_ring_mask;
   2919				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
   2920
   2921				tx_buf = &txr->tx_buf_ring[ring_idx];
   2922				dma_unmap_page(
   2923					&pdev->dev,
   2924					dma_unmap_addr(tx_buf, mapping),
   2925					skb_frag_size(frag), DMA_TO_DEVICE);
   2926			}
   2927			dev_kfree_skb(skb);
   2928		}
   2929		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
   2930	}
   2931}
   2932
   2933static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
   2934{
   2935	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
   2936	struct pci_dev *pdev = bp->pdev;
   2937	struct bnxt_tpa_idx_map *map;
   2938	int i, max_idx, max_agg_idx;
   2939
   2940	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
   2941	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
   2942	if (!rxr->rx_tpa)
   2943		goto skip_rx_tpa_free;
   2944
   2945	for (i = 0; i < bp->max_tpa; i++) {
   2946		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
   2947		u8 *data = tpa_info->data;
   2948
   2949		if (!data)
   2950			continue;
   2951
   2952		dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
   2953				       bp->rx_buf_use_size, bp->rx_dir,
   2954				       DMA_ATTR_WEAK_ORDERING);
   2955
   2956		tpa_info->data = NULL;
   2957
   2958		skb_free_frag(data);
   2959	}
   2960
   2961skip_rx_tpa_free:
   2962	if (!rxr->rx_buf_ring)
   2963		goto skip_rx_buf_free;
   2964
   2965	for (i = 0; i < max_idx; i++) {
   2966		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
   2967		dma_addr_t mapping = rx_buf->mapping;
   2968		void *data = rx_buf->data;
   2969
   2970		if (!data)
   2971			continue;
   2972
   2973		rx_buf->data = NULL;
   2974		if (BNXT_RX_PAGE_MODE(bp)) {
   2975			mapping -= bp->rx_dma_offset;
   2976			dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
   2977					     bp->rx_dir,
   2978					     DMA_ATTR_WEAK_ORDERING);
   2979			page_pool_recycle_direct(rxr->page_pool, data);
   2980		} else {
   2981			dma_unmap_single_attrs(&pdev->dev, mapping,
   2982					       bp->rx_buf_use_size, bp->rx_dir,
   2983					       DMA_ATTR_WEAK_ORDERING);
   2984			skb_free_frag(data);
   2985		}
   2986	}
   2987
   2988skip_rx_buf_free:
   2989	if (!rxr->rx_agg_ring)
   2990		goto skip_rx_agg_free;
   2991
   2992	for (i = 0; i < max_agg_idx; i++) {
   2993		struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
   2994		struct page *page = rx_agg_buf->page;
   2995
   2996		if (!page)
   2997			continue;
   2998
   2999		if (BNXT_RX_PAGE_MODE(bp)) {
   3000			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
   3001					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
   3002					     DMA_ATTR_WEAK_ORDERING);
   3003			rx_agg_buf->page = NULL;
   3004			__clear_bit(i, rxr->rx_agg_bmap);
   3005
   3006			page_pool_recycle_direct(rxr->page_pool, page);
   3007		} else {
   3008			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
   3009					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
   3010					     DMA_ATTR_WEAK_ORDERING);
   3011			rx_agg_buf->page = NULL;
   3012			__clear_bit(i, rxr->rx_agg_bmap);
   3013
   3014			__free_page(page);
   3015		}
   3016	}
   3017
   3018skip_rx_agg_free:
   3019	if (rxr->rx_page) {
   3020		__free_page(rxr->rx_page);
   3021		rxr->rx_page = NULL;
   3022	}
   3023	map = rxr->rx_tpa_idx_map;
   3024	if (map)
   3025		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
   3026}
   3027
   3028static void bnxt_free_rx_skbs(struct bnxt *bp)
   3029{
   3030	int i;
   3031
   3032	if (!bp->rx_ring)
   3033		return;
   3034
   3035	for (i = 0; i < bp->rx_nr_rings; i++)
   3036		bnxt_free_one_rx_ring_skbs(bp, i);
   3037}
   3038
   3039static void bnxt_free_skbs(struct bnxt *bp)
   3040{
   3041	bnxt_free_tx_skbs(bp);
   3042	bnxt_free_rx_skbs(bp);
   3043}
   3044
   3045static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
   3046{
   3047	u8 init_val = mem_init->init_val;
   3048	u16 offset = mem_init->offset;
   3049	u8 *p2 = p;
   3050	int i;
   3051
   3052	if (!init_val)
   3053		return;
   3054	if (offset == BNXT_MEM_INVALID_OFFSET) {
   3055		memset(p, init_val, len);
   3056		return;
   3057	}
   3058	for (i = 0; i < len; i += mem_init->size)
   3059		*(p2 + i + offset) = init_val;
   3060}
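
/* Worked example (values are illustrative only): with mem_init->size == 64,
 * mem_init->offset == 8 and mem_init->init_val == 0xff, the loop above writes
 * 0xff into byte 8 of every 64-byte element in the region.  When offset is
 * BNXT_MEM_INVALID_OFFSET, the whole region is memset to init_val instead.
 */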
   3061
   3062static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
   3063{
   3064	struct pci_dev *pdev = bp->pdev;
   3065	int i;
   3066
   3067	if (!rmem->pg_arr)
   3068		goto skip_pages;
   3069
   3070	for (i = 0; i < rmem->nr_pages; i++) {
   3071		if (!rmem->pg_arr[i])
   3072			continue;
   3073
   3074		dma_free_coherent(&pdev->dev, rmem->page_size,
   3075				  rmem->pg_arr[i], rmem->dma_arr[i]);
   3076
   3077		rmem->pg_arr[i] = NULL;
   3078	}
   3079skip_pages:
   3080	if (rmem->pg_tbl) {
   3081		size_t pg_tbl_size = rmem->nr_pages * 8;
   3082
   3083		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
   3084			pg_tbl_size = rmem->page_size;
   3085		dma_free_coherent(&pdev->dev, pg_tbl_size,
   3086				  rmem->pg_tbl, rmem->pg_tbl_map);
   3087		rmem->pg_tbl = NULL;
   3088	}
   3089	if (rmem->vmem_size && *rmem->vmem) {
   3090		vfree(*rmem->vmem);
   3091		*rmem->vmem = NULL;
   3092	}
   3093}
   3094
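/* Allocate the DMA-coherent pages described by @rmem.  When more than one
 * page (or an indirection level) is needed, a page table is allocated first
 * and each data page's DMA address is written into it, OR-ed with
 * PTU_PTE_VALID when the VALID/RING PTE flags are set and with
 * PTU_PTE_NEXT_TO_LAST / PTU_PTE_LAST on the last two entries of ring page
 * tables.
 */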
   3095static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
   3096{
   3097	struct pci_dev *pdev = bp->pdev;
   3098	u64 valid_bit = 0;
   3099	int i;
   3100
   3101	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
   3102		valid_bit = PTU_PTE_VALID;
   3103	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
   3104		size_t pg_tbl_size = rmem->nr_pages * 8;
   3105
   3106		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
   3107			pg_tbl_size = rmem->page_size;
   3108		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
   3109						  &rmem->pg_tbl_map,
   3110						  GFP_KERNEL);
   3111		if (!rmem->pg_tbl)
   3112			return -ENOMEM;
   3113	}
   3114
   3115	for (i = 0; i < rmem->nr_pages; i++) {
   3116		u64 extra_bits = valid_bit;
   3117
   3118		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
   3119						     rmem->page_size,
   3120						     &rmem->dma_arr[i],
   3121						     GFP_KERNEL);
   3122		if (!rmem->pg_arr[i])
   3123			return -ENOMEM;
   3124
   3125		if (rmem->mem_init)
   3126			bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
   3127					  rmem->page_size);
   3128		if (rmem->nr_pages > 1 || rmem->depth > 0) {
   3129			if (i == rmem->nr_pages - 2 &&
   3130			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
   3131				extra_bits |= PTU_PTE_NEXT_TO_LAST;
   3132			else if (i == rmem->nr_pages - 1 &&
   3133				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
   3134				extra_bits |= PTU_PTE_LAST;
   3135			rmem->pg_tbl[i] =
   3136				cpu_to_le64(rmem->dma_arr[i] | extra_bits);
   3137		}
   3138	}
   3139
   3140	if (rmem->vmem_size) {
   3141		*rmem->vmem = vzalloc(rmem->vmem_size);
   3142		if (!(*rmem->vmem))
   3143			return -ENOMEM;
   3144	}
   3145	return 0;
   3146}
   3147
   3148static void bnxt_free_tpa_info(struct bnxt *bp)
   3149{
   3150	int i;
   3151
   3152	for (i = 0; i < bp->rx_nr_rings; i++) {
   3153		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
   3154
   3155		kfree(rxr->rx_tpa_idx_map);
   3156		rxr->rx_tpa_idx_map = NULL;
   3157		if (rxr->rx_tpa) {
   3158			kfree(rxr->rx_tpa[0].agg_arr);
   3159			rxr->rx_tpa[0].agg_arr = NULL;
   3160		}
   3161		kfree(rxr->rx_tpa);
   3162		rxr->rx_tpa = NULL;
   3163	}
   3164}
   3165
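/* On P5 chips the TPA aggregation completions for a ring live in one
 * contiguous array: it is allocated once and hung off rx_tpa[0].agg_arr,
 * and every other rx_tpa[j].agg_arr points MAX_SKB_FRAGS entries further
 * into that same allocation, which is why bnxt_free_tpa_info() above only
 * frees rx_tpa[0].agg_arr.
 */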
   3166static int bnxt_alloc_tpa_info(struct bnxt *bp)
   3167{
   3168	int i, j, total_aggs = 0;
   3169
   3170	bp->max_tpa = MAX_TPA;
   3171	if (bp->flags & BNXT_FLAG_CHIP_P5) {
   3172		if (!bp->max_tpa_v2)
   3173			return 0;
   3174		bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
   3175		total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
   3176	}
   3177
   3178	for (i = 0; i < bp->rx_nr_rings; i++) {
   3179		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
   3180		struct rx_agg_cmp *agg;
   3181
   3182		rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
   3183				      GFP_KERNEL);
   3184		if (!rxr->rx_tpa)
   3185			return -ENOMEM;
   3186
   3187		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
   3188			continue;
   3189		agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
   3190		rxr->rx_tpa[0].agg_arr = agg;
   3191		if (!agg)
   3192			return -ENOMEM;
   3193		for (j = 1; j < bp->max_tpa; j++)
   3194			rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
   3195		rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
   3196					      GFP_KERNEL);
   3197		if (!rxr->rx_tpa_idx_map)
   3198			return -ENOMEM;
   3199	}
   3200	return 0;
   3201}
   3202
   3203static void bnxt_free_rx_rings(struct bnxt *bp)
   3204{
   3205	int i;
   3206
   3207	if (!bp->rx_ring)
   3208		return;
   3209
   3210	bnxt_free_tpa_info(bp);
   3211	for (i = 0; i < bp->rx_nr_rings; i++) {
   3212		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
   3213		struct bnxt_ring_struct *ring;
   3214
   3215		if (rxr->xdp_prog)
   3216			bpf_prog_put(rxr->xdp_prog);
   3217
   3218		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
   3219			xdp_rxq_info_unreg(&rxr->xdp_rxq);
   3220
   3221		page_pool_destroy(rxr->page_pool);
   3222		rxr->page_pool = NULL;
   3223
   3224		kfree(rxr->rx_agg_bmap);
   3225		rxr->rx_agg_bmap = NULL;
   3226
   3227		ring = &rxr->rx_ring_struct;
   3228		bnxt_free_ring(bp, &ring->ring_mem);
   3229
   3230		ring = &rxr->rx_agg_ring_struct;
   3231		bnxt_free_ring(bp, &ring->ring_mem);
   3232	}
   3233}
   3234
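        /* Each RX ring gets its own page_pool, sized to the RX ring and mapped
         * DMA_BIDIRECTIONAL (presumably so XDP programs can modify the buffers
         * without remapping).
         */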
   3235static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
   3236				   struct bnxt_rx_ring_info *rxr)
   3237{
   3238	struct page_pool_params pp = { 0 };
   3239
   3240	pp.pool_size = bp->rx_ring_size;
   3241	pp.nid = dev_to_node(&bp->pdev->dev);
   3242	pp.dev = &bp->pdev->dev;
   3243	pp.dma_dir = DMA_BIDIRECTIONAL;
   3244
   3245	rxr->page_pool = page_pool_create(&pp);
   3246	if (IS_ERR(rxr->page_pool)) {
   3247		int err = PTR_ERR(rxr->page_pool);
   3248
   3249		rxr->page_pool = NULL;
   3250		return err;
   3251	}
   3252	return 0;
   3253}
   3254
   3255static int bnxt_alloc_rx_rings(struct bnxt *bp)
   3256{
   3257	int i, rc = 0, agg_rings = 0;
   3258
   3259	if (!bp->rx_ring)
   3260		return -ENOMEM;
   3261
   3262	if (bp->flags & BNXT_FLAG_AGG_RINGS)
   3263		agg_rings = 1;
   3264
   3265	for (i = 0; i < bp->rx_nr_rings; i++) {
   3266		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
   3267		struct bnxt_ring_struct *ring;
   3268
   3269		ring = &rxr->rx_ring_struct;
   3270
   3271		rc = bnxt_alloc_rx_page_pool(bp, rxr);
   3272		if (rc)
   3273			return rc;
   3274
   3275		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
   3276		if (rc < 0)
   3277			return rc;
   3278
   3279		rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
   3280						MEM_TYPE_PAGE_POOL,
   3281						rxr->page_pool);
   3282		if (rc) {
   3283			xdp_rxq_info_unreg(&rxr->xdp_rxq);
   3284			return rc;
   3285		}
   3286
   3287		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
   3288		if (rc)
   3289			return rc;
   3290
   3291		ring->grp_idx = i;
   3292		if (agg_rings) {
   3293			u16 mem_size;
   3294
   3295			ring = &rxr->rx_agg_ring_struct;
   3296			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
   3297			if (rc)
   3298				return rc;
   3299
   3300			ring->grp_idx = i;
   3301			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
   3302			mem_size = rxr->rx_agg_bmap_size / 8;
   3303			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
   3304			if (!rxr->rx_agg_bmap)
   3305				return -ENOMEM;
   3306		}
   3307	}
   3308	if (bp->flags & BNXT_FLAG_TPA)
   3309		rc = bnxt_alloc_tpa_info(bp);
   3310	return rc;
   3311}
   3312
   3313static void bnxt_free_tx_rings(struct bnxt *bp)
   3314{
   3315	int i;
   3316	struct pci_dev *pdev = bp->pdev;
   3317
   3318	if (!bp->tx_ring)
   3319		return;
   3320
   3321	for (i = 0; i < bp->tx_nr_rings; i++) {
   3322		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
   3323		struct bnxt_ring_struct *ring;
   3324
   3325		if (txr->tx_push) {
   3326			dma_free_coherent(&pdev->dev, bp->tx_push_size,
   3327					  txr->tx_push, txr->tx_push_mapping);
   3328			txr->tx_push = NULL;
   3329		}
   3330
   3331		ring = &txr->tx_ring_struct;
   3332
   3333		bnxt_free_ring(bp, &ring->ring_mem);
   3334	}
   3335}
   3336
   3337static int bnxt_alloc_tx_rings(struct bnxt *bp)
   3338{
   3339	int i, j, rc;
   3340	struct pci_dev *pdev = bp->pdev;
   3341
   3342	bp->tx_push_size = 0;
   3343	if (bp->tx_push_thresh) {
   3344		int push_size;
   3345
   3346		push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
   3347					bp->tx_push_thresh);
   3348
   3349		if (push_size > 256) {
   3350			push_size = 0;
   3351			bp->tx_push_thresh = 0;
   3352		}
   3353
   3354		bp->tx_push_size = push_size;
   3355	}
   3356
   3357	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
   3358		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
   3359		struct bnxt_ring_struct *ring;
   3360		u8 qidx;
   3361
   3362		ring = &txr->tx_ring_struct;
   3363
   3364		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
   3365		if (rc)
   3366			return rc;
   3367
   3368		ring->grp_idx = txr->bnapi->index;
   3369		if (bp->tx_push_size) {
   3370			dma_addr_t mapping;
   3371
    3372			/* One pre-allocated DMA buffer to back up the
    3373			 * TX push operation
    3374			 */
   3375			txr->tx_push = dma_alloc_coherent(&pdev->dev,
   3376						bp->tx_push_size,
   3377						&txr->tx_push_mapping,
   3378						GFP_KERNEL);
   3379
   3380			if (!txr->tx_push)
   3381				return -ENOMEM;
   3382
   3383			mapping = txr->tx_push_mapping +
   3384				sizeof(struct tx_push_bd);
   3385			txr->data_mapping = cpu_to_le64(mapping);
   3386		}
   3387		qidx = bp->tc_to_qidx[j];
   3388		ring->queue_id = bp->q_info[qidx].queue_id;
   3389		spin_lock_init(&txr->xdp_tx_lock);
   3390		if (i < bp->tx_nr_rings_xdp)
   3391			continue;
   3392		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
   3393			j++;
   3394	}
   3395	return 0;
   3396}
   3397
   3398static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
   3399{
   3400	struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
   3401
   3402	kfree(cpr->cp_desc_ring);
   3403	cpr->cp_desc_ring = NULL;
   3404	ring->ring_mem.pg_arr = NULL;
   3405	kfree(cpr->cp_desc_mapping);
   3406	cpr->cp_desc_mapping = NULL;
   3407	ring->ring_mem.dma_arr = NULL;
   3408}
   3409
   3410static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
   3411{
   3412	cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
   3413	if (!cpr->cp_desc_ring)
   3414		return -ENOMEM;
   3415	cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
   3416				       GFP_KERNEL);
   3417	if (!cpr->cp_desc_mapping)
   3418		return -ENOMEM;
   3419	return 0;
   3420}
   3421
   3422static void bnxt_free_all_cp_arrays(struct bnxt *bp)
   3423{
   3424	int i;
   3425
   3426	if (!bp->bnapi)
   3427		return;
   3428	for (i = 0; i < bp->cp_nr_rings; i++) {
   3429		struct bnxt_napi *bnapi = bp->bnapi[i];
   3430
   3431		if (!bnapi)
   3432			continue;
   3433		bnxt_free_cp_arrays(&bnapi->cp_ring);
   3434	}
   3435}
   3436
   3437static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
   3438{
   3439	int i, n = bp->cp_nr_pages;
   3440
   3441	for (i = 0; i < bp->cp_nr_rings; i++) {
   3442		struct bnxt_napi *bnapi = bp->bnapi[i];
   3443		int rc;
   3444
   3445		if (!bnapi)
   3446			continue;
   3447		rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
   3448		if (rc)
   3449			return rc;
   3450	}
   3451	return 0;
   3452}
   3453
   3454static void bnxt_free_cp_rings(struct bnxt *bp)
   3455{
   3456	int i;
   3457
   3458	if (!bp->bnapi)
   3459		return;
   3460
   3461	for (i = 0; i < bp->cp_nr_rings; i++) {
   3462		struct bnxt_napi *bnapi = bp->bnapi[i];
   3463		struct bnxt_cp_ring_info *cpr;
   3464		struct bnxt_ring_struct *ring;
   3465		int j;
   3466
   3467		if (!bnapi)
   3468			continue;
   3469
   3470		cpr = &bnapi->cp_ring;
   3471		ring = &cpr->cp_ring_struct;
   3472
   3473		bnxt_free_ring(bp, &ring->ring_mem);
   3474
   3475		for (j = 0; j < 2; j++) {
   3476			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
   3477
   3478			if (cpr2) {
   3479				ring = &cpr2->cp_ring_struct;
   3480				bnxt_free_ring(bp, &ring->ring_mem);
   3481				bnxt_free_cp_arrays(cpr2);
   3482				kfree(cpr2);
   3483				cpr->cp_ring_arr[j] = NULL;
   3484			}
   3485		}
   3486	}
   3487}
   3488
   3489static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
   3490{
   3491	struct bnxt_ring_mem_info *rmem;
   3492	struct bnxt_ring_struct *ring;
   3493	struct bnxt_cp_ring_info *cpr;
   3494	int rc;
   3495
   3496	cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
   3497	if (!cpr)
   3498		return NULL;
   3499
   3500	rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
   3501	if (rc) {
   3502		bnxt_free_cp_arrays(cpr);
   3503		kfree(cpr);
   3504		return NULL;
   3505	}
   3506	ring = &cpr->cp_ring_struct;
   3507	rmem = &ring->ring_mem;
   3508	rmem->nr_pages = bp->cp_nr_pages;
   3509	rmem->page_size = HW_CMPD_RING_SIZE;
   3510	rmem->pg_arr = (void **)cpr->cp_desc_ring;
   3511	rmem->dma_arr = cpr->cp_desc_mapping;
   3512	rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
   3513	rc = bnxt_alloc_ring(bp, rmem);
   3514	if (rc) {
   3515		bnxt_free_ring(bp, rmem);
   3516		bnxt_free_cp_arrays(cpr);
   3517		kfree(cpr);
   3518		cpr = NULL;
   3519	}
   3520	return cpr;
   3521}
   3522
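        /* On P5 chips the per-NAPI completion ring serves as the notification
         * queue, and the actual RX and TX completion rings are allocated below
         * via bnxt_alloc_cp_sub_ring() and hung off cp_ring_arr[BNXT_RX_HDL]
         * and cp_ring_arr[BNXT_TX_HDL]; with shared rings one NAPI may own
         * both.
         */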
   3523static int bnxt_alloc_cp_rings(struct bnxt *bp)
   3524{
   3525	bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
   3526	int i, rc, ulp_base_vec, ulp_msix;
   3527
   3528	ulp_msix = bnxt_get_ulp_msix_num(bp);
   3529	ulp_base_vec = bnxt_get_ulp_msix_base(bp);
   3530	for (i = 0; i < bp->cp_nr_rings; i++) {
   3531		struct bnxt_napi *bnapi = bp->bnapi[i];
   3532		struct bnxt_cp_ring_info *cpr;
   3533		struct bnxt_ring_struct *ring;
   3534
   3535		if (!bnapi)
   3536			continue;
   3537
   3538		cpr = &bnapi->cp_ring;
   3539		cpr->bnapi = bnapi;
   3540		ring = &cpr->cp_ring_struct;
   3541
   3542		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
   3543		if (rc)
   3544			return rc;
   3545
   3546		if (ulp_msix && i >= ulp_base_vec)
   3547			ring->map_idx = i + ulp_msix;
   3548		else
   3549			ring->map_idx = i;
   3550
   3551		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
   3552			continue;
   3553
   3554		if (i < bp->rx_nr_rings) {
   3555			struct bnxt_cp_ring_info *cpr2 =
   3556				bnxt_alloc_cp_sub_ring(bp);
   3557
   3558			cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
   3559			if (!cpr2)
   3560				return -ENOMEM;
   3561			cpr2->bnapi = bnapi;
   3562		}
   3563		if ((sh && i < bp->tx_nr_rings) ||
   3564		    (!sh && i >= bp->rx_nr_rings)) {
   3565			struct bnxt_cp_ring_info *cpr2 =
   3566				bnxt_alloc_cp_sub_ring(bp);
   3567
   3568			cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
   3569			if (!cpr2)
   3570				return -ENOMEM;
   3571			cpr2->bnapi = bnapi;
   3572		}
   3573	}
   3574	return 0;
   3575}
   3576
   3577static void bnxt_init_ring_struct(struct bnxt *bp)
   3578{
   3579	int i;
   3580
   3581	for (i = 0; i < bp->cp_nr_rings; i++) {
   3582		struct bnxt_napi *bnapi = bp->bnapi[i];
   3583		struct bnxt_ring_mem_info *rmem;
   3584		struct bnxt_cp_ring_info *cpr;
   3585		struct bnxt_rx_ring_info *rxr;
   3586		struct bnxt_tx_ring_info *txr;
   3587		struct bnxt_ring_struct *ring;
   3588
   3589		if (!bnapi)
   3590			continue;
   3591
   3592		cpr = &bnapi->cp_ring;
   3593		ring = &cpr->cp_ring_struct;
   3594		rmem = &ring->ring_mem;
   3595		rmem->nr_pages = bp->cp_nr_pages;
   3596		rmem->page_size = HW_CMPD_RING_SIZE;
   3597		rmem->pg_arr = (void **)cpr->cp_desc_ring;
   3598		rmem->dma_arr = cpr->cp_desc_mapping;
   3599		rmem->vmem_size = 0;
   3600
   3601		rxr = bnapi->rx_ring;
   3602		if (!rxr)
   3603			goto skip_rx;
   3604
   3605		ring = &rxr->rx_ring_struct;
   3606		rmem = &ring->ring_mem;
   3607		rmem->nr_pages = bp->rx_nr_pages;
   3608		rmem->page_size = HW_RXBD_RING_SIZE;
   3609		rmem->pg_arr = (void **)rxr->rx_desc_ring;
   3610		rmem->dma_arr = rxr->rx_desc_mapping;
   3611		rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
   3612		rmem->vmem = (void **)&rxr->rx_buf_ring;
   3613
   3614		ring = &rxr->rx_agg_ring_struct;
   3615		rmem = &ring->ring_mem;
   3616		rmem->nr_pages = bp->rx_agg_nr_pages;
   3617		rmem->page_size = HW_RXBD_RING_SIZE;
   3618		rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
   3619		rmem->dma_arr = rxr->rx_agg_desc_mapping;
   3620		rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
   3621		rmem->vmem = (void **)&rxr->rx_agg_ring;
   3622
   3623skip_rx:
   3624		txr = bnapi->tx_ring;
   3625		if (!txr)
   3626			continue;
   3627
   3628		ring = &txr->tx_ring_struct;
   3629		rmem = &ring->ring_mem;
   3630		rmem->nr_pages = bp->tx_nr_pages;
   3631		rmem->page_size = HW_RXBD_RING_SIZE;
   3632		rmem->pg_arr = (void **)txr->tx_desc_ring;
   3633		rmem->dma_arr = txr->tx_desc_mapping;
   3634		rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
   3635		rmem->vmem = (void **)&txr->tx_buf_ring;
   3636	}
   3637}
   3638
   3639static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
   3640{
   3641	int i;
   3642	u32 prod;
   3643	struct rx_bd **rx_buf_ring;
   3644
   3645	rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
   3646	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
   3647		int j;
   3648		struct rx_bd *rxbd;
   3649
   3650		rxbd = rx_buf_ring[i];
   3651		if (!rxbd)
   3652			continue;
   3653
   3654		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
   3655			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
   3656			rxbd->rx_bd_opaque = prod;
   3657		}
   3658	}
   3659}
   3660
   3661static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
   3662{
   3663	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
   3664	struct net_device *dev = bp->dev;
   3665	u32 prod;
   3666	int i;
   3667
   3668	prod = rxr->rx_prod;
   3669	for (i = 0; i < bp->rx_ring_size; i++) {
   3670		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
   3671			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
   3672				    ring_nr, i, bp->rx_ring_size);
   3673			break;
   3674		}
   3675		prod = NEXT_RX(prod);
   3676	}
   3677	rxr->rx_prod = prod;
   3678
   3679	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
   3680		return 0;
   3681
   3682	prod = rxr->rx_agg_prod;
   3683	for (i = 0; i < bp->rx_agg_ring_size; i++) {
   3684		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
    3685				netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
    3686					    ring_nr, i, bp->rx_agg_ring_size);
   3687			break;
   3688		}
   3689		prod = NEXT_RX_AGG(prod);
   3690	}
   3691	rxr->rx_agg_prod = prod;
   3692
   3693	if (rxr->rx_tpa) {
   3694		dma_addr_t mapping;
   3695		u8 *data;
   3696
   3697		for (i = 0; i < bp->max_tpa; i++) {
   3698			data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
   3699			if (!data)
   3700				return -ENOMEM;
   3701
   3702			rxr->rx_tpa[i].data = data;
   3703			rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
   3704			rxr->rx_tpa[i].mapping = mapping;
   3705		}
   3706	}
   3707	return 0;
   3708}
   3709
   3710static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
   3711{
   3712	struct bnxt_rx_ring_info *rxr;
   3713	struct bnxt_ring_struct *ring;
   3714	u32 type;
   3715
   3716	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
   3717		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
   3718
   3719	if (NET_IP_ALIGN == 2)
   3720		type |= RX_BD_FLAGS_SOP;
   3721
   3722	rxr = &bp->rx_ring[ring_nr];
   3723	ring = &rxr->rx_ring_struct;
   3724	bnxt_init_rxbd_pages(ring, type);
   3725
   3726	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
   3727		bpf_prog_add(bp->xdp_prog, 1);
   3728		rxr->xdp_prog = bp->xdp_prog;
   3729	}
   3730	ring->fw_ring_id = INVALID_HW_RING_ID;
   3731
   3732	ring = &rxr->rx_agg_ring_struct;
   3733	ring->fw_ring_id = INVALID_HW_RING_ID;
   3734
   3735	if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
   3736		type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
   3737			RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
   3738
   3739		bnxt_init_rxbd_pages(ring, type);
   3740	}
   3741
   3742	return bnxt_alloc_one_rx_ring(bp, ring_nr);
   3743}
   3744
   3745static void bnxt_init_cp_rings(struct bnxt *bp)
   3746{
   3747	int i, j;
   3748
   3749	for (i = 0; i < bp->cp_nr_rings; i++) {
   3750		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
   3751		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
   3752
   3753		ring->fw_ring_id = INVALID_HW_RING_ID;
   3754		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
   3755		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
   3756		for (j = 0; j < 2; j++) {
   3757			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
   3758
   3759			if (!cpr2)
   3760				continue;
   3761
   3762			ring = &cpr2->cp_ring_struct;
   3763			ring->fw_ring_id = INVALID_HW_RING_ID;
   3764			cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
   3765			cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
   3766		}
   3767	}
   3768}
   3769
   3770static int bnxt_init_rx_rings(struct bnxt *bp)
   3771{
   3772	int i, rc = 0;
   3773
   3774	if (BNXT_RX_PAGE_MODE(bp)) {
   3775		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
   3776		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
   3777	} else {
   3778		bp->rx_offset = BNXT_RX_OFFSET;
   3779		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
   3780	}
   3781
   3782	for (i = 0; i < bp->rx_nr_rings; i++) {
   3783		rc = bnxt_init_one_rx_ring(bp, i);
   3784		if (rc)
   3785			break;
   3786	}
   3787
   3788	return rc;
   3789}
   3790
   3791static int bnxt_init_tx_rings(struct bnxt *bp)
   3792{
   3793	u16 i;
   3794
   3795	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
   3796				   BNXT_MIN_TX_DESC_CNT);
   3797
   3798	for (i = 0; i < bp->tx_nr_rings; i++) {
   3799		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
   3800		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
   3801
   3802		ring->fw_ring_id = INVALID_HW_RING_ID;
   3803	}
   3804
   3805	return 0;
   3806}
   3807
   3808static void bnxt_free_ring_grps(struct bnxt *bp)
   3809{
   3810	kfree(bp->grp_info);
   3811	bp->grp_info = NULL;
   3812}
   3813
   3814static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
   3815{
   3816	int i;
   3817
   3818	if (irq_re_init) {
   3819		bp->grp_info = kcalloc(bp->cp_nr_rings,
   3820				       sizeof(struct bnxt_ring_grp_info),
   3821				       GFP_KERNEL);
   3822		if (!bp->grp_info)
   3823			return -ENOMEM;
   3824	}
   3825	for (i = 0; i < bp->cp_nr_rings; i++) {
   3826		if (irq_re_init)
   3827			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
   3828		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
   3829		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
   3830		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
   3831		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
   3832	}
   3833	return 0;
   3834}
   3835
   3836static void bnxt_free_vnics(struct bnxt *bp)
   3837{
   3838	kfree(bp->vnic_info);
   3839	bp->vnic_info = NULL;
   3840	bp->nr_vnics = 0;
   3841}
   3842
   3843static int bnxt_alloc_vnics(struct bnxt *bp)
   3844{
   3845	int num_vnics = 1;
   3846
   3847#ifdef CONFIG_RFS_ACCEL
   3848	if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
   3849		num_vnics += bp->rx_nr_rings;
   3850#endif
   3851
   3852	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
   3853		num_vnics++;
   3854
   3855	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
   3856				GFP_KERNEL);
   3857	if (!bp->vnic_info)
   3858		return -ENOMEM;
   3859
   3860	bp->nr_vnics = num_vnics;
   3861	return 0;
   3862}
   3863
   3864static void bnxt_init_vnics(struct bnxt *bp)
   3865{
   3866	int i;
   3867
   3868	for (i = 0; i < bp->nr_vnics; i++) {
   3869		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
   3870		int j;
   3871
   3872		vnic->fw_vnic_id = INVALID_HW_RING_ID;
   3873		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
   3874			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
   3875
   3876		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
   3877
   3878		if (bp->vnic_info[i].rss_hash_key) {
   3879			if (i == 0)
   3880				prandom_bytes(vnic->rss_hash_key,
   3881					      HW_HASH_KEY_SIZE);
   3882			else
   3883				memcpy(vnic->rss_hash_key,
   3884				       bp->vnic_info[0].rss_hash_key,
   3885				       HW_HASH_KEY_SIZE);
   3886		}
   3887	}
   3888}
   3889
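        /* Round the number of ring pages up to a power of two, always leaving
         * at least one descriptor slot beyond ring_size unused.  For
         * illustration, with a hypothetical 256 descriptors per page,
         * ring_size = 1000 gives pages = 3, then 4 after the increment,
         * already a power of two; ring_size = 1024 would round up to 8 pages.
         */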
   3890static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
   3891{
   3892	int pages;
   3893
   3894	pages = ring_size / desc_per_pg;
   3895
   3896	if (!pages)
   3897		return 1;
   3898
   3899	pages++;
   3900
   3901	while (pages & (pages - 1))
   3902		pages++;
   3903
   3904	return pages;
   3905}
   3906
   3907void bnxt_set_tpa_flags(struct bnxt *bp)
   3908{
   3909	bp->flags &= ~BNXT_FLAG_TPA;
   3910	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
   3911		return;
   3912	if (bp->dev->features & NETIF_F_LRO)
   3913		bp->flags |= BNXT_FLAG_LRO;
   3914	else if (bp->dev->features & NETIF_F_GRO_HW)
   3915		bp->flags |= BNXT_FLAG_GRO;
   3916}
   3917
   3918/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
   3919 * be set on entry.
   3920 */
   3921void bnxt_set_ring_params(struct bnxt *bp)
   3922{
   3923	u32 ring_size, rx_size, rx_space, max_rx_cmpl;
   3924	u32 agg_factor = 0, agg_ring_size = 0;
   3925
   3926	/* 8 for CRC and VLAN */
   3927	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
   3928
   3929	rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
   3930		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
   3931
   3932	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
   3933	ring_size = bp->rx_ring_size;
   3934	bp->rx_agg_ring_size = 0;
   3935	bp->rx_agg_nr_pages = 0;
   3936
   3937	if (bp->flags & BNXT_FLAG_TPA)
   3938		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
   3939
   3940	bp->flags &= ~BNXT_FLAG_JUMBO;
   3941	if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
   3942		u32 jumbo_factor;
   3943
   3944		bp->flags |= BNXT_FLAG_JUMBO;
   3945		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
   3946		if (jumbo_factor > agg_factor)
   3947			agg_factor = jumbo_factor;
   3948	}
   3949	if (agg_factor) {
   3950		if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
   3951			ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
   3952			netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
   3953				    bp->rx_ring_size, ring_size);
   3954			bp->rx_ring_size = ring_size;
   3955		}
   3956		agg_ring_size = ring_size * agg_factor;
   3957
   3958		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
   3959							RX_DESC_CNT);
   3960		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
   3961			u32 tmp = agg_ring_size;
   3962
   3963			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
   3964			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
   3965			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
   3966				    tmp, agg_ring_size);
   3967		}
   3968		bp->rx_agg_ring_size = agg_ring_size;
   3969		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
   3970
   3971		if (BNXT_RX_PAGE_MODE(bp)) {
   3972			rx_space = BNXT_PAGE_MODE_BUF_SIZE;
   3973			rx_size = BNXT_MAX_PAGE_MODE_MTU;
   3974		} else {
   3975			rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
   3976			rx_space = rx_size + NET_SKB_PAD +
   3977				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
   3978		}
   3979	}
   3980
   3981	bp->rx_buf_use_size = rx_size;
   3982	bp->rx_buf_size = rx_space;
   3983
   3984	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
   3985	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
   3986
   3987	ring_size = bp->tx_ring_size;
   3988	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
   3989	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
   3990
   3991	max_rx_cmpl = bp->rx_ring_size;
   3992	/* MAX TPA needs to be added because TPA_START completions are
   3993	 * immediately recycled, so the TPA completions are not bound by
   3994	 * the RX ring size.
   3995	 */
   3996	if (bp->flags & BNXT_FLAG_TPA)
   3997		max_rx_cmpl += bp->max_tpa;
   3998	/* RX and TPA completions are 32-byte, all others are 16-byte */
   3999	ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
   4000	bp->cp_ring_size = ring_size;
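        	/* For illustration (all values hypothetical): with rx_ring_size
        	 * 511, max_tpa 64, agg_ring_size 2044 and tx_ring_size 511, the
        	 * completion ring must hold (511 + 64) * 2 + 2044 + 511 = 3705
        	 * 16-byte entries.
        	 */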
   4001
   4002	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
   4003	if (bp->cp_nr_pages > MAX_CP_PAGES) {
   4004		bp->cp_nr_pages = MAX_CP_PAGES;
   4005		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
   4006		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
   4007			    ring_size, bp->cp_ring_size);
   4008	}
   4009	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
   4010	bp->cp_ring_mask = bp->cp_bit - 1;
   4011}
   4012
   4013/* Changing allocation mode of RX rings.
   4014 * TODO: Update when extending xdp_rxq_info to support allocation modes.
   4015 */
   4016int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
   4017{
   4018	if (page_mode) {
   4019		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
   4020		bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
   4021
   4022		if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
   4023			bp->flags |= BNXT_FLAG_JUMBO;
   4024			bp->rx_skb_func = bnxt_rx_multi_page_skb;
   4025			bp->dev->max_mtu =
   4026				min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
   4027		} else {
   4028			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
   4029			bp->rx_skb_func = bnxt_rx_page_skb;
   4030			bp->dev->max_mtu =
   4031				min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
   4032		}
   4033		bp->rx_dir = DMA_BIDIRECTIONAL;
   4034		/* Disable LRO or GRO_HW */
   4035		netdev_update_features(bp->dev);
   4036	} else {
   4037		bp->dev->max_mtu = bp->max_mtu;
   4038		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
   4039		bp->rx_dir = DMA_FROM_DEVICE;
   4040		bp->rx_skb_func = bnxt_rx_skb;
   4041	}
   4042	return 0;
   4043}
   4044
   4045static void bnxt_free_vnic_attributes(struct bnxt *bp)
   4046{
   4047	int i;
   4048	struct bnxt_vnic_info *vnic;
   4049	struct pci_dev *pdev = bp->pdev;
   4050
   4051	if (!bp->vnic_info)
   4052		return;
   4053
   4054	for (i = 0; i < bp->nr_vnics; i++) {
   4055		vnic = &bp->vnic_info[i];
   4056
   4057		kfree(vnic->fw_grp_ids);
   4058		vnic->fw_grp_ids = NULL;
   4059
   4060		kfree(vnic->uc_list);
   4061		vnic->uc_list = NULL;
   4062
   4063		if (vnic->mc_list) {
   4064			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
   4065					  vnic->mc_list, vnic->mc_list_mapping);
   4066			vnic->mc_list = NULL;
   4067		}
   4068
   4069		if (vnic->rss_table) {
   4070			dma_free_coherent(&pdev->dev, vnic->rss_table_size,
   4071					  vnic->rss_table,
   4072					  vnic->rss_table_dma_addr);
   4073			vnic->rss_table = NULL;
   4074		}
   4075
   4076		vnic->rss_hash_key = NULL;
   4077		vnic->flags = 0;
   4078	}
   4079}
   4080
   4081static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
   4082{
   4083	int i, rc = 0, size;
   4084	struct bnxt_vnic_info *vnic;
   4085	struct pci_dev *pdev = bp->pdev;
   4086	int max_rings;
   4087
   4088	for (i = 0; i < bp->nr_vnics; i++) {
   4089		vnic = &bp->vnic_info[i];
   4090
   4091		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
   4092			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
   4093
   4094			if (mem_size > 0) {
   4095				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
   4096				if (!vnic->uc_list) {
   4097					rc = -ENOMEM;
   4098					goto out;
   4099				}
   4100			}
   4101		}
   4102
   4103		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
   4104			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
   4105			vnic->mc_list =
   4106				dma_alloc_coherent(&pdev->dev,
   4107						   vnic->mc_list_size,
   4108						   &vnic->mc_list_mapping,
   4109						   GFP_KERNEL);
   4110			if (!vnic->mc_list) {
   4111				rc = -ENOMEM;
   4112				goto out;
   4113			}
   4114		}
   4115
   4116		if (bp->flags & BNXT_FLAG_CHIP_P5)
   4117			goto vnic_skip_grps;
   4118
   4119		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
   4120			max_rings = bp->rx_nr_rings;
   4121		else
   4122			max_rings = 1;
   4123
   4124		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
   4125		if (!vnic->fw_grp_ids) {
   4126			rc = -ENOMEM;
   4127			goto out;
   4128		}
   4129vnic_skip_grps:
   4130		if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
   4131		    !(vnic->flags & BNXT_VNIC_RSS_FLAG))
   4132			continue;
   4133
   4134		/* Allocate rss table and hash key */
   4135		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
   4136		if (bp->flags & BNXT_FLAG_CHIP_P5)
   4137			size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
   4138
   4139		vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
   4140		vnic->rss_table = dma_alloc_coherent(&pdev->dev,
   4141						     vnic->rss_table_size,
   4142						     &vnic->rss_table_dma_addr,
   4143						     GFP_KERNEL);
   4144		if (!vnic->rss_table) {
   4145			rc = -ENOMEM;
   4146			goto out;
   4147		}
   4148
   4149		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
   4150		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
   4151	}
   4152	return 0;
   4153
   4154out:
   4155	return rc;
   4156}
   4157
   4158static void bnxt_free_hwrm_resources(struct bnxt *bp)
   4159{
   4160	struct bnxt_hwrm_wait_token *token;
   4161
   4162	dma_pool_destroy(bp->hwrm_dma_pool);
   4163	bp->hwrm_dma_pool = NULL;
   4164
   4165	rcu_read_lock();
   4166	hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
   4167		WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
   4168	rcu_read_unlock();
   4169}
   4170
   4171static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
   4172{
   4173	bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
   4174					    BNXT_HWRM_DMA_SIZE,
   4175					    BNXT_HWRM_DMA_ALIGN, 0);
   4176	if (!bp->hwrm_dma_pool)
   4177		return -ENOMEM;
   4178
   4179	INIT_HLIST_HEAD(&bp->hwrm_pending_list);
   4180
   4181	return 0;
   4182}
   4183
   4184static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
   4185{
   4186	kfree(stats->hw_masks);
   4187	stats->hw_masks = NULL;
   4188	kfree(stats->sw_stats);
   4189	stats->sw_stats = NULL;
   4190	if (stats->hw_stats) {
   4191		dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
   4192				  stats->hw_stats_map);
   4193		stats->hw_stats = NULL;
   4194	}
   4195}
   4196
   4197static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
   4198				bool alloc_masks)
   4199{
   4200	stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
   4201					     &stats->hw_stats_map, GFP_KERNEL);
   4202	if (!stats->hw_stats)
   4203		return -ENOMEM;
   4204
   4205	stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
   4206	if (!stats->sw_stats)
   4207		goto stats_mem_err;
   4208
   4209	if (alloc_masks) {
   4210		stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
   4211		if (!stats->hw_masks)
   4212			goto stats_mem_err;
   4213	}
   4214	return 0;
   4215
   4216stats_mem_err:
   4217	bnxt_free_stats_mem(bp, stats);
   4218	return -ENOMEM;
   4219}
   4220
   4221static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
   4222{
   4223	int i;
   4224
   4225	for (i = 0; i < count; i++)
   4226		mask_arr[i] = mask;
   4227}
   4228
   4229static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
   4230{
   4231	int i;
   4232
   4233	for (i = 0; i < count; i++)
   4234		mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
   4235}
   4236
   4237static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
   4238				    struct bnxt_stats_mem *stats)
   4239{
   4240	struct hwrm_func_qstats_ext_output *resp;
   4241	struct hwrm_func_qstats_ext_input *req;
   4242	__le64 *hw_masks;
   4243	int rc;
   4244
   4245	if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
   4246	    !(bp->flags & BNXT_FLAG_CHIP_P5))
   4247		return -EOPNOTSUPP;
   4248
   4249	rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
   4250	if (rc)
   4251		return rc;
   4252
   4253	req->fid = cpu_to_le16(0xffff);
   4254	req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
   4255
   4256	resp = hwrm_req_hold(bp, req);
   4257	rc = hwrm_req_send(bp, req);
   4258	if (!rc) {
   4259		hw_masks = &resp->rx_ucast_pkts;
   4260		bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
   4261	}
   4262	hwrm_req_drop(bp, req);
   4263	return rc;
   4264}
   4265
   4266static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
   4267static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
   4268
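        /* Query the firmware for the valid width of each hardware counter and
         * store the result as a per-counter mask so rollover can be accounted
         * for.  If the firmware cannot report masks, fall back to assuming
         * 48-bit ring counters on P5 chips (full 64-bit otherwise) and 40-bit
         * port counters.
         */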
   4269static void bnxt_init_stats(struct bnxt *bp)
   4270{
   4271	struct bnxt_napi *bnapi = bp->bnapi[0];
   4272	struct bnxt_cp_ring_info *cpr;
   4273	struct bnxt_stats_mem *stats;
   4274	__le64 *rx_stats, *tx_stats;
   4275	int rc, rx_count, tx_count;
   4276	u64 *rx_masks, *tx_masks;
   4277	u64 mask;
   4278	u8 flags;
   4279
   4280	cpr = &bnapi->cp_ring;
   4281	stats = &cpr->stats;
   4282	rc = bnxt_hwrm_func_qstat_ext(bp, stats);
   4283	if (rc) {
   4284		if (bp->flags & BNXT_FLAG_CHIP_P5)
   4285			mask = (1ULL << 48) - 1;
   4286		else
   4287			mask = -1ULL;
   4288		bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
   4289	}
   4290	if (bp->flags & BNXT_FLAG_PORT_STATS) {
   4291		stats = &bp->port_stats;
   4292		rx_stats = stats->hw_stats;
   4293		rx_masks = stats->hw_masks;
   4294		rx_count = sizeof(struct rx_port_stats) / 8;
   4295		tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
   4296		tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
   4297		tx_count = sizeof(struct tx_port_stats) / 8;
   4298
   4299		flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
   4300		rc = bnxt_hwrm_port_qstats(bp, flags);
   4301		if (rc) {
   4302			mask = (1ULL << 40) - 1;
   4303
   4304			bnxt_fill_masks(rx_masks, mask, rx_count);
   4305			bnxt_fill_masks(tx_masks, mask, tx_count);
   4306		} else {
   4307			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
   4308			bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
   4309			bnxt_hwrm_port_qstats(bp, 0);
   4310		}
   4311	}
   4312	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
   4313		stats = &bp->rx_port_stats_ext;
   4314		rx_stats = stats->hw_stats;
   4315		rx_masks = stats->hw_masks;
   4316		rx_count = sizeof(struct rx_port_stats_ext) / 8;
   4317		stats = &bp->tx_port_stats_ext;
   4318		tx_stats = stats->hw_stats;
   4319		tx_masks = stats->hw_masks;
   4320		tx_count = sizeof(struct tx_port_stats_ext) / 8;
   4321
   4322		flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
   4323		rc = bnxt_hwrm_port_qstats_ext(bp, flags);
   4324		if (rc) {
   4325			mask = (1ULL << 40) - 1;
   4326
   4327			bnxt_fill_masks(rx_masks, mask, rx_count);
   4328			if (tx_stats)
   4329				bnxt_fill_masks(tx_masks, mask, tx_count);
   4330		} else {
   4331			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
   4332			if (tx_stats)
   4333				bnxt_copy_hw_masks(tx_masks, tx_stats,
   4334						   tx_count);
   4335			bnxt_hwrm_port_qstats_ext(bp, 0);
   4336		}
   4337	}
   4338}
   4339
   4340static void bnxt_free_port_stats(struct bnxt *bp)
   4341{
   4342	bp->flags &= ~BNXT_FLAG_PORT_STATS;
   4343	bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
   4344
   4345	bnxt_free_stats_mem(bp, &bp->port_stats);
   4346	bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
   4347	bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
   4348}
   4349
   4350static void bnxt_free_ring_stats(struct bnxt *bp)
   4351{
   4352	int i;
   4353
   4354	if (!bp->bnapi)
   4355		return;
   4356
   4357	for (i = 0; i < bp->cp_nr_rings; i++) {
   4358		struct bnxt_napi *bnapi = bp->bnapi[i];
   4359		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
   4360
   4361		bnxt_free_stats_mem(bp, &cpr->stats);
   4362	}
   4363}
   4364
   4365static int bnxt_alloc_stats(struct bnxt *bp)
   4366{
   4367	u32 size, i;
   4368	int rc;
   4369
   4370	size = bp->hw_ring_stats_size;
   4371
   4372	for (i = 0; i < bp->cp_nr_rings; i++) {
   4373		struct bnxt_napi *bnapi = bp->bnapi[i];
   4374		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
   4375
   4376		cpr->stats.len = size;
   4377		rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
   4378		if (rc)
   4379			return rc;
   4380
   4381		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
   4382	}
   4383
   4384	if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
   4385		return 0;
   4386
   4387	if (bp->port_stats.hw_stats)
   4388		goto alloc_ext_stats;
   4389
   4390	bp->port_stats.len = BNXT_PORT_STATS_SIZE;
   4391	rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
   4392	if (rc)
   4393		return rc;
   4394
   4395	bp->flags |= BNXT_FLAG_PORT_STATS;
   4396
   4397alloc_ext_stats:
    4398	/* Display extended statistics only if FW supports them */
   4399	if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
   4400		if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
   4401			return 0;
   4402
   4403	if (bp->rx_port_stats_ext.hw_stats)
   4404		goto alloc_tx_ext_stats;
   4405
   4406	bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
   4407	rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
   4408	/* Extended stats are optional */
   4409	if (rc)
   4410		return 0;
   4411
   4412alloc_tx_ext_stats:
   4413	if (bp->tx_port_stats_ext.hw_stats)
   4414		return 0;
   4415
   4416	if (bp->hwrm_spec_code >= 0x10902 ||
   4417	    (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
   4418		bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
   4419		rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
   4420		/* Extended stats are optional */
   4421		if (rc)
   4422			return 0;
   4423	}
   4424	bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
   4425	return 0;
   4426}
   4427
   4428static void bnxt_clear_ring_indices(struct bnxt *bp)
   4429{
   4430	int i;
   4431
   4432	if (!bp->bnapi)
   4433		return;
   4434
   4435	for (i = 0; i < bp->cp_nr_rings; i++) {
   4436		struct bnxt_napi *bnapi = bp->bnapi[i];
   4437		struct bnxt_cp_ring_info *cpr;
   4438		struct bnxt_rx_ring_info *rxr;
   4439		struct bnxt_tx_ring_info *txr;
   4440
   4441		if (!bnapi)
   4442			continue;
   4443
   4444		cpr = &bnapi->cp_ring;
   4445		cpr->cp_raw_cons = 0;
   4446
   4447		txr = bnapi->tx_ring;
   4448		if (txr) {
   4449			txr->tx_prod = 0;
   4450			txr->tx_cons = 0;
   4451		}
   4452
   4453		rxr = bnapi->rx_ring;
   4454		if (rxr) {
   4455			rxr->rx_prod = 0;
   4456			rxr->rx_agg_prod = 0;
   4457			rxr->rx_sw_agg_prod = 0;
   4458			rxr->rx_next_cons = 0;
   4459		}
   4460	}
   4461}
   4462
   4463static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
   4464{
   4465#ifdef CONFIG_RFS_ACCEL
   4466	int i;
   4467
    4468	/* We are under rtnl_lock and all our NAPIs have been disabled,
    4469	 * so it is safe to delete the hash table.
    4470	 */
   4471	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
   4472		struct hlist_head *head;
   4473		struct hlist_node *tmp;
   4474		struct bnxt_ntuple_filter *fltr;
   4475
   4476		head = &bp->ntp_fltr_hash_tbl[i];
   4477		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
   4478			hlist_del(&fltr->hash);
   4479			kfree(fltr);
   4480		}
   4481	}
   4482	if (irq_reinit) {
   4483		kfree(bp->ntp_fltr_bmap);
   4484		bp->ntp_fltr_bmap = NULL;
   4485	}
   4486	bp->ntp_fltr_count = 0;
   4487#endif
   4488}
   4489
   4490static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
   4491{
   4492#ifdef CONFIG_RFS_ACCEL
   4493	int i, rc = 0;
   4494
   4495	if (!(bp->flags & BNXT_FLAG_RFS))
   4496		return 0;
   4497
   4498	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
   4499		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
   4500
   4501	bp->ntp_fltr_count = 0;
   4502	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
   4503				    sizeof(long),
   4504				    GFP_KERNEL);
   4505
   4506	if (!bp->ntp_fltr_bmap)
   4507		rc = -ENOMEM;
   4508
   4509	return rc;
   4510#else
   4511	return 0;
   4512#endif
   4513}
   4514
   4515static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
   4516{
   4517	bnxt_free_vnic_attributes(bp);
   4518	bnxt_free_tx_rings(bp);
   4519	bnxt_free_rx_rings(bp);
   4520	bnxt_free_cp_rings(bp);
   4521	bnxt_free_all_cp_arrays(bp);
   4522	bnxt_free_ntp_fltrs(bp, irq_re_init);
   4523	if (irq_re_init) {
   4524		bnxt_free_ring_stats(bp);
   4525		if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
   4526		    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
   4527			bnxt_free_port_stats(bp);
   4528		bnxt_free_ring_grps(bp);
   4529		bnxt_free_vnics(bp);
   4530		kfree(bp->tx_ring_map);
   4531		bp->tx_ring_map = NULL;
   4532		kfree(bp->tx_ring);
   4533		bp->tx_ring = NULL;
   4534		kfree(bp->rx_ring);
   4535		bp->rx_ring = NULL;
   4536		kfree(bp->bnapi);
   4537		bp->bnapi = NULL;
   4538	} else {
   4539		bnxt_clear_ring_indices(bp);
   4540	}
   4541}
   4542
   4543static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
   4544{
   4545	int i, j, rc, size, arr_size;
   4546	void *bnapi;
   4547
   4548	if (irq_re_init) {
   4549		/* Allocate bnapi mem pointer array and mem block for
   4550		 * all queues
   4551		 */
   4552		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
   4553				bp->cp_nr_rings);
   4554		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
   4555		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
   4556		if (!bnapi)
   4557			return -ENOMEM;
   4558
   4559		bp->bnapi = bnapi;
   4560		bnapi += arr_size;
   4561		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
   4562			bp->bnapi[i] = bnapi;
   4563			bp->bnapi[i]->index = i;
   4564			bp->bnapi[i]->bp = bp;
   4565			if (bp->flags & BNXT_FLAG_CHIP_P5) {
   4566				struct bnxt_cp_ring_info *cpr =
   4567					&bp->bnapi[i]->cp_ring;
   4568
   4569				cpr->cp_ring_struct.ring_mem.flags =
   4570					BNXT_RMEM_RING_PTE_FLAG;
   4571			}
   4572		}
   4573
   4574		bp->rx_ring = kcalloc(bp->rx_nr_rings,
   4575				      sizeof(struct bnxt_rx_ring_info),
   4576				      GFP_KERNEL);
   4577		if (!bp->rx_ring)
   4578			return -ENOMEM;
   4579
   4580		for (i = 0; i < bp->rx_nr_rings; i++) {
   4581			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
   4582
   4583			if (bp->flags & BNXT_FLAG_CHIP_P5) {
   4584				rxr->rx_ring_struct.ring_mem.flags =
   4585					BNXT_RMEM_RING_PTE_FLAG;
   4586				rxr->rx_agg_ring_struct.ring_mem.flags =
   4587					BNXT_RMEM_RING_PTE_FLAG;
   4588			}
   4589			rxr->bnapi = bp->bnapi[i];
   4590			bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
   4591		}
   4592
   4593		bp->tx_ring = kcalloc(bp->tx_nr_rings,
   4594				      sizeof(struct bnxt_tx_ring_info),
   4595				      GFP_KERNEL);
   4596		if (!bp->tx_ring)
   4597			return -ENOMEM;
   4598
   4599		bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
   4600					  GFP_KERNEL);
   4601
   4602		if (!bp->tx_ring_map)
   4603			return -ENOMEM;
   4604
   4605		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
   4606			j = 0;
   4607		else
   4608			j = bp->rx_nr_rings;
   4609
   4610		for (i = 0; i < bp->tx_nr_rings; i++, j++) {
   4611			struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
   4612
   4613			if (bp->flags & BNXT_FLAG_CHIP_P5)
   4614				txr->tx_ring_struct.ring_mem.flags =
   4615					BNXT_RMEM_RING_PTE_FLAG;
   4616			txr->bnapi = bp->bnapi[j];
   4617			bp->bnapi[j]->tx_ring = txr;
   4618			bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
   4619			if (i >= bp->tx_nr_rings_xdp) {
   4620				txr->txq_index = i - bp->tx_nr_rings_xdp;
   4621				bp->bnapi[j]->tx_int = bnxt_tx_int;
   4622			} else {
   4623				bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
   4624				bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
   4625			}
   4626		}
   4627
   4628		rc = bnxt_alloc_stats(bp);
   4629		if (rc)
   4630			goto alloc_mem_err;
   4631		bnxt_init_stats(bp);
   4632
   4633		rc = bnxt_alloc_ntp_fltrs(bp);
   4634		if (rc)
   4635			goto alloc_mem_err;
   4636
   4637		rc = bnxt_alloc_vnics(bp);
   4638		if (rc)
   4639			goto alloc_mem_err;
   4640	}
   4641
   4642	rc = bnxt_alloc_all_cp_arrays(bp);
   4643	if (rc)
   4644		goto alloc_mem_err;
   4645
   4646	bnxt_init_ring_struct(bp);
   4647
   4648	rc = bnxt_alloc_rx_rings(bp);
   4649	if (rc)
   4650		goto alloc_mem_err;
   4651
   4652	rc = bnxt_alloc_tx_rings(bp);
   4653	if (rc)
   4654		goto alloc_mem_err;
   4655
   4656	rc = bnxt_alloc_cp_rings(bp);
   4657	if (rc)
   4658		goto alloc_mem_err;
   4659
   4660	bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
   4661				  BNXT_VNIC_UCAST_FLAG;
   4662	rc = bnxt_alloc_vnic_attributes(bp);
   4663	if (rc)
   4664		goto alloc_mem_err;
   4665	return 0;
   4666
   4667alloc_mem_err:
   4668	bnxt_free_mem(bp, true);
   4669	return rc;
   4670}
   4671
   4672static void bnxt_disable_int(struct bnxt *bp)
   4673{
   4674	int i;
   4675
   4676	if (!bp->bnapi)
   4677		return;
   4678
   4679	for (i = 0; i < bp->cp_nr_rings; i++) {
   4680		struct bnxt_napi *bnapi = bp->bnapi[i];
   4681		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
   4682		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
   4683
   4684		if (ring->fw_ring_id != INVALID_HW_RING_ID)
   4685			bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
   4686	}
   4687}
   4688
   4689static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
   4690{
   4691	struct bnxt_napi *bnapi = bp->bnapi[n];
   4692	struct bnxt_cp_ring_info *cpr;
   4693
   4694	cpr = &bnapi->cp_ring;
   4695	return cpr->cp_ring_struct.map_idx;
   4696}
   4697
   4698static void bnxt_disable_int_sync(struct bnxt *bp)
   4699{
   4700	int i;
   4701
   4702	if (!bp->irq_tbl)
   4703		return;
   4704
   4705	atomic_inc(&bp->intr_sem);
   4706
   4707	bnxt_disable_int(bp);
   4708	for (i = 0; i < bp->cp_nr_rings; i++) {
   4709		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
   4710
   4711		synchronize_irq(bp->irq_tbl[map_idx].vector);
   4712	}
   4713}
   4714
   4715static void bnxt_enable_int(struct bnxt *bp)
   4716{
   4717	int i;
   4718
   4719	atomic_set(&bp->intr_sem, 0);
   4720	for (i = 0; i < bp->cp_nr_rings; i++) {
   4721		struct bnxt_napi *bnapi = bp->bnapi[i];
   4722		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
   4723
   4724		bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
   4725	}
   4726}
   4727
   4728int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
   4729			    bool async_only)
   4730{
   4731	DECLARE_BITMAP(async_events_bmap, 256);
   4732	u32 *events = (u32 *)async_events_bmap;
   4733	struct hwrm_func_drv_rgtr_output *resp;
   4734	struct hwrm_func_drv_rgtr_input *req;
   4735	u32 flags;
   4736	int rc, i;
   4737
   4738	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
   4739	if (rc)
   4740		return rc;
   4741
   4742	req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
   4743				   FUNC_DRV_RGTR_REQ_ENABLES_VER |
   4744				   FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
   4745
   4746	req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
   4747	flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
   4748	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
   4749		flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
   4750	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
   4751		flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
   4752			 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
   4753	req->flags = cpu_to_le32(flags);
   4754	req->ver_maj_8b = DRV_VER_MAJ;
   4755	req->ver_min_8b = DRV_VER_MIN;
   4756	req->ver_upd_8b = DRV_VER_UPD;
   4757	req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
   4758	req->ver_min = cpu_to_le16(DRV_VER_MIN);
   4759	req->ver_upd = cpu_to_le16(DRV_VER_UPD);
   4760
   4761	if (BNXT_PF(bp)) {
   4762		u32 data[8];
   4763		int i;
   4764
   4765		memset(data, 0, sizeof(data));
   4766		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
   4767			u16 cmd = bnxt_vf_req_snif[i];
   4768			unsigned int bit, idx;
   4769
   4770			idx = cmd / 32;
   4771			bit = cmd % 32;
   4772			data[idx] |= 1 << bit;
   4773		}
   4774
   4775		for (i = 0; i < 8; i++)
   4776			req->vf_req_fwd[i] = cpu_to_le32(data[i]);
   4777
   4778		req->enables |=
   4779			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
   4780	}
   4781
   4782	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
   4783		req->flags |= cpu_to_le32(
   4784			FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
   4785
   4786	memset(async_events_bmap, 0, sizeof(async_events_bmap));
   4787	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
   4788		u16 event_id = bnxt_async_events_arr[i];
   4789
   4790		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
   4791		    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
   4792			continue;
   4793		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
   4794	}
   4795	if (bmap && bmap_size) {
   4796		for (i = 0; i < bmap_size; i++) {
   4797			if (test_bit(i, bmap))
   4798				__set_bit(i, async_events_bmap);
   4799		}
   4800	}
   4801	for (i = 0; i < 8; i++)
   4802		req->async_event_fwd[i] |= cpu_to_le32(events[i]);
   4803
   4804	if (async_only)
   4805		req->enables =
   4806			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
   4807
   4808	resp = hwrm_req_hold(bp, req);
   4809	rc = hwrm_req_send(bp, req);
   4810	if (!rc) {
   4811		set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
   4812		if (resp->flags &
   4813		    cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
   4814			bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
   4815	}
   4816	hwrm_req_drop(bp, req);
   4817	return rc;
   4818}
   4819
   4820int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
   4821{
   4822	struct hwrm_func_drv_unrgtr_input *req;
   4823	int rc;
   4824
   4825	if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
   4826		return 0;
   4827
   4828	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
   4829	if (rc)
   4830		return rc;
   4831	return hwrm_req_send(bp, req);
   4832}
   4833
   4834static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
   4835{
   4836	struct hwrm_tunnel_dst_port_free_input *req;
   4837	int rc;
   4838
   4839	if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
   4840	    bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
   4841		return 0;
   4842	if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
   4843	    bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
   4844		return 0;
   4845
   4846	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
   4847	if (rc)
   4848		return rc;
   4849
   4850	req->tunnel_type = tunnel_type;
   4851
   4852	switch (tunnel_type) {
   4853	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
   4854		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
   4855		bp->vxlan_port = 0;
   4856		bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
   4857		break;
   4858	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
   4859		req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
   4860		bp->nge_port = 0;
   4861		bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
   4862		break;
   4863	default:
   4864		break;
   4865	}
   4866
   4867	rc = hwrm_req_send(bp, req);
   4868	if (rc)
   4869		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
   4870			   rc);
   4871	return rc;
   4872}
   4873
   4874static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
   4875					   u8 tunnel_type)
   4876{
   4877	struct hwrm_tunnel_dst_port_alloc_output *resp;
   4878	struct hwrm_tunnel_dst_port_alloc_input *req;
   4879	int rc;
   4880
   4881	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
   4882	if (rc)
   4883		return rc;
   4884
   4885	req->tunnel_type = tunnel_type;
   4886	req->tunnel_dst_port_val = port;
   4887
   4888	resp = hwrm_req_hold(bp, req);
   4889	rc = hwrm_req_send(bp, req);
   4890	if (rc) {
   4891		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
   4892			   rc);
   4893		goto err_out;
   4894	}
   4895
   4896	switch (tunnel_type) {
   4897	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
   4898		bp->vxlan_port = port;
   4899		bp->vxlan_fw_dst_port_id =
   4900			le16_to_cpu(resp->tunnel_dst_port_id);
   4901		break;
   4902	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
   4903		bp->nge_port = port;
   4904		bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
   4905		break;
   4906	default:
   4907		break;
   4908	}
   4909
   4910err_out:
   4911	hwrm_req_drop(bp, req);
   4912	return rc;
   4913}
   4914
   4915static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
   4916{
   4917	struct hwrm_cfa_l2_set_rx_mask_input *req;
   4918	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
   4919	int rc;
   4920
   4921	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
   4922	if (rc)
   4923		return rc;
   4924
   4925	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
   4926	if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
   4927		req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
   4928		req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
   4929	}
   4930	req->mask = cpu_to_le32(vnic->rx_mask);
   4931	return hwrm_req_send_silent(bp, req);
   4932}
   4933
   4934#ifdef CONFIG_RFS_ACCEL
   4935static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
   4936					    struct bnxt_ntuple_filter *fltr)
   4937{
   4938	struct hwrm_cfa_ntuple_filter_free_input *req;
   4939	int rc;
   4940
   4941	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
   4942	if (rc)
   4943		return rc;
   4944
   4945	req->ntuple_filter_id = fltr->filter_id;
   4946	return hwrm_req_send(bp, req);
   4947}
   4948
   4949#define BNXT_NTP_FLTR_FLAGS					\
   4950	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
   4951	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
   4952	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |	\
   4953	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
   4954	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
   4955	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
   4956	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
   4957	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
   4958	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
   4959	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |		\
   4960	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
   4961	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |		\
   4962	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
   4963	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
   4964
   4965#define BNXT_NTP_TUNNEL_FLTR_FLAG				\
   4966		CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
   4967
   4968static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
   4969					     struct bnxt_ntuple_filter *fltr)
   4970{
   4971	struct hwrm_cfa_ntuple_filter_alloc_output *resp;
   4972	struct hwrm_cfa_ntuple_filter_alloc_input *req;
   4973	struct flow_keys *keys = &fltr->fkeys;
   4974	struct bnxt_vnic_info *vnic;
   4975	u32 flags = 0;
   4976	int rc;
   4977
   4978	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
   4979	if (rc)
   4980		return rc;
   4981
   4982	req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
   4983
   4984	if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
   4985		flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
   4986		req->dst_id = cpu_to_le16(fltr->rxq);
   4987	} else {
   4988		vnic = &bp->vnic_info[fltr->rxq + 1];
   4989		req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
   4990	}
   4991	req->flags = cpu_to_le32(flags);
   4992	req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
   4993
   4994	req->ethertype = htons(ETH_P_IP);
   4995	memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
   4996	req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
   4997	req->ip_protocol = keys->basic.ip_proto;
   4998
   4999	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
   5000		int i;
   5001
   5002		req->ethertype = htons(ETH_P_IPV6);
   5003		req->ip_addr_type =
   5004			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
   5005		*(struct in6_addr *)&req->src_ipaddr[0] =
   5006			keys->addrs.v6addrs.src;
   5007		*(struct in6_addr *)&req->dst_ipaddr[0] =
   5008			keys->addrs.v6addrs.dst;
   5009		for (i = 0; i < 4; i++) {
   5010			req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
   5011			req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
   5012		}
   5013	} else {
   5014		req->src_ipaddr[0] = keys->addrs.v4addrs.src;
   5015		req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
   5016		req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
   5017		req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
   5018	}
   5019	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
   5020		req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
   5021		req->tunnel_type =
   5022			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
   5023	}
   5024
   5025	req->src_port = keys->ports.src;
   5026	req->src_port_mask = cpu_to_be16(0xffff);
   5027	req->dst_port = keys->ports.dst;
   5028	req->dst_port_mask = cpu_to_be16(0xffff);
   5029
   5030	resp = hwrm_req_hold(bp, req);
   5031	rc = hwrm_req_send(bp, req);
   5032	if (!rc)
   5033		fltr->filter_id = resp->ntuple_filter_id;
   5034	hwrm_req_drop(bp, req);
   5035	return rc;
   5036}
   5037#endif
   5038
   5039static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
   5040				     const u8 *mac_addr)
   5041{
   5042	struct hwrm_cfa_l2_filter_alloc_output *resp;
   5043	struct hwrm_cfa_l2_filter_alloc_input *req;
   5044	int rc;
   5045
   5046	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
   5047	if (rc)
   5048		return rc;
   5049
   5050	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
   5051	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
   5052		req->flags |=
   5053			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
   5054	req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
   5055	req->enables =
   5056		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
   5057			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
   5058			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
   5059	memcpy(req->l2_addr, mac_addr, ETH_ALEN);
   5060	req->l2_addr_mask[0] = 0xff;
   5061	req->l2_addr_mask[1] = 0xff;
   5062	req->l2_addr_mask[2] = 0xff;
   5063	req->l2_addr_mask[3] = 0xff;
   5064	req->l2_addr_mask[4] = 0xff;
   5065	req->l2_addr_mask[5] = 0xff;
   5066
   5067	resp = hwrm_req_hold(bp, req);
   5068	rc = hwrm_req_send(bp, req);
   5069	if (!rc)
   5070		bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
   5071							resp->l2_filter_id;
   5072	hwrm_req_drop(bp, req);
   5073	return rc;
   5074}
   5075
   5076static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
   5077{
   5078	struct hwrm_cfa_l2_filter_free_input *req;
   5079	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
   5080	int rc;
   5081
   5082	/* Any associated ntuple filters will also be cleared by firmware. */
   5083	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
   5084	if (rc)
   5085		return rc;
   5086	hwrm_req_hold(bp, req);
   5087	for (i = 0; i < num_of_vnics; i++) {
   5088		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
   5089
   5090		for (j = 0; j < vnic->uc_filter_count; j++) {
   5091			req->l2_filter_id = vnic->fw_l2_filter_id[j];
   5092
   5093			rc = hwrm_req_send(bp, req);
   5094		}
   5095		vnic->uc_filter_count = 0;
   5096	}
   5097	hwrm_req_drop(bp, req);
   5098	return rc;
   5099}
   5100
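        /* Enable or disable TPA (hardware GRO/LRO aggregation) on a VNIC.
         * A zero tpa_flags leaves the aggregation flags cleared and turns TPA
         * off; otherwise the aggregation limits are derived from the MTU and
         * the RX page size.
         */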
   5101static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
   5102{
   5103	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
   5104	u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
   5105	struct hwrm_vnic_tpa_cfg_input *req;
   5106	int rc;
   5107
   5108	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
   5109		return 0;
   5110
   5111	rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
   5112	if (rc)
   5113		return rc;
   5114
   5115	if (tpa_flags) {
   5116		u16 mss = bp->dev->mtu - 40;
   5117		u32 nsegs, n, segs = 0, flags;
   5118
   5119		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
   5120			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
   5121			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
   5122			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
   5123			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
   5124		if (tpa_flags & BNXT_FLAG_GRO)
   5125			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
   5126
   5127		req->flags = cpu_to_le32(flags);
   5128
   5129		req->enables =
   5130			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
   5131				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
   5132				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
   5133
    5134		/* The number of segs is in log2 units, and the first packet
    5135		 * is not counted as part of these units.
    5136		 */
   5137		if (mss <= BNXT_RX_PAGE_SIZE) {
   5138			n = BNXT_RX_PAGE_SIZE / mss;
   5139			nsegs = (MAX_SKB_FRAGS - 1) * n;
   5140		} else {
   5141			n = mss / BNXT_RX_PAGE_SIZE;
   5142			if (mss & (BNXT_RX_PAGE_SIZE - 1))
   5143				n++;
   5144			nsegs = (MAX_SKB_FRAGS - n) / n;
   5145		}
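        		/* Example (assuming a 4K rx page, 1500-byte MTU and
        		 * MAX_SKB_FRAGS == 17): mss == 1460, n == 2, nsegs == 32,
        		 * so non-P5 chips program max_agg_segs = ilog2(32) == 5.
        		 */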
   5146
   5147		if (bp->flags & BNXT_FLAG_CHIP_P5) {
   5148			segs = MAX_TPA_SEGS_P5;
   5149			max_aggs = bp->max_tpa;
   5150		} else {
   5151			segs = ilog2(nsegs);
   5152		}
   5153		req->max_agg_segs = cpu_to_le16(segs);
   5154		req->max_aggs = cpu_to_le16(max_aggs);
   5155
   5156		req->min_agg_len = cpu_to_le32(512);
   5157	}
   5158	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
   5159
   5160	return hwrm_req_send(bp, req);
   5161}
   5162
   5163static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
   5164{
   5165	struct bnxt_ring_grp_info *grp_info;
   5166
   5167	grp_info = &bp->grp_info[ring->grp_idx];
   5168	return grp_info->cp_fw_ring_id;
   5169}
   5170
   5171static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
   5172{
   5173	if (bp->flags & BNXT_FLAG_CHIP_P5) {
   5174		struct bnxt_napi *bnapi = rxr->bnapi;
   5175		struct bnxt_cp_ring_info *cpr;
   5176
   5177		cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
   5178		return cpr->cp_ring_struct.fw_ring_id;
   5179	} else {
   5180		return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
   5181	}
   5182}
   5183
   5184static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
   5185{
   5186	if (bp->flags & BNXT_FLAG_CHIP_P5) {
   5187		struct bnxt_napi *bnapi = txr->bnapi;
   5188		struct bnxt_cp_ring_info *cpr;
   5189
   5190		cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
   5191		return cpr->cp_ring_struct.fw_ring_id;
   5192	} else {
   5193		return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
   5194	}
   5195}
   5196
   5197static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
   5198{
   5199	int entries;
   5200
   5201	if (bp->flags & BNXT_FLAG_CHIP_P5)
   5202		entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
   5203	else
   5204		entries = HW_HASH_INDEX_SIZE;
   5205
   5206	bp->rss_indir_tbl_entries = entries;
   5207	bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
   5208					  GFP_KERNEL);
   5209	if (!bp->rss_indir_tbl)
   5210		return -ENOMEM;
   5211	return 0;
   5212}
   5213
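        /* Populate the default RSS indirection table by spreading entries
         * across the usable RX rings with ethtool_rxfh_indir_default() and
         * zeroing any padding beyond the ethtool-visible table size.
         */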
   5214static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
   5215{
   5216	u16 max_rings, max_entries, pad, i;
   5217
   5218	if (!bp->rx_nr_rings)
   5219		return;
   5220
   5221	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
   5222		max_rings = bp->rx_nr_rings - 1;
   5223	else
   5224		max_rings = bp->rx_nr_rings;
   5225
   5226	max_entries = bnxt_get_rxfh_indir_size(bp->dev);
   5227
   5228	for (i = 0; i < max_entries; i++)
   5229		bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
   5230
   5231	pad = bp->rss_indir_tbl_entries - max_entries;
   5232	if (pad)
   5233		memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
   5234}
   5235
   5236static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
   5237{
   5238	u16 i, tbl_size, max_ring = 0;
   5239
   5240	if (!bp->rss_indir_tbl)
   5241		return 0;
   5242
   5243	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
   5244	for (i = 0; i < tbl_size; i++)
   5245		max_ring = max(max_ring, bp->rss_indir_tbl[i]);
   5246	return max_ring;
   5247}
   5248
   5249int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
   5250{
   5251	if (bp->flags & BNXT_FLAG_CHIP_P5)
   5252		return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
   5253	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
   5254		return 2;
   5255	return 1;
   5256}
   5257
   5258static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
   5259{
   5260	bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
   5261	u16 i, j;
   5262
   5263	/* Fill the RSS indirection table with ring group ids */
   5264	for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
   5265		if (!no_rss)
   5266			j = bp->rss_indir_tbl[i];
   5267		vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
   5268	}
   5269}
   5270
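        /* On P5 chips each RSS indirection entry is a pair of firmware ids:
         * the RX ring id followed by its completion ring id, so two words
         * are written per table entry.
         */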
   5271static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
   5272				      struct bnxt_vnic_info *vnic)
   5273{
   5274	__le16 *ring_tbl = vnic->rss_table;
   5275	struct bnxt_rx_ring_info *rxr;
   5276	u16 tbl_size, i;
   5277
   5278	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
   5279
   5280	for (i = 0; i < tbl_size; i++) {
   5281		u16 ring_id, j;
   5282
   5283		j = bp->rss_indir_tbl[i];
   5284		rxr = &bp->rx_ring[j];
   5285
   5286		ring_id = rxr->rx_ring_struct.fw_ring_id;
   5287		*ring_tbl++ = cpu_to_le16(ring_id);
   5288		ring_id = bnxt_cp_ring_for_rx(bp, rxr);
   5289		*ring_tbl++ = cpu_to_le16(ring_id);
   5290	}
   5291}
   5292
   5293static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
   5294{
   5295	if (bp->flags & BNXT_FLAG_CHIP_P5)
   5296		__bnxt_fill_hw_rss_tbl_p5(bp, vnic);
   5297	else
   5298		__bnxt_fill_hw_rss_tbl(bp, vnic);
   5299}
   5300
   5301static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
   5302{
   5303	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
   5304	struct hwrm_vnic_rss_cfg_input *req;
   5305	int rc;
   5306
   5307	if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
   5308	    vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
   5309		return 0;
   5310
   5311	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
   5312	if (rc)
   5313		return rc;
   5314
   5315	if (set_rss) {
   5316		bnxt_fill_hw_rss_tbl(bp, vnic);
   5317		req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
   5318		req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
   5319		req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
   5320		req->hash_key_tbl_addr =
   5321			cpu_to_le64(vnic->rss_hash_key_dma_addr);
   5322	}
   5323	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
   5324	return hwrm_req_send(bp, req);
   5325}
   5326
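        /* P5 RSS programming: one VNIC_RSS_CFG request is sent per RSS
         * context, advancing the ring table DMA address by
         * BNXT_RSS_TABLE_SIZE_P5 for each ring-table pair index.
         */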
   5327static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
   5328{
   5329	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
   5330	struct hwrm_vnic_rss_cfg_input *req;
   5331	dma_addr_t ring_tbl_map;
   5332	u32 i, nr_ctxs;
   5333	int rc;
   5334
   5335	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
   5336	if (rc)
   5337		return rc;
   5338
   5339	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
   5340	if (!set_rss)
   5341		return hwrm_req_send(bp, req);
   5342
   5343	bnxt_fill_hw_rss_tbl(bp, vnic);
   5344	req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
   5345	req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
   5346	req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
   5347	ring_tbl_map = vnic->rss_table_dma_addr;
   5348	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
   5349
   5350	hwrm_req_hold(bp, req);
   5351	for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
   5352		req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
   5353		req->ring_table_pair_index = i;
   5354		req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
   5355		rc = hwrm_req_send(bp, req);
   5356		if (rc)
   5357			goto exit;
   5358	}
   5359
   5360exit:
   5361	hwrm_req_drop(bp, req);
   5362	return rc;
   5363}
   5364
   5365static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
   5366{
   5367	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
   5368	struct hwrm_vnic_plcmodes_cfg_input *req;
   5369	int rc;
   5370
   5371	rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
   5372	if (rc)
   5373		return rc;
   5374
   5375	req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
   5376	req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
   5377
   5378	if (BNXT_RX_PAGE_MODE(bp) && !BNXT_RX_JUMBO_MODE(bp)) {
   5379		req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
   5380					  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
   5381		req->enables |=
   5382			cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
   5383	}
   5384	/* thresholds not implemented in firmware yet */
   5385	req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
   5386	req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
   5387	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
   5388	return hwrm_req_send(bp, req);
   5389}
   5390
   5391static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
   5392					u16 ctx_idx)
   5393{
   5394	struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
   5395
   5396	if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
   5397		return;
   5398
   5399	req->rss_cos_lb_ctx_id =
   5400		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
   5401
   5402	hwrm_req_send(bp, req);
   5403	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
   5404}
   5405
   5406static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
   5407{
   5408	int i, j;
   5409
   5410	for (i = 0; i < bp->nr_vnics; i++) {
   5411		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
   5412
   5413		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
   5414			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
   5415				bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
   5416		}
   5417	}
   5418	bp->rsscos_nr_ctxs = 0;
   5419}
   5420
   5421static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
   5422{
   5423	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
   5424	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
   5425	int rc;
   5426
   5427	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
   5428	if (rc)
   5429		return rc;
   5430
   5431	resp = hwrm_req_hold(bp, req);
   5432	rc = hwrm_req_send(bp, req);
   5433	if (!rc)
   5434		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
   5435			le16_to_cpu(resp->rss_cos_lb_ctx_id);
   5436	hwrm_req_drop(bp, req);
   5437
   5438	return rc;
   5439}
   5440
   5441static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
   5442{
   5443	if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
   5444		return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
   5445	return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
   5446}
   5447
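        /* Configure a VNIC.  On P5 chips only the default RX/completion ring
         * ids and the MRU are programmed; on older chips the default ring
         * group and the RSS/COS rules are set as well.  VLAN stripping and
         * the RoCE VNIC mode are applied as request flags.
         */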
   5448int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
   5449{
   5450	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
   5451	struct hwrm_vnic_cfg_input *req;
   5452	unsigned int ring = 0, grp_idx;
   5453	u16 def_vlan = 0;
   5454	int rc;
   5455
   5456	rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
   5457	if (rc)
   5458		return rc;
   5459
   5460	if (bp->flags & BNXT_FLAG_CHIP_P5) {
   5461		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
   5462
   5463		req->default_rx_ring_id =
   5464			cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
   5465		req->default_cmpl_ring_id =
   5466			cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
   5467		req->enables =
   5468			cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
   5469				    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
   5470		goto vnic_mru;
   5471	}
   5472	req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
    5473	/* Only RSS is supported for now; COS & LB are TBD */
   5474	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
   5475		req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
   5476		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
   5477					   VNIC_CFG_REQ_ENABLES_MRU);
   5478	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
   5479		req->rss_rule =
   5480			cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
   5481		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
   5482					   VNIC_CFG_REQ_ENABLES_MRU);
   5483		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
   5484	} else {
   5485		req->rss_rule = cpu_to_le16(0xffff);
   5486	}
   5487
   5488	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
   5489	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
   5490		req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
   5491		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
   5492	} else {
   5493		req->cos_rule = cpu_to_le16(0xffff);
   5494	}
   5495
   5496	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
   5497		ring = 0;
   5498	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
   5499		ring = vnic_id - 1;
   5500	else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
   5501		ring = bp->rx_nr_rings - 1;
   5502
   5503	grp_idx = bp->rx_ring[ring].bnapi->index;
   5504	req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
   5505	req->lb_rule = cpu_to_le16(0xffff);
   5506vnic_mru:
   5507	req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
   5508
   5509	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
   5510#ifdef CONFIG_BNXT_SRIOV
   5511	if (BNXT_VF(bp))
   5512		def_vlan = bp->vf.vlan;
   5513#endif
   5514	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
   5515		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
   5516	if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
   5517		req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
   5518
   5519	return hwrm_req_send(bp, req);
   5520}
   5521
   5522static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
   5523{
   5524	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
   5525		struct hwrm_vnic_free_input *req;
   5526
   5527		if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
   5528			return;
   5529
   5530		req->vnic_id =
   5531			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
   5532
   5533		hwrm_req_send(bp, req);
   5534		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
   5535	}
   5536}
   5537
   5538static void bnxt_hwrm_vnic_free(struct bnxt *bp)
   5539{
   5540	u16 i;
   5541
   5542	for (i = 0; i < bp->nr_vnics; i++)
   5543		bnxt_hwrm_vnic_free_one(bp, i);
   5544}
   5545
   5546static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
   5547				unsigned int start_rx_ring_idx,
   5548				unsigned int nr_rings)
   5549{
   5550	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
   5551	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
   5552	struct hwrm_vnic_alloc_output *resp;
   5553	struct hwrm_vnic_alloc_input *req;
   5554	int rc;
   5555
   5556	rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
   5557	if (rc)
   5558		return rc;
   5559
   5560	if (bp->flags & BNXT_FLAG_CHIP_P5)
   5561		goto vnic_no_ring_grps;
   5562
   5563	/* map ring groups to this vnic */
   5564	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
   5565		grp_idx = bp->rx_ring[i].bnapi->index;
   5566		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
   5567			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
   5568				   j, nr_rings);
   5569			break;
   5570		}
   5571		vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
   5572	}
   5573
   5574vnic_no_ring_grps:
   5575	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
   5576		vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
   5577	if (vnic_id == 0)
   5578		req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
   5579
   5580	resp = hwrm_req_hold(bp, req);
   5581	rc = hwrm_req_send(bp, req);
   5582	if (!rc)
   5583		vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
   5584	hwrm_req_drop(bp, req);
   5585	return rc;
   5586}
   5587
   5588static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
   5589{
   5590	struct hwrm_vnic_qcaps_output *resp;
   5591	struct hwrm_vnic_qcaps_input *req;
   5592	int rc;
   5593
   5594	bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
   5595	bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
   5596	if (bp->hwrm_spec_code < 0x10600)
   5597		return 0;
   5598
   5599	rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
   5600	if (rc)
   5601		return rc;
   5602
   5603	resp = hwrm_req_hold(bp, req);
   5604	rc = hwrm_req_send(bp, req);
   5605	if (!rc) {
   5606		u32 flags = le32_to_cpu(resp->flags);
   5607
   5608		if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
   5609		    (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
   5610			bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
   5611		if (flags &
   5612		    VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
   5613			bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
   5614
   5615		/* Older P5 fw before EXT_HW_STATS support did not set
   5616		 * VLAN_STRIP_CAP properly.
   5617		 */
   5618		if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
   5619		    (BNXT_CHIP_P5_THOR(bp) &&
   5620		     !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
   5621			bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
   5622		bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
   5623		if (bp->max_tpa_v2) {
   5624			if (BNXT_CHIP_P5_THOR(bp))
   5625				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
   5626			else
   5627				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
   5628		}
   5629	}
   5630	hwrm_req_drop(bp, req);
   5631	return rc;
   5632}
   5633
   5634static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
   5635{
   5636	struct hwrm_ring_grp_alloc_output *resp;
   5637	struct hwrm_ring_grp_alloc_input *req;
   5638	int rc;
   5639	u16 i;
   5640
   5641	if (bp->flags & BNXT_FLAG_CHIP_P5)
   5642		return 0;
   5643
   5644	rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
   5645	if (rc)
   5646		return rc;
   5647
   5648	resp = hwrm_req_hold(bp, req);
   5649	for (i = 0; i < bp->rx_nr_rings; i++) {
   5650		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
   5651
   5652		req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
   5653		req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
   5654		req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
   5655		req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
   5656
   5657		rc = hwrm_req_send(bp, req);
   5658
   5659		if (rc)
   5660			break;
   5661
   5662		bp->grp_info[grp_idx].fw_grp_id =
   5663			le32_to_cpu(resp->ring_group_id);
   5664	}
   5665	hwrm_req_drop(bp, req);
   5666	return rc;
   5667}
   5668
   5669static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
   5670{
   5671	struct hwrm_ring_grp_free_input *req;
   5672	u16 i;
   5673
   5674	if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
   5675		return;
   5676
   5677	if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
   5678		return;
   5679
   5680	hwrm_req_hold(bp, req);
   5681	for (i = 0; i < bp->cp_nr_rings; i++) {
   5682		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
   5683			continue;
   5684		req->ring_group_id =
   5685			cpu_to_le32(bp->grp_info[i].fw_grp_id);
   5686
   5687		hwrm_req_send(bp, req);
   5688		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
   5689	}
   5690	hwrm_req_drop(bp, req);
   5691}
   5692
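        /* Common helper to allocate one firmware ring (TX, RX, AGG, CMPL or
         * NQ) via HWRM_RING_ALLOC.  On success the firmware ring id is saved
         * in ring->fw_ring_id; transport or firmware errors are returned as
         * -EIO.
         */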
   5693static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
   5694				    struct bnxt_ring_struct *ring,
   5695				    u32 ring_type, u32 map_index)
   5696{
   5697	struct hwrm_ring_alloc_output *resp;
   5698	struct hwrm_ring_alloc_input *req;
   5699	struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
   5700	struct bnxt_ring_grp_info *grp_info;
   5701	int rc, err = 0;
   5702	u16 ring_id;
   5703
   5704	rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
   5705	if (rc)
   5706		goto exit;
   5707
   5708	req->enables = 0;
   5709	if (rmem->nr_pages > 1) {
   5710		req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
   5711		/* Page size is in log2 units */
   5712		req->page_size = BNXT_PAGE_SHIFT;
   5713		req->page_tbl_depth = 1;
   5714	} else {
   5715		req->page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
   5716	}
   5717	req->fbo = 0;
   5718	/* Association of ring index with doorbell index and MSIX number */
   5719	req->logical_id = cpu_to_le16(map_index);
   5720
   5721	switch (ring_type) {
   5722	case HWRM_RING_ALLOC_TX: {
   5723		struct bnxt_tx_ring_info *txr;
   5724
   5725		txr = container_of(ring, struct bnxt_tx_ring_info,
   5726				   tx_ring_struct);
   5727		req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
   5728		/* Association of transmit ring with completion ring */
   5729		grp_info = &bp->grp_info[ring->grp_idx];
   5730		req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
   5731		req->length = cpu_to_le32(bp->tx_ring_mask + 1);
   5732		req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
   5733		req->queue_id = cpu_to_le16(ring->queue_id);
   5734		break;
   5735	}
   5736	case HWRM_RING_ALLOC_RX:
   5737		req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
   5738		req->length = cpu_to_le32(bp->rx_ring_mask + 1);
   5739		if (bp->flags & BNXT_FLAG_CHIP_P5) {
   5740			u16 flags = 0;
   5741
   5742			/* Association of rx ring with stats context */
   5743			grp_info = &bp->grp_info[ring->grp_idx];
   5744			req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
   5745			req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
   5746			req->enables |= cpu_to_le32(
   5747				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
   5748			if (NET_IP_ALIGN == 2)
   5749				flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
   5750			req->flags = cpu_to_le16(flags);
   5751		}
   5752		break;
   5753	case HWRM_RING_ALLOC_AGG:
   5754		if (bp->flags & BNXT_FLAG_CHIP_P5) {
   5755			req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
   5756			/* Association of agg ring with rx ring */
   5757			grp_info = &bp->grp_info[ring->grp_idx];
   5758			req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
   5759			req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
   5760			req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
   5761			req->enables |= cpu_to_le32(
   5762				RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
   5763				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
   5764		} else {
   5765			req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
   5766		}
   5767		req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
   5768		break;
   5769	case HWRM_RING_ALLOC_CMPL:
   5770		req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
   5771		req->length = cpu_to_le32(bp->cp_ring_mask + 1);
   5772		if (bp->flags & BNXT_FLAG_CHIP_P5) {
   5773			/* Association of cp ring with nq */
   5774			grp_info = &bp->grp_info[map_index];
   5775			req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
   5776			req->cq_handle = cpu_to_le64(ring->handle);
   5777			req->enables |= cpu_to_le32(
   5778				RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
   5779		} else if (bp->flags & BNXT_FLAG_USING_MSIX) {
   5780			req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
   5781		}
   5782		break;
   5783	case HWRM_RING_ALLOC_NQ:
   5784		req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
   5785		req->length = cpu_to_le32(bp->cp_ring_mask + 1);
   5786		if (bp->flags & BNXT_FLAG_USING_MSIX)
   5787			req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
   5788		break;
   5789	default:
   5790		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
   5791			   ring_type);
   5792		return -1;
   5793	}
   5794
   5795	resp = hwrm_req_hold(bp, req);
   5796	rc = hwrm_req_send(bp, req);
   5797	err = le16_to_cpu(resp->error_code);
   5798	ring_id = le16_to_cpu(resp->ring_id);
   5799	hwrm_req_drop(bp, req);
   5800
   5801exit:
   5802	if (rc || err) {
   5803		netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
   5804			   ring_type, rc, err);
   5805		return -EIO;
   5806	}
   5807	ring->fw_ring_id = ring_id;
   5808	return rc;
   5809}
   5810
   5811static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
   5812{
   5813	int rc;
   5814
   5815	if (BNXT_PF(bp)) {
   5816		struct hwrm_func_cfg_input *req;
   5817
   5818		rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
   5819		if (rc)
   5820			return rc;
   5821
   5822		req->fid = cpu_to_le16(0xffff);
   5823		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
   5824		req->async_event_cr = cpu_to_le16(idx);
   5825		return hwrm_req_send(bp, req);
   5826	} else {
   5827		struct hwrm_func_vf_cfg_input *req;
   5828
   5829		rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
   5830		if (rc)
   5831			return rc;
   5832
   5833		req->enables =
   5834			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
   5835		req->async_event_cr = cpu_to_le16(idx);
   5836		return hwrm_req_send(bp, req);
   5837	}
   5838}
   5839
   5840static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
   5841			u32 map_idx, u32 xid)
   5842{
   5843	if (bp->flags & BNXT_FLAG_CHIP_P5) {
   5844		if (BNXT_PF(bp))
   5845			db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
   5846		else
   5847			db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
   5848		switch (ring_type) {
   5849		case HWRM_RING_ALLOC_TX:
   5850			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
   5851			break;
   5852		case HWRM_RING_ALLOC_RX:
   5853		case HWRM_RING_ALLOC_AGG:
   5854			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
   5855			break;
   5856		case HWRM_RING_ALLOC_CMPL:
   5857			db->db_key64 = DBR_PATH_L2;
   5858			break;
   5859		case HWRM_RING_ALLOC_NQ:
   5860			db->db_key64 = DBR_PATH_L2;
   5861			break;
   5862		}
   5863		db->db_key64 |= (u64)xid << DBR_XID_SFT;
   5864	} else {
   5865		db->doorbell = bp->bar1 + map_idx * 0x80;
   5866		switch (ring_type) {
   5867		case HWRM_RING_ALLOC_TX:
   5868			db->db_key32 = DB_KEY_TX;
   5869			break;
   5870		case HWRM_RING_ALLOC_RX:
   5871		case HWRM_RING_ALLOC_AGG:
   5872			db->db_key32 = DB_KEY_RX;
   5873			break;
   5874		case HWRM_RING_ALLOC_CMPL:
   5875			db->db_key32 = DB_KEY_CP;
   5876			break;
   5877		}
   5878	}
   5879}
   5880
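        /* Allocate all firmware rings in dependency order: NQ/CMPL rings
         * first (each with its IRQ disabled until the initial doorbell is
         * written), then TX, RX and AGG rings, binding each ring to its
         * doorbell and ring group.
         */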
   5881static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
   5882{
   5883	bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
   5884	int i, rc = 0;
   5885	u32 type;
   5886
   5887	if (bp->flags & BNXT_FLAG_CHIP_P5)
   5888		type = HWRM_RING_ALLOC_NQ;
   5889	else
   5890		type = HWRM_RING_ALLOC_CMPL;
   5891	for (i = 0; i < bp->cp_nr_rings; i++) {
   5892		struct bnxt_napi *bnapi = bp->bnapi[i];
   5893		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
   5894		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
   5895		u32 map_idx = ring->map_idx;
   5896		unsigned int vector;
   5897
   5898		vector = bp->irq_tbl[map_idx].vector;
   5899		disable_irq_nosync(vector);
   5900		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
   5901		if (rc) {
   5902			enable_irq(vector);
   5903			goto err_out;
   5904		}
   5905		bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
   5906		bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
   5907		enable_irq(vector);
   5908		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
   5909
   5910		if (!i) {
   5911			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
   5912			if (rc)
   5913				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
   5914		}
   5915	}
   5916
   5917	type = HWRM_RING_ALLOC_TX;
   5918	for (i = 0; i < bp->tx_nr_rings; i++) {
   5919		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
   5920		struct bnxt_ring_struct *ring;
   5921		u32 map_idx;
   5922
   5923		if (bp->flags & BNXT_FLAG_CHIP_P5) {
   5924			struct bnxt_napi *bnapi = txr->bnapi;
   5925			struct bnxt_cp_ring_info *cpr, *cpr2;
   5926			u32 type2 = HWRM_RING_ALLOC_CMPL;
   5927
   5928			cpr = &bnapi->cp_ring;
   5929			cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
   5930			ring = &cpr2->cp_ring_struct;
   5931			ring->handle = BNXT_TX_HDL;
   5932			map_idx = bnapi->index;
   5933			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
   5934			if (rc)
   5935				goto err_out;
   5936			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
   5937				    ring->fw_ring_id);
   5938			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
   5939		}
   5940		ring = &txr->tx_ring_struct;
   5941		map_idx = i;
   5942		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
   5943		if (rc)
   5944			goto err_out;
   5945		bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
   5946	}
   5947
   5948	type = HWRM_RING_ALLOC_RX;
   5949	for (i = 0; i < bp->rx_nr_rings; i++) {
   5950		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
   5951		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
   5952		struct bnxt_napi *bnapi = rxr->bnapi;
   5953		u32 map_idx = bnapi->index;
   5954
   5955		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
   5956		if (rc)
   5957			goto err_out;
   5958		bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
   5959		/* If we have agg rings, post agg buffers first. */
   5960		if (!agg_rings)
   5961			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
   5962		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
   5963		if (bp->flags & BNXT_FLAG_CHIP_P5) {
   5964			struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
   5965			u32 type2 = HWRM_RING_ALLOC_CMPL;
   5966			struct bnxt_cp_ring_info *cpr2;
   5967
   5968			cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
   5969			ring = &cpr2->cp_ring_struct;
   5970			ring->handle = BNXT_RX_HDL;
   5971			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
   5972			if (rc)
   5973				goto err_out;
   5974			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
   5975				    ring->fw_ring_id);
   5976			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
   5977		}
   5978	}
   5979
   5980	if (agg_rings) {
   5981		type = HWRM_RING_ALLOC_AGG;
   5982		for (i = 0; i < bp->rx_nr_rings; i++) {
   5983			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
   5984			struct bnxt_ring_struct *ring =
   5985						&rxr->rx_agg_ring_struct;
   5986			u32 grp_idx = ring->grp_idx;
   5987			u32 map_idx = grp_idx + bp->rx_nr_rings;
   5988
   5989			rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
   5990			if (rc)
   5991				goto err_out;
   5992
   5993			bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
   5994				    ring->fw_ring_id);
   5995			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
   5996			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
   5997			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
   5998		}
   5999	}
   6000err_out:
   6001	return rc;
   6002}
   6003
   6004static int hwrm_ring_free_send_msg(struct bnxt *bp,
   6005				   struct bnxt_ring_struct *ring,
   6006				   u32 ring_type, int cmpl_ring_id)
   6007{
   6008	struct hwrm_ring_free_output *resp;
   6009	struct hwrm_ring_free_input *req;
   6010	u16 error_code = 0;
   6011	int rc;
   6012
   6013	if (BNXT_NO_FW_ACCESS(bp))
   6014		return 0;
   6015
   6016	rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
   6017	if (rc)
   6018		goto exit;
   6019
   6020	req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
   6021	req->ring_type = ring_type;
   6022	req->ring_id = cpu_to_le16(ring->fw_ring_id);
   6023
   6024	resp = hwrm_req_hold(bp, req);
   6025	rc = hwrm_req_send(bp, req);
   6026	error_code = le16_to_cpu(resp->error_code);
   6027	hwrm_req_drop(bp, req);
   6028exit:
   6029	if (rc || error_code) {
   6030		netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
   6031			   ring_type, rc, error_code);
   6032		return -EIO;
   6033	}
   6034	return 0;
   6035}
   6036
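        /* Free all firmware rings: TX and RX rings first, then AGG rings,
         * and finally the completion/NQ rings after interrupts have been
         * disabled.  When close_path is true the ring's completion ring id
         * is passed in the request; otherwise INVALID_HW_RING_ID is used.
         */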
   6037static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
   6038{
   6039	u32 type;
   6040	int i;
   6041
   6042	if (!bp->bnapi)
   6043		return;
   6044
   6045	for (i = 0; i < bp->tx_nr_rings; i++) {
   6046		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
   6047		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
   6048
   6049		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
   6050			u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
   6051
   6052			hwrm_ring_free_send_msg(bp, ring,
   6053						RING_FREE_REQ_RING_TYPE_TX,
   6054						close_path ? cmpl_ring_id :
   6055						INVALID_HW_RING_ID);
   6056			ring->fw_ring_id = INVALID_HW_RING_ID;
   6057		}
   6058	}
   6059
   6060	for (i = 0; i < bp->rx_nr_rings; i++) {
   6061		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
   6062		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
   6063		u32 grp_idx = rxr->bnapi->index;
   6064
   6065		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
   6066			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
   6067
   6068			hwrm_ring_free_send_msg(bp, ring,
   6069						RING_FREE_REQ_RING_TYPE_RX,
   6070						close_path ? cmpl_ring_id :
   6071						INVALID_HW_RING_ID);
   6072			ring->fw_ring_id = INVALID_HW_RING_ID;
   6073			bp->grp_info[grp_idx].rx_fw_ring_id =
   6074				INVALID_HW_RING_ID;
   6075		}
   6076	}
   6077
   6078	if (bp->flags & BNXT_FLAG_CHIP_P5)
   6079		type = RING_FREE_REQ_RING_TYPE_RX_AGG;
   6080	else
   6081		type = RING_FREE_REQ_RING_TYPE_RX;
   6082	for (i = 0; i < bp->rx_nr_rings; i++) {
   6083		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
   6084		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
   6085		u32 grp_idx = rxr->bnapi->index;
   6086
   6087		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
   6088			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
   6089
   6090			hwrm_ring_free_send_msg(bp, ring, type,
   6091						close_path ? cmpl_ring_id :
   6092						INVALID_HW_RING_ID);
   6093			ring->fw_ring_id = INVALID_HW_RING_ID;
   6094			bp->grp_info[grp_idx].agg_fw_ring_id =
   6095				INVALID_HW_RING_ID;
   6096		}
   6097	}
   6098
   6099	/* The completion rings are about to be freed.  After that the
   6100	 * IRQ doorbell will not work anymore.  So we need to disable
   6101	 * IRQ here.
   6102	 */
   6103	bnxt_disable_int_sync(bp);
   6104
   6105	if (bp->flags & BNXT_FLAG_CHIP_P5)
   6106		type = RING_FREE_REQ_RING_TYPE_NQ;
   6107	else
   6108		type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
   6109	for (i = 0; i < bp->cp_nr_rings; i++) {
   6110		struct bnxt_napi *bnapi = bp->bnapi[i];
   6111		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
   6112		struct bnxt_ring_struct *ring;
   6113		int j;
   6114
   6115		for (j = 0; j < 2; j++) {
   6116			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
   6117
   6118			if (cpr2) {
   6119				ring = &cpr2->cp_ring_struct;
   6120				if (ring->fw_ring_id == INVALID_HW_RING_ID)
   6121					continue;
   6122				hwrm_ring_free_send_msg(bp, ring,
   6123					RING_FREE_REQ_RING_TYPE_L2_CMPL,
   6124					INVALID_HW_RING_ID);
   6125				ring->fw_ring_id = INVALID_HW_RING_ID;
   6126			}
   6127		}
   6128		ring = &cpr->cp_ring_struct;
   6129		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
   6130			hwrm_ring_free_send_msg(bp, ring, type,
   6131						INVALID_HW_RING_ID);
   6132			ring->fw_ring_id = INVALID_HW_RING_ID;
   6133			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
   6134		}
   6135	}
   6136}
   6137
   6138static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
   6139			   bool shared);
   6140
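        /* Query firmware (FUNC_QCFG) for the resources currently allocated to
         * this function and cache them in bp->hw_resc.  On P5 chips the RX/TX
         * reservations are trimmed if fewer completion rings were granted.
         */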
   6141static int bnxt_hwrm_get_rings(struct bnxt *bp)
   6142{
   6143	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
   6144	struct hwrm_func_qcfg_output *resp;
   6145	struct hwrm_func_qcfg_input *req;
   6146	int rc;
   6147
   6148	if (bp->hwrm_spec_code < 0x10601)
   6149		return 0;
   6150
   6151	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
   6152	if (rc)
   6153		return rc;
   6154
   6155	req->fid = cpu_to_le16(0xffff);
   6156	resp = hwrm_req_hold(bp, req);
   6157	rc = hwrm_req_send(bp, req);
   6158	if (rc) {
   6159		hwrm_req_drop(bp, req);
   6160		return rc;
   6161	}
   6162
   6163	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
   6164	if (BNXT_NEW_RM(bp)) {
   6165		u16 cp, stats;
   6166
   6167		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
   6168		hw_resc->resv_hw_ring_grps =
   6169			le32_to_cpu(resp->alloc_hw_ring_grps);
   6170		hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
   6171		cp = le16_to_cpu(resp->alloc_cmpl_rings);
   6172		stats = le16_to_cpu(resp->alloc_stat_ctx);
   6173		hw_resc->resv_irqs = cp;
   6174		if (bp->flags & BNXT_FLAG_CHIP_P5) {
   6175			int rx = hw_resc->resv_rx_rings;
   6176			int tx = hw_resc->resv_tx_rings;
   6177
   6178			if (bp->flags & BNXT_FLAG_AGG_RINGS)
   6179				rx >>= 1;
   6180			if (cp < (rx + tx)) {
   6181				bnxt_trim_rings(bp, &rx, &tx, cp, false);
   6182				if (bp->flags & BNXT_FLAG_AGG_RINGS)
   6183					rx <<= 1;
   6184				hw_resc->resv_rx_rings = rx;
   6185				hw_resc->resv_tx_rings = tx;
   6186			}
   6187			hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
   6188			hw_resc->resv_hw_ring_grps = rx;
   6189		}
   6190		hw_resc->resv_cp_rings = cp;
   6191		hw_resc->resv_stat_ctxs = stats;
   6192	}
   6193	hwrm_req_drop(bp, req);
   6194	return 0;
   6195}
   6196
   6197int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
   6198{
   6199	struct hwrm_func_qcfg_output *resp;
   6200	struct hwrm_func_qcfg_input *req;
   6201	int rc;
   6202
   6203	if (bp->hwrm_spec_code < 0x10601)
   6204		return 0;
   6205
   6206	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
   6207	if (rc)
   6208		return rc;
   6209
   6210	req->fid = cpu_to_le16(fid);
   6211	resp = hwrm_req_hold(bp, req);
   6212	rc = hwrm_req_send(bp, req);
   6213	if (!rc)
   6214		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);
   6215
   6216	hwrm_req_drop(bp, req);
   6217	return rc;
   6218}
   6219
   6220static bool bnxt_rfs_supported(struct bnxt *bp);
   6221
   6222static struct hwrm_func_cfg_input *
   6223__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
   6224			     int ring_grps, int cp_rings, int stats, int vnics)
   6225{
   6226	struct hwrm_func_cfg_input *req;
   6227	u32 enables = 0;
   6228
   6229	if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
   6230		return NULL;
   6231
   6232	req->fid = cpu_to_le16(0xffff);
   6233	enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
   6234	req->num_tx_rings = cpu_to_le16(tx_rings);
   6235	if (BNXT_NEW_RM(bp)) {
   6236		enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
   6237		enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
   6238		if (bp->flags & BNXT_FLAG_CHIP_P5) {
   6239			enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
   6240			enables |= tx_rings + ring_grps ?
   6241				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
   6242			enables |= rx_rings ?
   6243				FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
   6244		} else {
   6245			enables |= cp_rings ?
   6246				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
   6247			enables |= ring_grps ?
   6248				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
   6249				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
   6250		}
   6251		enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
   6252
   6253		req->num_rx_rings = cpu_to_le16(rx_rings);
   6254		if (bp->flags & BNXT_FLAG_CHIP_P5) {
   6255			req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
   6256			req->num_msix = cpu_to_le16(cp_rings);
   6257			req->num_rsscos_ctxs =
   6258				cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
   6259		} else {
   6260			req->num_cmpl_rings = cpu_to_le16(cp_rings);
   6261			req->num_hw_ring_grps = cpu_to_le16(ring_grps);
   6262			req->num_rsscos_ctxs = cpu_to_le16(1);
   6263			if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
   6264			    bnxt_rfs_supported(bp))
   6265				req->num_rsscos_ctxs =
   6266					cpu_to_le16(ring_grps + 1);
   6267		}
   6268		req->num_stat_ctxs = cpu_to_le16(stats);
   6269		req->num_vnics = cpu_to_le16(vnics);
   6270	}
   6271	req->enables = cpu_to_le32(enables);
   6272	return req;
   6273}
   6274
   6275static struct hwrm_func_vf_cfg_input *
   6276__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
   6277			     int ring_grps, int cp_rings, int stats, int vnics)
   6278{
   6279	struct hwrm_func_vf_cfg_input *req;
   6280	u32 enables = 0;
   6281
   6282	if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
   6283		return NULL;
   6284
   6285	enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
   6286	enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
   6287			      FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
   6288	enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
   6289	if (bp->flags & BNXT_FLAG_CHIP_P5) {
   6290		enables |= tx_rings + ring_grps ?
   6291			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
   6292	} else {
   6293		enables |= cp_rings ?
   6294			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
   6295		enables |= ring_grps ?
   6296			   FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
   6297	}
   6298	enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
   6299	enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
   6300
   6301	req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
   6302	req->num_tx_rings = cpu_to_le16(tx_rings);
   6303	req->num_rx_rings = cpu_to_le16(rx_rings);
   6304	if (bp->flags & BNXT_FLAG_CHIP_P5) {
   6305		req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
   6306		req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
   6307	} else {
   6308		req->num_cmpl_rings = cpu_to_le16(cp_rings);
   6309		req->num_hw_ring_grps = cpu_to_le16(ring_grps);
   6310		req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
   6311	}
   6312	req->num_stat_ctxs = cpu_to_le16(stats);
   6313	req->num_vnics = cpu_to_le16(vnics);
   6314
   6315	req->enables = cpu_to_le32(enables);
   6316	return req;
   6317}
   6318
   6319static int
   6320bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
   6321			   int ring_grps, int cp_rings, int stats, int vnics)
   6322{
   6323	struct hwrm_func_cfg_input *req;
   6324	int rc;
   6325
   6326	req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
   6327					   cp_rings, stats, vnics);
   6328	if (!req)
   6329		return -ENOMEM;
   6330
   6331	if (!req->enables) {
   6332		hwrm_req_drop(bp, req);
   6333		return 0;
   6334	}
   6335
   6336	rc = hwrm_req_send(bp, req);
   6337	if (rc)
   6338		return rc;
   6339
   6340	if (bp->hwrm_spec_code < 0x10601)
   6341		bp->hw_resc.resv_tx_rings = tx_rings;
   6342
   6343	return bnxt_hwrm_get_rings(bp);
   6344}
   6345
   6346static int
   6347bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
   6348			   int ring_grps, int cp_rings, int stats, int vnics)
   6349{
   6350	struct hwrm_func_vf_cfg_input *req;
   6351	int rc;
   6352
   6353	if (!BNXT_NEW_RM(bp)) {
   6354		bp->hw_resc.resv_tx_rings = tx_rings;
   6355		return 0;
   6356	}
   6357
   6358	req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
   6359					   cp_rings, stats, vnics);
   6360	if (!req)
   6361		return -ENOMEM;
   6362
   6363	rc = hwrm_req_send(bp, req);
   6364	if (rc)
   6365		return rc;
   6366
   6367	return bnxt_hwrm_get_rings(bp);
   6368}
   6369
   6370static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
   6371				   int cp, int stat, int vnic)
   6372{
   6373	if (BNXT_PF(bp))
   6374		return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
   6375						  vnic);
   6376	else
   6377		return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
   6378						  vnic);
   6379}
   6380
   6381int bnxt_nq_rings_in_use(struct bnxt *bp)
   6382{
   6383	int cp = bp->cp_nr_rings;
   6384	int ulp_msix, ulp_base;
   6385
   6386	ulp_msix = bnxt_get_ulp_msix_num(bp);
   6387	if (ulp_msix) {
   6388		ulp_base = bnxt_get_ulp_msix_base(bp);
   6389		cp += ulp_msix;
   6390		if ((ulp_base + ulp_msix) > cp)
   6391			cp = ulp_base + ulp_msix;
   6392	}
   6393	return cp;
   6394}
   6395
   6396static int bnxt_cp_rings_in_use(struct bnxt *bp)
   6397{
   6398	int cp;
   6399
   6400	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
   6401		return bnxt_nq_rings_in_use(bp);
   6402
   6403	cp = bp->tx_nr_rings + bp->rx_nr_rings;
   6404	return cp;
   6405}
   6406
   6407static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
   6408{
   6409	int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
   6410	int cp = bp->cp_nr_rings;
   6411
   6412	if (!ulp_stat)
   6413		return cp;
   6414
   6415	if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
   6416		return bnxt_get_ulp_msix_base(bp) + ulp_stat;
   6417
   6418	return cp + ulp_stat;
   6419}
   6420
    6421/* Check if a default RSS map needs to be set up.  This function is only
    6422 * used on older firmware that does not require reserving RX rings.
    6423 */
   6424static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
   6425{
   6426	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
   6427
   6428	/* The RSS map is valid for RX rings set to resv_rx_rings */
   6429	if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
   6430		hw_resc->resv_rx_rings = bp->rx_nr_rings;
   6431		if (!netif_is_rxfh_configured(bp->dev))
   6432			bnxt_set_dflt_rss_indir_tbl(bp);
   6433	}
   6434}
   6435
   6436static bool bnxt_need_reserve_rings(struct bnxt *bp)
   6437{
   6438	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
   6439	int cp = bnxt_cp_rings_in_use(bp);
   6440	int nq = bnxt_nq_rings_in_use(bp);
   6441	int rx = bp->rx_nr_rings, stat;
   6442	int vnic = 1, grp = rx;
   6443
   6444	if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
   6445	    bp->hwrm_spec_code >= 0x10601)
   6446		return true;
   6447
    6448	/* Old firmware does not need RX ring reservations, but we still
    6449	 * need to set up a default RSS map when needed.  With new firmware
    6450	 * we go through RX ring reservations first and then set up the
    6451	 * RSS map for the successfully reserved RX rings when needed.
    6452	 */
   6453	if (!BNXT_NEW_RM(bp)) {
   6454		bnxt_check_rss_tbl_no_rmgr(bp);
   6455		return false;
   6456	}
   6457	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
   6458		vnic = rx + 1;
   6459	if (bp->flags & BNXT_FLAG_AGG_RINGS)
   6460		rx <<= 1;
   6461	stat = bnxt_get_func_stat_ctxs(bp);
   6462	if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
   6463	    hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
   6464	    (hw_resc->resv_hw_ring_grps != grp &&
   6465	     !(bp->flags & BNXT_FLAG_CHIP_P5)))
   6466		return true;
   6467	if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
   6468	    hw_resc->resv_irqs != nq)
   6469		return true;
   6470	return false;
   6471}
   6472
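        /* Reserve rings with firmware and shrink the driver's TX/RX/CP counts
         * to what was actually granted.  If aggregation rings are enabled but
         * fewer than two RX rings were reserved, aggregation and LRO are
         * turned off (or -ENOMEM is returned if the device is running).
         */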
   6473static int __bnxt_reserve_rings(struct bnxt *bp)
   6474{
   6475	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
   6476	int cp = bnxt_nq_rings_in_use(bp);
   6477	int tx = bp->tx_nr_rings;
   6478	int rx = bp->rx_nr_rings;
   6479	int grp, rx_rings, rc;
   6480	int vnic = 1, stat;
   6481	bool sh = false;
   6482
   6483	if (!bnxt_need_reserve_rings(bp))
   6484		return 0;
   6485
   6486	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
   6487		sh = true;
   6488	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
   6489		vnic = rx + 1;
   6490	if (bp->flags & BNXT_FLAG_AGG_RINGS)
   6491		rx <<= 1;
   6492	grp = bp->rx_nr_rings;
   6493	stat = bnxt_get_func_stat_ctxs(bp);
   6494
   6495	rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
   6496	if (rc)
   6497		return rc;
   6498
   6499	tx = hw_resc->resv_tx_rings;
   6500	if (BNXT_NEW_RM(bp)) {
   6501		rx = hw_resc->resv_rx_rings;
   6502		cp = hw_resc->resv_irqs;
   6503		grp = hw_resc->resv_hw_ring_grps;
   6504		vnic = hw_resc->resv_vnics;
   6505		stat = hw_resc->resv_stat_ctxs;
   6506	}
   6507
   6508	rx_rings = rx;
   6509	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
   6510		if (rx >= 2) {
   6511			rx_rings = rx >> 1;
   6512		} else {
   6513			if (netif_running(bp->dev))
   6514				return -ENOMEM;
   6515
   6516			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
   6517			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
   6518			bp->dev->hw_features &= ~NETIF_F_LRO;
   6519			bp->dev->features &= ~NETIF_F_LRO;
   6520			bnxt_set_ring_params(bp);
   6521		}
   6522	}
   6523	rx_rings = min_t(int, rx_rings, grp);
   6524	cp = min_t(int, cp, bp->cp_nr_rings);
   6525	if (stat > bnxt_get_ulp_stat_ctxs(bp))
   6526		stat -= bnxt_get_ulp_stat_ctxs(bp);
   6527	cp = min_t(int, cp, stat);
   6528	rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
   6529	if (bp->flags & BNXT_FLAG_AGG_RINGS)
   6530		rx = rx_rings << 1;
   6531	cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
   6532	bp->tx_nr_rings = tx;
   6533
   6534	/* If we cannot reserve all the RX rings, reset the RSS map only
   6535	 * if absolutely necessary
   6536	 */
   6537	if (rx_rings != bp->rx_nr_rings) {
   6538		netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
   6539			    rx_rings, bp->rx_nr_rings);
   6540		if (netif_is_rxfh_configured(bp->dev) &&
   6541		    (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
   6542		     bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
   6543		     bnxt_get_max_rss_ring(bp) >= rx_rings)) {
   6544			netdev_warn(bp->dev, "RSS table entries reverting to default\n");
   6545			bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
   6546		}
   6547	}
   6548	bp->rx_nr_rings = rx_rings;
   6549	bp->cp_nr_rings = cp;
   6550
   6551	if (!tx || !rx || !cp || !grp || !vnic || !stat)
   6552		return -ENOMEM;
   6553
   6554	if (!netif_is_rxfh_configured(bp->dev))
   6555		bnxt_set_dflt_rss_indir_tbl(bp);
   6556
   6557	return rc;
   6558}
   6559
   6560static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
   6561				    int ring_grps, int cp_rings, int stats,
   6562				    int vnics)
   6563{
   6564	struct hwrm_func_vf_cfg_input *req;
   6565	u32 flags;
   6566
   6567	if (!BNXT_NEW_RM(bp))
   6568		return 0;
   6569
   6570	req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
   6571					   cp_rings, stats, vnics);
   6572	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
   6573		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
   6574		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
   6575		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
   6576		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
   6577		FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
   6578	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
   6579		flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
   6580
   6581	req->flags = cpu_to_le32(flags);
   6582	return hwrm_req_send_silent(bp, req);
   6583}
   6584
   6585static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
   6586				    int ring_grps, int cp_rings, int stats,
   6587				    int vnics)
   6588{
   6589	struct hwrm_func_cfg_input *req;
   6590	u32 flags;
   6591
   6592	req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
   6593					   cp_rings, stats, vnics);
   6594	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
   6595	if (BNXT_NEW_RM(bp)) {
   6596		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
   6597			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
   6598			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
   6599			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
   6600		if (bp->flags & BNXT_FLAG_CHIP_P5)
   6601			flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
   6602				 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
   6603		else
   6604			flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
   6605	}
   6606
   6607	req->flags = cpu_to_le32(flags);
   6608	return hwrm_req_send_silent(bp, req);
   6609}
   6610
   6611static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
   6612				 int ring_grps, int cp_rings, int stats,
   6613				 int vnics)
   6614{
   6615	if (bp->hwrm_spec_code < 0x10801)
   6616		return 0;
   6617
   6618	if (BNXT_PF(bp))
   6619		return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
   6620						ring_grps, cp_rings, stats,
   6621						vnics);
   6622
   6623	return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
   6624					cp_rings, stats, vnics);
   6625}
   6626
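        /* Fill bp->coal_cap with conservative legacy defaults, then override
         * them with the limits reported by RING_AGGINT_QCAPS when the
         * firmware spec (>= 0x10902) supports the query.
         */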
   6627static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
   6628{
   6629	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
   6630	struct hwrm_ring_aggint_qcaps_output *resp;
   6631	struct hwrm_ring_aggint_qcaps_input *req;
   6632	int rc;
   6633
   6634	coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
   6635	coal_cap->num_cmpl_dma_aggr_max = 63;
   6636	coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
   6637	coal_cap->cmpl_aggr_dma_tmr_max = 65535;
   6638	coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
   6639	coal_cap->int_lat_tmr_min_max = 65535;
   6640	coal_cap->int_lat_tmr_max_max = 65535;
   6641	coal_cap->num_cmpl_aggr_int_max = 65535;
   6642	coal_cap->timer_units = 80;
   6643
   6644	if (bp->hwrm_spec_code < 0x10902)
   6645		return;
   6646
   6647	if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
   6648		return;
   6649
   6650	resp = hwrm_req_hold(bp, req);
   6651	rc = hwrm_req_send_silent(bp, req);
   6652	if (!rc) {
   6653		coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
   6654		coal_cap->nq_params = le32_to_cpu(resp->nq_params);
   6655		coal_cap->num_cmpl_dma_aggr_max =
   6656			le16_to_cpu(resp->num_cmpl_dma_aggr_max);
   6657		coal_cap->num_cmpl_dma_aggr_during_int_max =
   6658			le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
   6659		coal_cap->cmpl_aggr_dma_tmr_max =
   6660			le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
   6661		coal_cap->cmpl_aggr_dma_tmr_during_int_max =
   6662			le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
   6663		coal_cap->int_lat_tmr_min_max =
   6664			le16_to_cpu(resp->int_lat_tmr_min_max);
   6665		coal_cap->int_lat_tmr_max_max =
   6666			le16_to_cpu(resp->int_lat_tmr_max_max);
   6667		coal_cap->num_cmpl_aggr_int_max =
   6668			le16_to_cpu(resp->num_cmpl_aggr_int_max);
   6669		coal_cap->timer_units = le16_to_cpu(resp->timer_units);
   6670	}
   6671	hwrm_req_drop(bp, req);
   6672}
   6673
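        /* Convert a microsecond value to firmware coalescing timer ticks.
         * For example, with the default timer_units of 80 (nanoseconds per
         * tick), 100 usec maps to 100 * 1000 / 80 == 1250 ticks.
         */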
   6674static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
   6675{
   6676	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
   6677
   6678	return usec * 1000 / coal_cap->timer_units;
   6679}
   6680
   6681static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
   6682	struct bnxt_coal *hw_coal,
   6683	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
   6684{
   6685	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
   6686	u16 val, tmr, max, flags = hw_coal->flags;
   6687	u32 cmpl_params = coal_cap->cmpl_params;
   6688
   6689	max = hw_coal->bufs_per_record * 128;
   6690	if (hw_coal->budget)
   6691		max = hw_coal->bufs_per_record * hw_coal->budget;
   6692	max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
   6693
   6694	val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
   6695	req->num_cmpl_aggr_int = cpu_to_le16(val);
   6696
   6697	val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
   6698	req->num_cmpl_dma_aggr = cpu_to_le16(val);
   6699
   6700	val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
   6701		      coal_cap->num_cmpl_dma_aggr_during_int_max);
   6702	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
   6703
   6704	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
   6705	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
   6706	req->int_lat_tmr_max = cpu_to_le16(tmr);
   6707
   6708	/* min timer set to 1/2 of interrupt timer */
   6709	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
   6710		val = tmr / 2;
   6711		val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
   6712		req->int_lat_tmr_min = cpu_to_le16(val);
   6713		req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
   6714	}
   6715
   6716	/* buf timer set to 1/4 of interrupt timer */
   6717	val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
   6718	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
   6719
   6720	if (cmpl_params &
   6721	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
   6722		tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
   6723		val = clamp_t(u16, tmr, 1,
   6724			      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
   6725		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
   6726		req->enables |=
   6727			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
   6728	}
   6729
   6730	if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
   6731	    hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
   6732		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
   6733	req->flags = cpu_to_le16(flags);
   6734	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
   6735}
   6736
   6737static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
   6738				   struct bnxt_coal *hw_coal)
   6739{
   6740	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
   6741	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
   6742	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
   6743	u32 nq_params = coal_cap->nq_params;
   6744	u16 tmr;
   6745	int rc;
   6746
   6747	if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
   6748		return 0;
   6749
   6750	rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
   6751	if (rc)
   6752		return rc;
   6753
   6754	req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
   6755	req->flags =
   6756		cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
   6757
   6758	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
   6759	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
   6760	req->int_lat_tmr_min = cpu_to_le16(tmr);
   6761	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
   6762	return hwrm_req_send(bp, req);
   6763}
   6764
   6765int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
   6766{
   6767	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
   6768	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
   6769	struct bnxt_coal coal;
   6770	int rc;
   6771
    6772	/* Tick values in microseconds.
   6773	 * 1 coal_buf x bufs_per_record = 1 completion record.
   6774	 */
   6775	memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
   6776
   6777	coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
   6778	coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
   6779
   6780	if (!bnapi->rx_ring)
   6781		return -ENODEV;
   6782
   6783	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
   6784	if (rc)
   6785		return rc;
   6786
   6787	bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
   6788
   6789	req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
   6790
   6791	return hwrm_req_send(bp, req_rx);
   6792}
   6793
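/* bnxt_hwrm_set_coal() below builds one aggint-params request seeded
 * from bp->rx_coal and one from bp->tx_coal, then walks every
 * completion ring: the RX request is sent for NAPIs that own an RX
 * ring and the TX request for TX-only NAPIs.  On P5 chips, where the
 * code treats the RX and TX completion rings of a combined NAPI
 * separately under one NQ, the TX completion ring gets its own send
 * and the NQ itself is then programmed via __bnxt_hwrm_set_coal_nq().
 */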
   6794int bnxt_hwrm_set_coal(struct bnxt *bp)
   6795{
   6796	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
   6797							   *req;
   6798	int i, rc;
   6799
   6800	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
   6801	if (rc)
   6802		return rc;
   6803
   6804	rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
   6805	if (rc) {
   6806		hwrm_req_drop(bp, req_rx);
   6807		return rc;
   6808	}
   6809
   6810	bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
   6811	bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
   6812
   6813	hwrm_req_hold(bp, req_rx);
   6814	hwrm_req_hold(bp, req_tx);
   6815	for (i = 0; i < bp->cp_nr_rings; i++) {
   6816		struct bnxt_napi *bnapi = bp->bnapi[i];
   6817		struct bnxt_coal *hw_coal;
   6818		u16 ring_id;
   6819
   6820		req = req_rx;
   6821		if (!bnapi->rx_ring) {
   6822			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
   6823			req = req_tx;
   6824		} else {
   6825			ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
   6826		}
   6827		req->ring_id = cpu_to_le16(ring_id);
   6828
   6829		rc = hwrm_req_send(bp, req);
   6830		if (rc)
   6831			break;
   6832
   6833		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
   6834			continue;
   6835
   6836		if (bnapi->rx_ring && bnapi->tx_ring) {
   6837			req = req_tx;
   6838			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
   6839			req->ring_id = cpu_to_le16(ring_id);
   6840			rc = hwrm_req_send(bp, req);
   6841			if (rc)
   6842				break;
   6843		}
   6844		if (bnapi->rx_ring)
   6845			hw_coal = &bp->rx_coal;
   6846		else
   6847			hw_coal = &bp->tx_coal;
   6848		__bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
   6849	}
   6850	hwrm_req_drop(bp, req_rx);
   6851	hwrm_req_drop(bp, req_tx);
   6852	return rc;
   6853}
   6854
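/* bnxt_hwrm_stat_ctx_free() releases every per-ring stats context.  For
 * firmware with major version <= 20 it additionally issues
 * HWRM_STAT_CTX_CLR_STATS for each context before freeing it; this
 * reads like a workaround for older firmware not clearing the counters
 * on free (an assumption -- the code only shows the version gate).
 */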
   6855static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
   6856{
   6857	struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
   6858	struct hwrm_stat_ctx_free_input *req;
   6859	int i;
   6860
   6861	if (!bp->bnapi)
   6862		return;
   6863
   6864	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
   6865		return;
   6866
   6867	if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
   6868		return;
   6869	if (BNXT_FW_MAJ(bp) <= 20) {
   6870		if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
   6871			hwrm_req_drop(bp, req);
   6872			return;
   6873		}
   6874		hwrm_req_hold(bp, req0);
   6875	}
   6876	hwrm_req_hold(bp, req);
   6877	for (i = 0; i < bp->cp_nr_rings; i++) {
   6878		struct bnxt_napi *bnapi = bp->bnapi[i];
   6879		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
   6880
   6881		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
   6882			req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
   6883			if (req0) {
   6884				req0->stat_ctx_id = req->stat_ctx_id;
   6885				hwrm_req_send(bp, req0);
   6886			}
   6887			hwrm_req_send(bp, req);
   6888
   6889			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
   6890		}
   6891	}
   6892	hwrm_req_drop(bp, req);
   6893	if (req0)
   6894		hwrm_req_drop(bp, req0);
   6895}
   6896
   6897static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
   6898{
   6899	struct hwrm_stat_ctx_alloc_output *resp;
   6900	struct hwrm_stat_ctx_alloc_input *req;
   6901	int rc, i;
   6902
   6903	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
   6904		return 0;
   6905
   6906	rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
   6907	if (rc)
   6908		return rc;
   6909
   6910	req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
   6911	req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
   6912
   6913	resp = hwrm_req_hold(bp, req);
   6914	for (i = 0; i < bp->cp_nr_rings; i++) {
   6915		struct bnxt_napi *bnapi = bp->bnapi[i];
   6916		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
   6917
   6918		req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
   6919
   6920		rc = hwrm_req_send(bp, req);
   6921		if (rc)
   6922			break;
   6923
   6924		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
   6925
   6926		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
   6927	}
   6928	hwrm_req_drop(bp, req);
   6929	return rc;
   6930}
   6931
   6932static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
   6933{
   6934	struct hwrm_func_qcfg_output *resp;
   6935	struct hwrm_func_qcfg_input *req;
   6936	u32 min_db_offset = 0;
   6937	u16 flags;
   6938	int rc;
   6939
   6940	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
   6941	if (rc)
   6942		return rc;
   6943
   6944	req->fid = cpu_to_le16(0xffff);
   6945	resp = hwrm_req_hold(bp, req);
   6946	rc = hwrm_req_send(bp, req);
   6947	if (rc)
   6948		goto func_qcfg_exit;
   6949
   6950#ifdef CONFIG_BNXT_SRIOV
   6951	if (BNXT_VF(bp)) {
   6952		struct bnxt_vf_info *vf = &bp->vf;
   6953
   6954		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
   6955	} else {
   6956		bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
   6957	}
   6958#endif
   6959	flags = le16_to_cpu(resp->flags);
   6960	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
   6961		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
   6962		bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
   6963		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
   6964			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
   6965	}
   6966	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
   6967		bp->flags |= BNXT_FLAG_MULTI_HOST;
   6968	if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
   6969		bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
   6970
   6971	switch (resp->port_partition_type) {
   6972	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
   6973	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
   6974	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
   6975		bp->port_partition_type = resp->port_partition_type;
   6976		break;
   6977	}
   6978	if (bp->hwrm_spec_code < 0x10707 ||
   6979	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
   6980		bp->br_mode = BRIDGE_MODE_VEB;
   6981	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
   6982		bp->br_mode = BRIDGE_MODE_VEPA;
   6983	else
   6984		bp->br_mode = BRIDGE_MODE_UNDEF;
   6985
   6986	bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
   6987	if (!bp->max_mtu)
   6988		bp->max_mtu = BNXT_MAX_MTU;
   6989
   6990	if (bp->db_size)
   6991		goto func_qcfg_exit;
   6992
   6993	if (bp->flags & BNXT_FLAG_CHIP_P5) {
   6994		if (BNXT_PF(bp))
   6995			min_db_offset = DB_PF_OFFSET_P5;
   6996		else
   6997			min_db_offset = DB_VF_OFFSET_P5;
   6998	}
   6999	bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
   7000				 1024);
   7001	if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
   7002	    bp->db_size <= min_db_offset)
   7003		bp->db_size = pci_resource_len(bp->pdev, 2);
   7004
   7005func_qcfg_exit:
   7006	hwrm_req_drop(bp, req);
   7007	return rc;
   7008}
   7009
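/* bnxt_init_ctx_initializer() captures, per backing-store context type,
 * the initializer value reported by firmware and the offset at which it
 * applies (the response reports offsets in 4-byte units, hence the
 * "* 4" below).  The offsets are read by walking consecutive response
 * fields starting at qp_init_offset; the walk is re-seeded at
 * stat_init_offset for the STAT entry, which suggests that field is not
 * contiguous with the earlier ones in the response layout.
 */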
   7010static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
   7011			struct hwrm_func_backing_store_qcaps_output *resp)
   7012{
   7013	struct bnxt_mem_init *mem_init;
   7014	u16 init_mask;
   7015	u8 init_val;
   7016	u8 *offset;
   7017	int i;
   7018
   7019	init_val = resp->ctx_kind_initializer;
   7020	init_mask = le16_to_cpu(resp->ctx_init_mask);
   7021	offset = &resp->qp_init_offset;
   7022	mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
   7023	for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
   7024		mem_init->init_val = init_val;
   7025		mem_init->offset = BNXT_MEM_INVALID_OFFSET;
   7026		if (!init_mask)
   7027			continue;
   7028		if (i == BNXT_CTX_MEM_INIT_STAT)
   7029			offset = &resp->stat_init_offset;
   7030		if (init_mask & (1 << i))
   7031			mem_init->offset = *offset * 4;
   7032		else
   7033			mem_init->init_val = 0;
   7034	}
   7035	ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
   7036	ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
   7037	ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
   7038	ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
   7039	ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
   7040	ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
   7041}
   7042
   7043static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
   7044{
   7045	struct hwrm_func_backing_store_qcaps_output *resp;
   7046	struct hwrm_func_backing_store_qcaps_input *req;
   7047	int rc;
   7048
   7049	if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
   7050		return 0;
   7051
   7052	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
   7053	if (rc)
   7054		return rc;
   7055
   7056	resp = hwrm_req_hold(bp, req);
   7057	rc = hwrm_req_send_silent(bp, req);
   7058	if (!rc) {
   7059		struct bnxt_ctx_pg_info *ctx_pg;
   7060		struct bnxt_ctx_mem_info *ctx;
   7061		int i, tqm_rings;
   7062
   7063		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
   7064		if (!ctx) {
   7065			rc = -ENOMEM;
   7066			goto ctx_err;
   7067		}
   7068		ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
   7069		ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
   7070		ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
   7071		ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
   7072		ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
   7073		ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
   7074		ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
   7075		ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
   7076		ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
   7077		ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
   7078		ctx->vnic_max_vnic_entries =
   7079			le16_to_cpu(resp->vnic_max_vnic_entries);
   7080		ctx->vnic_max_ring_table_entries =
   7081			le16_to_cpu(resp->vnic_max_ring_table_entries);
   7082		ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
   7083		ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
   7084		ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
   7085		ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
   7086		ctx->tqm_min_entries_per_ring =
   7087			le32_to_cpu(resp->tqm_min_entries_per_ring);
   7088		ctx->tqm_max_entries_per_ring =
   7089			le32_to_cpu(resp->tqm_max_entries_per_ring);
   7090		ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
   7091		if (!ctx->tqm_entries_multiple)
   7092			ctx->tqm_entries_multiple = 1;
   7093		ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
   7094		ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
   7095		ctx->mrav_num_entries_units =
   7096			le16_to_cpu(resp->mrav_num_entries_units);
   7097		ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
   7098		ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
   7099
   7100		bnxt_init_ctx_initializer(ctx, resp);
   7101
   7102		ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
   7103		if (!ctx->tqm_fp_rings_count)
   7104			ctx->tqm_fp_rings_count = bp->max_q;
   7105		else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
   7106			ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
   7107
   7108		tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
   7109		ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
   7110		if (!ctx_pg) {
   7111			kfree(ctx);
   7112			rc = -ENOMEM;
   7113			goto ctx_err;
   7114		}
   7115		for (i = 0; i < tqm_rings; i++, ctx_pg++)
   7116			ctx->tqm_mem[i] = ctx_pg;
   7117		bp->ctx = ctx;
   7118	} else {
   7119		rc = 0;
   7120	}
   7121ctx_err:
   7122	hwrm_req_drop(bp, req);
   7123	return rc;
   7124}
   7125
   7126static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
   7127				  __le64 *pg_dir)
   7128{
   7129	if (!rmem->nr_pages)
   7130		return;
   7131
   7132	BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
   7133	if (rmem->depth >= 1) {
   7134		if (rmem->depth == 2)
   7135			*pg_attr |= 2;
   7136		else
   7137			*pg_attr |= 1;
   7138		*pg_dir = cpu_to_le64(rmem->pg_tbl_map);
   7139	} else {
   7140		*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
   7141	}
   7142}
   7143
   7144#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES			\
   7145	(FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |		\
   7146	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |		\
   7147	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |		\
   7148	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |		\
   7149	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
   7150
   7151static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
   7152{
   7153	struct hwrm_func_backing_store_cfg_input *req;
   7154	struct bnxt_ctx_mem_info *ctx = bp->ctx;
   7155	struct bnxt_ctx_pg_info *ctx_pg;
   7156	void **__req = (void **)&req;
   7157	u32 req_len = sizeof(*req);
   7158	__le32 *num_entries;
   7159	__le64 *pg_dir;
   7160	u32 flags = 0;
   7161	u8 *pg_attr;
   7162	u32 ena;
   7163	int rc;
   7164	int i;
   7165
   7166	if (!ctx)
   7167		return 0;
   7168
   7169	if (req_len > bp->hwrm_max_ext_req_len)
   7170		req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
   7171	rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
   7172	if (rc)
   7173		return rc;
   7174
   7175	req->enables = cpu_to_le32(enables);
   7176	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
   7177		ctx_pg = &ctx->qp_mem;
   7178		req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
   7179		req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
   7180		req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
   7181		req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
   7182		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
   7183				      &req->qpc_pg_size_qpc_lvl,
   7184				      &req->qpc_page_dir);
   7185	}
   7186	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
   7187		ctx_pg = &ctx->srq_mem;
   7188		req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
   7189		req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
   7190		req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
   7191		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
   7192				      &req->srq_pg_size_srq_lvl,
   7193				      &req->srq_page_dir);
   7194	}
   7195	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
   7196		ctx_pg = &ctx->cq_mem;
   7197		req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
   7198		req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
   7199		req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
   7200		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
   7201				      &req->cq_pg_size_cq_lvl,
   7202				      &req->cq_page_dir);
   7203	}
   7204	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
   7205		ctx_pg = &ctx->vnic_mem;
   7206		req->vnic_num_vnic_entries =
   7207			cpu_to_le16(ctx->vnic_max_vnic_entries);
   7208		req->vnic_num_ring_table_entries =
   7209			cpu_to_le16(ctx->vnic_max_ring_table_entries);
   7210		req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
   7211		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
   7212				      &req->vnic_pg_size_vnic_lvl,
   7213				      &req->vnic_page_dir);
   7214	}
   7215	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
   7216		ctx_pg = &ctx->stat_mem;
   7217		req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
   7218		req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
   7219		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
   7220				      &req->stat_pg_size_stat_lvl,
   7221				      &req->stat_page_dir);
   7222	}
   7223	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
   7224		ctx_pg = &ctx->mrav_mem;
   7225		req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
   7226		if (ctx->mrav_num_entries_units)
   7227			flags |=
   7228			FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
   7229		req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
   7230		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
   7231				      &req->mrav_pg_size_mrav_lvl,
   7232				      &req->mrav_page_dir);
   7233	}
   7234	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
   7235		ctx_pg = &ctx->tim_mem;
   7236		req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
   7237		req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
   7238		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
   7239				      &req->tim_pg_size_tim_lvl,
   7240				      &req->tim_page_dir);
   7241	}
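	/* The loop below programs the slow-path TQM ring followed by the
	 * fast-path TQM rings.  It relies on the per-ring num_entries,
	 * pg_size/lvl and page_dir fields being laid out consecutively in
	 * the request, so one set of walking pointers covers all
	 * BNXT_MAX_TQM_RINGS while the enable bit is shifted left once per
	 * ring starting from FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP.
	 */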
   7242	for (i = 0, num_entries = &req->tqm_sp_num_entries,
   7243	     pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
   7244	     pg_dir = &req->tqm_sp_page_dir,
   7245	     ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
   7246	     i < BNXT_MAX_TQM_RINGS;
   7247	     i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
   7248		if (!(enables & ena))
   7249			continue;
   7250
   7251		req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
   7252		ctx_pg = ctx->tqm_mem[i];
   7253		*num_entries = cpu_to_le32(ctx_pg->entries);
   7254		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
   7255	}
   7256	req->flags = cpu_to_le32(flags);
   7257	return hwrm_req_send(bp, req);
   7258}
   7259
   7260static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
   7261				  struct bnxt_ctx_pg_info *ctx_pg)
   7262{
   7263	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
   7264
   7265	rmem->page_size = BNXT_PAGE_SIZE;
   7266	rmem->pg_arr = ctx_pg->ctx_pg_arr;
   7267	rmem->dma_arr = ctx_pg->ctx_dma_arr;
   7268	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
   7269	if (rmem->depth >= 1)
   7270		rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
   7271	return bnxt_alloc_ring(bp, rmem);
   7272}
   7273
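/* bnxt_alloc_ctx_pg_tbls() sizes a context backing-store region in
 * BNXT_PAGE_SIZE pages.  Regions of at most MAX_CTX_PAGES pages with
 * depth <= 1 use a single bnxt_alloc_ring() allocation (at most one
 * level of indirection); larger regions build a two-level structure in
 * which ctx_pg holds a directory of up to MAX_CTX_PAGES sub-tables,
 * each sub-table covering up to MAX_CTX_PAGES data pages, with only
 * the last sub-table trimmed to the remainder.
 */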
   7274static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
   7275				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
   7276				  u8 depth, struct bnxt_mem_init *mem_init)
   7277{
   7278	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
   7279	int rc;
   7280
   7281	if (!mem_size)
   7282		return -EINVAL;
   7283
   7284	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
   7285	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
   7286		ctx_pg->nr_pages = 0;
   7287		return -EINVAL;
   7288	}
   7289	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
   7290		int nr_tbls, i;
   7291
   7292		rmem->depth = 2;
   7293		ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
   7294					     GFP_KERNEL);
   7295		if (!ctx_pg->ctx_pg_tbl)
   7296			return -ENOMEM;
   7297		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
   7298		rmem->nr_pages = nr_tbls;
   7299		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
   7300		if (rc)
   7301			return rc;
   7302		for (i = 0; i < nr_tbls; i++) {
   7303			struct bnxt_ctx_pg_info *pg_tbl;
   7304
   7305			pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
   7306			if (!pg_tbl)
   7307				return -ENOMEM;
   7308			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
   7309			rmem = &pg_tbl->ring_mem;
   7310			rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
   7311			rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
   7312			rmem->depth = 1;
   7313			rmem->nr_pages = MAX_CTX_PAGES;
   7314			rmem->mem_init = mem_init;
   7315			if (i == (nr_tbls - 1)) {
   7316				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
   7317
   7318				if (rem)
   7319					rmem->nr_pages = rem;
   7320			}
   7321			rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
   7322			if (rc)
   7323				break;
   7324		}
   7325	} else {
   7326		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
   7327		if (rmem->nr_pages > 1 || depth)
   7328			rmem->depth = 1;
   7329		rmem->mem_init = mem_init;
   7330		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
   7331	}
   7332	return rc;
   7333}
   7334
   7335static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
   7336				  struct bnxt_ctx_pg_info *ctx_pg)
   7337{
   7338	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
   7339
   7340	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
   7341	    ctx_pg->ctx_pg_tbl) {
   7342		int i, nr_tbls = rmem->nr_pages;
   7343
   7344		for (i = 0; i < nr_tbls; i++) {
   7345			struct bnxt_ctx_pg_info *pg_tbl;
   7346			struct bnxt_ring_mem_info *rmem2;
   7347
   7348			pg_tbl = ctx_pg->ctx_pg_tbl[i];
   7349			if (!pg_tbl)
   7350				continue;
   7351			rmem2 = &pg_tbl->ring_mem;
   7352			bnxt_free_ring(bp, rmem2);
   7353			ctx_pg->ctx_pg_arr[i] = NULL;
   7354			kfree(pg_tbl);
   7355			ctx_pg->ctx_pg_tbl[i] = NULL;
   7356		}
   7357		kfree(ctx_pg->ctx_pg_tbl);
   7358		ctx_pg->ctx_pg_tbl = NULL;
   7359	}
   7360	bnxt_free_ring(bp, rmem);
   7361	ctx_pg->nr_pages = 0;
   7362}
   7363
   7364void bnxt_free_ctx_mem(struct bnxt *bp)
   7365{
   7366	struct bnxt_ctx_mem_info *ctx = bp->ctx;
   7367	int i;
   7368
   7369	if (!ctx)
   7370		return;
   7371
   7372	if (ctx->tqm_mem[0]) {
   7373		for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
   7374			bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
   7375		kfree(ctx->tqm_mem[0]);
   7376		ctx->tqm_mem[0] = NULL;
   7377	}
   7378
   7379	bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
   7380	bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
   7381	bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
   7382	bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
   7383	bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
   7384	bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
   7385	bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
   7386	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
   7387}
   7388
   7389static int bnxt_alloc_ctx_mem(struct bnxt *bp)
   7390{
   7391	struct bnxt_ctx_pg_info *ctx_pg;
   7392	struct bnxt_ctx_mem_info *ctx;
   7393	struct bnxt_mem_init *init;
   7394	u32 mem_size, ena, entries;
   7395	u32 entries_sp, min;
   7396	u32 num_mr, num_ah;
   7397	u32 extra_srqs = 0;
   7398	u32 extra_qps = 0;
   7399	u8 pg_lvl = 1;
   7400	int i, rc;
   7401
   7402	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
   7403	if (rc) {
   7404		netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
   7405			   rc);
   7406		return rc;
   7407	}
   7408	ctx = bp->ctx;
   7409	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
   7410		return 0;
   7411
   7412	if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
   7413		pg_lvl = 2;
   7414		extra_qps = 65536;
   7415		extra_srqs = 8192;
   7416	}
   7417
   7418	ctx_pg = &ctx->qp_mem;
   7419	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
   7420			  extra_qps;
   7421	if (ctx->qp_entry_size) {
   7422		mem_size = ctx->qp_entry_size * ctx_pg->entries;
   7423		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
   7424		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
   7425		if (rc)
   7426			return rc;
   7427	}
   7428
   7429	ctx_pg = &ctx->srq_mem;
   7430	ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
   7431	if (ctx->srq_entry_size) {
   7432		mem_size = ctx->srq_entry_size * ctx_pg->entries;
   7433		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
   7434		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
   7435		if (rc)
   7436			return rc;
   7437	}
   7438
   7439	ctx_pg = &ctx->cq_mem;
   7440	ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
   7441	if (ctx->cq_entry_size) {
   7442		mem_size = ctx->cq_entry_size * ctx_pg->entries;
   7443		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
   7444		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
   7445		if (rc)
   7446			return rc;
   7447	}
   7448
   7449	ctx_pg = &ctx->vnic_mem;
   7450	ctx_pg->entries = ctx->vnic_max_vnic_entries +
   7451			  ctx->vnic_max_ring_table_entries;
   7452	if (ctx->vnic_entry_size) {
   7453		mem_size = ctx->vnic_entry_size * ctx_pg->entries;
   7454		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
   7455		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
   7456		if (rc)
   7457			return rc;
   7458	}
   7459
   7460	ctx_pg = &ctx->stat_mem;
   7461	ctx_pg->entries = ctx->stat_max_entries;
   7462	if (ctx->stat_entry_size) {
   7463		mem_size = ctx->stat_entry_size * ctx_pg->entries;
   7464		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
   7465		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
   7466		if (rc)
   7467			return rc;
   7468	}
   7469
   7470	ena = 0;
   7471	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
   7472		goto skip_rdma;
   7473
   7474	ctx_pg = &ctx->mrav_mem;
   7475	/* 128K extra is needed to accommodate static AH context
   7476	 * allocation by f/w.
   7477	 */
   7478	num_mr = 1024 * 256;
   7479	num_ah = 1024 * 128;
   7480	ctx_pg->entries = num_mr + num_ah;
   7481	if (ctx->mrav_entry_size) {
   7482		mem_size = ctx->mrav_entry_size * ctx_pg->entries;
   7483		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
   7484		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
   7485		if (rc)
   7486			return rc;
   7487	}
   7488	ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
   7489	if (ctx->mrav_num_entries_units)
   7490		ctx_pg->entries =
   7491			((num_mr / ctx->mrav_num_entries_units) << 16) |
   7492			 (num_ah / ctx->mrav_num_entries_units);
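	/* With the split encoding above, the high 16 bits carry the MR count
	 * and the low 16 bits the AH count, both scaled down by
	 * mrav_num_entries_units.  For example, with a hypothetical unit of
	 * 64, num_mr = 256K and num_ah = 128K would encode as
	 * (4096 << 16) | 2048 = 0x10000800.
	 */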
   7493
   7494	ctx_pg = &ctx->tim_mem;
   7495	ctx_pg->entries = ctx->qp_mem.entries;
   7496	if (ctx->tim_entry_size) {
   7497		mem_size = ctx->tim_entry_size * ctx_pg->entries;
   7498		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
   7499		if (rc)
   7500			return rc;
   7501	}
   7502	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
   7503
   7504skip_rdma:
   7505	min = ctx->tqm_min_entries_per_ring;
   7506	entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
   7507		     2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
   7508	entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
   7509	entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
   7510	entries = roundup(entries, ctx->tqm_entries_multiple);
   7511	entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
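	/* tqm_mem[0] is the slow-path TQM ring and gets entries_sp; the
	 * remaining tqm_fp_rings_count rings are fast-path rings and share
	 * the clamped "entries" value computed above.
	 */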
   7512	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
   7513		ctx_pg = ctx->tqm_mem[i];
   7514		ctx_pg->entries = i ? entries : entries_sp;
   7515		if (ctx->tqm_entry_size) {
   7516			mem_size = ctx->tqm_entry_size * ctx_pg->entries;
   7517			rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
   7518						    NULL);
   7519			if (rc)
   7520				return rc;
   7521		}
   7522		ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
   7523	}
   7524	ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
   7525	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
   7526	if (rc) {
   7527		netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
   7528			   rc);
   7529		return rc;
   7530	}
   7531	ctx->flags |= BNXT_CTX_FLAG_INITED;
   7532	return 0;
   7533}
   7534
   7535int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
   7536{
   7537	struct hwrm_func_resource_qcaps_output *resp;
   7538	struct hwrm_func_resource_qcaps_input *req;
   7539	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
   7540	int rc;
   7541
   7542	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
   7543	if (rc)
   7544		return rc;
   7545
   7546	req->fid = cpu_to_le16(0xffff);
   7547	resp = hwrm_req_hold(bp, req);
   7548	rc = hwrm_req_send_silent(bp, req);
   7549	if (rc)
   7550		goto hwrm_func_resc_qcaps_exit;
   7551
   7552	hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
   7553	if (!all)
   7554		goto hwrm_func_resc_qcaps_exit;
   7555
   7556	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
   7557	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
   7558	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
   7559	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
   7560	hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
   7561	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
   7562	hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
   7563	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
   7564	hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
   7565	hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
   7566	hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
   7567	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
   7568	hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
   7569	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
   7570	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
   7571	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
   7572
   7573	if (bp->flags & BNXT_FLAG_CHIP_P5) {
   7574		u16 max_msix = le16_to_cpu(resp->max_msix);
   7575
   7576		hw_resc->max_nqs = max_msix;
   7577		hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
   7578	}
   7579
   7580	if (BNXT_PF(bp)) {
   7581		struct bnxt_pf_info *pf = &bp->pf;
   7582
   7583		pf->vf_resv_strategy =
   7584			le16_to_cpu(resp->vf_reservation_strategy);
   7585		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
   7586			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
   7587	}
   7588hwrm_func_resc_qcaps_exit:
   7589	hwrm_req_drop(bp, req);
   7590	return rc;
   7591}
   7592
   7593static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
   7594{
   7595	struct hwrm_port_mac_ptp_qcfg_output *resp;
   7596	struct hwrm_port_mac_ptp_qcfg_input *req;
   7597	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
   7598	bool phc_cfg;
   7599	u8 flags;
   7600	int rc;
   7601
   7602	if (bp->hwrm_spec_code < 0x10801) {
   7603		rc = -ENODEV;
   7604		goto no_ptp;
   7605	}
   7606
   7607	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
   7608	if (rc)
   7609		goto no_ptp;
   7610
   7611	req->port_id = cpu_to_le16(bp->pf.port_id);
   7612	resp = hwrm_req_hold(bp, req);
   7613	rc = hwrm_req_send(bp, req);
   7614	if (rc)
   7615		goto exit;
   7616
   7617	flags = resp->flags;
   7618	if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
   7619		rc = -ENODEV;
   7620		goto exit;
   7621	}
   7622	if (!ptp) {
   7623		ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
   7624		if (!ptp) {
   7625			rc = -ENOMEM;
   7626			goto exit;
   7627		}
   7628		ptp->bp = bp;
   7629		bp->ptp_cfg = ptp;
   7630	}
   7631	if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
   7632		ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
   7633		ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
   7634	} else if (bp->flags & BNXT_FLAG_CHIP_P5) {
   7635		ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
   7636		ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
   7637	} else {
   7638		rc = -ENODEV;
   7639		goto exit;
   7640	}
   7641	phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
   7642	rc = bnxt_ptp_init(bp, phc_cfg);
   7643	if (rc)
   7644		netdev_warn(bp->dev, "PTP initialization failed.\n");
   7645exit:
   7646	hwrm_req_drop(bp, req);
   7647	if (!rc)
   7648		return 0;
   7649
   7650no_ptp:
   7651	bnxt_ptp_clear(bp);
   7652	kfree(ptp);
   7653	bp->ptp_cfg = NULL;
   7654	return rc;
   7655}
   7656
   7657static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
   7658{
   7659	struct hwrm_func_qcaps_output *resp;
   7660	struct hwrm_func_qcaps_input *req;
   7661	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
   7662	u32 flags, flags_ext, flags_ext2;
   7663	int rc;
   7664
   7665	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
   7666	if (rc)
   7667		return rc;
   7668
   7669	req->fid = cpu_to_le16(0xffff);
   7670	resp = hwrm_req_hold(bp, req);
   7671	rc = hwrm_req_send(bp, req);
   7672	if (rc)
   7673		goto hwrm_func_qcaps_exit;
   7674
   7675	flags = le32_to_cpu(resp->flags);
   7676	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
   7677		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
   7678	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
   7679		bp->flags |= BNXT_FLAG_ROCEV2_CAP;
   7680	if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
   7681		bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
   7682	if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
   7683		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
   7684	if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
   7685		bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
   7686	if (flags &  FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
   7687		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
   7688	if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
   7689		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
   7690	if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
   7691		bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
   7692	if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
   7693		bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
   7694
   7695	flags_ext = le32_to_cpu(resp->flags_ext);
   7696	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
   7697		bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
   7698	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
   7699		bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
   7700	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
   7701		bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
   7702	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
   7703		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
   7704	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
   7705		bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
   7706
   7707	flags_ext2 = le32_to_cpu(resp->flags_ext2);
   7708	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
   7709		bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
   7710
   7711	bp->tx_push_thresh = 0;
   7712	if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
   7713	    BNXT_FW_MAJ(bp) > 217)
   7714		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
   7715
   7716	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
   7717	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
   7718	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
   7719	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
   7720	hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
   7721	if (!hw_resc->max_hw_ring_grps)
   7722		hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
   7723	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
   7724	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
   7725	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
   7726
   7727	if (BNXT_PF(bp)) {
   7728		struct bnxt_pf_info *pf = &bp->pf;
   7729
   7730		pf->fw_fid = le16_to_cpu(resp->fid);
   7731		pf->port_id = le16_to_cpu(resp->port_id);
   7732		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
   7733		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
   7734		pf->max_vfs = le16_to_cpu(resp->max_vfs);
   7735		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
   7736		pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
   7737		pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
   7738		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
   7739		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
   7740		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
   7741		bp->flags &= ~BNXT_FLAG_WOL_CAP;
   7742		if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
   7743			bp->flags |= BNXT_FLAG_WOL_CAP;
   7744		if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
   7745			__bnxt_hwrm_ptp_qcfg(bp);
   7746		} else {
   7747			bnxt_ptp_clear(bp);
   7748			kfree(bp->ptp_cfg);
   7749			bp->ptp_cfg = NULL;
   7750		}
   7751	} else {
   7752#ifdef CONFIG_BNXT_SRIOV
   7753		struct bnxt_vf_info *vf = &bp->vf;
   7754
   7755		vf->fw_fid = le16_to_cpu(resp->fid);
   7756		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
   7757#endif
   7758	}
   7759
   7760hwrm_func_qcaps_exit:
   7761	hwrm_req_drop(bp, req);
   7762	return rc;
   7763}
   7764
   7765static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
   7766{
   7767	struct hwrm_dbg_qcaps_output *resp;
   7768	struct hwrm_dbg_qcaps_input *req;
   7769	int rc;
   7770
   7771	bp->fw_dbg_cap = 0;
   7772	if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
   7773		return;
   7774
   7775	rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
   7776	if (rc)
   7777		return;
   7778
   7779	req->fid = cpu_to_le16(0xffff);
   7780	resp = hwrm_req_hold(bp, req);
   7781	rc = hwrm_req_send(bp, req);
   7782	if (rc)
   7783		goto hwrm_dbg_qcaps_exit;
   7784
   7785	bp->fw_dbg_cap = le32_to_cpu(resp->flags);
   7786
   7787hwrm_dbg_qcaps_exit:
   7788	hwrm_req_drop(bp, req);
   7789}
   7790
   7791static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
   7792
   7793static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
   7794{
   7795	int rc;
   7796
   7797	rc = __bnxt_hwrm_func_qcaps(bp);
   7798	if (rc)
   7799		return rc;
   7800
   7801	bnxt_hwrm_dbg_qcaps(bp);
   7802
   7803	rc = bnxt_hwrm_queue_qportcfg(bp);
   7804	if (rc) {
   7805		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
   7806		return rc;
   7807	}
   7808	if (bp->hwrm_spec_code >= 0x10803) {
   7809		rc = bnxt_alloc_ctx_mem(bp);
   7810		if (rc)
   7811			return rc;
   7812		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
   7813		if (!rc)
   7814			bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
   7815	}
   7816	return 0;
   7817}
   7818
   7819static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
   7820{
   7821	struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
   7822	struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
   7823	u32 flags;
   7824	int rc;
   7825
   7826	if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
   7827		return 0;
   7828
   7829	rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
   7830	if (rc)
   7831		return rc;
   7832
   7833	resp = hwrm_req_hold(bp, req);
   7834	rc = hwrm_req_send(bp, req);
   7835	if (rc)
   7836		goto hwrm_cfa_adv_qcaps_exit;
   7837
   7838	flags = le32_to_cpu(resp->flags);
   7839	if (flags &
   7840	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
   7841		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
   7842
   7843hwrm_cfa_adv_qcaps_exit:
   7844	hwrm_req_drop(bp, req);
   7845	return rc;
   7846}
   7847
   7848static int __bnxt_alloc_fw_health(struct bnxt *bp)
   7849{
   7850	if (bp->fw_health)
   7851		return 0;
   7852
   7853	bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
   7854	if (!bp->fw_health)
   7855		return -ENOMEM;
   7856
   7857	mutex_init(&bp->fw_health->lock);
   7858	return 0;
   7859}
   7860
   7861static int bnxt_alloc_fw_health(struct bnxt *bp)
   7862{
   7863	int rc;
   7864
   7865	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
   7866	    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
   7867		return 0;
   7868
   7869	rc = __bnxt_alloc_fw_health(bp);
   7870	if (rc) {
   7871		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
   7872		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
   7873		return rc;
   7874	}
   7875
   7876	return 0;
   7877}
   7878
   7879static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
   7880{
   7881	writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
   7882					 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
   7883					 BNXT_FW_HEALTH_WIN_MAP_OFF);
   7884}
   7885
   7886static void bnxt_inv_fw_health_reg(struct bnxt *bp)
   7887{
   7888	struct bnxt_fw_health *fw_health = bp->fw_health;
   7889	u32 reg_type;
   7890
   7891	if (!fw_health)
   7892		return;
   7893
   7894	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
   7895	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
   7896		fw_health->status_reliable = false;
   7897
   7898	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
   7899	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
   7900		fw_health->resets_reliable = false;
   7901}
   7902
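/* bnxt_try_map_fw_health_reg() maps the shared hcomm_status structure
 * through a GRC window and checks its signature to locate the firmware
 * status register.  If the signature is absent it falls back, on P5
 * chips only, to the fixed BNXT_GRC_REG_STATUS_P5 location in BAR0;
 * other chips are left with status_reliable remaining false.
 */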
   7903static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
   7904{
   7905	void __iomem *hs;
   7906	u32 status_loc;
   7907	u32 reg_type;
   7908	u32 sig;
   7909
   7910	if (bp->fw_health)
   7911		bp->fw_health->status_reliable = false;
   7912
   7913	__bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
   7914	hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
   7915
   7916	sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
   7917	if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
   7918		if (!bp->chip_num) {
   7919			__bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
   7920			bp->chip_num = readl(bp->bar0 +
   7921					     BNXT_FW_HEALTH_WIN_BASE +
   7922					     BNXT_GRC_REG_CHIP_NUM);
   7923		}
   7924		if (!BNXT_CHIP_P5(bp))
   7925			return;
   7926
   7927		status_loc = BNXT_GRC_REG_STATUS_P5 |
   7928			     BNXT_FW_HEALTH_REG_TYPE_BAR0;
   7929	} else {
   7930		status_loc = readl(hs + offsetof(struct hcomm_status,
   7931						 fw_status_loc));
   7932	}
   7933
   7934	if (__bnxt_alloc_fw_health(bp)) {
   7935		netdev_warn(bp->dev, "no memory for firmware status checks\n");
   7936		return;
   7937	}
   7938
   7939	bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
   7940	reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
   7941	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
   7942		__bnxt_map_fw_health_reg(bp, status_loc);
   7943		bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
   7944			BNXT_FW_HEALTH_WIN_OFF(status_loc);
   7945	}
   7946
   7947	bp->fw_health->status_reliable = true;
   7948}
   7949
   7950static int bnxt_map_fw_health_regs(struct bnxt *bp)
   7951{
   7952	struct bnxt_fw_health *fw_health = bp->fw_health;
   7953	u32 reg_base = 0xffffffff;
   7954	int i;
   7955
   7956	bp->fw_health->status_reliable = false;
   7957	bp->fw_health->resets_reliable = false;
   7958	/* Only pre-map the monitoring GRC registers using window 3 */
   7959	for (i = 0; i < 4; i++) {
   7960		u32 reg = fw_health->regs[i];
   7961
   7962		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
   7963			continue;
   7964		if (reg_base == 0xffffffff)
   7965			reg_base = reg & BNXT_GRC_BASE_MASK;
   7966		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
   7967			return -ERANGE;
   7968		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
   7969	}
   7970	bp->fw_health->status_reliable = true;
   7971	bp->fw_health->resets_reliable = true;
   7972	if (reg_base == 0xffffffff)
   7973		return 0;
   7974
   7975	__bnxt_map_fw_health_reg(bp, reg_base);
   7976	return 0;
   7977}
   7978
   7979static void bnxt_remap_fw_health_regs(struct bnxt *bp)
   7980{
   7981	if (!bp->fw_health)
   7982		return;
   7983
   7984	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
   7985		bp->fw_health->status_reliable = true;
   7986		bp->fw_health->resets_reliable = true;
   7987	} else {
   7988		bnxt_try_map_fw_health_reg(bp);
   7989	}
   7990}
   7991
   7992static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
   7993{
   7994	struct bnxt_fw_health *fw_health = bp->fw_health;
   7995	struct hwrm_error_recovery_qcfg_output *resp;
   7996	struct hwrm_error_recovery_qcfg_input *req;
   7997	int rc, i;
   7998
   7999	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
   8000		return 0;
   8001
   8002	rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
   8003	if (rc)
   8004		return rc;
   8005
   8006	resp = hwrm_req_hold(bp, req);
   8007	rc = hwrm_req_send(bp, req);
   8008	if (rc)
   8009		goto err_recovery_out;
   8010	fw_health->flags = le32_to_cpu(resp->flags);
   8011	if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
   8012	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
   8013		rc = -EINVAL;
   8014		goto err_recovery_out;
   8015	}
   8016	fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
   8017	fw_health->master_func_wait_dsecs =
   8018		le32_to_cpu(resp->master_func_wait_period);
   8019	fw_health->normal_func_wait_dsecs =
   8020		le32_to_cpu(resp->normal_func_wait_period);
   8021	fw_health->post_reset_wait_dsecs =
   8022		le32_to_cpu(resp->master_func_wait_period_after_reset);
   8023	fw_health->post_reset_max_wait_dsecs =
   8024		le32_to_cpu(resp->max_bailout_time_after_reset);
   8025	fw_health->regs[BNXT_FW_HEALTH_REG] =
   8026		le32_to_cpu(resp->fw_health_status_reg);
   8027	fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
   8028		le32_to_cpu(resp->fw_heartbeat_reg);
   8029	fw_health->regs[BNXT_FW_RESET_CNT_REG] =
   8030		le32_to_cpu(resp->fw_reset_cnt_reg);
   8031	fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
   8032		le32_to_cpu(resp->reset_inprogress_reg);
   8033	fw_health->fw_reset_inprog_reg_mask =
   8034		le32_to_cpu(resp->reset_inprogress_reg_mask);
   8035	fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
   8036	if (fw_health->fw_reset_seq_cnt >= 16) {
   8037		rc = -EINVAL;
   8038		goto err_recovery_out;
   8039	}
   8040	for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
   8041		fw_health->fw_reset_seq_regs[i] =
   8042			le32_to_cpu(resp->reset_reg[i]);
   8043		fw_health->fw_reset_seq_vals[i] =
   8044			le32_to_cpu(resp->reset_reg_val[i]);
   8045		fw_health->fw_reset_seq_delay_msec[i] =
   8046			resp->delay_after_reset[i];
   8047	}
   8048err_recovery_out:
   8049	hwrm_req_drop(bp, req);
   8050	if (!rc)
   8051		rc = bnxt_map_fw_health_regs(bp);
   8052	if (rc)
   8053		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
   8054	return rc;
   8055}
   8056
   8057static int bnxt_hwrm_func_reset(struct bnxt *bp)
   8058{
   8059	struct hwrm_func_reset_input *req;
   8060	int rc;
   8061
   8062	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
   8063	if (rc)
   8064		return rc;
   8065
   8066	req->enables = 0;
   8067	hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
   8068	return hwrm_req_send(bp, req);
   8069}
   8070
   8071static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
   8072{
   8073	struct hwrm_nvm_get_dev_info_output nvm_info;
   8074
   8075	if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
   8076		snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
   8077			 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
   8078			 nvm_info.nvm_cfg_ver_upd);
   8079}
   8080
   8081static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
   8082{
   8083	struct hwrm_queue_qportcfg_output *resp;
   8084	struct hwrm_queue_qportcfg_input *req;
   8085	u8 i, j, *qptr;
   8086	bool no_rdma;
   8087	int rc = 0;
   8088
   8089	rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
   8090	if (rc)
   8091		return rc;
   8092
   8093	resp = hwrm_req_hold(bp, req);
   8094	rc = hwrm_req_send(bp, req);
   8095	if (rc)
   8096		goto qportcfg_exit;
   8097
   8098	if (!resp->max_configurable_queues) {
   8099		rc = -EINVAL;
   8100		goto qportcfg_exit;
   8101	}
   8102	bp->max_tc = resp->max_configurable_queues;
   8103	bp->max_lltc = resp->max_configurable_lossless_queues;
   8104	if (bp->max_tc > BNXT_MAX_QUEUE)
   8105		bp->max_tc = BNXT_MAX_QUEUE;
   8106
   8107	no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
   8108	qptr = &resp->queue_id0;
   8109	for (i = 0, j = 0; i < bp->max_tc; i++) {
   8110		bp->q_info[j].queue_id = *qptr;
   8111		bp->q_ids[i] = *qptr++;
   8112		bp->q_info[j].queue_profile = *qptr++;
   8113		bp->tc_to_qidx[j] = j;
   8114		if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
   8115		    (no_rdma && BNXT_PF(bp)))
   8116			j++;
   8117	}
   8118	bp->max_q = bp->max_tc;
   8119	bp->max_tc = max_t(u8, j, 1);
   8120
   8121	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
   8122		bp->max_tc = 1;
   8123
   8124	if (bp->max_lltc > bp->max_tc)
   8125		bp->max_lltc = bp->max_tc;
   8126
   8127qportcfg_exit:
   8128	hwrm_req_drop(bp, req);
   8129	return rc;
   8130}
   8131
   8132static int bnxt_hwrm_poll(struct bnxt *bp)
   8133{
   8134	struct hwrm_ver_get_input *req;
   8135	int rc;
   8136
   8137	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
   8138	if (rc)
   8139		return rc;
   8140
   8141	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
   8142	req->hwrm_intf_min = HWRM_VERSION_MINOR;
   8143	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
   8144
   8145	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
   8146	rc = hwrm_req_send(bp, req);
   8147	return rc;
   8148}
   8149
   8150static int bnxt_hwrm_ver_get(struct bnxt *bp)
   8151{
   8152	struct hwrm_ver_get_output *resp;
   8153	struct hwrm_ver_get_input *req;
   8154	u16 fw_maj, fw_min, fw_bld, fw_rsv;
   8155	u32 dev_caps_cfg, hwrm_ver;
   8156	int rc, len;
   8157
   8158	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
   8159	if (rc)
   8160		return rc;
   8161
   8162	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
   8163	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
   8164	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
   8165	req->hwrm_intf_min = HWRM_VERSION_MINOR;
   8166	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
   8167
   8168	resp = hwrm_req_hold(bp, req);
   8169	rc = hwrm_req_send(bp, req);
   8170	if (rc)
   8171		goto hwrm_ver_get_exit;
   8172
   8173	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
   8174
   8175	bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
   8176			     resp->hwrm_intf_min_8b << 8 |
   8177			     resp->hwrm_intf_upd_8b;
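	/* hwrm_spec_code packs the interface version as maj << 16 |
	 * min << 8 | upd, so e.g. interface 1.9.2 becomes 0x10902 -- which
	 * is why feature gates elsewhere in this file compare against hex
	 * constants such as 0x10801 (1.8.1) or 0x10902.
	 */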
   8178	if (resp->hwrm_intf_maj_8b < 1) {
   8179		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
   8180			    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
   8181			    resp->hwrm_intf_upd_8b);
   8182		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
   8183	}
   8184
   8185	hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
   8186			HWRM_VERSION_UPDATE;
   8187
   8188	if (bp->hwrm_spec_code > hwrm_ver)
   8189		snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
   8190			 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
   8191			 HWRM_VERSION_UPDATE);
   8192	else
   8193		snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
   8194			 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
   8195			 resp->hwrm_intf_upd_8b);
   8196
   8197	fw_maj = le16_to_cpu(resp->hwrm_fw_major);
   8198	if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
   8199		fw_min = le16_to_cpu(resp->hwrm_fw_minor);
   8200		fw_bld = le16_to_cpu(resp->hwrm_fw_build);
   8201		fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
   8202		len = FW_VER_STR_LEN;
   8203	} else {
   8204		fw_maj = resp->hwrm_fw_maj_8b;
   8205		fw_min = resp->hwrm_fw_min_8b;
   8206		fw_bld = resp->hwrm_fw_bld_8b;
   8207		fw_rsv = resp->hwrm_fw_rsvd_8b;
   8208		len = BC_HWRM_STR_LEN;
   8209	}
   8210	bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
   8211	snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
   8212		 fw_rsv);
   8213
   8214	if (strlen(resp->active_pkg_name)) {
   8215		int fw_ver_len = strlen(bp->fw_ver_str);
   8216
   8217		snprintf(bp->fw_ver_str + fw_ver_len,
   8218			 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
   8219			 resp->active_pkg_name);
   8220		bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
   8221	}
   8222
   8223	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
   8224	if (!bp->hwrm_cmd_timeout)
   8225		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
   8226	bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
   8227	if (!bp->hwrm_cmd_max_timeout)
   8228		bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
   8229	else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
   8230		netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
   8231			    bp->hwrm_cmd_max_timeout / 1000);
   8232
   8233	if (resp->hwrm_intf_maj_8b >= 1) {
   8234		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
   8235		bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
   8236	}
   8237	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
   8238		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
   8239
   8240	bp->chip_num = le16_to_cpu(resp->chip_num);
   8241	bp->chip_rev = resp->chip_rev;
   8242	if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
   8243	    !resp->chip_metal)
   8244		bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
   8245
   8246	dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
   8247	if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
   8248	    (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
   8249		bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
   8250
   8251	if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
   8252		bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
   8253
   8254	if (dev_caps_cfg &
   8255	    VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
   8256		bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
   8257
   8258	if (dev_caps_cfg &
   8259	    VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
   8260		bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
   8261
   8262	if (dev_caps_cfg &
   8263	    VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
   8264		bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
   8265
   8266hwrm_ver_get_exit:
   8267	hwrm_req_drop(bp, req);
   8268	return rc;
   8269}
   8270
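/* Program the current wall-clock time into the firmware. */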
   8271int bnxt_hwrm_fw_set_time(struct bnxt *bp)
   8272{
   8273	struct hwrm_fw_set_time_input *req;
   8274	struct tm tm;
   8275	time64_t now = ktime_get_real_seconds();
   8276	int rc;
   8277
   8278	if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
   8279	    bp->hwrm_spec_code < 0x10400)
   8280		return -EOPNOTSUPP;
   8281
   8282	time64_to_tm(now, 0, &tm);
   8283	rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
   8284	if (rc)
   8285		return rc;
   8286
   8287	req->year = cpu_to_le16(1900 + tm.tm_year);
   8288	req->month = 1 + tm.tm_mon;
   8289	req->day = tm.tm_mday;
   8290	req->hour = tm.tm_hour;
   8291	req->minute = tm.tm_min;
   8292	req->second = tm.tm_sec;
   8293	return hwrm_req_send(bp, req);
   8294}
   8295
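/* Fold a hardware counter of width @mask into the 64-bit software
 * counter, accounting for wrap-around of the hardware value.
 */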
   8296static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
   8297{
   8298	u64 sw_tmp;
   8299
   8300	hw &= mask;
   8301	sw_tmp = (*sw & ~mask) | hw;
   8302	if (hw < (*sw & mask))
   8303		sw_tmp += mask + 1;
   8304	WRITE_ONCE(*sw, sw_tmp);
   8305}
   8306
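/* Accumulate an array of hardware counters into the software counters.
 * A mask of -1 means the hardware counter is already full width and is
 * copied directly.  @ignore_zero skips counters that read back as zero.
 */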
   8307static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
   8308				    int count, bool ignore_zero)
   8309{
   8310	int i;
   8311
   8312	for (i = 0; i < count; i++) {
   8313		u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
   8314
   8315		if (ignore_zero && !hw)
   8316			continue;
   8317
   8318		if (masks[i] == -1ULL)
   8319			sw_stats[i] = hw;
   8320		else
   8321			bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
   8322	}
   8323}
   8324
   8325static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
   8326{
   8327	if (!stats->hw_stats)
   8328		return;
   8329
   8330	__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
   8331				stats->hw_masks, stats->len / 8, false);
   8332}
   8333
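/* Accumulate the per-ring, port and extended port statistics into their
 * software counters.  All rings use the counter masks of ring 0.
 */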
   8334static void bnxt_accumulate_all_stats(struct bnxt *bp)
   8335{
   8336	struct bnxt_stats_mem *ring0_stats;
   8337	bool ignore_zero = false;
   8338	int i;
   8339
   8340	/* Chip bug.  Counter intermittently becomes 0. */
   8341	if (bp->flags & BNXT_FLAG_CHIP_P5)
   8342		ignore_zero = true;
   8343
   8344	for (i = 0; i < bp->cp_nr_rings; i++) {
   8345		struct bnxt_napi *bnapi = bp->bnapi[i];
   8346		struct bnxt_cp_ring_info *cpr;
   8347		struct bnxt_stats_mem *stats;
   8348
   8349		cpr = &bnapi->cp_ring;
   8350		stats = &cpr->stats;
   8351		if (!i)
   8352			ring0_stats = stats;
   8353		__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
   8354					ring0_stats->hw_masks,
   8355					ring0_stats->len / 8, ignore_zero);
   8356	}
   8357	if (bp->flags & BNXT_FLAG_PORT_STATS) {
   8358		struct bnxt_stats_mem *stats = &bp->port_stats;
   8359		__le64 *hw_stats = stats->hw_stats;
   8360		u64 *sw_stats = stats->sw_stats;
   8361		u64 *masks = stats->hw_masks;
   8362		int cnt;
   8363
   8364		cnt = sizeof(struct rx_port_stats) / 8;
   8365		__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
   8366
   8367		hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
   8368		sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
   8369		masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
   8370		cnt = sizeof(struct tx_port_stats) / 8;
   8371		__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
   8372	}
   8373	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
   8374		bnxt_accumulate_stats(&bp->rx_port_stats_ext);
   8375		bnxt_accumulate_stats(&bp->tx_port_stats_ext);
   8376	}
   8377}
   8378
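/* Ask the firmware to DMA the basic RX and TX port statistics into the
 * host buffer described by bp->port_stats.
 */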
   8379static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
   8380{
   8381	struct hwrm_port_qstats_input *req;
   8382	struct bnxt_pf_info *pf = &bp->pf;
   8383	int rc;
   8384
   8385	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
   8386		return 0;
   8387
   8388	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
   8389		return -EOPNOTSUPP;
   8390
   8391	rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
   8392	if (rc)
   8393		return rc;
   8394
   8395	req->flags = flags;
   8396	req->port_id = cpu_to_le16(pf->port_id);
   8397	req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
   8398					    BNXT_TX_PORT_STATS_BYTE_OFFSET);
   8399	req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
   8400	return hwrm_req_send(bp, req);
   8401}
   8402
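/* Ask the firmware to DMA the extended RX and TX port statistics and
 * record their sizes.  When @flags is zero, also refresh the
 * priority-to-CoS queue mapping used for per-priority counters.
 */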
   8403static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
   8404{
   8405	struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
   8406	struct hwrm_queue_pri2cos_qcfg_input *req_qc;
   8407	struct hwrm_port_qstats_ext_output *resp_qs;
   8408	struct hwrm_port_qstats_ext_input *req_qs;
   8409	struct bnxt_pf_info *pf = &bp->pf;
   8410	u32 tx_stat_size;
   8411	int rc;
   8412
   8413	if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
   8414		return 0;
   8415
   8416	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
   8417		return -EOPNOTSUPP;
   8418
   8419	rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
   8420	if (rc)
   8421		return rc;
   8422
   8423	req_qs->flags = flags;
   8424	req_qs->port_id = cpu_to_le16(pf->port_id);
   8425	req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
   8426	req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
   8427	tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
   8428		       sizeof(struct tx_port_stats_ext) : 0;
   8429	req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
   8430	req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
   8431	resp_qs = hwrm_req_hold(bp, req_qs);
   8432	rc = hwrm_req_send(bp, req_qs);
   8433	if (!rc) {
   8434		bp->fw_rx_stats_ext_size =
   8435			le16_to_cpu(resp_qs->rx_stat_size) / 8;
   8436		if (BNXT_FW_MAJ(bp) < 220 &&
   8437		    bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
   8438			bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
   8439
   8440		bp->fw_tx_stats_ext_size = tx_stat_size ?
   8441			le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
   8442	} else {
   8443		bp->fw_rx_stats_ext_size = 0;
   8444		bp->fw_tx_stats_ext_size = 0;
   8445	}
   8446	hwrm_req_drop(bp, req_qs);
   8447
   8448	if (flags)
   8449		return rc;
   8450
   8451	if (bp->fw_tx_stats_ext_size <=
   8452	    offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
   8453		bp->pri2cos_valid = 0;
   8454		return rc;
   8455	}
   8456
   8457	rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
   8458	if (rc)
   8459		return rc;
   8460
   8461	req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
   8462
   8463	resp_qc = hwrm_req_hold(bp, req_qc);
   8464	rc = hwrm_req_send(bp, req_qc);
   8465	if (!rc) {
   8466		u8 *pri2cos;
   8467		int i, j;
   8468
   8469		pri2cos = &resp_qc->pri0_cos_queue_id;
   8470		for (i = 0; i < 8; i++) {
   8471			u8 queue_id = pri2cos[i];
   8472			u8 queue_idx;
   8473
   8474			/* Per port queue IDs start from 0, 10, 20, etc */
   8475			queue_idx = queue_id % 10;
   8476			if (queue_idx > BNXT_MAX_QUEUE) {
   8477				bp->pri2cos_valid = false;
   8478				hwrm_req_drop(bp, req_qc);
   8479				return rc;
   8480			}
   8481			for (j = 0; j < bp->max_q; j++) {
   8482				if (bp->q_ids[j] == queue_id)
   8483					bp->pri2cos_idx[i] = queue_idx;
   8484			}
   8485		}
   8486		bp->pri2cos_valid = true;
   8487	}
   8488	hwrm_req_drop(bp, req_qc);
   8489
   8490	return rc;
   8491}
   8492
   8493static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
   8494{
   8495	bnxt_hwrm_tunnel_dst_port_free(bp,
   8496		TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
   8497	bnxt_hwrm_tunnel_dst_port_free(bp,
   8498		TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
   8499}
   8500
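/* Enable or disable TPA (hardware receive aggregation) on all VNICs. */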
   8501static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
   8502{
   8503	int rc, i;
   8504	u32 tpa_flags = 0;
   8505
   8506	if (set_tpa)
   8507		tpa_flags = bp->flags & BNXT_FLAG_TPA;
   8508	else if (BNXT_NO_FW_ACCESS(bp))
   8509		return 0;
   8510	for (i = 0; i < bp->nr_vnics; i++) {
   8511		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
   8512		if (rc) {
   8513			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
   8514				   i, rc);
   8515			return rc;
   8516		}
   8517	}
   8518	return 0;
   8519}
   8520
   8521static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
   8522{
   8523	int i;
   8524
   8525	for (i = 0; i < bp->nr_vnics; i++)
   8526		bnxt_hwrm_vnic_set_rss(bp, i, false);
   8527}
   8528
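/* Tear down all VNIC state: L2 filters, RSS configuration, TPA and the
 * VNICs themselves.  P5 chips free the RSS contexts after the VNICs,
 * older chips free them before.
 */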
   8529static void bnxt_clear_vnic(struct bnxt *bp)
   8530{
   8531	if (!bp->vnic_info)
   8532		return;
   8533
   8534	bnxt_hwrm_clear_vnic_filter(bp);
   8535	if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
    8536		/* clear all RSS settings before freeing the vnic ctx */
   8537		bnxt_hwrm_clear_vnic_rss(bp);
   8538		bnxt_hwrm_vnic_ctx_free(bp);
   8539	}
    8540	/* before freeing the vnic, undo the vnic TPA settings */
   8541	if (bp->flags & BNXT_FLAG_TPA)
   8542		bnxt_set_tpa(bp, false);
   8543	bnxt_hwrm_vnic_free(bp);
   8544	if (bp->flags & BNXT_FLAG_CHIP_P5)
   8545		bnxt_hwrm_vnic_ctx_free(bp);
   8546}
   8547
   8548static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
   8549				    bool irq_re_init)
   8550{
   8551	bnxt_clear_vnic(bp);
   8552	bnxt_hwrm_ring_free(bp, close_path);
   8553	bnxt_hwrm_ring_grp_free(bp);
   8554	if (irq_re_init) {
   8555		bnxt_hwrm_stat_ctx_free(bp);
   8556		bnxt_hwrm_free_tunnel_ports(bp);
   8557	}
   8558}
   8559
   8560static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
   8561{
   8562	struct hwrm_func_cfg_input *req;
   8563	u8 evb_mode;
   8564	int rc;
   8565
   8566	if (br_mode == BRIDGE_MODE_VEB)
   8567		evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
   8568	else if (br_mode == BRIDGE_MODE_VEPA)
   8569		evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
   8570	else
   8571		return -EINVAL;
   8572
   8573	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
   8574	if (rc)
   8575		return rc;
   8576
   8577	req->fid = cpu_to_le16(0xffff);
   8578	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
   8579	req->evb_mode = evb_mode;
   8580	return hwrm_req_send(bp, req);
   8581}
   8582
   8583static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
   8584{
   8585	struct hwrm_func_cfg_input *req;
   8586	int rc;
   8587
   8588	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
   8589		return 0;
   8590
   8591	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
   8592	if (rc)
   8593		return rc;
   8594
   8595	req->fid = cpu_to_le16(0xffff);
   8596	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
   8597	req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
   8598	if (size == 128)
   8599		req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
   8600
   8601	return hwrm_req_send(bp, req);
   8602}
   8603
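/* Set up a VNIC on pre-P5 chips: allocate the RSS (and Nitro A0 CoS)
 * contexts, configure the VNIC, then enable RSS and, if aggregation
 * rings are used, header-data split.
 */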
   8604static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
   8605{
   8606	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
   8607	int rc;
   8608
   8609	if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
   8610		goto skip_rss_ctx;
   8611
   8612	/* allocate context for vnic */
   8613	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
   8614	if (rc) {
   8615		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
   8616			   vnic_id, rc);
   8617		goto vnic_setup_err;
   8618	}
   8619	bp->rsscos_nr_ctxs++;
   8620
   8621	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
   8622		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
   8623		if (rc) {
   8624			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
   8625				   vnic_id, rc);
   8626			goto vnic_setup_err;
   8627		}
   8628		bp->rsscos_nr_ctxs++;
   8629	}
   8630
   8631skip_rss_ctx:
   8632	/* configure default vnic, ring grp */
   8633	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
   8634	if (rc) {
   8635		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
   8636			   vnic_id, rc);
   8637		goto vnic_setup_err;
   8638	}
   8639
   8640	/* Enable RSS hashing on vnic */
   8641	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
   8642	if (rc) {
   8643		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
   8644			   vnic_id, rc);
   8645		goto vnic_setup_err;
   8646	}
   8647
   8648	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
   8649		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
   8650		if (rc) {
   8651			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
   8652				   vnic_id, rc);
   8653		}
   8654	}
   8655
   8656vnic_setup_err:
   8657	return rc;
   8658}
   8659
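/* Set up a VNIC on P5 chips: allocate one RSS context per group of RX
 * rings, then configure RSS, the VNIC and header-data split.
 */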
   8660static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
   8661{
   8662	int rc, i, nr_ctxs;
   8663
   8664	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
   8665	for (i = 0; i < nr_ctxs; i++) {
   8666		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
   8667		if (rc) {
   8668			netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
   8669				   vnic_id, i, rc);
   8670			break;
   8671		}
   8672		bp->rsscos_nr_ctxs++;
   8673	}
   8674	if (i < nr_ctxs)
   8675		return -ENOMEM;
   8676
   8677	rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
   8678	if (rc) {
   8679		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
   8680			   vnic_id, rc);
   8681		return rc;
   8682	}
   8683	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
   8684	if (rc) {
   8685		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
   8686			   vnic_id, rc);
   8687		return rc;
   8688	}
   8689	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
   8690		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
   8691		if (rc) {
   8692			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
   8693				   vnic_id, rc);
   8694		}
   8695	}
   8696	return rc;
   8697}
   8698
   8699static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
   8700{
   8701	if (bp->flags & BNXT_FLAG_CHIP_P5)
   8702		return __bnxt_setup_vnic_p5(bp, vnic_id);
   8703	else
   8704		return __bnxt_setup_vnic(bp, vnic_id);
   8705}
   8706
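/* For aRFS on pre-P5 chips, allocate and set up one additional VNIC per
 * RX ring so that flows can be steered to individual rings.
 */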
   8707static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
   8708{
   8709#ifdef CONFIG_RFS_ACCEL
   8710	int i, rc = 0;
   8711
   8712	if (bp->flags & BNXT_FLAG_CHIP_P5)
   8713		return 0;
   8714
   8715	for (i = 0; i < bp->rx_nr_rings; i++) {
   8716		struct bnxt_vnic_info *vnic;
   8717		u16 vnic_id = i + 1;
   8718		u16 ring_id = i;
   8719
   8720		if (vnic_id >= bp->nr_vnics)
   8721			break;
   8722
   8723		vnic = &bp->vnic_info[vnic_id];
   8724		vnic->flags |= BNXT_VNIC_RFS_FLAG;
   8725		if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
   8726			vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
   8727		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
   8728		if (rc) {
   8729			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
   8730				   vnic_id, rc);
   8731			break;
   8732		}
   8733		rc = bnxt_setup_vnic(bp, vnic_id);
   8734		if (rc)
   8735			break;
   8736	}
   8737	return rc;
   8738#else
   8739	return 0;
   8740#endif
   8741}
   8742
   8743/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
   8744static bool bnxt_promisc_ok(struct bnxt *bp)
   8745{
   8746#ifdef CONFIG_BNXT_SRIOV
   8747	if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
   8748		return false;
   8749#endif
   8750	return true;
   8751}
   8752
   8753static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
   8754{
    8755	int rc = 0;
   8756
   8757	rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
   8758	if (rc) {
   8759		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
   8760			   rc);
   8761		return rc;
   8762	}
   8763
   8764	rc = bnxt_hwrm_vnic_cfg(bp, 1);
   8765	if (rc) {
   8766		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
   8767			   rc);
   8768		return rc;
   8769	}
   8770	return rc;
   8771}
   8772
   8773static int bnxt_cfg_rx_mode(struct bnxt *);
   8774static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
   8775
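/* Allocate the firmware resources (stat contexts, rings, ring groups,
 * VNICs), then program the default VNIC's MAC filter, RX mask and
 * interrupt coalescing.  All resources are freed again on error.
 */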
   8776static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
   8777{
   8778	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
   8779	int rc = 0;
   8780	unsigned int rx_nr_rings = bp->rx_nr_rings;
   8781
   8782	if (irq_re_init) {
   8783		rc = bnxt_hwrm_stat_ctx_alloc(bp);
   8784		if (rc) {
   8785			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
   8786				   rc);
   8787			goto err_out;
   8788		}
   8789	}
   8790
   8791	rc = bnxt_hwrm_ring_alloc(bp);
   8792	if (rc) {
   8793		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
   8794		goto err_out;
   8795	}
   8796
   8797	rc = bnxt_hwrm_ring_grp_alloc(bp);
   8798	if (rc) {
   8799		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
   8800		goto err_out;
   8801	}
   8802
   8803	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
   8804		rx_nr_rings--;
   8805
   8806	/* default vnic 0 */
   8807	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
   8808	if (rc) {
   8809		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
   8810		goto err_out;
   8811	}
   8812
   8813	rc = bnxt_setup_vnic(bp, 0);
   8814	if (rc)
   8815		goto err_out;
   8816
   8817	if (bp->flags & BNXT_FLAG_RFS) {
   8818		rc = bnxt_alloc_rfs_vnics(bp);
   8819		if (rc)
   8820			goto err_out;
   8821	}
   8822
   8823	if (bp->flags & BNXT_FLAG_TPA) {
   8824		rc = bnxt_set_tpa(bp, true);
   8825		if (rc)
   8826			goto err_out;
   8827	}
   8828
   8829	if (BNXT_VF(bp))
   8830		bnxt_update_vf_mac(bp);
   8831
   8832	/* Filter for default vnic 0 */
   8833	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
   8834	if (rc) {
   8835		if (BNXT_VF(bp) && rc == -ENODEV)
   8836			netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
   8837		else
   8838			netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
   8839		goto err_out;
   8840	}
   8841	vnic->uc_filter_count = 1;
   8842
   8843	vnic->rx_mask = 0;
   8844	if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
   8845		goto skip_rx_mask;
   8846
   8847	if (bp->dev->flags & IFF_BROADCAST)
   8848		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
   8849
   8850	if (bp->dev->flags & IFF_PROMISC)
   8851		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
   8852
   8853	if (bp->dev->flags & IFF_ALLMULTI) {
   8854		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
   8855		vnic->mc_list_count = 0;
   8856	} else if (bp->dev->flags & IFF_MULTICAST) {
   8857		u32 mask = 0;
   8858
   8859		bnxt_mc_list_updated(bp, &mask);
   8860		vnic->rx_mask |= mask;
   8861	}
   8862
   8863	rc = bnxt_cfg_rx_mode(bp);
   8864	if (rc)
   8865		goto err_out;
   8866
   8867skip_rx_mask:
   8868	rc = bnxt_hwrm_set_coal(bp);
   8869	if (rc)
   8870		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
   8871				rc);
   8872
   8873	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
   8874		rc = bnxt_setup_nitroa0_vnic(bp);
   8875		if (rc)
   8876			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
   8877				   rc);
   8878	}
   8879
   8880	if (BNXT_VF(bp)) {
   8881		bnxt_hwrm_func_qcfg(bp);
   8882		netdev_update_features(bp->dev);
   8883	}
   8884
   8885	return 0;
   8886
   8887err_out:
   8888	bnxt_hwrm_resource_free(bp, 0, true);
   8889
   8890	return rc;
   8891}
   8892
   8893static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
   8894{
   8895	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
   8896	return 0;
   8897}
   8898
   8899static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
   8900{
   8901	bnxt_init_cp_rings(bp);
   8902	bnxt_init_rx_rings(bp);
   8903	bnxt_init_tx_rings(bp);
   8904	bnxt_init_ring_grps(bp, irq_re_init);
   8905	bnxt_init_vnics(bp);
   8906
   8907	return bnxt_init_chip(bp, irq_re_init);
   8908}
   8909
   8910static int bnxt_set_real_num_queues(struct bnxt *bp)
   8911{
   8912	int rc;
   8913	struct net_device *dev = bp->dev;
   8914
   8915	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
   8916					  bp->tx_nr_rings_xdp);
   8917	if (rc)
   8918		return rc;
   8919
   8920	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
   8921	if (rc)
   8922		return rc;
   8923
   8924#ifdef CONFIG_RFS_ACCEL
   8925	if (bp->flags & BNXT_FLAG_RFS)
   8926		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
   8927#endif
   8928
   8929	return rc;
   8930}
   8931
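/* Fit the requested RX and TX ring counts into @max completion rings.
 * With shared rings each RX/TX pair shares a completion ring; otherwise
 * the larger of the two counts is reduced until the sum fits.
 */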
   8932static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
   8933			   bool shared)
   8934{
   8935	int _rx = *rx, _tx = *tx;
   8936
   8937	if (shared) {
   8938		*rx = min_t(int, _rx, max);
   8939		*tx = min_t(int, _tx, max);
   8940	} else {
   8941		if (max < 2)
   8942			return -ENOMEM;
   8943
   8944		while (_rx + _tx > max) {
   8945			if (_rx > _tx && _rx > 1)
   8946				_rx--;
   8947			else if (_tx > 1)
   8948				_tx--;
   8949		}
   8950		*rx = _rx;
   8951		*tx = _tx;
   8952	}
   8953	return 0;
   8954}
   8955
   8956static void bnxt_setup_msix(struct bnxt *bp)
   8957{
   8958	const int len = sizeof(bp->irq_tbl[0].name);
   8959	struct net_device *dev = bp->dev;
   8960	int tcs, i;
   8961
   8962	tcs = netdev_get_num_tc(dev);
   8963	if (tcs) {
   8964		int i, off, count;
   8965
   8966		for (i = 0; i < tcs; i++) {
   8967			count = bp->tx_nr_rings_per_tc;
   8968			off = i * count;
   8969			netdev_set_tc_queue(dev, i, count, off);
   8970		}
   8971	}
   8972
   8973	for (i = 0; i < bp->cp_nr_rings; i++) {
   8974		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
   8975		char *attr;
   8976
   8977		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
   8978			attr = "TxRx";
   8979		else if (i < bp->rx_nr_rings)
   8980			attr = "rx";
   8981		else
   8982			attr = "tx";
   8983
   8984		snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
   8985			 attr, i);
   8986		bp->irq_tbl[map_idx].handler = bnxt_msix;
   8987	}
   8988}
   8989
   8990static void bnxt_setup_inta(struct bnxt *bp)
   8991{
   8992	const int len = sizeof(bp->irq_tbl[0].name);
   8993
   8994	if (netdev_get_num_tc(bp->dev))
   8995		netdev_reset_tc(bp->dev);
   8996
   8997	snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
   8998		 0);
   8999	bp->irq_tbl[0].handler = bnxt_inta;
   9000}
   9001
   9002static int bnxt_init_int_mode(struct bnxt *bp);
   9003
   9004static int bnxt_setup_int_mode(struct bnxt *bp)
   9005{
   9006	int rc;
   9007
   9008	if (!bp->irq_tbl) {
   9009		rc = bnxt_init_int_mode(bp);
   9010		if (rc || !bp->irq_tbl)
   9011			return rc ?: -ENODEV;
   9012	}
   9013
   9014	if (bp->flags & BNXT_FLAG_USING_MSIX)
   9015		bnxt_setup_msix(bp);
   9016	else
   9017		bnxt_setup_inta(bp);
   9018
   9019	rc = bnxt_set_real_num_queues(bp);
   9020	return rc;
   9021}
   9022
   9023#ifdef CONFIG_RFS_ACCEL
   9024static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
   9025{
   9026	return bp->hw_resc.max_rsscos_ctxs;
   9027}
   9028
   9029static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
   9030{
   9031	return bp->hw_resc.max_vnics;
   9032}
   9033#endif
   9034
   9035unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
   9036{
   9037	return bp->hw_resc.max_stat_ctxs;
   9038}
   9039
   9040unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
   9041{
   9042	return bp->hw_resc.max_cp_rings;
   9043}
   9044
   9045static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
   9046{
   9047	unsigned int cp = bp->hw_resc.max_cp_rings;
   9048
   9049	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
   9050		cp -= bnxt_get_ulp_msix_num(bp);
   9051
   9052	return cp;
   9053}
   9054
   9055static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
   9056{
   9057	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
   9058
   9059	if (bp->flags & BNXT_FLAG_CHIP_P5)
   9060		return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
   9061
   9062	return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
   9063}
   9064
   9065static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
   9066{
   9067	bp->hw_resc.max_irqs = max_irqs;
   9068}
   9069
   9070unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
   9071{
   9072	unsigned int cp;
   9073
   9074	cp = bnxt_get_max_func_cp_rings_for_en(bp);
   9075	if (bp->flags & BNXT_FLAG_CHIP_P5)
   9076		return cp - bp->rx_nr_rings - bp->tx_nr_rings;
   9077	else
   9078		return cp - bp->cp_nr_rings;
   9079}
   9080
   9081unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
   9082{
   9083	return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
   9084}
   9085
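/* Return the number of MSI-X vectors that can be made available beyond
 * those used by the existing completion rings, given a request for @num
 * additional vectors.
 */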
   9086int bnxt_get_avail_msix(struct bnxt *bp, int num)
   9087{
   9088	int max_cp = bnxt_get_max_func_cp_rings(bp);
   9089	int max_irq = bnxt_get_max_func_irqs(bp);
   9090	int total_req = bp->cp_nr_rings + num;
   9091	int max_idx, avail_msix;
   9092
   9093	max_idx = bp->total_irqs;
   9094	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
   9095		max_idx = min_t(int, bp->total_irqs, max_cp);
   9096	avail_msix = max_idx - bp->cp_nr_rings;
   9097	if (!BNXT_NEW_RM(bp) || avail_msix >= num)
   9098		return avail_msix;
   9099
   9100	if (max_irq < total_req) {
   9101		num = max_irq - bp->cp_nr_rings;
   9102		if (num <= 0)
   9103			return 0;
   9104	}
   9105	return num;
   9106}
   9107
   9108static int bnxt_get_num_msix(struct bnxt *bp)
   9109{
   9110	if (!BNXT_NEW_RM(bp))
   9111		return bnxt_get_max_func_irqs(bp);
   9112
   9113	return bnxt_nq_rings_in_use(bp);
   9114}
   9115
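/* Enable MSI-X, build the IRQ table and trim the ring counts to the
 * number of vectors actually granted.
 */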
   9116static int bnxt_init_msix(struct bnxt *bp)
   9117{
   9118	int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
   9119	struct msix_entry *msix_ent;
   9120
   9121	total_vecs = bnxt_get_num_msix(bp);
   9122	max = bnxt_get_max_func_irqs(bp);
   9123	if (total_vecs > max)
   9124		total_vecs = max;
   9125
   9126	if (!total_vecs)
   9127		return 0;
   9128
   9129	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
   9130	if (!msix_ent)
   9131		return -ENOMEM;
   9132
   9133	for (i = 0; i < total_vecs; i++) {
   9134		msix_ent[i].entry = i;
   9135		msix_ent[i].vector = 0;
   9136	}
   9137
   9138	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
   9139		min = 2;
   9140
   9141	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
   9142	ulp_msix = bnxt_get_ulp_msix_num(bp);
   9143	if (total_vecs < 0 || total_vecs < ulp_msix) {
   9144		rc = -ENODEV;
   9145		goto msix_setup_exit;
   9146	}
   9147
   9148	bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
   9149	if (bp->irq_tbl) {
   9150		for (i = 0; i < total_vecs; i++)
   9151			bp->irq_tbl[i].vector = msix_ent[i].vector;
   9152
   9153		bp->total_irqs = total_vecs;
    9154		/* Trim rings based upon the number of vectors allocated */
   9155		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
   9156				     total_vecs - ulp_msix, min == 1);
   9157		if (rc)
   9158			goto msix_setup_exit;
   9159
   9160		bp->cp_nr_rings = (min == 1) ?
   9161				  max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
   9162				  bp->tx_nr_rings + bp->rx_nr_rings;
   9163
   9164	} else {
   9165		rc = -ENOMEM;
   9166		goto msix_setup_exit;
   9167	}
   9168	bp->flags |= BNXT_FLAG_USING_MSIX;
   9169	kfree(msix_ent);
   9170	return 0;
   9171
   9172msix_setup_exit:
   9173	netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
   9174	kfree(bp->irq_tbl);
   9175	bp->irq_tbl = NULL;
   9176	pci_disable_msix(bp->pdev);
   9177	kfree(msix_ent);
   9178	return rc;
   9179}
   9180
   9181static int bnxt_init_inta(struct bnxt *bp)
   9182{
   9183	bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
   9184	if (!bp->irq_tbl)
   9185		return -ENOMEM;
   9186
   9187	bp->total_irqs = 1;
   9188	bp->rx_nr_rings = 1;
   9189	bp->tx_nr_rings = 1;
   9190	bp->cp_nr_rings = 1;
   9191	bp->flags |= BNXT_FLAG_SHARED_RINGS;
   9192	bp->irq_tbl[0].vector = bp->pdev->irq;
   9193	return 0;
   9194}
   9195
   9196static int bnxt_init_int_mode(struct bnxt *bp)
   9197{
   9198	int rc = -ENODEV;
   9199
   9200	if (bp->flags & BNXT_FLAG_MSIX_CAP)
   9201		rc = bnxt_init_msix(bp);
   9202
   9203	if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
   9204		/* fallback to INTA */
   9205		rc = bnxt_init_inta(bp);
   9206	}
   9207	return rc;
   9208}
   9209
   9210static void bnxt_clear_int_mode(struct bnxt *bp)
   9211{
   9212	if (bp->flags & BNXT_FLAG_USING_MSIX)
   9213		pci_disable_msix(bp->pdev);
   9214
   9215	kfree(bp->irq_tbl);
   9216	bp->irq_tbl = NULL;
   9217	bp->flags &= ~BNXT_FLAG_USING_MSIX;
   9218}
   9219
   9220int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
   9221{
   9222	int tcs = netdev_get_num_tc(bp->dev);
   9223	bool irq_cleared = false;
   9224	int rc;
   9225
   9226	if (!bnxt_need_reserve_rings(bp))
   9227		return 0;
   9228
   9229	if (irq_re_init && BNXT_NEW_RM(bp) &&
   9230	    bnxt_get_num_msix(bp) != bp->total_irqs) {
   9231		bnxt_ulp_irq_stop(bp);
   9232		bnxt_clear_int_mode(bp);
   9233		irq_cleared = true;
   9234	}
   9235	rc = __bnxt_reserve_rings(bp);
   9236	if (irq_cleared) {
   9237		if (!rc)
   9238			rc = bnxt_init_int_mode(bp);
   9239		bnxt_ulp_irq_restart(bp, rc);
   9240	}
   9241	if (rc) {
   9242		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
   9243		return rc;
   9244	}
   9245	if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
   9246		netdev_err(bp->dev, "tx ring reservation failure\n");
   9247		netdev_reset_tc(bp->dev);
   9248		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
   9249		return -ENOMEM;
   9250	}
   9251	return 0;
   9252}
   9253
   9254static void bnxt_free_irq(struct bnxt *bp)
   9255{
   9256	struct bnxt_irq *irq;
   9257	int i;
   9258
   9259#ifdef CONFIG_RFS_ACCEL
   9260	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
   9261	bp->dev->rx_cpu_rmap = NULL;
   9262#endif
   9263	if (!bp->irq_tbl || !bp->bnapi)
   9264		return;
   9265
   9266	for (i = 0; i < bp->cp_nr_rings; i++) {
   9267		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
   9268
   9269		irq = &bp->irq_tbl[map_idx];
   9270		if (irq->requested) {
   9271			if (irq->have_cpumask) {
   9272				irq_set_affinity_hint(irq->vector, NULL);
   9273				free_cpumask_var(irq->cpu_mask);
   9274				irq->have_cpumask = 0;
   9275			}
   9276			free_irq(irq->vector, bp->bnapi[i]);
   9277		}
   9278
   9279		irq->requested = 0;
   9280	}
   9281}
   9282
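/* Set up the interrupt mode, then request an IRQ for each completion
 * ring, add RX vectors to the aRFS CPU rmap and give each vector a CPU
 * affinity hint, preferring CPUs on the device's NUMA node.
 */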
   9283static int bnxt_request_irq(struct bnxt *bp)
   9284{
   9285	int i, j, rc = 0;
   9286	unsigned long flags = 0;
   9287#ifdef CONFIG_RFS_ACCEL
   9288	struct cpu_rmap *rmap;
   9289#endif
   9290
   9291	rc = bnxt_setup_int_mode(bp);
   9292	if (rc) {
   9293		netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
   9294			   rc);
   9295		return rc;
   9296	}
   9297#ifdef CONFIG_RFS_ACCEL
   9298	rmap = bp->dev->rx_cpu_rmap;
   9299#endif
   9300	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
   9301		flags = IRQF_SHARED;
   9302
   9303	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
   9304		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
   9305		struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
   9306
   9307#ifdef CONFIG_RFS_ACCEL
   9308		if (rmap && bp->bnapi[i]->rx_ring) {
   9309			rc = irq_cpu_rmap_add(rmap, irq->vector);
   9310			if (rc)
   9311				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
   9312					    j);
   9313			j++;
   9314		}
   9315#endif
   9316		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
   9317				 bp->bnapi[i]);
   9318		if (rc)
   9319			break;
   9320
   9321		irq->requested = 1;
   9322
   9323		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
   9324			int numa_node = dev_to_node(&bp->pdev->dev);
   9325
   9326			irq->have_cpumask = 1;
   9327			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
   9328					irq->cpu_mask);
   9329			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
   9330			if (rc) {
   9331				netdev_warn(bp->dev,
   9332					    "Set affinity failed, IRQ = %d\n",
   9333					    irq->vector);
   9334				break;
   9335			}
   9336		}
   9337	}
   9338	return rc;
   9339}
   9340
   9341static void bnxt_del_napi(struct bnxt *bp)
   9342{
   9343	int i;
   9344
   9345	if (!bp->bnapi)
   9346		return;
   9347
   9348	for (i = 0; i < bp->cp_nr_rings; i++) {
   9349		struct bnxt_napi *bnapi = bp->bnapi[i];
   9350
   9351		__netif_napi_del(&bnapi->napi);
   9352	}
    9353	/* Since we called __netif_napi_del(), we need to respect an RCU
    9354	 * grace period before freeing the napi structures.
    9355	 */
   9356	synchronize_net();
   9357}
   9358
   9359static void bnxt_init_napi(struct bnxt *bp)
   9360{
   9361	int i;
   9362	unsigned int cp_nr_rings = bp->cp_nr_rings;
   9363	struct bnxt_napi *bnapi;
   9364
   9365	if (bp->flags & BNXT_FLAG_USING_MSIX) {
   9366		int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
   9367
   9368		if (bp->flags & BNXT_FLAG_CHIP_P5)
   9369			poll_fn = bnxt_poll_p5;
   9370		else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
   9371			cp_nr_rings--;
   9372		for (i = 0; i < cp_nr_rings; i++) {
   9373			bnapi = bp->bnapi[i];
   9374			netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
   9375		}
   9376		if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
   9377			bnapi = bp->bnapi[cp_nr_rings];
   9378			netif_napi_add(bp->dev, &bnapi->napi,
   9379				       bnxt_poll_nitroa0, 64);
   9380		}
   9381	} else {
   9382		bnapi = bp->bnapi[0];
   9383		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
   9384	}
   9385}
   9386
   9387static void bnxt_disable_napi(struct bnxt *bp)
   9388{
   9389	int i;
   9390
   9391	if (!bp->bnapi ||
   9392	    test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
   9393		return;
   9394
   9395	for (i = 0; i < bp->cp_nr_rings; i++) {
   9396		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
   9397
   9398		napi_disable(&bp->bnapi[i]->napi);
   9399		if (bp->bnapi[i]->rx_ring)
   9400			cancel_work_sync(&cpr->dim.work);
   9401	}
   9402}
   9403
   9404static void bnxt_enable_napi(struct bnxt *bp)
   9405{
   9406	int i;
   9407
   9408	clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
   9409	for (i = 0; i < bp->cp_nr_rings; i++) {
   9410		struct bnxt_napi *bnapi = bp->bnapi[i];
   9411		struct bnxt_cp_ring_info *cpr;
   9412
   9413		cpr = &bnapi->cp_ring;
   9414		if (bnapi->in_reset)
   9415			cpr->sw_stats.rx.rx_resets++;
   9416		bnapi->in_reset = false;
   9417
   9418		if (bnapi->rx_ring) {
   9419			INIT_WORK(&cpr->dim.work, bnxt_dim_work);
   9420			cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
   9421		}
   9422		napi_enable(&bnapi->napi);
   9423	}
   9424}
   9425
   9426void bnxt_tx_disable(struct bnxt *bp)
   9427{
   9428	int i;
   9429	struct bnxt_tx_ring_info *txr;
   9430
   9431	if (bp->tx_ring) {
   9432		for (i = 0; i < bp->tx_nr_rings; i++) {
   9433			txr = &bp->tx_ring[i];
   9434			WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
   9435		}
   9436	}
   9437	/* Make sure napi polls see @dev_state change */
   9438	synchronize_net();
   9439	/* Drop carrier first to prevent TX timeout */
   9440	netif_carrier_off(bp->dev);
   9441	/* Stop all TX queues */
   9442	netif_tx_disable(bp->dev);
   9443}
   9444
   9445void bnxt_tx_enable(struct bnxt *bp)
   9446{
   9447	int i;
   9448	struct bnxt_tx_ring_info *txr;
   9449
   9450	for (i = 0; i < bp->tx_nr_rings; i++) {
   9451		txr = &bp->tx_ring[i];
   9452		WRITE_ONCE(txr->dev_state, 0);
   9453	}
   9454	/* Make sure napi polls see @dev_state change */
   9455	synchronize_net();
   9456	netif_tx_wake_all_queues(bp->dev);
   9457	if (BNXT_LINK_IS_UP(bp))
   9458		netif_carrier_on(bp->dev);
   9459}
   9460
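/* Return a human-readable name for the active FEC mode. */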
   9461static char *bnxt_report_fec(struct bnxt_link_info *link_info)
   9462{
   9463	u8 active_fec = link_info->active_fec_sig_mode &
   9464			PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
   9465
   9466	switch (active_fec) {
   9467	default:
   9468	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
   9469		return "None";
   9470	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
   9471		return "Clause 74 BaseR";
   9472	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
   9473		return "Clause 91 RS(528,514)";
   9474	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
   9475		return "Clause 91 RS544_1XN";
   9476	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
   9477		return "Clause 91 RS(544,514)";
   9478	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
   9479		return "Clause 91 RS272_1XN";
   9480	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
   9481		return "Clause 91 RS(272,257)";
   9482	}
   9483}
   9484
   9485void bnxt_report_link(struct bnxt *bp)
   9486{
   9487	if (BNXT_LINK_IS_UP(bp)) {
   9488		const char *signal = "";
   9489		const char *flow_ctrl;
   9490		const char *duplex;
   9491		u32 speed;
   9492		u16 fec;
   9493
   9494		netif_carrier_on(bp->dev);
   9495		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
   9496		if (speed == SPEED_UNKNOWN) {
   9497			netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
   9498			return;
   9499		}
   9500		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
   9501			duplex = "full";
   9502		else
   9503			duplex = "half";
   9504		if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
   9505			flow_ctrl = "ON - receive & transmit";
   9506		else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
   9507			flow_ctrl = "ON - transmit";
   9508		else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
   9509			flow_ctrl = "ON - receive";
   9510		else
   9511			flow_ctrl = "none";
   9512		if (bp->link_info.phy_qcfg_resp.option_flags &
   9513		    PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
   9514			u8 sig_mode = bp->link_info.active_fec_sig_mode &
   9515				      PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
   9516			switch (sig_mode) {
   9517			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
   9518				signal = "(NRZ) ";
   9519				break;
   9520			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
   9521				signal = "(PAM4) ";
   9522				break;
   9523			default:
   9524				break;
   9525			}
   9526		}
   9527		netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
   9528			    speed, signal, duplex, flow_ctrl);
   9529		if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
   9530			netdev_info(bp->dev, "EEE is %s\n",
   9531				    bp->eee.eee_active ? "active" :
   9532							 "not active");
   9533		fec = bp->link_info.fec_cfg;
   9534		if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
   9535			netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
   9536				    (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
   9537				    bnxt_report_fec(&bp->link_info));
   9538	} else {
   9539		netif_carrier_off(bp->dev);
   9540		netdev_err(bp->dev, "NIC Link is Down\n");
   9541	}
   9542}
   9543
   9544static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
   9545{
   9546	if (!resp->supported_speeds_auto_mode &&
   9547	    !resp->supported_speeds_force_mode &&
   9548	    !resp->supported_pam4_speeds_auto_mode &&
   9549	    !resp->supported_pam4_speeds_force_mode)
   9550		return true;
   9551	return false;
   9552}
   9553
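/* Query the PHY capabilities: EEE support and timers, supported NRZ and
 * PAM4 autoneg speeds, port count, and whether the Ethernet link is
 * disabled (no speeds supported at all).
 */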
   9554static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
   9555{
   9556	struct bnxt_link_info *link_info = &bp->link_info;
   9557	struct hwrm_port_phy_qcaps_output *resp;
   9558	struct hwrm_port_phy_qcaps_input *req;
   9559	int rc = 0;
   9560
   9561	if (bp->hwrm_spec_code < 0x10201)
   9562		return 0;
   9563
   9564	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
   9565	if (rc)
   9566		return rc;
   9567
   9568	resp = hwrm_req_hold(bp, req);
   9569	rc = hwrm_req_send(bp, req);
   9570	if (rc)
   9571		goto hwrm_phy_qcaps_exit;
   9572
   9573	bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
   9574	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
   9575		struct ethtool_eee *eee = &bp->eee;
   9576		u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
   9577
   9578		eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
   9579		bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
   9580				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
   9581		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
   9582				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
   9583	}
   9584
   9585	if (bp->hwrm_spec_code >= 0x10a01) {
   9586		if (bnxt_phy_qcaps_no_speed(resp)) {
   9587			link_info->phy_state = BNXT_PHY_STATE_DISABLED;
   9588			netdev_warn(bp->dev, "Ethernet link disabled\n");
   9589		} else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
   9590			link_info->phy_state = BNXT_PHY_STATE_ENABLED;
   9591			netdev_info(bp->dev, "Ethernet link enabled\n");
   9592			/* Phy re-enabled, reprobe the speeds */
   9593			link_info->support_auto_speeds = 0;
   9594			link_info->support_pam4_auto_speeds = 0;
   9595		}
   9596	}
   9597	if (resp->supported_speeds_auto_mode)
   9598		link_info->support_auto_speeds =
   9599			le16_to_cpu(resp->supported_speeds_auto_mode);
   9600	if (resp->supported_pam4_speeds_auto_mode)
   9601		link_info->support_pam4_auto_speeds =
   9602			le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
   9603
   9604	bp->port_count = resp->port_cnt;
   9605
   9606hwrm_phy_qcaps_exit:
   9607	hwrm_req_drop(bp, req);
   9608	return rc;
   9609}
   9610
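/* Return true if any bit in @advertising is no longer set in @supported. */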
   9611static bool bnxt_support_dropped(u16 advertising, u16 supported)
   9612{
   9613	u16 diff = advertising ^ supported;
   9614
   9615	return ((supported | diff) != supported);
   9616}
   9617
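/* Query PORT_PHY_QCFG and refresh the cached link, pause, speed, EEE and
 * FEC state.  If @chng_link_state is set, update the link state and
 * report any change.  Advertised speeds that are no longer supported are
 * dropped.
 */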
   9618int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
   9619{
   9620	struct bnxt_link_info *link_info = &bp->link_info;
   9621	struct hwrm_port_phy_qcfg_output *resp;
   9622	struct hwrm_port_phy_qcfg_input *req;
   9623	u8 link_state = link_info->link_state;
   9624	bool support_changed = false;
   9625	int rc;
   9626
   9627	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
   9628	if (rc)
   9629		return rc;
   9630
   9631	resp = hwrm_req_hold(bp, req);
   9632	rc = hwrm_req_send(bp, req);
   9633	if (rc) {
   9634		hwrm_req_drop(bp, req);
   9635		if (BNXT_VF(bp) && rc == -ENODEV) {
   9636			netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
   9637			rc = 0;
   9638		}
   9639		return rc;
   9640	}
   9641
   9642	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
   9643	link_info->phy_link_status = resp->link;
   9644	link_info->duplex = resp->duplex_cfg;
   9645	if (bp->hwrm_spec_code >= 0x10800)
   9646		link_info->duplex = resp->duplex_state;
   9647	link_info->pause = resp->pause;
   9648	link_info->auto_mode = resp->auto_mode;
   9649	link_info->auto_pause_setting = resp->auto_pause;
   9650	link_info->lp_pause = resp->link_partner_adv_pause;
   9651	link_info->force_pause_setting = resp->force_pause;
   9652	link_info->duplex_setting = resp->duplex_cfg;
   9653	if (link_info->phy_link_status == BNXT_LINK_LINK)
   9654		link_info->link_speed = le16_to_cpu(resp->link_speed);
   9655	else
   9656		link_info->link_speed = 0;
   9657	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
   9658	link_info->force_pam4_link_speed =
   9659		le16_to_cpu(resp->force_pam4_link_speed);
   9660	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
   9661	link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
   9662	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
   9663	link_info->auto_pam4_link_speeds =
   9664		le16_to_cpu(resp->auto_pam4_link_speed_mask);
   9665	link_info->lp_auto_link_speeds =
   9666		le16_to_cpu(resp->link_partner_adv_speeds);
   9667	link_info->lp_auto_pam4_link_speeds =
   9668		resp->link_partner_pam4_adv_speeds;
   9669	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
   9670	link_info->phy_ver[0] = resp->phy_maj;
   9671	link_info->phy_ver[1] = resp->phy_min;
   9672	link_info->phy_ver[2] = resp->phy_bld;
   9673	link_info->media_type = resp->media_type;
   9674	link_info->phy_type = resp->phy_type;
   9675	link_info->transceiver = resp->xcvr_pkg_type;
   9676	link_info->phy_addr = resp->eee_config_phy_addr &
   9677			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
   9678	link_info->module_status = resp->module_status;
   9679
   9680	if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
   9681		struct ethtool_eee *eee = &bp->eee;
   9682		u16 fw_speeds;
   9683
   9684		eee->eee_active = 0;
   9685		if (resp->eee_config_phy_addr &
   9686		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
   9687			eee->eee_active = 1;
   9688			fw_speeds = le16_to_cpu(
   9689				resp->link_partner_adv_eee_link_speed_mask);
   9690			eee->lp_advertised =
   9691				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
   9692		}
   9693
   9694		/* Pull initial EEE config */
   9695		if (!chng_link_state) {
   9696			if (resp->eee_config_phy_addr &
   9697			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
   9698				eee->eee_enabled = 1;
   9699
   9700			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
   9701			eee->advertised =
   9702				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
   9703
   9704			if (resp->eee_config_phy_addr &
   9705			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
   9706				__le32 tmr;
   9707
   9708				eee->tx_lpi_enabled = 1;
   9709				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
   9710				eee->tx_lpi_timer = le32_to_cpu(tmr) &
   9711					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
   9712			}
   9713		}
   9714	}
   9715
   9716	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
   9717	if (bp->hwrm_spec_code >= 0x10504) {
   9718		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
   9719		link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
   9720	}
   9721	/* TODO: need to add more logic to report VF link */
   9722	if (chng_link_state) {
   9723		if (link_info->phy_link_status == BNXT_LINK_LINK)
   9724			link_info->link_state = BNXT_LINK_STATE_UP;
   9725		else
   9726			link_info->link_state = BNXT_LINK_STATE_DOWN;
   9727		if (link_state != link_info->link_state)
   9728			bnxt_report_link(bp);
   9729	} else {
    9730		/* always report link down if not required to update link state */
   9731		link_info->link_state = BNXT_LINK_STATE_DOWN;
   9732	}
   9733	hwrm_req_drop(bp, req);
   9734
   9735	if (!BNXT_PHY_CFG_ABLE(bp))
   9736		return 0;
   9737
   9738	/* Check if any advertised speeds are no longer supported. The caller
   9739	 * holds the link_lock mutex, so we can modify link_info settings.
   9740	 */
   9741	if (bnxt_support_dropped(link_info->advertising,
   9742				 link_info->support_auto_speeds)) {
   9743		link_info->advertising = link_info->support_auto_speeds;
   9744		support_changed = true;
   9745	}
   9746	if (bnxt_support_dropped(link_info->advertising_pam4,
   9747				 link_info->support_pam4_auto_speeds)) {
   9748		link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
   9749		support_changed = true;
   9750	}
   9751	if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
   9752		bnxt_hwrm_set_link_setting(bp, true, false);
   9753	return 0;
   9754}
   9755
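/* Refresh the link state and warn if an unqualified SFP+ module is
 * detected, including whether its TX is disabled or it has been powered
 * down.
 */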
   9756static void bnxt_get_port_module_status(struct bnxt *bp)
   9757{
   9758	struct bnxt_link_info *link_info = &bp->link_info;
   9759	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
   9760	u8 module_status;
   9761
   9762	if (bnxt_update_link(bp, true))
   9763		return;
   9764
   9765	module_status = link_info->module_status;
   9766	switch (module_status) {
   9767	case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
   9768	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
   9769	case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
   9770		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
   9771			    bp->pf.port_id);
   9772		if (bp->hwrm_spec_code >= 0x10201) {
   9773			netdev_warn(bp->dev, "Module part number %s\n",
   9774				    resp->phy_vendor_partnumber);
   9775		}
   9776		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
   9777			netdev_warn(bp->dev, "TX is disabled\n");
   9778		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
   9779			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
   9780	}
   9781}
   9782
   9783static void
   9784bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
   9785{
   9786	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
   9787		if (bp->hwrm_spec_code >= 0x10201)
   9788			req->auto_pause =
   9789				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
   9790		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
   9791			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
   9792		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
   9793			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
   9794		req->enables |=
   9795			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
   9796	} else {
   9797		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
   9798			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
   9799		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
   9800			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
   9801		req->enables |=
   9802			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
   9803		if (bp->hwrm_spec_code >= 0x10201) {
   9804			req->auto_pause = req->force_pause;
   9805			req->enables |= cpu_to_le32(
   9806				PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
   9807		}
   9808	}
   9809}
   9810
   9811static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
   9812{
   9813	if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
   9814		req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
   9815		if (bp->link_info.advertising) {
   9816			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
   9817			req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
   9818		}
   9819		if (bp->link_info.advertising_pam4) {
   9820			req->enables |=
   9821				cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
   9822			req->auto_link_pam4_speed_mask =
   9823				cpu_to_le16(bp->link_info.advertising_pam4);
   9824		}
   9825		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
   9826		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
   9827	} else {
   9828		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
   9829		if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
   9830			req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
   9831			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
   9832		} else {
   9833			req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
   9834		}
   9835	}
   9836
    9837	/* tell the firmware (ChiMP) that the setting takes effect immediately */
   9838	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
   9839}
   9840
   9841int bnxt_hwrm_set_pause(struct bnxt *bp)
   9842{
   9843	struct hwrm_port_phy_cfg_input *req;
   9844	int rc;
   9845
   9846	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
   9847	if (rc)
   9848		return rc;
   9849
   9850	bnxt_hwrm_set_pause_common(bp, req);
   9851
   9852	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
   9853	    bp->link_info.force_link_chng)
   9854		bnxt_hwrm_set_link_common(bp, req);
   9855
   9856	rc = hwrm_req_send(bp, req);
   9857	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
    9858		/* since changing the pause setting doesn't trigger any link
    9859		 * change event, the driver needs to update the current pause
    9860		 * result upon successful return of the phy_cfg command
    9861		 */
   9862		bp->link_info.pause =
   9863		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
   9864		bp->link_info.auto_pause_setting = 0;
   9865		if (!bp->link_info.force_link_chng)
   9866			bnxt_report_link(bp);
   9867	}
   9868	bp->link_info.force_link_chng = false;
   9869	return rc;
   9870}
   9871
   9872static void bnxt_hwrm_set_eee(struct bnxt *bp,
   9873			      struct hwrm_port_phy_cfg_input *req)
   9874{
   9875	struct ethtool_eee *eee = &bp->eee;
   9876
   9877	if (eee->eee_enabled) {
   9878		u16 eee_speeds;
   9879		u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
   9880
   9881		if (eee->tx_lpi_enabled)
   9882			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
   9883		else
   9884			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
   9885
   9886		req->flags |= cpu_to_le32(flags);
   9887		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
   9888		req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
   9889		req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
   9890	} else {
   9891		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
   9892	}
   9893}
   9894
   9895int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
   9896{
   9897	struct hwrm_port_phy_cfg_input *req;
   9898	int rc;
   9899
   9900	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
   9901	if (rc)
   9902		return rc;
   9903
   9904	if (set_pause)
   9905		bnxt_hwrm_set_pause_common(bp, req);
   9906
   9907	bnxt_hwrm_set_link_common(bp, req);
   9908
   9909	if (set_eee)
   9910		bnxt_hwrm_set_eee(bp, req);
   9911	return hwrm_req_send(bp, req);
   9912}
   9913
   9914static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
   9915{
   9916	struct hwrm_port_phy_cfg_input *req;
   9917	int rc;
   9918
   9919	if (!BNXT_SINGLE_PF(bp))
   9920		return 0;
   9921
   9922	if (pci_num_vf(bp->pdev) &&
   9923	    !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
   9924		return 0;
   9925
   9926	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
   9927	if (rc)
   9928		return rc;
   9929
   9930	req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
   9931	rc = hwrm_req_send(bp, req);
   9932	if (!rc) {
   9933		mutex_lock(&bp->link_lock);
    9934		/* The device is not obliged to bring the link down in certain
    9935		 * scenarios, even when forced.  Setting the state to unknown is
    9936		 * consistent with driver startup and forces the link state to be
    9937		 * reported during a subsequent open based on PORT_PHY_QCFG.
    9938		 */
   9939		bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
   9940		mutex_unlock(&bp->link_lock);
   9941	}
   9942	return rc;
   9943}
   9944
   9945static int bnxt_fw_reset_via_optee(struct bnxt *bp)
   9946{
   9947#ifdef CONFIG_TEE_BNXT_FW
   9948	int rc = tee_bnxt_fw_load();
   9949
   9950	if (rc)
   9951		netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
   9952
   9953	return rc;
   9954#else
   9955	netdev_err(bp->dev, "OP-TEE not supported\n");
   9956	return -ENODEV;
   9957#endif
   9958}
   9959
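/* Poll the firmware health status until it leaves the booting or
 * recovering states.  If the firmware crashed with no master function,
 * request a reset via OP-TEE.
 */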
   9960static int bnxt_try_recover_fw(struct bnxt *bp)
   9961{
   9962	if (bp->fw_health && bp->fw_health->status_reliable) {
   9963		int retry = 0, rc;
   9964		u32 sts;
   9965
   9966		do {
   9967			sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
   9968			rc = bnxt_hwrm_poll(bp);
   9969			if (!BNXT_FW_IS_BOOTING(sts) &&
   9970			    !BNXT_FW_IS_RECOVERING(sts))
   9971				break;
   9972			retry++;
   9973		} while (rc == -EBUSY && retry < BNXT_FW_RETRY);
   9974
   9975		if (!BNXT_FW_IS_HEALTHY(sts)) {
   9976			netdev_err(bp->dev,
   9977				   "Firmware not responding, status: 0x%x\n",
   9978				   sts);
   9979			rc = -ENODEV;
   9980		}
   9981		if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
   9982			netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
   9983			return bnxt_fw_reset_via_optee(bp);
   9984		}
   9985		return rc;
   9986	}
   9987
   9988	return -ENODEV;
   9989}
   9990
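/* Re-query resource capabilities and clear the cached resource
 * reservations; the requested ring counts are also cleared unless this
 * is a firmware reset.
 */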
   9991int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
   9992{
   9993	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
   9994	int rc;
   9995
   9996	if (!BNXT_NEW_RM(bp))
   9997		return 0; /* no resource reservations required */
   9998
   9999	rc = bnxt_hwrm_func_resc_qcaps(bp, true);
  10000	if (rc)
  10001		netdev_err(bp->dev, "resc_qcaps failed\n");
  10002
  10003	hw_resc->resv_cp_rings = 0;
  10004	hw_resc->resv_stat_ctxs = 0;
  10005	hw_resc->resv_irqs = 0;
  10006	hw_resc->resv_tx_rings = 0;
  10007	hw_resc->resv_rx_rings = 0;
  10008	hw_resc->resv_hw_ring_grps = 0;
  10009	hw_resc->resv_vnics = 0;
  10010	if (!fw_reset) {
  10011		bp->tx_nr_rings = 0;
  10012		bp->rx_nr_rings = 0;
  10013	}
  10014
  10015	return rc;
  10016}
  10017
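/* Notify the firmware that the interface is going up or down.  On the
 * way up, detect whether the firmware was reset or resources changed
 * while the interface was down and reinitialize accordingly.
 */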
  10018static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
  10019{
  10020	struct hwrm_func_drv_if_change_output *resp;
  10021	struct hwrm_func_drv_if_change_input *req;
  10022	bool fw_reset = !bp->irq_tbl;
  10023	bool resc_reinit = false;
  10024	int rc, retry = 0;
  10025	u32 flags = 0;
  10026
  10027	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
  10028		return 0;
  10029
  10030	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
  10031	if (rc)
  10032		return rc;
  10033
  10034	if (up)
  10035		req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
  10036	resp = hwrm_req_hold(bp, req);
  10037
  10038	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
  10039	while (retry < BNXT_FW_IF_RETRY) {
  10040		rc = hwrm_req_send(bp, req);
  10041		if (rc != -EAGAIN)
  10042			break;
  10043
  10044		msleep(50);
  10045		retry++;
  10046	}
  10047
  10048	if (rc == -EAGAIN) {
  10049		hwrm_req_drop(bp, req);
  10050		return rc;
  10051	} else if (!rc) {
  10052		flags = le32_to_cpu(resp->flags);
  10053	} else if (up) {
  10054		rc = bnxt_try_recover_fw(bp);
  10055		fw_reset = true;
  10056	}
  10057	hwrm_req_drop(bp, req);
  10058	if (rc)
  10059		return rc;
  10060
  10061	if (!up) {
  10062		bnxt_inv_fw_health_reg(bp);
  10063		return 0;
  10064	}
  10065
  10066	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
  10067		resc_reinit = true;
  10068	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
  10069		fw_reset = true;
  10070	else
  10071		bnxt_remap_fw_health_regs(bp);
  10072
  10073	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
  10074		netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
  10075		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
  10076		return -ENODEV;
  10077	}
  10078	if (resc_reinit || fw_reset) {
  10079		if (fw_reset) {
  10080			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
  10081			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
  10082				bnxt_ulp_stop(bp);
  10083			bnxt_free_ctx_mem(bp);
  10084			kfree(bp->ctx);
  10085			bp->ctx = NULL;
  10086			bnxt_dcb_free(bp);
  10087			rc = bnxt_fw_init_one(bp);
  10088			if (rc) {
  10089				clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
  10090				set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
  10091				return rc;
  10092			}
  10093			bnxt_clear_int_mode(bp);
  10094			rc = bnxt_init_int_mode(bp);
  10095			if (rc) {
  10096				clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
  10097				netdev_err(bp->dev, "init int mode failed\n");
  10098				return rc;
  10099			}
  10100		}
  10101		rc = bnxt_cancel_reservations(bp, fw_reset);
  10102	}
  10103	return rc;
  10104}
  10105
  10106static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
  10107{
  10108	struct hwrm_port_led_qcaps_output *resp;
  10109	struct hwrm_port_led_qcaps_input *req;
  10110	struct bnxt_pf_info *pf = &bp->pf;
  10111	int rc;
  10112
  10113	bp->num_leds = 0;
  10114	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
  10115		return 0;
  10116
  10117	rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
  10118	if (rc)
  10119		return rc;
  10120
  10121	req->port_id = cpu_to_le16(pf->port_id);
  10122	resp = hwrm_req_hold(bp, req);
  10123	rc = hwrm_req_send(bp, req);
  10124	if (rc) {
  10125		hwrm_req_drop(bp, req);
  10126		return rc;
  10127	}
  10128	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
  10129		int i;
  10130
  10131		bp->num_leds = resp->num_leds;
  10132		memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
  10133						 bp->num_leds);
  10134		for (i = 0; i < bp->num_leds; i++) {
  10135			struct bnxt_led_info *led = &bp->leds[i];
  10136			__le16 caps = led->led_state_caps;
  10137
  10138			if (!led->led_group_id ||
  10139			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
  10140				bp->num_leds = 0;
  10141				break;
  10142			}
  10143		}
  10144	}
  10145	hwrm_req_drop(bp, req);
  10146	return 0;
  10147}
  10148
  10149int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
  10150{
  10151	struct hwrm_wol_filter_alloc_output *resp;
  10152	struct hwrm_wol_filter_alloc_input *req;
  10153	int rc;
  10154
  10155	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
  10156	if (rc)
  10157		return rc;
  10158
  10159	req->port_id = cpu_to_le16(bp->pf.port_id);
  10160	req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
  10161	req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
  10162	memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
  10163
  10164	resp = hwrm_req_hold(bp, req);
  10165	rc = hwrm_req_send(bp, req);
  10166	if (!rc)
  10167		bp->wol_filter_id = resp->wol_filter_id;
  10168	hwrm_req_drop(bp, req);
  10169	return rc;
  10170}
  10171
  10172int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
  10173{
  10174	struct hwrm_wol_filter_free_input *req;
  10175	int rc;
  10176
  10177	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
  10178	if (rc)
  10179		return rc;
  10180
  10181	req->port_id = cpu_to_le16(bp->pf.port_id);
  10182	req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
  10183	req->wol_filter_id = bp->wol_filter_id;
  10184
  10185	return hwrm_req_send(bp, req);
  10186}
  10187
  10188static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
  10189{
  10190	struct hwrm_wol_filter_qcfg_output *resp;
  10191	struct hwrm_wol_filter_qcfg_input *req;
  10192	u16 next_handle = 0;
  10193	int rc;
  10194
  10195	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
  10196	if (rc)
  10197		return rc;
  10198
  10199	req->port_id = cpu_to_le16(bp->pf.port_id);
  10200	req->handle = cpu_to_le16(handle);
  10201	resp = hwrm_req_hold(bp, req);
  10202	rc = hwrm_req_send(bp, req);
  10203	if (!rc) {
  10204		next_handle = le16_to_cpu(resp->next_handle);
  10205		if (next_handle != 0) {
  10206			if (resp->wol_type ==
  10207			    WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
  10208				bp->wol = 1;
  10209				bp->wol_filter_id = resp->wol_filter_id;
  10210			}
  10211		}
  10212	}
  10213	hwrm_req_drop(bp, req);
  10214	return next_handle;
  10215}
  10216
  10217static void bnxt_get_wol_settings(struct bnxt *bp)
  10218{
  10219	u16 handle = 0;
  10220
  10221	bp->wol = 0;
  10222	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
  10223		return;
  10224
  10225	do {
  10226		handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
  10227	} while (handle && handle != 0xffff);
  10228}
  10229
  10230#ifdef CONFIG_BNXT_HWMON
  10231static ssize_t bnxt_show_temp(struct device *dev,
  10232			      struct device_attribute *devattr, char *buf)
  10233{
  10234	struct hwrm_temp_monitor_query_output *resp;
  10235	struct hwrm_temp_monitor_query_input *req;
  10236	struct bnxt *bp = dev_get_drvdata(dev);
  10237	u32 len = 0;
  10238	int rc;
  10239
  10240	rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
  10241	if (rc)
  10242		return rc;
  10243	resp = hwrm_req_hold(bp, req);
  10244	rc = hwrm_req_send(bp, req);
  10245	if (!rc)
  10246		len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
  10247	hwrm_req_drop(bp, req);
  10248	if (rc)
  10249		return rc;
  10250	return len;
  10251}
  10252static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
  10253
  10254static struct attribute *bnxt_attrs[] = {
  10255	&sensor_dev_attr_temp1_input.dev_attr.attr,
  10256	NULL
  10257};
  10258ATTRIBUTE_GROUPS(bnxt);
  10259
  10260static void bnxt_hwmon_close(struct bnxt *bp)
  10261{
  10262	if (bp->hwmon_dev) {
  10263		hwmon_device_unregister(bp->hwmon_dev);
  10264		bp->hwmon_dev = NULL;
  10265	}
  10266}
  10267
  10268static void bnxt_hwmon_open(struct bnxt *bp)
  10269{
  10270	struct hwrm_temp_monitor_query_input *req;
  10271	struct pci_dev *pdev = bp->pdev;
  10272	int rc;
  10273
  10274	rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
  10275	if (!rc)
  10276		rc = hwrm_req_send_silent(bp, req);
  10277	if (rc == -EACCES || rc == -EOPNOTSUPP) {
  10278		bnxt_hwmon_close(bp);
  10279		return;
  10280	}
  10281
  10282	if (bp->hwmon_dev)
  10283		return;
  10284
  10285	bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
  10286							  DRV_MODULE_NAME, bp,
  10287							  bnxt_groups);
  10288	if (IS_ERR(bp->hwmon_dev)) {
  10289		bp->hwmon_dev = NULL;
  10290		dev_warn(&pdev->dev, "Cannot register hwmon device\n");
  10291	}
  10292}
  10293#else
  10294static void bnxt_hwmon_close(struct bnxt *bp)
  10295{
  10296}
  10297
  10298static void bnxt_hwmon_open(struct bnxt *bp)
  10299{
  10300}
  10301#endif
  10302
  10303static bool bnxt_eee_config_ok(struct bnxt *bp)
  10304{
  10305	struct ethtool_eee *eee = &bp->eee;
  10306	struct bnxt_link_info *link_info = &bp->link_info;
  10307
  10308	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
  10309		return true;
  10310
  10311	if (eee->eee_enabled) {
  10312		u32 advertising =
  10313			_bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
  10314
  10315		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
  10316			eee->eee_enabled = 0;
  10317			return false;
  10318		}
  10319		if (eee->advertised & ~advertising) {
  10320			eee->advertised = advertising & eee->supported;
  10321			return false;
  10322		}
  10323	}
  10324	return true;
  10325}
  10326
  10327static int bnxt_update_phy_setting(struct bnxt *bp)
  10328{
  10329	int rc;
  10330	bool update_link = false;
  10331	bool update_pause = false;
  10332	bool update_eee = false;
  10333	struct bnxt_link_info *link_info = &bp->link_info;
  10334
  10335	rc = bnxt_update_link(bp, true);
  10336	if (rc) {
  10337		netdev_err(bp->dev, "failed to update link (rc: %x)\n",
  10338			   rc);
  10339		return rc;
  10340	}
  10341	if (!BNXT_SINGLE_PF(bp))
  10342		return 0;
  10343
  10344	if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
  10345	    (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
  10346	    link_info->req_flow_ctrl)
  10347		update_pause = true;
  10348	if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
  10349	    link_info->force_pause_setting != link_info->req_flow_ctrl)
  10350		update_pause = true;
  10351	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
  10352		if (BNXT_AUTO_MODE(link_info->auto_mode))
  10353			update_link = true;
  10354		if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
  10355		    link_info->req_link_speed != link_info->force_link_speed)
  10356			update_link = true;
  10357		else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
  10358			 link_info->req_link_speed != link_info->force_pam4_link_speed)
  10359			update_link = true;
  10360		if (link_info->req_duplex != link_info->duplex_setting)
  10361			update_link = true;
  10362	} else {
  10363		if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
  10364			update_link = true;
  10365		if (link_info->advertising != link_info->auto_link_speeds ||
  10366		    link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
  10367			update_link = true;
  10368	}
  10369
   10370	/* The last close may have shut down the link, so we need to call
  10371	 * PHY_CFG to bring it back up.
  10372	 */
  10373	if (!BNXT_LINK_IS_UP(bp))
  10374		update_link = true;
  10375
  10376	if (!bnxt_eee_config_ok(bp))
  10377		update_eee = true;
  10378
  10379	if (update_link)
  10380		rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
  10381	else if (update_pause)
  10382		rc = bnxt_hwrm_set_pause(bp);
  10383	if (rc) {
  10384		netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
  10385			   rc);
  10386		return rc;
  10387	}
  10388
  10389	return rc;
  10390}
  10391
  10392/* Common routine to pre-map certain register block to different GRC window.
  10393 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
  10394 * in PF and 3 windows in VF that can be customized to map in different
  10395 * register blocks.
  10396 */
  10397static void bnxt_preset_reg_win(struct bnxt *bp)
  10398{
  10399	if (BNXT_PF(bp)) {
  10400		/* CAG registers map to GRC window #4 */
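		/* Worked example, assuming 4-byte window-select registers
		 * with window #1 at offset 0: window #4 then lives at
		 * offset (4 - 1) * 4 = 12 from
		 * BNXT_GRCPF_REG_WINDOW_BASE_OUT, which matches the offset
		 * used in the writel() below.
		 */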
  10401		writel(BNXT_CAG_REG_BASE,
  10402		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
  10403	}
  10404}
  10405
  10406static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
  10407
  10408static int bnxt_reinit_after_abort(struct bnxt *bp)
  10409{
  10410	int rc;
  10411
  10412	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
  10413		return -EBUSY;
  10414
  10415	if (bp->dev->reg_state == NETREG_UNREGISTERED)
  10416		return -ENODEV;
  10417
  10418	rc = bnxt_fw_init_one(bp);
  10419	if (!rc) {
  10420		bnxt_clear_int_mode(bp);
  10421		rc = bnxt_init_int_mode(bp);
  10422		if (!rc) {
  10423			clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
  10424			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
  10425		}
  10426	}
  10427	return rc;
  10428}
  10429
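/* Core open path, called from bnxt_open() and bnxt_open_nic() (a summary
 * of the sequence below): reserve rings, allocate ring memory, set up
 * NAPI and IRQs when irq_re_init is set, initialize the NIC via HWRM,
 * bring the PHY up when link_re_init is set, then enable NAPI, interrupts
 * and the TX queues before starting the periodic timer and re-opening the
 * VF representors and PTP state.
 */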
  10430static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
  10431{
  10432	int rc = 0;
  10433
  10434	bnxt_preset_reg_win(bp);
  10435	netif_carrier_off(bp->dev);
  10436	if (irq_re_init) {
  10437		/* Reserve rings now if none were reserved at driver probe. */
  10438		rc = bnxt_init_dflt_ring_mode(bp);
  10439		if (rc) {
  10440			netdev_err(bp->dev, "Failed to reserve default rings at open\n");
  10441			return rc;
  10442		}
  10443	}
  10444	rc = bnxt_reserve_rings(bp, irq_re_init);
  10445	if (rc)
  10446		return rc;
  10447	if ((bp->flags & BNXT_FLAG_RFS) &&
  10448	    !(bp->flags & BNXT_FLAG_USING_MSIX)) {
  10449		/* disable RFS if falling back to INTA */
  10450		bp->dev->hw_features &= ~NETIF_F_NTUPLE;
  10451		bp->flags &= ~BNXT_FLAG_RFS;
  10452	}
  10453
  10454	rc = bnxt_alloc_mem(bp, irq_re_init);
  10455	if (rc) {
  10456		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
  10457		goto open_err_free_mem;
  10458	}
  10459
  10460	if (irq_re_init) {
  10461		bnxt_init_napi(bp);
  10462		rc = bnxt_request_irq(bp);
  10463		if (rc) {
  10464			netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
  10465			goto open_err_irq;
  10466		}
  10467	}
  10468
  10469	rc = bnxt_init_nic(bp, irq_re_init);
  10470	if (rc) {
  10471		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
  10472		goto open_err_irq;
  10473	}
  10474
  10475	bnxt_enable_napi(bp);
  10476	bnxt_debug_dev_init(bp);
  10477
  10478	if (link_re_init) {
  10479		mutex_lock(&bp->link_lock);
  10480		rc = bnxt_update_phy_setting(bp);
  10481		mutex_unlock(&bp->link_lock);
  10482		if (rc) {
  10483			netdev_warn(bp->dev, "failed to update phy settings\n");
  10484			if (BNXT_SINGLE_PF(bp)) {
  10485				bp->link_info.phy_retry = true;
  10486				bp->link_info.phy_retry_expires =
  10487					jiffies + 5 * HZ;
  10488			}
  10489		}
  10490	}
  10491
  10492	if (irq_re_init)
  10493		udp_tunnel_nic_reset_ntf(bp->dev);
  10494
  10495	if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
  10496		if (!static_key_enabled(&bnxt_xdp_locking_key))
  10497			static_branch_enable(&bnxt_xdp_locking_key);
  10498	} else if (static_key_enabled(&bnxt_xdp_locking_key)) {
  10499		static_branch_disable(&bnxt_xdp_locking_key);
  10500	}
  10501	set_bit(BNXT_STATE_OPEN, &bp->state);
  10502	bnxt_enable_int(bp);
  10503	/* Enable TX queues */
  10504	bnxt_tx_enable(bp);
  10505	mod_timer(&bp->timer, jiffies + bp->current_interval);
   10506	/* Poll the link and check the SFP+ module status */
  10507	mutex_lock(&bp->link_lock);
  10508	bnxt_get_port_module_status(bp);
  10509	mutex_unlock(&bp->link_lock);
  10510
  10511	/* VF-reps may need to be re-opened after the PF is re-opened */
  10512	if (BNXT_PF(bp))
  10513		bnxt_vf_reps_open(bp);
  10514	bnxt_ptp_init_rtc(bp, true);
  10515	bnxt_ptp_cfg_tstamp_filters(bp);
  10516	return 0;
  10517
  10518open_err_irq:
  10519	bnxt_del_napi(bp);
  10520
  10521open_err_free_mem:
  10522	bnxt_free_skbs(bp);
  10523	bnxt_free_irq(bp);
  10524	bnxt_free_mem(bp, true);
  10525	return rc;
  10526}
  10527
  10528/* rtnl_lock held */
  10529int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
  10530{
  10531	int rc = 0;
  10532
  10533	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
  10534		rc = -EIO;
  10535	if (!rc)
  10536		rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
  10537	if (rc) {
  10538		netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
  10539		dev_close(bp->dev);
  10540	}
  10541	return rc;
  10542}
  10543
  10544/* rtnl_lock held, open the NIC half way by allocating all resources, but
  10545 * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
  10546 * self tests.
  10547 */
  10548int bnxt_half_open_nic(struct bnxt *bp)
  10549{
  10550	int rc = 0;
  10551
  10552	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
  10553		netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
  10554		rc = -ENODEV;
  10555		goto half_open_err;
  10556	}
  10557
  10558	rc = bnxt_alloc_mem(bp, true);
  10559	if (rc) {
  10560		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
  10561		goto half_open_err;
  10562	}
  10563	set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
  10564	rc = bnxt_init_nic(bp, true);
  10565	if (rc) {
  10566		clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
  10567		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
  10568		goto half_open_err;
  10569	}
  10570	return 0;
  10571
  10572half_open_err:
  10573	bnxt_free_skbs(bp);
  10574	bnxt_free_mem(bp, true);
  10575	dev_close(bp->dev);
  10576	return rc;
  10577}
  10578
  10579/* rtnl_lock held, this call can only be made after a previous successful
  10580 * call to bnxt_half_open_nic().
  10581 */
  10582void bnxt_half_close_nic(struct bnxt *bp)
  10583{
  10584	bnxt_hwrm_resource_free(bp, false, true);
  10585	bnxt_free_skbs(bp);
  10586	bnxt_free_mem(bp, true);
  10587	clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
  10588}
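
/* Typical usage of the half open/close pair above (a sketch based on the
 * comments above; the actual ethtool self-test caller is outside this
 * excerpt):
 *
 *	rtnl_lock();
 *	rc = bnxt_half_open_nic(bp);
 *	if (!rc) {
 *		(run offline loopback tests while NAPI/IRQ/TX stay disabled)
 *		bnxt_half_close_nic(bp);
 *	}
 *	rtnl_unlock();
 */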
  10589
  10590void bnxt_reenable_sriov(struct bnxt *bp)
  10591{
  10592	if (BNXT_PF(bp)) {
  10593		struct bnxt_pf_info *pf = &bp->pf;
  10594		int n = pf->active_vfs;
  10595
  10596		if (n)
  10597			bnxt_cfg_hw_sriov(bp, &n, true);
  10598	}
  10599}
  10600
  10601static int bnxt_open(struct net_device *dev)
  10602{
  10603	struct bnxt *bp = netdev_priv(dev);
  10604	int rc;
  10605
  10606	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
  10607		rc = bnxt_reinit_after_abort(bp);
  10608		if (rc) {
  10609			if (rc == -EBUSY)
  10610				netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
  10611			else
  10612				netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
  10613			return -ENODEV;
  10614		}
  10615	}
  10616
  10617	rc = bnxt_hwrm_if_change(bp, true);
  10618	if (rc)
  10619		return rc;
  10620
  10621	rc = __bnxt_open_nic(bp, true, true);
  10622	if (rc) {
  10623		bnxt_hwrm_if_change(bp, false);
  10624	} else {
  10625		if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
  10626			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
  10627				bnxt_ulp_start(bp, 0);
  10628				bnxt_reenable_sriov(bp);
  10629			}
  10630		}
  10631		bnxt_hwmon_open(bp);
  10632	}
  10633
  10634	return rc;
  10635}
  10636
  10637static bool bnxt_drv_busy(struct bnxt *bp)
  10638{
  10639	return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
  10640		test_bit(BNXT_STATE_READ_STATS, &bp->state));
  10641}
  10642
  10643static void bnxt_get_ring_stats(struct bnxt *bp,
  10644				struct rtnl_link_stats64 *stats);
  10645
  10646static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
  10647			     bool link_re_init)
  10648{
  10649	/* Close the VF-reps before closing PF */
  10650	if (BNXT_PF(bp))
  10651		bnxt_vf_reps_close(bp);
  10652
   10653	/* Change device state to avoid TX queue wake-ups */
  10654	bnxt_tx_disable(bp);
  10655
  10656	clear_bit(BNXT_STATE_OPEN, &bp->state);
  10657	smp_mb__after_atomic();
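	/* The barrier above pairs with the corresponding barrier in
	 * bnxt_get_stats64(): that path sets BNXT_STATE_READ_STATS before
	 * testing BNXT_STATE_OPEN, while this path clears BNXT_STATE_OPEN
	 * before polling bnxt_drv_busy(), so a concurrent stats reader is
	 * either waited for here or sees the device as closed.
	 */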
  10658	while (bnxt_drv_busy(bp))
  10659		msleep(20);
  10660
   10661	/* Flush rings and disable interrupts */
  10662	bnxt_shutdown_nic(bp, irq_re_init);
  10663
  10664	/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
  10665
  10666	bnxt_debug_dev_exit(bp);
  10667	bnxt_disable_napi(bp);
  10668	del_timer_sync(&bp->timer);
  10669	bnxt_free_skbs(bp);
  10670
  10671	/* Save ring stats before shutdown */
  10672	if (bp->bnapi && irq_re_init)
  10673		bnxt_get_ring_stats(bp, &bp->net_stats_prev);
  10674	if (irq_re_init) {
  10675		bnxt_free_irq(bp);
  10676		bnxt_del_napi(bp);
  10677	}
  10678	bnxt_free_mem(bp, irq_re_init);
  10679}
  10680
  10681int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
  10682{
  10683	int rc = 0;
  10684
  10685	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
  10686		/* If we get here, it means firmware reset is in progress
  10687		 * while we are trying to close.  We can safely proceed with
  10688		 * the close because we are holding rtnl_lock().  Some firmware
  10689		 * messages may fail as we proceed to close.  We set the
  10690		 * ABORT_ERR flag here so that the FW reset thread will later
  10691		 * abort when it gets the rtnl_lock() and sees the flag.
  10692		 */
  10693		netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
  10694		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
  10695	}
  10696
  10697#ifdef CONFIG_BNXT_SRIOV
  10698	if (bp->sriov_cfg) {
  10699		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
  10700						      !bp->sriov_cfg,
  10701						      BNXT_SRIOV_CFG_WAIT_TMO);
  10702		if (rc)
  10703			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
  10704	}
  10705#endif
  10706	__bnxt_close_nic(bp, irq_re_init, link_re_init);
  10707	return rc;
  10708}
  10709
  10710static int bnxt_close(struct net_device *dev)
  10711{
  10712	struct bnxt *bp = netdev_priv(dev);
  10713
  10714	bnxt_hwmon_close(bp);
  10715	bnxt_close_nic(bp, true, true);
  10716	bnxt_hwrm_shutdown_link(bp);
  10717	bnxt_hwrm_if_change(bp, false);
  10718	return 0;
  10719}
  10720
  10721static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
  10722				   u16 *val)
  10723{
  10724	struct hwrm_port_phy_mdio_read_output *resp;
  10725	struct hwrm_port_phy_mdio_read_input *req;
  10726	int rc;
  10727
  10728	if (bp->hwrm_spec_code < 0x10a00)
  10729		return -EOPNOTSUPP;
  10730
  10731	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
  10732	if (rc)
  10733		return rc;
  10734
  10735	req->port_id = cpu_to_le16(bp->pf.port_id);
  10736	req->phy_addr = phy_addr;
  10737	req->reg_addr = cpu_to_le16(reg & 0x1f);
  10738	if (mdio_phy_id_is_c45(phy_addr)) {
  10739		req->cl45_mdio = 1;
  10740		req->phy_addr = mdio_phy_id_prtad(phy_addr);
  10741		req->dev_addr = mdio_phy_id_devad(phy_addr);
  10742		req->reg_addr = cpu_to_le16(reg);
  10743	}
  10744
  10745	resp = hwrm_req_hold(bp, req);
  10746	rc = hwrm_req_send(bp, req);
  10747	if (!rc)
  10748		*val = le16_to_cpu(resp->reg_data);
  10749	hwrm_req_drop(bp, req);
  10750	return rc;
  10751}
  10752
  10753static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
  10754				    u16 val)
  10755{
  10756	struct hwrm_port_phy_mdio_write_input *req;
  10757	int rc;
  10758
  10759	if (bp->hwrm_spec_code < 0x10a00)
  10760		return -EOPNOTSUPP;
  10761
  10762	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
  10763	if (rc)
  10764		return rc;
  10765
  10766	req->port_id = cpu_to_le16(bp->pf.port_id);
  10767	req->phy_addr = phy_addr;
  10768	req->reg_addr = cpu_to_le16(reg & 0x1f);
  10769	if (mdio_phy_id_is_c45(phy_addr)) {
  10770		req->cl45_mdio = 1;
  10771		req->phy_addr = mdio_phy_id_prtad(phy_addr);
  10772		req->dev_addr = mdio_phy_id_devad(phy_addr);
  10773		req->reg_addr = cpu_to_le16(reg);
  10774	}
  10775	req->reg_data = cpu_to_le16(val);
  10776
  10777	return hwrm_req_send(bp, req);
  10778}
  10779
  10780/* rtnl_lock held */
  10781static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  10782{
  10783	struct mii_ioctl_data *mdio = if_mii(ifr);
  10784	struct bnxt *bp = netdev_priv(dev);
  10785	int rc;
  10786
  10787	switch (cmd) {
  10788	case SIOCGMIIPHY:
  10789		mdio->phy_id = bp->link_info.phy_addr;
  10790
  10791		fallthrough;
  10792	case SIOCGMIIREG: {
  10793		u16 mii_regval = 0;
  10794
  10795		if (!netif_running(dev))
  10796			return -EAGAIN;
  10797
  10798		rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
  10799					     &mii_regval);
  10800		mdio->val_out = mii_regval;
  10801		return rc;
  10802	}
  10803
  10804	case SIOCSMIIREG:
  10805		if (!netif_running(dev))
  10806			return -EAGAIN;
  10807
  10808		return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
  10809						mdio->val_in);
  10810
  10811	case SIOCSHWTSTAMP:
  10812		return bnxt_hwtstamp_set(dev, ifr);
  10813
  10814	case SIOCGHWTSTAMP:
  10815		return bnxt_hwtstamp_get(dev, ifr);
  10816
  10817	default:
  10818		/* do nothing */
  10819		break;
  10820	}
  10821	return -EOPNOTSUPP;
  10822}
  10823
  10824static void bnxt_get_ring_stats(struct bnxt *bp,
  10825				struct rtnl_link_stats64 *stats)
  10826{
  10827	int i;
  10828
  10829	for (i = 0; i < bp->cp_nr_rings; i++) {
  10830		struct bnxt_napi *bnapi = bp->bnapi[i];
  10831		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  10832		u64 *sw = cpr->stats.sw_stats;
  10833
  10834		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
  10835		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
  10836		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
  10837
  10838		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
  10839		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
  10840		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
  10841
  10842		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
  10843		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
  10844		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
  10845
  10846		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
  10847		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
  10848		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
  10849
  10850		stats->rx_missed_errors +=
  10851			BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
  10852
  10853		stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
  10854
  10855		stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
  10856
  10857		stats->rx_dropped +=
  10858			cpr->sw_stats.rx.rx_netpoll_discards +
  10859			cpr->sw_stats.rx.rx_oom_discards;
  10860	}
  10861}
  10862
  10863static void bnxt_add_prev_stats(struct bnxt *bp,
  10864				struct rtnl_link_stats64 *stats)
  10865{
  10866	struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
  10867
  10868	stats->rx_packets += prev_stats->rx_packets;
  10869	stats->tx_packets += prev_stats->tx_packets;
  10870	stats->rx_bytes += prev_stats->rx_bytes;
  10871	stats->tx_bytes += prev_stats->tx_bytes;
  10872	stats->rx_missed_errors += prev_stats->rx_missed_errors;
  10873	stats->multicast += prev_stats->multicast;
  10874	stats->rx_dropped += prev_stats->rx_dropped;
  10875	stats->tx_dropped += prev_stats->tx_dropped;
  10876}
  10877
  10878static void
  10879bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
  10880{
  10881	struct bnxt *bp = netdev_priv(dev);
  10882
  10883	set_bit(BNXT_STATE_READ_STATS, &bp->state);
  10884	/* Make sure bnxt_close_nic() sees that we are reading stats before
  10885	 * we check the BNXT_STATE_OPEN flag.
  10886	 */
  10887	smp_mb__after_atomic();
  10888	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
  10889		clear_bit(BNXT_STATE_READ_STATS, &bp->state);
  10890		*stats = bp->net_stats_prev;
  10891		return;
  10892	}
  10893
  10894	bnxt_get_ring_stats(bp, stats);
  10895	bnxt_add_prev_stats(bp, stats);
  10896
  10897	if (bp->flags & BNXT_FLAG_PORT_STATS) {
  10898		u64 *rx = bp->port_stats.sw_stats;
  10899		u64 *tx = bp->port_stats.sw_stats +
  10900			  BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
  10901
  10902		stats->rx_crc_errors =
  10903			BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
  10904		stats->rx_frame_errors =
  10905			BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
  10906		stats->rx_length_errors =
  10907			BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
  10908			BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
  10909			BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
  10910		stats->rx_errors =
  10911			BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
  10912			BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
  10913		stats->collisions =
  10914			BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
  10915		stats->tx_fifo_errors =
  10916			BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
  10917		stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
  10918	}
  10919	clear_bit(BNXT_STATE_READ_STATS, &bp->state);
  10920}
  10921
  10922static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
  10923{
  10924	struct net_device *dev = bp->dev;
  10925	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  10926	struct netdev_hw_addr *ha;
  10927	u8 *haddr;
  10928	int mc_count = 0;
  10929	bool update = false;
  10930	int off = 0;
  10931
  10932	netdev_for_each_mc_addr(ha, dev) {
  10933		if (mc_count >= BNXT_MAX_MC_ADDRS) {
  10934			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
  10935			vnic->mc_list_count = 0;
  10936			return false;
  10937		}
  10938		haddr = ha->addr;
  10939		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
  10940			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
  10941			update = true;
  10942		}
  10943		off += ETH_ALEN;
  10944		mc_count++;
  10945	}
  10946	if (mc_count)
  10947		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
  10948
  10949	if (mc_count != vnic->mc_list_count) {
  10950		vnic->mc_list_count = mc_count;
  10951		update = true;
  10952	}
  10953	return update;
  10954}
  10955
  10956static bool bnxt_uc_list_updated(struct bnxt *bp)
  10957{
  10958	struct net_device *dev = bp->dev;
  10959	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  10960	struct netdev_hw_addr *ha;
  10961	int off = 0;
  10962
  10963	if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
  10964		return true;
  10965
  10966	netdev_for_each_uc_addr(ha, dev) {
  10967		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
  10968			return true;
  10969
  10970		off += ETH_ALEN;
  10971	}
  10972	return false;
  10973}
  10974
  10975static void bnxt_set_rx_mode(struct net_device *dev)
  10976{
  10977	struct bnxt *bp = netdev_priv(dev);
  10978	struct bnxt_vnic_info *vnic;
  10979	bool mc_update = false;
  10980	bool uc_update;
  10981	u32 mask;
  10982
  10983	if (!test_bit(BNXT_STATE_OPEN, &bp->state))
  10984		return;
  10985
  10986	vnic = &bp->vnic_info[0];
  10987	mask = vnic->rx_mask;
  10988	mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
  10989		  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
  10990		  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
  10991		  CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
  10992
  10993	if (dev->flags & IFF_PROMISC)
  10994		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
  10995
  10996	uc_update = bnxt_uc_list_updated(bp);
  10997
  10998	if (dev->flags & IFF_BROADCAST)
  10999		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
  11000	if (dev->flags & IFF_ALLMULTI) {
  11001		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
  11002		vnic->mc_list_count = 0;
  11003	} else if (dev->flags & IFF_MULTICAST) {
  11004		mc_update = bnxt_mc_list_updated(bp, &mask);
  11005	}
  11006
  11007	if (mask != vnic->rx_mask || uc_update || mc_update) {
  11008		vnic->rx_mask = mask;
  11009
  11010		set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
  11011		bnxt_queue_sp_work(bp);
  11012	}
  11013}
  11014
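/* Apply the RX mode computed by bnxt_set_rx_mode() above (a summary of the
 * logic below): when the unicast list changed, free and re-add the unicast
 * L2 filters, falling back to promiscuous mode if more addresses exist
 * than BNXT_MAX_UC_ADDRS - 1 filters, then program the VNIC RX mask.  If
 * the multicast filter update fails, retry with ALL_MCAST set instead.
 */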
  11015static int bnxt_cfg_rx_mode(struct bnxt *bp)
  11016{
  11017	struct net_device *dev = bp->dev;
  11018	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  11019	struct hwrm_cfa_l2_filter_free_input *req;
  11020	struct netdev_hw_addr *ha;
  11021	int i, off = 0, rc;
  11022	bool uc_update;
  11023
  11024	netif_addr_lock_bh(dev);
  11025	uc_update = bnxt_uc_list_updated(bp);
  11026	netif_addr_unlock_bh(dev);
  11027
  11028	if (!uc_update)
  11029		goto skip_uc;
  11030
  11031	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
  11032	if (rc)
  11033		return rc;
  11034	hwrm_req_hold(bp, req);
  11035	for (i = 1; i < vnic->uc_filter_count; i++) {
  11036		req->l2_filter_id = vnic->fw_l2_filter_id[i];
  11037
  11038		rc = hwrm_req_send(bp, req);
  11039	}
  11040	hwrm_req_drop(bp, req);
  11041
  11042	vnic->uc_filter_count = 1;
  11043
  11044	netif_addr_lock_bh(dev);
  11045	if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
  11046		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
  11047	} else {
  11048		netdev_for_each_uc_addr(ha, dev) {
  11049			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
  11050			off += ETH_ALEN;
  11051			vnic->uc_filter_count++;
  11052		}
  11053	}
  11054	netif_addr_unlock_bh(dev);
  11055
  11056	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
  11057		rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
  11058		if (rc) {
  11059			if (BNXT_VF(bp) && rc == -ENODEV) {
  11060				if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
  11061					netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
  11062				else
  11063					netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
  11064				rc = 0;
  11065			} else {
  11066				netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
  11067			}
  11068			vnic->uc_filter_count = i;
  11069			return rc;
  11070		}
  11071	}
  11072	if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
  11073		netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
  11074
  11075skip_uc:
  11076	if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
  11077	    !bnxt_promisc_ok(bp))
  11078		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
  11079	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
  11080	if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
  11081		netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
  11082			    rc);
  11083		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
  11084		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
  11085		vnic->mc_list_count = 0;
  11086		rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
  11087	}
  11088	if (rc)
  11089		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
  11090			   rc);
  11091
  11092	return rc;
  11093}
  11094
  11095static bool bnxt_can_reserve_rings(struct bnxt *bp)
  11096{
  11097#ifdef CONFIG_BNXT_SRIOV
  11098	if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
  11099		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
  11100
   11101		/* If no minimum rings were provisioned by the PF, don't
   11102		 * reserve rings by default while the device is down.
   11103		 */
  11104		if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
  11105			return true;
  11106
  11107		if (!netif_running(bp->dev))
  11108			return false;
  11109	}
  11110#endif
  11111	return true;
  11112}
  11113
   11114/* If the chip and firmware support RFS */
  11115static bool bnxt_rfs_supported(struct bnxt *bp)
  11116{
  11117	if (bp->flags & BNXT_FLAG_CHIP_P5) {
  11118		if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
  11119			return true;
  11120		return false;
  11121	}
  11122	/* 212 firmware is broken for aRFS */
  11123	if (BNXT_FW_MAJ(bp) == 212)
  11124		return false;
  11125	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
  11126		return true;
  11127	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
  11128		return true;
  11129	return false;
  11130}
  11131
  11132/* If runtime conditions support RFS */
  11133static bool bnxt_rfs_capable(struct bnxt *bp)
  11134{
  11135#ifdef CONFIG_RFS_ACCEL
  11136	int vnics, max_vnics, max_rss_ctxs;
  11137
  11138	if (bp->flags & BNXT_FLAG_CHIP_P5)
  11139		return bnxt_rfs_supported(bp);
  11140	if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
  11141		return false;
  11142
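	/* aRFS needs one VNIC per RX ring plus the default VNIC; e.g. with
	 * 8 RX rings, 9 VNICs (and, unless BNXT_FLAG_NEW_RSS_CAP is set,
	 * 9 RSS contexts) must be available or reservable below.
	 */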
  11143	vnics = 1 + bp->rx_nr_rings;
  11144	max_vnics = bnxt_get_max_func_vnics(bp);
  11145	max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
  11146
  11147	/* RSS contexts not a limiting factor */
  11148	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
  11149		max_rss_ctxs = max_vnics;
  11150	if (vnics > max_vnics || vnics > max_rss_ctxs) {
  11151		if (bp->rx_nr_rings > 1)
  11152			netdev_warn(bp->dev,
  11153				    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
  11154				    min(max_rss_ctxs - 1, max_vnics - 1));
  11155		return false;
  11156	}
  11157
  11158	if (!BNXT_NEW_RM(bp))
  11159		return true;
  11160
  11161	if (vnics == bp->hw_resc.resv_vnics)
  11162		return true;
  11163
  11164	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
  11165	if (vnics <= bp->hw_resc.resv_vnics)
  11166		return true;
  11167
  11168	netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
  11169	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
  11170	return false;
  11171#else
  11172	return false;
  11173#endif
  11174}
  11175
  11176static netdev_features_t bnxt_fix_features(struct net_device *dev,
  11177					   netdev_features_t features)
  11178{
  11179	struct bnxt *bp = netdev_priv(dev);
  11180	netdev_features_t vlan_features;
  11181
  11182	if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
  11183		features &= ~NETIF_F_NTUPLE;
  11184
  11185	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
  11186		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
  11187
  11188	if (!(bp->flags & BNXT_FLAG_TPA))
  11189		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
  11190
  11191	if (!(features & NETIF_F_GRO))
  11192		features &= ~NETIF_F_GRO_HW;
  11193
  11194	if (features & NETIF_F_GRO_HW)
  11195		features &= ~NETIF_F_LRO;
  11196
   11197	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
  11198	 * turned on or off together.
  11199	 */
  11200	vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
  11201	if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
  11202		if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
  11203			features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
  11204		else if (vlan_features)
  11205			features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
  11206	}
  11207#ifdef CONFIG_BNXT_SRIOV
  11208	if (BNXT_VF(bp) && bp->vf.vlan)
  11209		features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
  11210#endif
  11211	return features;
  11212}
  11213
  11214static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
  11215{
  11216	struct bnxt *bp = netdev_priv(dev);
  11217	u32 flags = bp->flags;
  11218	u32 changes;
  11219	int rc = 0;
  11220	bool re_init = false;
  11221	bool update_tpa = false;
  11222
  11223	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
  11224	if (features & NETIF_F_GRO_HW)
  11225		flags |= BNXT_FLAG_GRO;
  11226	else if (features & NETIF_F_LRO)
  11227		flags |= BNXT_FLAG_LRO;
  11228
  11229	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
  11230		flags &= ~BNXT_FLAG_TPA;
  11231
  11232	if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
  11233		flags |= BNXT_FLAG_STRIP_VLAN;
  11234
  11235	if (features & NETIF_F_NTUPLE)
  11236		flags |= BNXT_FLAG_RFS;
  11237
  11238	changes = flags ^ bp->flags;
  11239	if (changes & BNXT_FLAG_TPA) {
  11240		update_tpa = true;
  11241		if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
  11242		    (flags & BNXT_FLAG_TPA) == 0 ||
  11243		    (bp->flags & BNXT_FLAG_CHIP_P5))
  11244			re_init = true;
  11245	}
  11246
  11247	if (changes & ~BNXT_FLAG_TPA)
  11248		re_init = true;
  11249
  11250	if (flags != bp->flags) {
  11251		u32 old_flags = bp->flags;
  11252
  11253		if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
  11254			bp->flags = flags;
  11255			if (update_tpa)
  11256				bnxt_set_ring_params(bp);
  11257			return rc;
  11258		}
  11259
  11260		if (re_init) {
  11261			bnxt_close_nic(bp, false, false);
  11262			bp->flags = flags;
  11263			if (update_tpa)
  11264				bnxt_set_ring_params(bp);
  11265
  11266			return bnxt_open_nic(bp, false, false);
  11267		}
  11268		if (update_tpa) {
  11269			bp->flags = flags;
  11270			rc = bnxt_set_tpa(bp,
  11271					  (flags & BNXT_FLAG_TPA) ?
  11272					  true : false);
  11273			if (rc)
  11274				bp->flags = old_flags;
  11275		}
  11276	}
  11277	return rc;
  11278}
  11279
  11280static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
  11281			      u8 **nextp)
  11282{
  11283	struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
  11284	int hdr_count = 0;
  11285	u8 *nexthdr;
  11286	int start;
  11287
  11288	/* Check that there are at most 2 IPv6 extension headers, no
  11289	 * fragment header, and each is <= 64 bytes.
  11290	 */
  11291	start = nw_off + sizeof(*ip6h);
  11292	nexthdr = &ip6h->nexthdr;
  11293	while (ipv6_ext_hdr(*nexthdr)) {
  11294		struct ipv6_opt_hdr *hp;
  11295		int hdrlen;
  11296
  11297		if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
  11298		    *nexthdr == NEXTHDR_FRAGMENT)
  11299			return false;
  11300		hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
  11301					  skb_headlen(skb), NULL);
  11302		if (!hp)
  11303			return false;
  11304		if (*nexthdr == NEXTHDR_AUTH)
  11305			hdrlen = ipv6_authlen(hp);
  11306		else
  11307			hdrlen = ipv6_optlen(hp);
  11308
  11309		if (hdrlen > 64)
  11310			return false;
  11311		nexthdr = &hp->nexthdr;
  11312		start += hdrlen;
  11313		hdr_count++;
  11314	}
  11315	if (nextp) {
  11316		/* Caller will check inner protocol */
  11317		if (skb->encapsulation) {
  11318			*nextp = nexthdr;
  11319			return true;
  11320		}
  11321		*nextp = NULL;
  11322	}
  11323	/* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
  11324	return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
  11325}
  11326
  11327/* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
  11328static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
  11329{
  11330	struct udphdr *uh = udp_hdr(skb);
  11331	__be16 udp_port = uh->dest;
  11332
  11333	if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
  11334		return false;
  11335	if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
  11336		struct ethhdr *eh = inner_eth_hdr(skb);
  11337
  11338		switch (eh->h_proto) {
  11339		case htons(ETH_P_IP):
  11340			return true;
  11341		case htons(ETH_P_IPV6):
  11342			return bnxt_exthdr_check(bp, skb,
  11343						 skb_inner_network_offset(skb),
  11344						 NULL);
  11345		}
  11346	}
  11347	return false;
  11348}
  11349
  11350static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
  11351{
  11352	switch (l4_proto) {
  11353	case IPPROTO_UDP:
  11354		return bnxt_udp_tunl_check(bp, skb);
  11355	case IPPROTO_IPIP:
  11356		return true;
  11357	case IPPROTO_GRE: {
  11358		switch (skb->inner_protocol) {
  11359		default:
  11360			return false;
  11361		case htons(ETH_P_IP):
  11362			return true;
  11363		case htons(ETH_P_IPV6):
  11364			fallthrough;
  11365		}
  11366	}
  11367	case IPPROTO_IPV6:
  11368		/* Check ext headers of inner ipv6 */
  11369		return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
  11370					 NULL);
  11371	}
  11372	return false;
  11373}
  11374
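/* Per-skb feature check for TX offloads (a summary of the logic below):
 * checksum and GSO offload are kept only when any IPv6 extension headers
 * pass bnxt_exthdr_check() and, for encapsulated packets, the tunnel type
 * is one the checks above recognize (VXLAN/Geneve on the configured UDP
 * ports, GRE, IP-in-IP); otherwise NETIF_F_CSUM_MASK and NETIF_F_GSO_MASK
 * are cleared and the stack falls back to software.
 */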
  11375static netdev_features_t bnxt_features_check(struct sk_buff *skb,
  11376					     struct net_device *dev,
  11377					     netdev_features_t features)
  11378{
  11379	struct bnxt *bp = netdev_priv(dev);
  11380	u8 *l4_proto;
  11381
  11382	features = vlan_features_check(skb, features);
  11383	switch (vlan_get_protocol(skb)) {
  11384	case htons(ETH_P_IP):
  11385		if (!skb->encapsulation)
  11386			return features;
  11387		l4_proto = &ip_hdr(skb)->protocol;
  11388		if (bnxt_tunl_check(bp, skb, *l4_proto))
  11389			return features;
  11390		break;
  11391	case htons(ETH_P_IPV6):
  11392		if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
  11393				       &l4_proto))
  11394			break;
  11395		if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
  11396			return features;
  11397		break;
  11398	}
  11399	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
  11400}
  11401
  11402int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
  11403			 u32 *reg_buf)
  11404{
  11405	struct hwrm_dbg_read_direct_output *resp;
  11406	struct hwrm_dbg_read_direct_input *req;
  11407	__le32 *dbg_reg_buf;
  11408	dma_addr_t mapping;
  11409	int rc, i;
  11410
  11411	rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
  11412	if (rc)
  11413		return rc;
  11414
  11415	dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
  11416					 &mapping);
  11417	if (!dbg_reg_buf) {
  11418		rc = -ENOMEM;
  11419		goto dbg_rd_reg_exit;
  11420	}
  11421
  11422	req->host_dest_addr = cpu_to_le64(mapping);
  11423
  11424	resp = hwrm_req_hold(bp, req);
  11425	req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
  11426	req->read_len32 = cpu_to_le32(num_words);
  11427
  11428	rc = hwrm_req_send(bp, req);
  11429	if (rc || resp->error_code) {
  11430		rc = -EIO;
  11431		goto dbg_rd_reg_exit;
  11432	}
  11433	for (i = 0; i < num_words; i++)
  11434		reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
  11435
  11436dbg_rd_reg_exit:
  11437	hwrm_req_drop(bp, req);
  11438	return rc;
  11439}
  11440
  11441static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
  11442				       u32 ring_id, u32 *prod, u32 *cons)
  11443{
  11444	struct hwrm_dbg_ring_info_get_output *resp;
  11445	struct hwrm_dbg_ring_info_get_input *req;
  11446	int rc;
  11447
  11448	rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
  11449	if (rc)
  11450		return rc;
  11451
  11452	req->ring_type = ring_type;
  11453	req->fw_ring_id = cpu_to_le32(ring_id);
  11454	resp = hwrm_req_hold(bp, req);
  11455	rc = hwrm_req_send(bp, req);
  11456	if (!rc) {
  11457		*prod = le32_to_cpu(resp->producer_index);
  11458		*cons = le32_to_cpu(resp->consumer_index);
  11459	}
  11460	hwrm_req_drop(bp, req);
  11461	return rc;
  11462}
  11463
  11464static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
  11465{
  11466	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
  11467	int i = bnapi->index;
  11468
  11469	if (!txr)
  11470		return;
  11471
  11472	netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
  11473		    i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
  11474		    txr->tx_cons);
  11475}
  11476
  11477static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
  11478{
  11479	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
  11480	int i = bnapi->index;
  11481
  11482	if (!rxr)
  11483		return;
  11484
  11485	netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
  11486		    i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
  11487		    rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
  11488		    rxr->rx_sw_agg_prod);
  11489}
  11490
  11491static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
  11492{
  11493	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  11494	int i = bnapi->index;
  11495
  11496	netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
  11497		    i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
  11498}
  11499
  11500static void bnxt_dbg_dump_states(struct bnxt *bp)
  11501{
  11502	int i;
  11503	struct bnxt_napi *bnapi;
  11504
  11505	for (i = 0; i < bp->cp_nr_rings; i++) {
  11506		bnapi = bp->bnapi[i];
  11507		if (netif_msg_drv(bp)) {
  11508			bnxt_dump_tx_sw_state(bnapi);
  11509			bnxt_dump_rx_sw_state(bnapi);
  11510			bnxt_dump_cp_sw_state(bnapi);
  11511		}
  11512	}
  11513}
  11514
  11515static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
  11516{
  11517	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
  11518	struct hwrm_ring_reset_input *req;
  11519	struct bnxt_napi *bnapi = rxr->bnapi;
  11520	struct bnxt_cp_ring_info *cpr;
  11521	u16 cp_ring_id;
  11522	int rc;
  11523
  11524	rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
  11525	if (rc)
  11526		return rc;
  11527
  11528	cpr = &bnapi->cp_ring;
  11529	cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
  11530	req->cmpl_ring = cpu_to_le16(cp_ring_id);
  11531	req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
  11532	req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
  11533	return hwrm_req_send_silent(bp, req);
  11534}
  11535
  11536static void bnxt_reset_task(struct bnxt *bp, bool silent)
  11537{
  11538	if (!silent)
  11539		bnxt_dbg_dump_states(bp);
  11540	if (netif_running(bp->dev)) {
  11541		int rc;
  11542
  11543		if (silent) {
  11544			bnxt_close_nic(bp, false, false);
  11545			bnxt_open_nic(bp, false, false);
  11546		} else {
  11547			bnxt_ulp_stop(bp);
  11548			bnxt_close_nic(bp, true, false);
  11549			rc = bnxt_open_nic(bp, true, false);
  11550			bnxt_ulp_start(bp, rc);
  11551		}
  11552	}
  11553}
  11554
  11555static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
  11556{
  11557	struct bnxt *bp = netdev_priv(dev);
  11558
   11559	netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
  11560	set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
  11561	bnxt_queue_sp_work(bp);
  11562}
  11563
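/* Periodic firmware heartbeat check, run from bnxt_timer() (a summary of
 * the logic below): once every tmr_multiplier ticks, read the heartbeat
 * and reset-counter registers.  A stalled heartbeat or an unexpected
 * change in the reset counter is counted (arrests/discoveries) and
 * schedules BNXT_FW_EXCEPTION_SP_EVENT so the slow path can start a
 * firmware reset.
 */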
  11564static void bnxt_fw_health_check(struct bnxt *bp)
  11565{
  11566	struct bnxt_fw_health *fw_health = bp->fw_health;
  11567	u32 val;
  11568
  11569	if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
  11570		return;
  11571
  11572	/* Make sure it is enabled before checking the tmr_counter. */
  11573	smp_rmb();
  11574	if (fw_health->tmr_counter) {
  11575		fw_health->tmr_counter--;
  11576		return;
  11577	}
  11578
  11579	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
  11580	if (val == fw_health->last_fw_heartbeat) {
  11581		fw_health->arrests++;
  11582		goto fw_reset;
  11583	}
  11584
  11585	fw_health->last_fw_heartbeat = val;
  11586
  11587	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
  11588	if (val != fw_health->last_fw_reset_cnt) {
  11589		fw_health->discoveries++;
  11590		goto fw_reset;
  11591	}
  11592
  11593	fw_health->tmr_counter = fw_health->tmr_multiplier;
  11594	return;
  11595
  11596fw_reset:
  11597	set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
  11598	bnxt_queue_sp_work(bp);
  11599}
  11600
  11601static void bnxt_timer(struct timer_list *t)
  11602{
  11603	struct bnxt *bp = from_timer(bp, t, timer);
  11604	struct net_device *dev = bp->dev;
  11605
  11606	if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
  11607		return;
  11608
  11609	if (atomic_read(&bp->intr_sem) != 0)
  11610		goto bnxt_restart_timer;
  11611
  11612	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
  11613		bnxt_fw_health_check(bp);
  11614
  11615	if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) {
  11616		set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
  11617		bnxt_queue_sp_work(bp);
  11618	}
  11619
  11620	if (bnxt_tc_flower_enabled(bp)) {
  11621		set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
  11622		bnxt_queue_sp_work(bp);
  11623	}
  11624
  11625#ifdef CONFIG_RFS_ACCEL
  11626	if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
  11627		set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
  11628		bnxt_queue_sp_work(bp);
  11629	}
  11630#endif /*CONFIG_RFS_ACCEL*/
  11631
  11632	if (bp->link_info.phy_retry) {
  11633		if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
  11634			bp->link_info.phy_retry = false;
  11635			netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
  11636		} else {
  11637			set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
  11638			bnxt_queue_sp_work(bp);
  11639		}
  11640	}
  11641
  11642	if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) {
  11643		set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
  11644		bnxt_queue_sp_work(bp);
  11645	}
  11646
  11647	if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
  11648	    netif_carrier_ok(dev)) {
  11649		set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
  11650		bnxt_queue_sp_work(bp);
  11651	}
  11652bnxt_restart_timer:
  11653	mod_timer(&bp->timer, jiffies + bp->current_interval);
  11654}
  11655
  11656static void bnxt_rtnl_lock_sp(struct bnxt *bp)
  11657{
  11658	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
  11659	 * set.  If the device is being closed, bnxt_close() may be holding
  11660	 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
  11661	 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
  11662	 */
  11663	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
  11664	rtnl_lock();
  11665}
  11666
  11667static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
  11668{
  11669	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
  11670	rtnl_unlock();
  11671}
  11672
  11673/* Only called from bnxt_sp_task() */
  11674static void bnxt_reset(struct bnxt *bp, bool silent)
  11675{
  11676	bnxt_rtnl_lock_sp(bp);
  11677	if (test_bit(BNXT_STATE_OPEN, &bp->state))
  11678		bnxt_reset_task(bp, silent);
  11679	bnxt_rtnl_unlock_sp(bp);
  11680}
  11681
  11682/* Only called from bnxt_sp_task() */
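/* Per-ring RX reset (a summary of the logic below): with TPA temporarily
 * disabled, each RX ring whose NAPI is flagged in_reset gets an
 * HWRM_RING_RESET, its buffers freed and refilled, and its doorbells
 * rewritten.  If the firmware rejects the per-ring reset, fall back to a
 * silent global reset via bnxt_reset_task().
 */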
  11683static void bnxt_rx_ring_reset(struct bnxt *bp)
  11684{
  11685	int i;
  11686
  11687	bnxt_rtnl_lock_sp(bp);
  11688	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
  11689		bnxt_rtnl_unlock_sp(bp);
  11690		return;
  11691	}
  11692	/* Disable and flush TPA before resetting the RX ring */
  11693	if (bp->flags & BNXT_FLAG_TPA)
  11694		bnxt_set_tpa(bp, false);
  11695	for (i = 0; i < bp->rx_nr_rings; i++) {
  11696		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
  11697		struct bnxt_cp_ring_info *cpr;
  11698		int rc;
  11699
  11700		if (!rxr->bnapi->in_reset)
  11701			continue;
  11702
  11703		rc = bnxt_hwrm_rx_ring_reset(bp, i);
  11704		if (rc) {
  11705			if (rc == -EINVAL || rc == -EOPNOTSUPP)
  11706				netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
  11707			else
  11708				netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
  11709					    rc);
  11710			bnxt_reset_task(bp, true);
  11711			break;
  11712		}
  11713		bnxt_free_one_rx_ring_skbs(bp, i);
  11714		rxr->rx_prod = 0;
  11715		rxr->rx_agg_prod = 0;
  11716		rxr->rx_sw_agg_prod = 0;
  11717		rxr->rx_next_cons = 0;
  11718		rxr->bnapi->in_reset = false;
  11719		bnxt_alloc_one_rx_ring(bp, i);
  11720		cpr = &rxr->bnapi->cp_ring;
  11721		cpr->sw_stats.rx.rx_resets++;
  11722		if (bp->flags & BNXT_FLAG_AGG_RINGS)
  11723			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
  11724		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
  11725	}
  11726	if (bp->flags & BNXT_FLAG_TPA)
  11727		bnxt_set_tpa(bp, true);
  11728	bnxt_rtnl_unlock_sp(bp);
  11729}
  11730
  11731static void bnxt_fw_reset_close(struct bnxt *bp)
  11732{
  11733	bnxt_ulp_stop(bp);
  11734	/* When firmware is in fatal state, quiesce device and disable
  11735	 * bus master to prevent any potential bad DMAs before freeing
  11736	 * kernel memory.
  11737	 */
  11738	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
  11739		u16 val = 0;
  11740
  11741		pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
  11742		if (val == 0xffff)
  11743			bp->fw_reset_min_dsecs = 0;
  11744		bnxt_tx_disable(bp);
  11745		bnxt_disable_napi(bp);
  11746		bnxt_disable_int_sync(bp);
  11747		bnxt_free_irq(bp);
  11748		bnxt_clear_int_mode(bp);
  11749		pci_disable_device(bp->pdev);
  11750	}
  11751	__bnxt_close_nic(bp, true, false);
  11752	bnxt_vf_reps_free(bp);
  11753	bnxt_clear_int_mode(bp);
  11754	bnxt_hwrm_func_drv_unrgtr(bp);
  11755	if (pci_is_enabled(bp->pdev))
  11756		pci_disable_device(bp->pdev);
  11757	bnxt_free_ctx_mem(bp);
  11758	kfree(bp->ctx);
  11759	bp->ctx = NULL;
  11760}
  11761
  11762static bool is_bnxt_fw_ok(struct bnxt *bp)
  11763{
  11764	struct bnxt_fw_health *fw_health = bp->fw_health;
  11765	bool no_heartbeat = false, has_reset = false;
  11766	u32 val;
  11767
  11768	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
  11769	if (val == fw_health->last_fw_heartbeat)
  11770		no_heartbeat = true;
  11771
  11772	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
  11773	if (val != fw_health->last_fw_reset_cnt)
  11774		has_reset = true;
  11775
  11776	if (!no_heartbeat && has_reset)
  11777		return true;
  11778
  11779	return false;
  11780}
  11781
  11782/* rtnl_lock is acquired before calling this function */
  11783static void bnxt_force_fw_reset(struct bnxt *bp)
  11784{
  11785	struct bnxt_fw_health *fw_health = bp->fw_health;
  11786	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
  11787	u32 wait_dsecs;
  11788
  11789	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
  11790	    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
  11791		return;
  11792
  11793	if (ptp) {
  11794		spin_lock_bh(&ptp->ptp_lock);
  11795		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
  11796		spin_unlock_bh(&ptp->ptp_lock);
  11797	} else {
  11798		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
  11799	}
  11800	bnxt_fw_reset_close(bp);
  11801	wait_dsecs = fw_health->master_func_wait_dsecs;
  11802	if (fw_health->primary) {
  11803		if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
  11804			wait_dsecs = 0;
  11805		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
  11806	} else {
  11807		bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
  11808		wait_dsecs = fw_health->normal_func_wait_dsecs;
  11809		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
  11810	}
  11811
  11812	bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
  11813	bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
  11814	bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
  11815}
  11816
  11817void bnxt_fw_exception(struct bnxt *bp)
  11818{
  11819	netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
  11820	set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
  11821	bnxt_rtnl_lock_sp(bp);
  11822	bnxt_force_fw_reset(bp);
  11823	bnxt_rtnl_unlock_sp(bp);
  11824}
  11825
  11826/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
  11827 * < 0 on error.
  11828 */
  11829static int bnxt_get_registered_vfs(struct bnxt *bp)
  11830{
  11831#ifdef CONFIG_BNXT_SRIOV
  11832	int rc;
  11833
  11834	if (!BNXT_PF(bp))
  11835		return 0;
  11836
  11837	rc = bnxt_hwrm_func_qcfg(bp);
  11838	if (rc) {
  11839		netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
  11840		return rc;
  11841	}
  11842	if (bp->pf.registered_vfs)
  11843		return bp->pf.registered_vfs;
  11844	if (bp->sriov_cfg)
  11845		return 1;
  11846#endif
  11847	return 0;
  11848}
  11849
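/* Orderly firmware reset.  If VFs are still registered, polling is deferred
 * to the BNXT_FW_RESET_STATE_POLL_VF state and the maximum wait is raised to
 * at least one second per registered VF; otherwise the device is closed and
 * the reset work is queued right away.
 */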
  11850void bnxt_fw_reset(struct bnxt *bp)
  11851{
  11852	bnxt_rtnl_lock_sp(bp);
  11853	if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
  11854	    !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
  11855		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
  11856		int n = 0, tmo;
  11857
  11858		if (ptp) {
  11859			spin_lock_bh(&ptp->ptp_lock);
  11860			set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
  11861			spin_unlock_bh(&ptp->ptp_lock);
  11862		} else {
  11863			set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
  11864		}
  11865		if (bp->pf.active_vfs &&
  11866		    !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
  11867			n = bnxt_get_registered_vfs(bp);
  11868		if (n < 0) {
  11869			netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
  11870				   n);
  11871			clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
  11872			dev_close(bp->dev);
  11873			goto fw_reset_exit;
  11874		} else if (n > 0) {
  11875			u16 vf_tmo_dsecs = n * 10;
  11876
  11877			if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
  11878				bp->fw_reset_max_dsecs = vf_tmo_dsecs;
  11879			bp->fw_reset_state =
  11880				BNXT_FW_RESET_STATE_POLL_VF;
  11881			bnxt_queue_fw_reset_work(bp, HZ / 10);
  11882			goto fw_reset_exit;
  11883		}
  11884		bnxt_fw_reset_close(bp);
  11885		if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
  11886			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
  11887			tmo = HZ / 10;
  11888		} else {
  11889			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
  11890			tmo = bp->fw_reset_min_dsecs * HZ / 10;
  11891		}
  11892		bnxt_queue_fw_reset_work(bp, tmo);
  11893	}
  11894fw_reset_exit:
  11895	bnxt_rtnl_unlock_sp(bp);
  11896}
  11897
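/* On P5 chips, scan each NAPI's completion rings for entries that have work
 * pending but whose raw consumer index has not moved since the previous scan.
 * Such a ring likely missed an interrupt; query its state via HWRM for
 * debugging and bump the missed_irqs counter.
 */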
  11898static void bnxt_chk_missed_irq(struct bnxt *bp)
  11899{
  11900	int i;
  11901
  11902	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
  11903		return;
  11904
  11905	for (i = 0; i < bp->cp_nr_rings; i++) {
  11906		struct bnxt_napi *bnapi = bp->bnapi[i];
  11907		struct bnxt_cp_ring_info *cpr;
  11908		u32 fw_ring_id;
  11909		int j;
  11910
  11911		if (!bnapi)
  11912			continue;
  11913
  11914		cpr = &bnapi->cp_ring;
  11915		for (j = 0; j < 2; j++) {
  11916			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
  11917			u32 val[2];
  11918
  11919			if (!cpr2 || cpr2->has_more_work ||
  11920			    !bnxt_has_work(bp, cpr2))
  11921				continue;
  11922
  11923			if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
  11924				cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
  11925				continue;
  11926			}
  11927			fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
  11928			bnxt_dbg_hwrm_ring_info_get(bp,
  11929				DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
  11930				fw_ring_id, &val[0], &val[1]);
  11931			cpr->sw_stats.cmn.missed_irqs++;
  11932		}
  11933	}
  11934}
  11935
  11936static void bnxt_cfg_ntp_filters(struct bnxt *);
  11937
  11938static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
  11939{
  11940	struct bnxt_link_info *link_info = &bp->link_info;
  11941
  11942	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
  11943		link_info->autoneg = BNXT_AUTONEG_SPEED;
  11944		if (bp->hwrm_spec_code >= 0x10201) {
  11945			if (link_info->auto_pause_setting &
  11946			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
  11947				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
  11948		} else {
  11949			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
  11950		}
  11951		link_info->advertising = link_info->auto_link_speeds;
  11952		link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
  11953	} else {
  11954		link_info->req_link_speed = link_info->force_link_speed;
  11955		link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
  11956		if (link_info->force_pam4_link_speed) {
  11957			link_info->req_link_speed =
  11958				link_info->force_pam4_link_speed;
  11959			link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
  11960		}
  11961		link_info->req_duplex = link_info->duplex_setting;
  11962	}
  11963	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
  11964		link_info->req_flow_ctrl =
  11965			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
  11966	else
  11967		link_info->req_flow_ctrl = link_info->force_pause_setting;
  11968}
  11969
  11970static void bnxt_fw_echo_reply(struct bnxt *bp)
  11971{
  11972	struct bnxt_fw_health *fw_health = bp->fw_health;
  11973	struct hwrm_func_echo_response_input *req;
  11974	int rc;
  11975
  11976	rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
  11977	if (rc)
  11978		return;
  11979	req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
  11980	req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
  11981	hwrm_req_send(bp, req);
  11982}
  11983
  11984static void bnxt_sp_task(struct work_struct *work)
  11985{
  11986	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
  11987
  11988	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
  11989	smp_mb__after_atomic();
  11990	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
  11991		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
  11992		return;
  11993	}
  11994
  11995	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
  11996		bnxt_cfg_rx_mode(bp);
  11997
  11998	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
  11999		bnxt_cfg_ntp_filters(bp);
  12000	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
  12001		bnxt_hwrm_exec_fwd_req(bp);
  12002	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
  12003		bnxt_hwrm_port_qstats(bp, 0);
  12004		bnxt_hwrm_port_qstats_ext(bp, 0);
  12005		bnxt_accumulate_all_stats(bp);
  12006	}
  12007
  12008	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
  12009		int rc;
  12010
  12011		mutex_lock(&bp->link_lock);
  12012		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
  12013				       &bp->sp_event))
  12014			bnxt_hwrm_phy_qcaps(bp);
  12015
  12016		rc = bnxt_update_link(bp, true);
  12017		if (rc)
  12018			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
  12019				   rc);
  12020
  12021		if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
  12022				       &bp->sp_event))
  12023			bnxt_init_ethtool_link_settings(bp);
  12024		mutex_unlock(&bp->link_lock);
  12025	}
  12026	if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
  12027		int rc;
  12028
  12029		mutex_lock(&bp->link_lock);
  12030		rc = bnxt_update_phy_setting(bp);
  12031		mutex_unlock(&bp->link_lock);
  12032		if (rc) {
  12033			netdev_warn(bp->dev, "update phy settings retry failed\n");
  12034		} else {
  12035			bp->link_info.phy_retry = false;
  12036			netdev_info(bp->dev, "update phy settings retry succeeded\n");
  12037		}
  12038	}
  12039	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
  12040		mutex_lock(&bp->link_lock);
  12041		bnxt_get_port_module_status(bp);
  12042		mutex_unlock(&bp->link_lock);
  12043	}
  12044
  12045	if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
  12046		bnxt_tc_flow_stats_work(bp);
  12047
  12048	if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
  12049		bnxt_chk_missed_irq(bp);
  12050
  12051	if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
  12052		bnxt_fw_echo_reply(bp);
  12053
   12054	/* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
   12055	 * must be the last functions to be called before exiting.
  12056	 */
  12057	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
  12058		bnxt_reset(bp, false);
  12059
  12060	if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
  12061		bnxt_reset(bp, true);
  12062
  12063	if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
  12064		bnxt_rx_ring_reset(bp);
  12065
  12066	if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
  12067		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
  12068		    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
  12069			bnxt_devlink_health_fw_report(bp);
  12070		else
  12071			bnxt_fw_reset(bp);
  12072	}
  12073
  12074	if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
  12075		if (!is_bnxt_fw_ok(bp))
  12076			bnxt_devlink_health_fw_report(bp);
  12077	}
  12078
  12079	smp_mb__before_atomic();
  12080	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
  12081}
  12082
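/* Ring accounting used when validating a new channel/TC/XDP configuration:
 * TX rings scale with the number of TCs plus any XDP TX rings; one VNIC is
 * needed normally, plus one per RX ring when RFS is enabled on non-P5 chips;
 * the RX ring count doubles when aggregation rings are in use; completion
 * rings are max(tx, rx) in shared mode or tx + rx otherwise, with ULP MSI-X
 * vectors and stat contexts added under the new resource manager.
 */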
  12083/* Under rtnl_lock */
  12084int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
  12085		     int tx_xdp)
  12086{
  12087	int max_rx, max_tx, tx_sets = 1;
  12088	int tx_rings_needed, stats;
  12089	int rx_rings = rx;
  12090	int cp, vnics, rc;
  12091
  12092	if (tcs)
  12093		tx_sets = tcs;
  12094
  12095	rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
  12096	if (rc)
  12097		return rc;
  12098
  12099	if (max_rx < rx)
  12100		return -ENOMEM;
  12101
  12102	tx_rings_needed = tx * tx_sets + tx_xdp;
  12103	if (max_tx < tx_rings_needed)
  12104		return -ENOMEM;
  12105
  12106	vnics = 1;
  12107	if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
  12108		vnics += rx_rings;
  12109
  12110	if (bp->flags & BNXT_FLAG_AGG_RINGS)
  12111		rx_rings <<= 1;
  12112	cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
  12113	stats = cp;
  12114	if (BNXT_NEW_RM(bp)) {
  12115		cp += bnxt_get_ulp_msix_num(bp);
  12116		stats += bnxt_get_ulp_stat_ctxs(bp);
  12117	}
  12118	return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
  12119				     stats, vnics);
  12120}
  12121
  12122static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
  12123{
  12124	if (bp->bar2) {
  12125		pci_iounmap(pdev, bp->bar2);
  12126		bp->bar2 = NULL;
  12127	}
  12128
  12129	if (bp->bar1) {
  12130		pci_iounmap(pdev, bp->bar1);
  12131		bp->bar1 = NULL;
  12132	}
  12133
  12134	if (bp->bar0) {
  12135		pci_iounmap(pdev, bp->bar0);
  12136		bp->bar0 = NULL;
  12137	}
  12138}
  12139
  12140static void bnxt_cleanup_pci(struct bnxt *bp)
  12141{
  12142	bnxt_unmap_bars(bp, bp->pdev);
  12143	pci_release_regions(bp->pdev);
  12144	if (pci_is_enabled(bp->pdev))
  12145		pci_disable_device(bp->pdev);
  12146}
  12147
  12148static void bnxt_init_dflt_coal(struct bnxt *bp)
  12149{
  12150	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
  12151	struct bnxt_coal *coal;
  12152	u16 flags = 0;
  12153
  12154	if (coal_cap->cmpl_params &
  12155	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
  12156		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
  12157
   12158	/* Tick values in microseconds.
  12159	 * 1 coal_buf x bufs_per_record = 1 completion record.
  12160	 */
  12161	coal = &bp->rx_coal;
  12162	coal->coal_ticks = 10;
  12163	coal->coal_bufs = 30;
  12164	coal->coal_ticks_irq = 1;
  12165	coal->coal_bufs_irq = 2;
  12166	coal->idle_thresh = 50;
  12167	coal->bufs_per_record = 2;
  12168	coal->budget = 64;		/* NAPI budget */
  12169	coal->flags = flags;
  12170
  12171	coal = &bp->tx_coal;
  12172	coal->coal_ticks = 28;
  12173	coal->coal_bufs = 30;
  12174	coal->coal_ticks_irq = 2;
  12175	coal->coal_bufs_irq = 2;
  12176	coal->bufs_per_record = 1;
  12177	coal->flags = flags;
  12178
  12179	bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
  12180}
  12181
  12182static int bnxt_fw_init_one_p1(struct bnxt *bp)
  12183{
  12184	int rc;
  12185
  12186	bp->fw_cap = 0;
  12187	rc = bnxt_hwrm_ver_get(bp);
  12188	bnxt_try_map_fw_health_reg(bp);
  12189	if (rc) {
  12190		rc = bnxt_try_recover_fw(bp);
  12191		if (rc)
  12192			return rc;
  12193		rc = bnxt_hwrm_ver_get(bp);
  12194		if (rc)
  12195			return rc;
  12196	}
  12197
  12198	bnxt_nvm_cfg_ver_get(bp);
  12199
  12200	rc = bnxt_hwrm_func_reset(bp);
  12201	if (rc)
  12202		return -ENODEV;
  12203
  12204	bnxt_hwrm_fw_set_time(bp);
  12205	return 0;
  12206}
  12207
  12208static int bnxt_fw_init_one_p2(struct bnxt *bp)
  12209{
  12210	int rc;
  12211
  12212	/* Get the MAX capabilities for this function */
  12213	rc = bnxt_hwrm_func_qcaps(bp);
  12214	if (rc) {
  12215		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
  12216			   rc);
  12217		return -ENODEV;
  12218	}
  12219
  12220	rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
  12221	if (rc)
  12222		netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
  12223			    rc);
  12224
  12225	if (bnxt_alloc_fw_health(bp)) {
  12226		netdev_warn(bp->dev, "no memory for firmware error recovery\n");
  12227	} else {
  12228		rc = bnxt_hwrm_error_recovery_qcfg(bp);
  12229		if (rc)
  12230			netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
  12231				    rc);
  12232	}
  12233
  12234	rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
  12235	if (rc)
  12236		return -ENODEV;
  12237
  12238	bnxt_hwrm_func_qcfg(bp);
  12239	bnxt_hwrm_vnic_qcaps(bp);
  12240	bnxt_hwrm_port_led_qcaps(bp);
  12241	bnxt_ethtool_init(bp);
  12242	bnxt_dcb_init(bp);
  12243	return 0;
  12244}
  12245
  12246static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
  12247{
  12248	bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
  12249	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
  12250			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
  12251			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
  12252			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
  12253	if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
  12254		bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
  12255		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
  12256				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
  12257	}
  12258}
  12259
  12260static void bnxt_set_dflt_rfs(struct bnxt *bp)
  12261{
  12262	struct net_device *dev = bp->dev;
  12263
  12264	dev->hw_features &= ~NETIF_F_NTUPLE;
  12265	dev->features &= ~NETIF_F_NTUPLE;
  12266	bp->flags &= ~BNXT_FLAG_RFS;
  12267	if (bnxt_rfs_supported(bp)) {
  12268		dev->hw_features |= NETIF_F_NTUPLE;
  12269		if (bnxt_rfs_capable(bp)) {
  12270			bp->flags |= BNXT_FLAG_RFS;
  12271			dev->features |= NETIF_F_NTUPLE;
  12272		}
  12273	}
  12274}
  12275
  12276static void bnxt_fw_init_one_p3(struct bnxt *bp)
  12277{
  12278	struct pci_dev *pdev = bp->pdev;
  12279
  12280	bnxt_set_dflt_rss_hash_type(bp);
  12281	bnxt_set_dflt_rfs(bp);
  12282
  12283	bnxt_get_wol_settings(bp);
  12284	if (bp->flags & BNXT_FLAG_WOL_CAP)
  12285		device_set_wakeup_enable(&pdev->dev, bp->wol);
  12286	else
  12287		device_set_wakeup_capable(&pdev->dev, false);
  12288
  12289	bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
  12290	bnxt_hwrm_coal_params_qcaps(bp);
  12291}
  12292
  12293static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
  12294
  12295int bnxt_fw_init_one(struct bnxt *bp)
  12296{
  12297	int rc;
  12298
  12299	rc = bnxt_fw_init_one_p1(bp);
  12300	if (rc) {
  12301		netdev_err(bp->dev, "Firmware init phase 1 failed\n");
  12302		return rc;
  12303	}
  12304	rc = bnxt_fw_init_one_p2(bp);
  12305	if (rc) {
  12306		netdev_err(bp->dev, "Firmware init phase 2 failed\n");
  12307		return rc;
  12308	}
  12309	rc = bnxt_probe_phy(bp, false);
  12310	if (rc)
  12311		return rc;
  12312	rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
  12313	if (rc)
  12314		return rc;
  12315
  12316	bnxt_fw_init_one_p3(bp);
  12317	return 0;
  12318}
  12319
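/* Execute one step of the firmware-supplied host reset sequence.  CFG type
 * registers are written through PCI config space, GRC registers are written
 * through a BAR0 window (the window base is programmed first, then the write
 * falls through to the BAR0 case), and BAR0/BAR1 types are plain MMIO writes.
 * When a delay is specified, a config space read is issued first, presumably
 * to flush the write before sleeping.
 */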
  12320static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
  12321{
  12322	struct bnxt_fw_health *fw_health = bp->fw_health;
  12323	u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
  12324	u32 val = fw_health->fw_reset_seq_vals[reg_idx];
  12325	u32 reg_type, reg_off, delay_msecs;
  12326
  12327	delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
  12328	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
  12329	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
  12330	switch (reg_type) {
  12331	case BNXT_FW_HEALTH_REG_TYPE_CFG:
  12332		pci_write_config_dword(bp->pdev, reg_off, val);
  12333		break;
  12334	case BNXT_FW_HEALTH_REG_TYPE_GRC:
  12335		writel(reg_off & BNXT_GRC_BASE_MASK,
  12336		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
  12337		reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
  12338		fallthrough;
  12339	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
  12340		writel(val, bp->bar0 + reg_off);
  12341		break;
  12342	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
  12343		writel(val, bp->bar1 + reg_off);
  12344		break;
  12345	}
  12346	if (delay_msecs) {
  12347		pci_read_config_dword(bp->pdev, 0, &val);
  12348		msleep(delay_msecs);
  12349	}
  12350}
  12351
  12352bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
  12353{
  12354	struct hwrm_func_qcfg_output *resp;
  12355	struct hwrm_func_qcfg_input *req;
  12356	bool result = true; /* firmware will enforce if unknown */
  12357
  12358	if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
  12359		return result;
  12360
  12361	if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
  12362		return result;
  12363
  12364	req->fid = cpu_to_le16(0xffff);
  12365	resp = hwrm_req_hold(bp, req);
  12366	if (!hwrm_req_send(bp, req))
  12367		result = !!(le16_to_cpu(resp->flags) &
  12368			    FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
  12369	hwrm_req_drop(bp, req);
  12370	return result;
  12371}
  12372
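/* Trigger the actual firmware reset using whichever method the firmware
 * advertises: an OP-TEE assisted reset when ERR_RECOVER_RELOAD is supported,
 * a host-driven register write sequence, or an HWRM_FW_RESET request sent to
 * the co-processor.
 */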
  12373static void bnxt_reset_all(struct bnxt *bp)
  12374{
  12375	struct bnxt_fw_health *fw_health = bp->fw_health;
  12376	int i, rc;
  12377
  12378	if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
  12379		bnxt_fw_reset_via_optee(bp);
  12380		bp->fw_reset_timestamp = jiffies;
  12381		return;
  12382	}
  12383
  12384	if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
  12385		for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
  12386			bnxt_fw_reset_writel(bp, i);
  12387	} else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
  12388		struct hwrm_fw_reset_input *req;
  12389
  12390		rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
  12391		if (!rc) {
  12392			req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
  12393			req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
  12394			req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
  12395			req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
  12396			rc = hwrm_req_send(bp, req);
  12397		}
  12398		if (rc != -ENODEV)
  12399			netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
  12400	}
  12401	bp->fw_reset_timestamp = jiffies;
  12402}
  12403
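/* All *_dsecs fields are in units of 100 ms; "dsecs * HZ / 10" converts them
 * to jiffies.
 */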
  12404static bool bnxt_fw_reset_timeout(struct bnxt *bp)
  12405{
  12406	return time_after(jiffies, bp->fw_reset_timestamp +
  12407			  (bp->fw_reset_max_dsecs * HZ / 10));
  12408}
  12409
  12410static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
  12411{
  12412	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
  12413	if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
  12414		bnxt_ulp_start(bp, rc);
  12415		bnxt_dl_health_fw_status_update(bp, false);
  12416	}
  12417	bp->fw_reset_state = 0;
  12418	dev_close(bp->dev);
  12419}
  12420
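/* Firmware reset state machine, driven by a delayed work item that re-queues
 * itself between states: POLL_VF waits for VFs to unregister and then closes
 * the device, POLL_FW_DOWN waits for firmware to report shutdown (or hands
 * off to RESET_FW on the primary function), RESET_FW performs the reset,
 * ENABLE_DEV re-enables the PCI device, POLL_FW polls firmware with a short
 * HWRM timeout until it responds, and OPENING re-opens the netdev under rtnl
 * and restarts ULPs and VF representors.
 */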
  12421static void bnxt_fw_reset_task(struct work_struct *work)
  12422{
  12423	struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
  12424	int rc = 0;
  12425
  12426	if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
  12427		netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
  12428		return;
  12429	}
  12430
  12431	switch (bp->fw_reset_state) {
  12432	case BNXT_FW_RESET_STATE_POLL_VF: {
  12433		int n = bnxt_get_registered_vfs(bp);
  12434		int tmo;
  12435
  12436		if (n < 0) {
  12437			netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
  12438				   n, jiffies_to_msecs(jiffies -
  12439				   bp->fw_reset_timestamp));
  12440			goto fw_reset_abort;
  12441		} else if (n > 0) {
  12442			if (bnxt_fw_reset_timeout(bp)) {
  12443				clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
  12444				bp->fw_reset_state = 0;
  12445				netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
  12446					   n);
  12447				return;
  12448			}
  12449			bnxt_queue_fw_reset_work(bp, HZ / 10);
  12450			return;
  12451		}
  12452		bp->fw_reset_timestamp = jiffies;
  12453		rtnl_lock();
  12454		if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
  12455			bnxt_fw_reset_abort(bp, rc);
  12456			rtnl_unlock();
  12457			return;
  12458		}
  12459		bnxt_fw_reset_close(bp);
  12460		if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
  12461			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
  12462			tmo = HZ / 10;
  12463		} else {
  12464			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
  12465			tmo = bp->fw_reset_min_dsecs * HZ / 10;
  12466		}
  12467		rtnl_unlock();
  12468		bnxt_queue_fw_reset_work(bp, tmo);
  12469		return;
  12470	}
  12471	case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
  12472		u32 val;
  12473
  12474		val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
  12475		if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
  12476		    !bnxt_fw_reset_timeout(bp)) {
  12477			bnxt_queue_fw_reset_work(bp, HZ / 5);
  12478			return;
  12479		}
  12480
  12481		if (!bp->fw_health->primary) {
  12482			u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
  12483
  12484			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
  12485			bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
  12486			return;
  12487		}
  12488		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
  12489	}
  12490		fallthrough;
  12491	case BNXT_FW_RESET_STATE_RESET_FW:
  12492		bnxt_reset_all(bp);
  12493		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
  12494		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
  12495		return;
  12496	case BNXT_FW_RESET_STATE_ENABLE_DEV:
  12497		bnxt_inv_fw_health_reg(bp);
  12498		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
  12499		    !bp->fw_reset_min_dsecs) {
  12500			u16 val;
  12501
  12502			pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
  12503			if (val == 0xffff) {
  12504				if (bnxt_fw_reset_timeout(bp)) {
  12505					netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
  12506					rc = -ETIMEDOUT;
  12507					goto fw_reset_abort;
  12508				}
  12509				bnxt_queue_fw_reset_work(bp, HZ / 1000);
  12510				return;
  12511			}
  12512		}
  12513		clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
  12514		clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
  12515		if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
  12516		    !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
  12517			bnxt_dl_remote_reload(bp);
  12518		if (pci_enable_device(bp->pdev)) {
  12519			netdev_err(bp->dev, "Cannot re-enable PCI device\n");
  12520			rc = -ENODEV;
  12521			goto fw_reset_abort;
  12522		}
  12523		pci_set_master(bp->pdev);
  12524		bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
  12525		fallthrough;
  12526	case BNXT_FW_RESET_STATE_POLL_FW:
  12527		bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
  12528		rc = bnxt_hwrm_poll(bp);
  12529		if (rc) {
  12530			if (bnxt_fw_reset_timeout(bp)) {
  12531				netdev_err(bp->dev, "Firmware reset aborted\n");
  12532				goto fw_reset_abort_status;
  12533			}
  12534			bnxt_queue_fw_reset_work(bp, HZ / 5);
  12535			return;
  12536		}
  12537		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
  12538		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
  12539		fallthrough;
  12540	case BNXT_FW_RESET_STATE_OPENING:
  12541		while (!rtnl_trylock()) {
  12542			bnxt_queue_fw_reset_work(bp, HZ / 10);
  12543			return;
  12544		}
  12545		rc = bnxt_open(bp->dev);
  12546		if (rc) {
  12547			netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
  12548			bnxt_fw_reset_abort(bp, rc);
  12549			rtnl_unlock();
  12550			return;
  12551		}
  12552
  12553		if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
  12554		    bp->fw_health->enabled) {
  12555			bp->fw_health->last_fw_reset_cnt =
  12556				bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
  12557		}
  12558		bp->fw_reset_state = 0;
  12559		/* Make sure fw_reset_state is 0 before clearing the flag */
  12560		smp_mb__before_atomic();
  12561		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
  12562		bnxt_ulp_start(bp, 0);
  12563		bnxt_reenable_sriov(bp);
  12564		bnxt_vf_reps_alloc(bp);
  12565		bnxt_vf_reps_open(bp);
  12566		bnxt_ptp_reapply_pps(bp);
  12567		clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
  12568		if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
  12569			bnxt_dl_health_fw_recovery_done(bp);
  12570			bnxt_dl_health_fw_status_update(bp, true);
  12571		}
  12572		rtnl_unlock();
  12573		break;
  12574	}
  12575	return;
  12576
  12577fw_reset_abort_status:
  12578	if (bp->fw_health->status_reliable ||
  12579	    (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
  12580		u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
  12581
  12582		netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
  12583	}
  12584fw_reset_abort:
  12585	rtnl_lock();
  12586	bnxt_fw_reset_abort(bp, rc);
  12587	rtnl_unlock();
  12588}
  12589
  12590static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
  12591{
  12592	int rc;
  12593	struct bnxt *bp = netdev_priv(dev);
  12594
  12595	SET_NETDEV_DEV(dev, &pdev->dev);
  12596
  12597	/* enable device (incl. PCI PM wakeup), and bus-mastering */
  12598	rc = pci_enable_device(pdev);
  12599	if (rc) {
  12600		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
  12601		goto init_err;
  12602	}
  12603
  12604	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
  12605		dev_err(&pdev->dev,
  12606			"Cannot find PCI device base address, aborting\n");
  12607		rc = -ENODEV;
  12608		goto init_err_disable;
  12609	}
  12610
  12611	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
  12612	if (rc) {
  12613		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
  12614		goto init_err_disable;
  12615	}
  12616
  12617	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
  12618	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
  12619		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
  12620		rc = -EIO;
  12621		goto init_err_release;
  12622	}
  12623
  12624	pci_set_master(pdev);
  12625
  12626	bp->dev = dev;
  12627	bp->pdev = pdev;
  12628
  12629	/* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
  12630	 * determines the BAR size.
  12631	 */
  12632	bp->bar0 = pci_ioremap_bar(pdev, 0);
  12633	if (!bp->bar0) {
  12634		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
  12635		rc = -ENOMEM;
  12636		goto init_err_release;
  12637	}
  12638
  12639	bp->bar2 = pci_ioremap_bar(pdev, 4);
  12640	if (!bp->bar2) {
  12641		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
  12642		rc = -ENOMEM;
  12643		goto init_err_release;
  12644	}
  12645
  12646	pci_enable_pcie_error_reporting(pdev);
  12647
  12648	INIT_WORK(&bp->sp_task, bnxt_sp_task);
  12649	INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
  12650
  12651	spin_lock_init(&bp->ntp_fltr_lock);
  12652#if BITS_PER_LONG == 32
  12653	spin_lock_init(&bp->db_lock);
  12654#endif
  12655
  12656	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
  12657	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
  12658
  12659	timer_setup(&bp->timer, bnxt_timer, 0);
  12660	bp->current_interval = BNXT_TIMER_INTERVAL;
  12661
  12662	bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
  12663	bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
  12664
  12665	clear_bit(BNXT_STATE_OPEN, &bp->state);
  12666	return 0;
  12667
  12668init_err_release:
  12669	bnxt_unmap_bars(bp, pdev);
  12670	pci_release_regions(pdev);
  12671
  12672init_err_disable:
  12673	pci_disable_device(pdev);
  12674
  12675init_err:
  12676	return rc;
  12677}
  12678
  12679/* rtnl_lock held */
  12680static int bnxt_change_mac_addr(struct net_device *dev, void *p)
  12681{
  12682	struct sockaddr *addr = p;
  12683	struct bnxt *bp = netdev_priv(dev);
  12684	int rc = 0;
  12685
  12686	if (!is_valid_ether_addr(addr->sa_data))
  12687		return -EADDRNOTAVAIL;
  12688
  12689	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
  12690		return 0;
  12691
  12692	rc = bnxt_approve_mac(bp, addr->sa_data, true);
  12693	if (rc)
  12694		return rc;
  12695
  12696	eth_hw_addr_set(dev, addr->sa_data);
  12697	if (netif_running(dev)) {
  12698		bnxt_close_nic(bp, false, false);
  12699		rc = bnxt_open_nic(bp, false, false);
  12700	}
  12701
  12702	return rc;
  12703}
  12704
  12705/* rtnl_lock held */
  12706static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
  12707{
  12708	struct bnxt *bp = netdev_priv(dev);
  12709
  12710	if (netif_running(dev))
  12711		bnxt_close_nic(bp, true, false);
  12712
  12713	dev->mtu = new_mtu;
  12714	bnxt_set_ring_params(bp);
  12715
  12716	if (netif_running(dev))
  12717		return bnxt_open_nic(bp, true, false);
  12718
  12719	return 0;
  12720}
  12721
  12722int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
  12723{
  12724	struct bnxt *bp = netdev_priv(dev);
  12725	bool sh = false;
  12726	int rc;
  12727
  12728	if (tc > bp->max_tc) {
  12729		netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
  12730			   tc, bp->max_tc);
  12731		return -EINVAL;
  12732	}
  12733
  12734	if (netdev_get_num_tc(dev) == tc)
  12735		return 0;
  12736
  12737	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
  12738		sh = true;
  12739
  12740	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
  12741			      sh, tc, bp->tx_nr_rings_xdp);
  12742	if (rc)
  12743		return rc;
  12744
   12745	/* Need to close the device and redo hw resource allocations */
  12746	if (netif_running(bp->dev))
  12747		bnxt_close_nic(bp, true, false);
  12748
  12749	if (tc) {
  12750		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
  12751		netdev_set_num_tc(dev, tc);
  12752	} else {
  12753		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
  12754		netdev_reset_tc(dev);
  12755	}
  12756	bp->tx_nr_rings += bp->tx_nr_rings_xdp;
  12757	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
  12758			       bp->tx_nr_rings + bp->rx_nr_rings;
  12759
  12760	if (netif_running(bp->dev))
  12761		return bnxt_open_nic(bp, true, false);
  12762
  12763	return 0;
  12764}
  12765
  12766static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
  12767				  void *cb_priv)
  12768{
  12769	struct bnxt *bp = cb_priv;
  12770
  12771	if (!bnxt_tc_flower_enabled(bp) ||
  12772	    !tc_cls_can_offload_and_chain0(bp->dev, type_data))
  12773		return -EOPNOTSUPP;
  12774
  12775	switch (type) {
  12776	case TC_SETUP_CLSFLOWER:
  12777		return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
  12778	default:
  12779		return -EOPNOTSUPP;
  12780	}
  12781}
  12782
  12783LIST_HEAD(bnxt_block_cb_list);
  12784
  12785static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
  12786			 void *type_data)
  12787{
  12788	struct bnxt *bp = netdev_priv(dev);
  12789
  12790	switch (type) {
  12791	case TC_SETUP_BLOCK:
  12792		return flow_block_cb_setup_simple(type_data,
  12793						  &bnxt_block_cb_list,
  12794						  bnxt_setup_tc_block_cb,
  12795						  bp, bp, true);
  12796	case TC_SETUP_QDISC_MQPRIO: {
  12797		struct tc_mqprio_qopt *mqprio = type_data;
  12798
  12799		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
  12800
  12801		return bnxt_setup_mq_tc(dev, mqprio->num_tc);
  12802	}
  12803	default:
  12804		return -EOPNOTSUPP;
  12805	}
  12806}
  12807
  12808#ifdef CONFIG_RFS_ACCEL
  12809static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
  12810			    struct bnxt_ntuple_filter *f2)
  12811{
  12812	struct flow_keys *keys1 = &f1->fkeys;
  12813	struct flow_keys *keys2 = &f2->fkeys;
  12814
  12815	if (keys1->basic.n_proto != keys2->basic.n_proto ||
  12816	    keys1->basic.ip_proto != keys2->basic.ip_proto)
  12817		return false;
  12818
  12819	if (keys1->basic.n_proto == htons(ETH_P_IP)) {
  12820		if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
  12821		    keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
  12822			return false;
  12823	} else {
  12824		if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
  12825			   sizeof(keys1->addrs.v6addrs.src)) ||
  12826		    memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
  12827			   sizeof(keys1->addrs.v6addrs.dst)))
  12828			return false;
  12829	}
  12830
  12831	if (keys1->ports.ports == keys2->ports.ports &&
  12832	    keys1->control.flags == keys2->control.flags &&
  12833	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
  12834	    ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
  12835		return true;
  12836
  12837	return false;
  12838}
  12839
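/* aRFS flow steering callback.  Map the destination MAC to one of VNIC 0's
 * unicast L2 filters when it is not the device's own MAC, dissect the flow
 * keys (only TCP/UDP over IPv4/IPv6 without fragmentation are supported),
 * drop duplicates already present in the ntuple hash table, then allocate a
 * software filter ID from the bitmap and kick the sp_task to program the
 * filter via HWRM.
 */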
  12840static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
  12841			      u16 rxq_index, u32 flow_id)
  12842{
  12843	struct bnxt *bp = netdev_priv(dev);
  12844	struct bnxt_ntuple_filter *fltr, *new_fltr;
  12845	struct flow_keys *fkeys;
  12846	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
  12847	int rc = 0, idx, bit_id, l2_idx = 0;
  12848	struct hlist_head *head;
  12849	u32 flags;
  12850
  12851	if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
  12852		struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  12853		int off = 0, j;
  12854
  12855		netif_addr_lock_bh(dev);
  12856		for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
  12857			if (ether_addr_equal(eth->h_dest,
  12858					     vnic->uc_list + off)) {
  12859				l2_idx = j + 1;
  12860				break;
  12861			}
  12862		}
  12863		netif_addr_unlock_bh(dev);
  12864		if (!l2_idx)
  12865			return -EINVAL;
  12866	}
  12867	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
  12868	if (!new_fltr)
  12869		return -ENOMEM;
  12870
  12871	fkeys = &new_fltr->fkeys;
  12872	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
  12873		rc = -EPROTONOSUPPORT;
  12874		goto err_free;
  12875	}
  12876
  12877	if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
  12878	     fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
  12879	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
  12880	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
  12881		rc = -EPROTONOSUPPORT;
  12882		goto err_free;
  12883	}
  12884	if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
  12885	    bp->hwrm_spec_code < 0x10601) {
  12886		rc = -EPROTONOSUPPORT;
  12887		goto err_free;
  12888	}
  12889	flags = fkeys->control.flags;
  12890	if (((flags & FLOW_DIS_ENCAPSULATION) &&
  12891	     bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
  12892		rc = -EPROTONOSUPPORT;
  12893		goto err_free;
  12894	}
  12895
  12896	memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
  12897	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
  12898
  12899	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
  12900	head = &bp->ntp_fltr_hash_tbl[idx];
  12901	rcu_read_lock();
  12902	hlist_for_each_entry_rcu(fltr, head, hash) {
  12903		if (bnxt_fltr_match(fltr, new_fltr)) {
  12904			rcu_read_unlock();
  12905			rc = 0;
  12906			goto err_free;
  12907		}
  12908	}
  12909	rcu_read_unlock();
  12910
  12911	spin_lock_bh(&bp->ntp_fltr_lock);
  12912	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
  12913					 BNXT_NTP_FLTR_MAX_FLTR, 0);
  12914	if (bit_id < 0) {
  12915		spin_unlock_bh(&bp->ntp_fltr_lock);
  12916		rc = -ENOMEM;
  12917		goto err_free;
  12918	}
  12919
  12920	new_fltr->sw_id = (u16)bit_id;
  12921	new_fltr->flow_id = flow_id;
  12922	new_fltr->l2_fltr_idx = l2_idx;
  12923	new_fltr->rxq = rxq_index;
  12924	hlist_add_head_rcu(&new_fltr->hash, head);
  12925	bp->ntp_fltr_count++;
  12926	spin_unlock_bh(&bp->ntp_fltr_lock);
  12927
  12928	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
  12929	bnxt_queue_sp_work(bp);
  12930
  12931	return new_fltr->sw_id;
  12932
  12933err_free:
  12934	kfree(new_fltr);
  12935	return rc;
  12936}
  12937
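/* Walk the ntuple filter hash table: free hardware filters for flows that
 * RPS says have expired, and program any newly added filters that have not
 * been marked valid yet, dropping them on failure.
 */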
  12938static void bnxt_cfg_ntp_filters(struct bnxt *bp)
  12939{
  12940	int i;
  12941
  12942	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
  12943		struct hlist_head *head;
  12944		struct hlist_node *tmp;
  12945		struct bnxt_ntuple_filter *fltr;
  12946		int rc;
  12947
  12948		head = &bp->ntp_fltr_hash_tbl[i];
  12949		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
  12950			bool del = false;
  12951
  12952			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
  12953				if (rps_may_expire_flow(bp->dev, fltr->rxq,
  12954							fltr->flow_id,
  12955							fltr->sw_id)) {
  12956					bnxt_hwrm_cfa_ntuple_filter_free(bp,
  12957									 fltr);
  12958					del = true;
  12959				}
  12960			} else {
  12961				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
  12962								       fltr);
  12963				if (rc)
  12964					del = true;
  12965				else
  12966					set_bit(BNXT_FLTR_VALID, &fltr->state);
  12967			}
  12968
  12969			if (del) {
  12970				spin_lock_bh(&bp->ntp_fltr_lock);
  12971				hlist_del_rcu(&fltr->hash);
  12972				bp->ntp_fltr_count--;
  12973				spin_unlock_bh(&bp->ntp_fltr_lock);
  12974				synchronize_rcu();
  12975				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
  12976				kfree(fltr);
  12977			}
  12978		}
  12979	}
  12980	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
   12981		netdev_info(bp->dev, "Received PF driver unload event!\n");
  12982}
  12983
  12984#else
  12985
  12986static void bnxt_cfg_ntp_filters(struct bnxt *bp)
  12987{
  12988}
  12989
  12990#endif /* CONFIG_RFS_ACCEL */
  12991
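/* udp_tunnel_nic sync callback.  Each table holds a single port (VXLAN or
 * GENEVE); a non-zero port is programmed as the tunnel destination port and
 * a zero port frees it.
 */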
  12992static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
  12993{
  12994	struct bnxt *bp = netdev_priv(netdev);
  12995	struct udp_tunnel_info ti;
  12996	unsigned int cmd;
  12997
  12998	udp_tunnel_nic_get_port(netdev, table, 0, &ti);
  12999	if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
  13000		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
  13001	else
  13002		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
  13003
  13004	if (ti.port)
  13005		return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
  13006
  13007	return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
  13008}
  13009
  13010static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
  13011	.sync_table	= bnxt_udp_tunnel_sync,
  13012	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
  13013			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
  13014	.tables		= {
  13015		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
  13016		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
  13017	},
  13018};
  13019
  13020static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
  13021			       struct net_device *dev, u32 filter_mask,
  13022			       int nlflags)
  13023{
  13024	struct bnxt *bp = netdev_priv(dev);
  13025
  13026	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
  13027				       nlflags, filter_mask, NULL);
  13028}
  13029
  13030static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
  13031			       u16 flags, struct netlink_ext_ack *extack)
  13032{
  13033	struct bnxt *bp = netdev_priv(dev);
  13034	struct nlattr *attr, *br_spec;
  13035	int rem, rc = 0;
  13036
  13037	if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
  13038		return -EOPNOTSUPP;
  13039
  13040	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
  13041	if (!br_spec)
  13042		return -EINVAL;
  13043
  13044	nla_for_each_nested(attr, br_spec, rem) {
  13045		u16 mode;
  13046
  13047		if (nla_type(attr) != IFLA_BRIDGE_MODE)
  13048			continue;
  13049
  13050		if (nla_len(attr) < sizeof(mode))
  13051			return -EINVAL;
  13052
  13053		mode = nla_get_u16(attr);
  13054		if (mode == bp->br_mode)
  13055			break;
  13056
  13057		rc = bnxt_hwrm_set_br_mode(bp, mode);
  13058		if (!rc)
  13059			bp->br_mode = mode;
  13060		break;
  13061	}
  13062	return rc;
  13063}
  13064
  13065int bnxt_get_port_parent_id(struct net_device *dev,
  13066			    struct netdev_phys_item_id *ppid)
  13067{
  13068	struct bnxt *bp = netdev_priv(dev);
  13069
  13070	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
  13071		return -EOPNOTSUPP;
  13072
   13073	/* The PF and its VF-reps only support the switchdev framework */
  13074	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
  13075		return -EOPNOTSUPP;
  13076
  13077	ppid->id_len = sizeof(bp->dsn);
  13078	memcpy(ppid->id, bp->dsn, ppid->id_len);
  13079
  13080	return 0;
  13081}
  13082
  13083static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
  13084{
  13085	struct bnxt *bp = netdev_priv(dev);
  13086
  13087	return &bp->dl_port;
  13088}
  13089
  13090static const struct net_device_ops bnxt_netdev_ops = {
  13091	.ndo_open		= bnxt_open,
  13092	.ndo_start_xmit		= bnxt_start_xmit,
  13093	.ndo_stop		= bnxt_close,
  13094	.ndo_get_stats64	= bnxt_get_stats64,
  13095	.ndo_set_rx_mode	= bnxt_set_rx_mode,
  13096	.ndo_eth_ioctl		= bnxt_ioctl,
  13097	.ndo_validate_addr	= eth_validate_addr,
  13098	.ndo_set_mac_address	= bnxt_change_mac_addr,
  13099	.ndo_change_mtu		= bnxt_change_mtu,
  13100	.ndo_fix_features	= bnxt_fix_features,
  13101	.ndo_set_features	= bnxt_set_features,
  13102	.ndo_features_check	= bnxt_features_check,
  13103	.ndo_tx_timeout		= bnxt_tx_timeout,
  13104#ifdef CONFIG_BNXT_SRIOV
  13105	.ndo_get_vf_config	= bnxt_get_vf_config,
  13106	.ndo_set_vf_mac		= bnxt_set_vf_mac,
  13107	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
  13108	.ndo_set_vf_rate	= bnxt_set_vf_bw,
  13109	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
  13110	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
  13111	.ndo_set_vf_trust	= bnxt_set_vf_trust,
  13112#endif
  13113	.ndo_setup_tc           = bnxt_setup_tc,
  13114#ifdef CONFIG_RFS_ACCEL
  13115	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
  13116#endif
  13117	.ndo_bpf		= bnxt_xdp,
  13118	.ndo_xdp_xmit		= bnxt_xdp_xmit,
  13119	.ndo_bridge_getlink	= bnxt_bridge_getlink,
  13120	.ndo_bridge_setlink	= bnxt_bridge_setlink,
  13121	.ndo_get_devlink_port	= bnxt_get_devlink_port,
  13122};
  13123
  13124static void bnxt_remove_one(struct pci_dev *pdev)
  13125{
  13126	struct net_device *dev = pci_get_drvdata(pdev);
  13127	struct bnxt *bp = netdev_priv(dev);
  13128
  13129	if (BNXT_PF(bp))
  13130		bnxt_sriov_disable(bp);
  13131
  13132	if (BNXT_PF(bp))
  13133		devlink_port_type_clear(&bp->dl_port);
  13134
  13135	bnxt_ptp_clear(bp);
  13136	pci_disable_pcie_error_reporting(pdev);
  13137	unregister_netdev(dev);
  13138	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
  13139	/* Flush any pending tasks */
  13140	cancel_work_sync(&bp->sp_task);
  13141	cancel_delayed_work_sync(&bp->fw_reset_task);
  13142	bp->sp_event = 0;
  13143
  13144	bnxt_dl_fw_reporters_destroy(bp);
  13145	bnxt_dl_unregister(bp);
  13146	bnxt_shutdown_tc(bp);
  13147
  13148	bnxt_clear_int_mode(bp);
  13149	bnxt_hwrm_func_drv_unrgtr(bp);
  13150	bnxt_free_hwrm_resources(bp);
  13151	bnxt_ethtool_free(bp);
  13152	bnxt_dcb_free(bp);
  13153	kfree(bp->edev);
  13154	bp->edev = NULL;
  13155	kfree(bp->ptp_cfg);
  13156	bp->ptp_cfg = NULL;
  13157	kfree(bp->fw_health);
  13158	bp->fw_health = NULL;
  13159	bnxt_cleanup_pci(bp);
  13160	bnxt_free_ctx_mem(bp);
  13161	kfree(bp->ctx);
  13162	bp->ctx = NULL;
  13163	kfree(bp->rss_indir_tbl);
  13164	bp->rss_indir_tbl = NULL;
  13165	bnxt_free_port_stats(bp);
  13166	free_netdev(dev);
  13167}
  13168
  13169static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
  13170{
  13171	int rc = 0;
  13172	struct bnxt_link_info *link_info = &bp->link_info;
  13173
  13174	bp->phy_flags = 0;
  13175	rc = bnxt_hwrm_phy_qcaps(bp);
  13176	if (rc) {
  13177		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
  13178			   rc);
  13179		return rc;
  13180	}
  13181	if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
  13182		bp->dev->priv_flags |= IFF_SUPP_NOFCS;
  13183	else
  13184		bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
  13185	if (!fw_dflt)
  13186		return 0;
  13187
  13188	mutex_lock(&bp->link_lock);
  13189	rc = bnxt_update_link(bp, false);
  13190	if (rc) {
  13191		mutex_unlock(&bp->link_lock);
  13192		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
  13193			   rc);
  13194		return rc;
  13195	}
  13196
  13197	/* Older firmware does not have supported_auto_speeds, so assume
  13198	 * that all supported speeds can be autonegotiated.
  13199	 */
  13200	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
  13201		link_info->support_auto_speeds = link_info->support_speeds;
  13202
  13203	bnxt_init_ethtool_link_settings(bp);
  13204	mutex_unlock(&bp->link_lock);
  13205	return 0;
  13206}
  13207
  13208static int bnxt_get_max_irq(struct pci_dev *pdev)
  13209{
  13210	u16 ctrl;
  13211
  13212	if (!pdev->msix_cap)
  13213		return 1;
  13214
  13215	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
  13216	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
  13217}
  13218
  13219static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
  13220				int *max_cp)
  13221{
  13222	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
  13223	int max_ring_grps = 0, max_irq;
  13224
  13225	*max_tx = hw_resc->max_tx_rings;
  13226	*max_rx = hw_resc->max_rx_rings;
  13227	*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
  13228	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
  13229			bnxt_get_ulp_msix_num(bp),
  13230			hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
  13231	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
  13232		*max_cp = min_t(int, *max_cp, max_irq);
  13233	max_ring_grps = hw_resc->max_hw_ring_grps;
  13234	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
  13235		*max_cp -= 1;
  13236		*max_rx -= 2;
  13237	}
  13238	if (bp->flags & BNXT_FLAG_AGG_RINGS)
  13239		*max_rx >>= 1;
  13240	if (bp->flags & BNXT_FLAG_CHIP_P5) {
  13241		bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
  13242		/* On P5 chips, max_cp output param should be available NQs */
  13243		*max_cp = max_irq;
  13244	}
  13245	*max_rx = min_t(int, *max_rx, max_ring_grps);
  13246}
  13247
  13248int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
  13249{
  13250	int rx, tx, cp;
  13251
  13252	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
  13253	*max_rx = rx;
  13254	*max_tx = tx;
  13255	if (!rx || !tx || !cp)
  13256		return -ENOMEM;
  13257
  13258	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
  13259}
  13260
  13261static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
  13262			       bool shared)
  13263{
  13264	int rc;
  13265
  13266	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
  13267	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
  13268		/* Not enough rings, try disabling agg rings. */
  13269		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
  13270		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
  13271		if (rc) {
  13272			/* set BNXT_FLAG_AGG_RINGS back for consistency */
  13273			bp->flags |= BNXT_FLAG_AGG_RINGS;
  13274			return rc;
  13275		}
  13276		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
  13277		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
  13278		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
  13279		bnxt_set_ring_params(bp);
  13280	}
  13281
  13282	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
  13283		int max_cp, max_stat, max_irq;
  13284
  13285		/* Reserve minimum resources for RoCE */
  13286		max_cp = bnxt_get_max_func_cp_rings(bp);
  13287		max_stat = bnxt_get_max_func_stat_ctxs(bp);
  13288		max_irq = bnxt_get_max_func_irqs(bp);
  13289		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
  13290		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
  13291		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
  13292			return 0;
  13293
  13294		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
  13295		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
  13296		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
  13297		max_cp = min_t(int, max_cp, max_irq);
  13298		max_cp = min_t(int, max_cp, max_stat);
  13299		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
  13300		if (rc)
  13301			rc = 0;
  13302	}
  13303	return rc;
  13304}
  13305
   13306/* In the initial default shared-ring configuration, each shared ring must
   13307 * have an RX/TX ring pair.
   13308 */
  13309static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
  13310{
  13311	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
  13312	bp->rx_nr_rings = bp->cp_nr_rings;
  13313	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
  13314	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
  13315}
  13316
  13317static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
  13318{
  13319	int dflt_rings, max_rx_rings, max_tx_rings, rc;
  13320
  13321	if (!bnxt_can_reserve_rings(bp))
  13322		return 0;
  13323
  13324	if (sh)
  13325		bp->flags |= BNXT_FLAG_SHARED_RINGS;
  13326	dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
  13327	/* Reduce default rings on multi-port cards so that total default
  13328	 * rings do not exceed CPU count.
  13329	 */
  13330	if (bp->port_count > 1) {
  13331		int max_rings =
  13332			max_t(int, num_online_cpus() / bp->port_count, 1);
  13333
  13334		dflt_rings = min_t(int, dflt_rings, max_rings);
  13335	}
  13336	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
  13337	if (rc)
  13338		return rc;
  13339	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
  13340	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
  13341	if (sh)
  13342		bnxt_trim_dflt_sh_rings(bp);
  13343	else
  13344		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
  13345	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
  13346
  13347	rc = __bnxt_reserve_rings(bp);
  13348	if (rc && rc != -ENODEV)
  13349		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
  13350	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
  13351	if (sh)
  13352		bnxt_trim_dflt_sh_rings(bp);
  13353
  13354	/* Rings may have been trimmed, re-reserve the trimmed rings. */
  13355	if (bnxt_need_reserve_rings(bp)) {
  13356		rc = __bnxt_reserve_rings(bp);
  13357		if (rc && rc != -ENODEV)
  13358			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
  13359		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
  13360	}
  13361	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
  13362		bp->rx_nr_rings++;
  13363		bp->cp_nr_rings++;
  13364	}
  13365	if (rc) {
  13366		bp->tx_nr_rings = 0;
  13367		bp->rx_nr_rings = 0;
  13368	}
  13369	return rc;
  13370}
  13371
  13372static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
  13373{
  13374	int rc;
  13375
  13376	if (bp->tx_nr_rings)
  13377		return 0;
  13378
  13379	bnxt_ulp_irq_stop(bp);
  13380	bnxt_clear_int_mode(bp);
  13381	rc = bnxt_set_dflt_rings(bp, true);
  13382	if (rc) {
  13383		if (BNXT_VF(bp) && rc == -ENODEV)
  13384			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
  13385		else
  13386			netdev_err(bp->dev, "Not enough rings available.\n");
  13387		goto init_dflt_ring_err;
  13388	}
  13389	rc = bnxt_init_int_mode(bp);
  13390	if (rc)
  13391		goto init_dflt_ring_err;
  13392
  13393	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
  13394
  13395	bnxt_set_dflt_rfs(bp);
  13396
  13397init_dflt_ring_err:
  13398	bnxt_ulp_irq_restart(bp, rc);
  13399	return rc;
  13400}
  13401
  13402int bnxt_restore_pf_fw_resources(struct bnxt *bp)
  13403{
  13404	int rc;
  13405
  13406	ASSERT_RTNL();
  13407	bnxt_hwrm_func_qcaps(bp);
  13408
  13409	if (netif_running(bp->dev))
  13410		__bnxt_close_nic(bp, true, false);
  13411
  13412	bnxt_ulp_irq_stop(bp);
  13413	bnxt_clear_int_mode(bp);
  13414	rc = bnxt_init_int_mode(bp);
  13415	bnxt_ulp_irq_restart(bp, rc);
  13416
  13417	if (netif_running(bp->dev)) {
  13418		if (rc)
  13419			dev_close(bp->dev);
  13420		else
  13421			rc = bnxt_open_nic(bp, true, false);
  13422	}
  13423
  13424	return rc;
  13425}
  13426
  13427static int bnxt_init_mac_addr(struct bnxt *bp)
  13428{
  13429	int rc = 0;
  13430
  13431	if (BNXT_PF(bp)) {
  13432		eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
  13433	} else {
  13434#ifdef CONFIG_BNXT_SRIOV
  13435		struct bnxt_vf_info *vf = &bp->vf;
  13436		bool strict_approval = true;
  13437
  13438		if (is_valid_ether_addr(vf->mac_addr)) {
  13439			/* overwrite netdev dev_addr with admin VF MAC */
  13440			eth_hw_addr_set(bp->dev, vf->mac_addr);
  13441			/* Older PF driver or firmware may not approve this
  13442			 * correctly.
  13443			 */
  13444			strict_approval = false;
  13445		} else {
  13446			eth_hw_addr_random(bp->dev);
  13447		}
  13448		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
  13449#endif
  13450	}
  13451	return rc;
  13452}
  13453
  13454static void bnxt_vpd_read_info(struct bnxt *bp)
  13455{
  13456	struct pci_dev *pdev = bp->pdev;
  13457	unsigned int vpd_size, kw_len;
  13458	int pos, size;
  13459	u8 *vpd_data;
  13460
  13461	vpd_data = pci_vpd_alloc(pdev, &vpd_size);
  13462	if (IS_ERR(vpd_data)) {
  13463		pci_warn(pdev, "Unable to read VPD\n");
  13464		return;
  13465	}
  13466
  13467	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
  13468					   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
  13469	if (pos < 0)
  13470		goto read_sn;
  13471
  13472	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
  13473	memcpy(bp->board_partno, &vpd_data[pos], size);
  13474
  13475read_sn:
  13476	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
  13477					   PCI_VPD_RO_KEYWORD_SERIALNO,
  13478					   &kw_len);
  13479	if (pos < 0)
  13480		goto exit;
  13481
  13482	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
  13483	memcpy(bp->board_serialno, &vpd_data[pos], size);
  13484exit:
  13485	kfree(vpd_data);
  13486}
  13487
  13488static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
  13489{
  13490	struct pci_dev *pdev = bp->pdev;
  13491	u64 qword;
  13492
  13493	qword = pci_get_dsn(pdev);
  13494	if (!qword) {
  13495		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
  13496		return -EOPNOTSUPP;
  13497	}
  13498
  13499	put_unaligned_le64(qword, dsn);
  13500
  13501	bp->flags |= BNXT_FLAG_DSN_VALID;
  13502	return 0;
  13503}
  13504
  13505static int bnxt_map_db_bar(struct bnxt *bp)
  13506{
  13507	if (!bp->db_size)
  13508		return -ENODEV;
  13509	bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
  13510	if (!bp->bar1)
  13511		return -ENOMEM;
  13512	return 0;
  13513}
  13514
  13515void bnxt_print_device_info(struct bnxt *bp)
  13516{
  13517	netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
  13518		    board_info[bp->board_idx].name,
  13519		    (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
  13520
  13521	pcie_print_link_status(bp->pdev);
  13522}
  13523
  13524static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
  13525{
  13526	struct net_device *dev;
  13527	struct bnxt *bp;
  13528	int rc, max_irqs;
  13529
  13530	if (pci_is_bridge(pdev))
  13531		return -ENODEV;
  13532
   13533	/* Clear any DMA transactions left pending by the crashed kernel
   13534	 * before loading the driver in the capture (kdump) kernel.
   13535	 */
  13536	if (is_kdump_kernel()) {
  13537		pci_clear_master(pdev);
  13538		pcie_flr(pdev);
  13539	}
  13540
  13541	max_irqs = bnxt_get_max_irq(pdev);
  13542	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
  13543	if (!dev)
  13544		return -ENOMEM;
  13545
  13546	bp = netdev_priv(dev);
  13547	bp->board_idx = ent->driver_data;
  13548	bp->msg_enable = BNXT_DEF_MSG_ENABLE;
  13549	bnxt_set_max_func_irqs(bp, max_irqs);
  13550
  13551	if (bnxt_vf_pciid(bp->board_idx))
  13552		bp->flags |= BNXT_FLAG_VF;
  13553
  13554	if (pdev->msix_cap)
  13555		bp->flags |= BNXT_FLAG_MSIX_CAP;
  13556
  13557	rc = bnxt_init_board(pdev, dev);
  13558	if (rc < 0)
  13559		goto init_err_free;
  13560
  13561	dev->netdev_ops = &bnxt_netdev_ops;
  13562	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
  13563	dev->ethtool_ops = &bnxt_ethtool_ops;
  13564	pci_set_drvdata(pdev, dev);
  13565
  13566	rc = bnxt_alloc_hwrm_resources(bp);
  13567	if (rc)
  13568		goto init_err_pci_clean;
  13569
  13570	mutex_init(&bp->hwrm_cmd_lock);
  13571	mutex_init(&bp->link_lock);
  13572
  13573	rc = bnxt_fw_init_one_p1(bp);
  13574	if (rc)
  13575		goto init_err_pci_clean;
  13576
  13577	if (BNXT_PF(bp))
  13578		bnxt_vpd_read_info(bp);
  13579
  13580	if (BNXT_CHIP_P5(bp)) {
  13581		bp->flags |= BNXT_FLAG_CHIP_P5;
  13582		if (BNXT_CHIP_SR2(bp))
  13583			bp->flags |= BNXT_FLAG_CHIP_SR2;
  13584	}
  13585
  13586	rc = bnxt_alloc_rss_indir_tbl(bp);
  13587	if (rc)
  13588		goto init_err_pci_clean;
  13589
  13590	rc = bnxt_fw_init_one_p2(bp);
  13591	if (rc)
  13592		goto init_err_pci_clean;
  13593
  13594	rc = bnxt_map_db_bar(bp);
  13595	if (rc) {
  13596		dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
  13597			rc);
  13598		goto init_err_pci_clean;
  13599	}
  13600
  13601	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
  13602			   NETIF_F_TSO | NETIF_F_TSO6 |
  13603			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
  13604			   NETIF_F_GSO_IPXIP4 |
  13605			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
  13606			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
  13607			   NETIF_F_RXCSUM | NETIF_F_GRO;
  13608
  13609	if (BNXT_SUPPORTS_TPA(bp))
  13610		dev->hw_features |= NETIF_F_LRO;
  13611
  13612	dev->hw_enc_features =
  13613			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
  13614			NETIF_F_TSO | NETIF_F_TSO6 |
  13615			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
  13616			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
  13617			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
  13618	dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
  13619
  13620	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
  13621				    NETIF_F_GSO_GRE_CSUM;
  13622	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
  13623	if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
  13624		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
  13625	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
  13626		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
  13627	if (BNXT_SUPPORTS_TPA(bp))
  13628		dev->hw_features |= NETIF_F_GRO_HW;
  13629	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
  13630	if (dev->features & NETIF_F_GRO_HW)
  13631		dev->features &= ~NETIF_F_LRO;
  13632	dev->priv_flags |= IFF_UNICAST_FLT;
  13633
  13634#ifdef CONFIG_BNXT_SRIOV
  13635	init_waitqueue_head(&bp->sriov_cfg_wait);
  13636#endif
  13637	if (BNXT_SUPPORTS_TPA(bp)) {
  13638		bp->gro_func = bnxt_gro_func_5730x;
  13639		if (BNXT_CHIP_P4(bp))
  13640			bp->gro_func = bnxt_gro_func_5731x;
  13641		else if (BNXT_CHIP_P5(bp))
  13642			bp->gro_func = bnxt_gro_func_5750x;
  13643	}
  13644	if (!BNXT_CHIP_P4_PLUS(bp))
  13645		bp->flags |= BNXT_FLAG_DOUBLE_DB;
  13646
  13647	rc = bnxt_init_mac_addr(bp);
  13648	if (rc) {
  13649		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
  13650		rc = -EADDRNOTAVAIL;
  13651		goto init_err_pci_clean;
  13652	}
  13653
  13654	if (BNXT_PF(bp)) {
  13655		/* Read the adapter's DSN to use as the eswitch switch_id */
  13656		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
  13657	}
  13658
  13659	/* MTU range: 60 - FW defined max */
  13660	dev->min_mtu = ETH_ZLEN;
  13661	dev->max_mtu = bp->max_mtu;
  13662
  13663	rc = bnxt_probe_phy(bp, true);
  13664	if (rc)
  13665		goto init_err_pci_clean;
  13666
  13667	bnxt_set_rx_skb_mode(bp, false);
  13668	bnxt_set_tpa_flags(bp);
  13669	bnxt_set_ring_params(bp);
  13670	rc = bnxt_set_dflt_rings(bp, true);
  13671	if (rc) {
  13672		if (BNXT_VF(bp) && rc == -ENODEV) {
  13673			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
  13674		} else {
  13675			netdev_err(bp->dev, "Not enough rings available.\n");
  13676			rc = -ENOMEM;
  13677		}
  13678		goto init_err_pci_clean;
  13679	}
  13680
  13681	bnxt_fw_init_one_p3(bp);
  13682
  13683	bnxt_init_dflt_coal(bp);
  13684
  13685	if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
  13686		bp->flags |= BNXT_FLAG_STRIP_VLAN;
  13687
  13688	rc = bnxt_init_int_mode(bp);
  13689	if (rc)
  13690		goto init_err_pci_clean;
  13691
  13692	/* No TC has been set yet and rings may have been trimmed due to
  13693	 * limited MSIX, so we re-initialize the TX rings per TC.
  13694	 */
  13695	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
  13696
  13697	if (BNXT_PF(bp)) {
  13698		if (!bnxt_pf_wq) {
  13699			bnxt_pf_wq =
  13700				create_singlethread_workqueue("bnxt_pf_wq");
  13701			if (!bnxt_pf_wq) {
  13702				dev_err(&pdev->dev, "Unable to create workqueue.\n");
  13703				rc = -ENOMEM;
  13704				goto init_err_pci_clean;
  13705			}
  13706		}
  13707		rc = bnxt_init_tc(bp);
  13708		if (rc)
  13709			netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
  13710				   rc);
  13711	}
  13712
  13713	bnxt_inv_fw_health_reg(bp);
  13714	rc = bnxt_dl_register(bp);
  13715	if (rc)
  13716		goto init_err_dl;
  13717
  13718	rc = register_netdev(dev);
  13719	if (rc)
  13720		goto init_err_cleanup;
  13721
  13722	if (BNXT_PF(bp))
  13723		devlink_port_type_eth_set(&bp->dl_port, bp->dev);
  13724	bnxt_dl_fw_reporters_create(bp);
  13725
  13726	bnxt_print_device_info(bp);
  13727
  13728	pci_save_state(pdev);
  13729	return 0;
  13730
  13731init_err_cleanup:
  13732	bnxt_dl_unregister(bp);
  13733init_err_dl:
  13734	bnxt_shutdown_tc(bp);
  13735	bnxt_clear_int_mode(bp);
  13736
  13737init_err_pci_clean:
  13738	bnxt_hwrm_func_drv_unrgtr(bp);
  13739	bnxt_free_hwrm_resources(bp);
  13740	bnxt_ethtool_free(bp);
  13741	bnxt_ptp_clear(bp);
  13742	kfree(bp->ptp_cfg);
  13743	bp->ptp_cfg = NULL;
  13744	kfree(bp->fw_health);
  13745	bp->fw_health = NULL;
  13746	bnxt_cleanup_pci(bp);
  13747	bnxt_free_ctx_mem(bp);
  13748	kfree(bp->ctx);
  13749	bp->ctx = NULL;
  13750	kfree(bp->rss_indir_tbl);
  13751	bp->rss_indir_tbl = NULL;
  13752
  13753init_err_free:
  13754	free_netdev(dev);
  13755	return rc;
  13756}
  13757
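       /* PCI shutdown callback: close the netdev if it is running, shut
        * down ULP clients, clear the interrupt mode and disable the PCI
        * device; on system power-off, arm wake-on-LAN (if configured) and
        * enter D3hot.
        */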
  13758static void bnxt_shutdown(struct pci_dev *pdev)
  13759{
  13760	struct net_device *dev = pci_get_drvdata(pdev);
  13761	struct bnxt *bp;
  13762
  13763	if (!dev)
  13764		return;
  13765
  13766	rtnl_lock();
  13767	bp = netdev_priv(dev);
  13768	if (!bp)
  13769		goto shutdown_exit;
  13770
  13771	if (netif_running(dev))
  13772		dev_close(dev);
  13773
  13774	bnxt_ulp_shutdown(bp);
  13775	bnxt_clear_int_mode(bp);
  13776	pci_disable_device(pdev);
  13777
  13778	if (system_state == SYSTEM_POWER_OFF) {
  13779		pci_wake_from_d3(pdev, bp->wol);
  13780		pci_set_power_state(pdev, PCI_D3hot);
  13781	}
  13782
  13783shutdown_exit:
  13784	rtnl_unlock();
  13785}
  13786
  13787#ifdef CONFIG_PM_SLEEP
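       /* System suspend: stop ULP clients, detach and close the netdev if
        * it is running, unregister the driver from the firmware, disable
        * the PCI device and free firmware context memory.
        */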
  13788static int bnxt_suspend(struct device *device)
  13789{
  13790	struct net_device *dev = dev_get_drvdata(device);
  13791	struct bnxt *bp = netdev_priv(dev);
  13792	int rc = 0;
  13793
  13794	rtnl_lock();
  13795	bnxt_ulp_stop(bp);
  13796	if (netif_running(dev)) {
  13797		netif_device_detach(dev);
  13798		rc = bnxt_close(dev);
  13799	}
  13800	bnxt_hwrm_func_drv_unrgtr(bp);
  13801	pci_disable_device(bp->pdev);
  13802	bnxt_free_ctx_mem(bp);
  13803	kfree(bp->ctx);
  13804	bp->ctx = NULL;
  13805	rtnl_unlock();
  13806	return rc;
  13807}
  13808
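       /* System resume: re-enable the PCI device, re-establish firmware
        * communication (version query, function reset, capability query,
        * driver registration), re-read the WoL settings and reopen the
        * netdev if it was running before suspend.
        */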
  13809static int bnxt_resume(struct device *device)
  13810{
  13811	struct net_device *dev = dev_get_drvdata(device);
  13812	struct bnxt *bp = netdev_priv(dev);
  13813	int rc = 0;
  13814
  13815	rtnl_lock();
  13816	rc = pci_enable_device(bp->pdev);
  13817	if (rc) {
  13818		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
  13819			   rc);
  13820		goto resume_exit;
  13821	}
  13822	pci_set_master(bp->pdev);
  13823	if (bnxt_hwrm_ver_get(bp)) {
  13824		rc = -ENODEV;
  13825		goto resume_exit;
  13826	}
  13827	rc = bnxt_hwrm_func_reset(bp);
  13828	if (rc) {
  13829		rc = -EBUSY;
  13830		goto resume_exit;
  13831	}
  13832
  13833	rc = bnxt_hwrm_func_qcaps(bp);
  13834	if (rc)
  13835		goto resume_exit;
  13836
  13837	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
  13838		rc = -ENODEV;
  13839		goto resume_exit;
  13840	}
  13841
  13842	bnxt_get_wol_settings(bp);
  13843	if (netif_running(dev)) {
  13844		rc = bnxt_open(dev);
  13845		if (!rc)
  13846			netif_device_attach(dev);
  13847	}
  13848
  13849resume_exit:
  13850	bnxt_ulp_start(bp, rc);
  13851	if (!rc)
  13852		bnxt_reenable_sriov(bp);
  13853	rtnl_unlock();
  13854	return rc;
  13855}
  13856
  13857static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
  13858#define BNXT_PM_OPS (&bnxt_pm_ops)
  13859
  13860#else
  13861
  13862#define BNXT_PM_OPS NULL
  13863
  13864#endif /* CONFIG_PM_SLEEP */
  13865
  13866/**
  13867 * bnxt_io_error_detected - called when a PCI error is detected
  13868 * @pdev: Pointer to PCI device
  13869 * @state: The current pci connection state
  13870 *
  13871 * This function is called after a PCI bus error affecting
  13872 * this device has been detected.
  13873 */
  13874static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
  13875					       pci_channel_state_t state)
  13876{
  13877	struct net_device *netdev = pci_get_drvdata(pdev);
  13878	struct bnxt *bp = netdev_priv(netdev);
  13879
  13880	netdev_info(netdev, "PCI I/O error detected\n");
  13881
  13882	rtnl_lock();
  13883	netif_device_detach(netdev);
  13884
  13885	bnxt_ulp_stop(bp);
  13886
  13887	if (state == pci_channel_io_perm_failure) {
  13888		rtnl_unlock();
  13889		return PCI_ERS_RESULT_DISCONNECT;
  13890	}
  13891
  13892	if (state == pci_channel_io_frozen)
  13893		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
  13894
  13895	if (netif_running(netdev))
  13896		bnxt_close(netdev);
  13897
  13898	if (pci_is_enabled(pdev))
  13899		pci_disable_device(pdev);
  13900	bnxt_free_ctx_mem(bp);
  13901	kfree(bp->ctx);
  13902	bp->ctx = NULL;
  13903	rtnl_unlock();
  13904
  13905	/* Request a slot reset. */
  13906	return PCI_ERS_RESULT_NEED_RESET;
  13907}
  13908
  13909/**
  13910 * bnxt_io_slot_reset - called after the PCI bus has been reset.
  13911 * @pdev: Pointer to PCI device
  13912 *
  13913 * Restart the card from scratch, as if from a cold-boot.
  13914 * At this point, the card has experienced a hard reset,
  13915 * followed by fixups by BIOS, and has its config space
  13916 * set up identically to what it was at cold boot.
  13917 */
  13918static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
  13919{
  13920	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
  13921	struct net_device *netdev = pci_get_drvdata(pdev);
  13922	struct bnxt *bp = netdev_priv(netdev);
  13923	int err = 0, off;
  13924
  13925	netdev_info(bp->dev, "PCI Slot Reset\n");
  13926
  13927	rtnl_lock();
  13928
  13929	if (pci_enable_device(pdev)) {
  13930		dev_err(&pdev->dev,
  13931			"Cannot re-enable PCI device after reset.\n");
  13932	} else {
  13933		pci_set_master(pdev);
  13934		/* Upon a fatal error, the device's internal logic that
  13935		 * latches the BAR values is reset and is restored only when
  13936		 * the BARs are rewritten.
  13937		 *
  13938		 * Since pci_restore_state() does not rewrite the BARs if the
  13939		 * current value matches the previously saved value, write the
  13940		 * BARs to 0 to force a restore after a fatal error.
  13941		 */
  13942		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
  13943				       &bp->state)) {
  13944			for (off = PCI_BASE_ADDRESS_0;
  13945			     off <= PCI_BASE_ADDRESS_5; off += 4)
  13946				pci_write_config_dword(bp->pdev, off, 0);
  13947		}
  13948		pci_restore_state(pdev);
  13949		pci_save_state(pdev);
  13950
  13951		err = bnxt_hwrm_func_reset(bp);
  13952		if (!err)
  13953			result = PCI_ERS_RESULT_RECOVERED;
  13954	}
  13955
  13956	rtnl_unlock();
  13957
  13958	return result;
  13959}
  13960
  13961/**
  13962 * bnxt_io_resume - called when traffic can start flowing again.
  13963 * @pdev: Pointer to PCI device
  13964 *
  13965 * This callback is called when the error recovery driver tells
  13966 * us that it is OK to resume normal operation.
  13967 */
  13968static void bnxt_io_resume(struct pci_dev *pdev)
  13969{
  13970	struct net_device *netdev = pci_get_drvdata(pdev);
  13971	struct bnxt *bp = netdev_priv(netdev);
  13972	int err;
  13973
  13974	netdev_info(bp->dev, "PCI Slot Resume\n");
  13975	rtnl_lock();
  13976
  13977	err = bnxt_hwrm_func_qcaps(bp);
  13978	if (!err && netif_running(netdev))
  13979		err = bnxt_open(netdev);
  13980
  13981	bnxt_ulp_start(bp, err);
  13982	if (!err) {
  13983		bnxt_reenable_sriov(bp);
  13984		netif_device_attach(netdev);
  13985	}
  13986
  13987	rtnl_unlock();
  13988}
  13989
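       /* PCI error recovery (AER) callbacks invoked by the PCI core */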
  13990static const struct pci_error_handlers bnxt_err_handler = {
  13991	.error_detected	= bnxt_io_error_detected,
  13992	.slot_reset	= bnxt_io_slot_reset,
  13993	.resume		= bnxt_io_resume
  13994};
  13995
  13996static struct pci_driver bnxt_pci_driver = {
  13997	.name		= DRV_MODULE_NAME,
  13998	.id_table	= bnxt_pci_tbl,
  13999	.probe		= bnxt_init_one,
  14000	.remove		= bnxt_remove_one,
  14001	.shutdown	= bnxt_shutdown,
  14002	.driver.pm	= BNXT_PM_OPS,
  14003	.err_handler	= &bnxt_err_handler,
  14004#if defined(CONFIG_BNXT_SRIOV)
  14005	.sriov_configure = bnxt_sriov_configure,
  14006#endif
  14007};
  14008
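       /* Module init: set up debugfs support and register the PCI driver */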
  14009static int __init bnxt_init(void)
  14010{
  14011	bnxt_debug_init();
  14012	return pci_register_driver(&bnxt_pci_driver);
  14013}
  14014
  14015static void __exit bnxt_exit(void)
  14016{
  14017	pci_unregister_driver(&bnxt_pci_driver);
  14018	if (bnxt_pf_wq)
  14019		destroy_workqueue(bnxt_pf_wq);
  14020	bnxt_debug_exit();
  14021}
  14022
  14023module_init(bnxt_init);
  14024module_exit(bnxt_exit);