cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qede_main.c (77144B)


      1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
      2/* QLogic qede NIC Driver
      3 * Copyright (c) 2015-2017  QLogic Corporation
      4 * Copyright (c) 2019-2020 Marvell International Ltd.
      5 */
      6
      7#include <linux/crash_dump.h>
      8#include <linux/module.h>
      9#include <linux/pci.h>
     10#include <linux/device.h>
     11#include <linux/netdevice.h>
     12#include <linux/etherdevice.h>
     13#include <linux/skbuff.h>
     14#include <linux/errno.h>
     15#include <linux/list.h>
     16#include <linux/string.h>
     17#include <linux/dma-mapping.h>
     18#include <linux/interrupt.h>
     19#include <asm/byteorder.h>
     20#include <asm/param.h>
     21#include <linux/io.h>
     22#include <linux/netdev_features.h>
     23#include <linux/udp.h>
     24#include <linux/tcp.h>
     25#include <net/udp_tunnel.h>
     26#include <linux/ip.h>
     27#include <net/ipv6.h>
     28#include <net/tcp.h>
     29#include <linux/if_ether.h>
     30#include <linux/if_vlan.h>
     31#include <linux/pkt_sched.h>
     32#include <linux/ethtool.h>
     33#include <linux/in.h>
     34#include <linux/random.h>
     35#include <net/ip6_checksum.h>
     36#include <linux/bitops.h>
     37#include <linux/vmalloc.h>
     38#include <linux/aer.h>
     39#include "qede.h"
     40#include "qede_ptp.h"
     41
     42MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
     43MODULE_LICENSE("GPL");
     44
     45static uint debug;
     46module_param(debug, uint, 0);
     47MODULE_PARM_DESC(debug, " Default debug msglevel");
     48
     49static const struct qed_eth_ops *qed_ops;
     50
     51#define CHIP_NUM_57980S_40		0x1634
     52#define CHIP_NUM_57980S_10		0x1666
     53#define CHIP_NUM_57980S_MF		0x1636
     54#define CHIP_NUM_57980S_100		0x1644
     55#define CHIP_NUM_57980S_50		0x1654
     56#define CHIP_NUM_57980S_25		0x1656
     57#define CHIP_NUM_57980S_IOV		0x1664
     58#define CHIP_NUM_AH			0x8070
     59#define CHIP_NUM_AH_IOV			0x8090
     60
     61#ifndef PCI_DEVICE_ID_NX2_57980E
     62#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
     63#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
     64#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
     65#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
     66#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
     67#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
     68#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
     69#define PCI_DEVICE_ID_AH		CHIP_NUM_AH
     70#define PCI_DEVICE_ID_AH_IOV		CHIP_NUM_AH_IOV
     71
     72#endif
     73
     74enum qede_pci_private {
     75	QEDE_PRIVATE_PF,
     76	QEDE_PRIVATE_VF
     77};
     78
     79static const struct pci_device_id qede_pci_tbl[] = {
     80	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
     81	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
     82	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
     83	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
     84	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
     85	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
     86#ifdef CONFIG_QED_SRIOV
     87	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
     88#endif
     89	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
     90#ifdef CONFIG_QED_SRIOV
     91	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
     92#endif
     93	{ 0 }
     94};
     95
     96MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
     97
     98static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
     99static pci_ers_result_t
    100qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);
    101
    102#define TX_TIMEOUT		(5 * HZ)
    103
    104/* Utilize last protocol index for XDP */
    105#define XDP_PI	11
    106
    107static void qede_remove(struct pci_dev *pdev);
    108static void qede_shutdown(struct pci_dev *pdev);
    109static void qede_link_update(void *dev, struct qed_link_output *link);
    110static void qede_schedule_recovery_handler(void *dev);
    111static void qede_recovery_handler(struct qede_dev *edev);
    112static void qede_schedule_hw_err_handler(void *dev,
    113					 enum qed_hw_err_type err_type);
    114static void qede_get_eth_tlv_data(void *edev, void *data);
    115static void qede_get_generic_tlv_data(void *edev,
    116				      struct qed_generic_tlvs *data);
    117static void qede_generic_hw_err_handler(struct qede_dev *edev);
    118#ifdef CONFIG_QED_SRIOV
    119static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
    120			    __be16 vlan_proto)
    121{
    122	struct qede_dev *edev = netdev_priv(ndev);
    123
    124	if (vlan > 4095) {
    125		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
    126		return -EINVAL;
    127	}
    128
    129	if (vlan_proto != htons(ETH_P_8021Q))
    130		return -EPROTONOSUPPORT;
    131
    132	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
    133		   vlan, vf);
    134
    135	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
    136}
    137
    138static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
    139{
    140	struct qede_dev *edev = netdev_priv(ndev);
    141
    142	DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx);
    143
    144	if (!is_valid_ether_addr(mac)) {
    145		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
    146		return -EINVAL;
    147	}
    148
    149	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
    150}
    151
    152static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
    153{
    154	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
    155	struct qed_dev_info *qed_info = &edev->dev_info.common;
    156	struct qed_update_vport_params *vport_params;
    157	int rc;
    158
    159	vport_params = vzalloc(sizeof(*vport_params));
    160	if (!vport_params)
    161		return -ENOMEM;
    162	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
    163
    164	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
    165
    166	/* Enable/Disable Tx switching for PF */
    167	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
    168	    !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
    169		vport_params->vport_id = 0;
    170		vport_params->update_tx_switching_flg = 1;
    171		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
    172		edev->ops->vport_update(edev->cdev, vport_params);
    173	}
    174
    175	vfree(vport_params);
    176	return rc;
    177}
    178#endif
    179
    180static const struct pci_error_handlers qede_err_handler = {
    181	.error_detected = qede_io_error_detected,
    182};
    183
    184static struct pci_driver qede_pci_driver = {
    185	.name = "qede",
    186	.id_table = qede_pci_tbl,
    187	.probe = qede_probe,
    188	.remove = qede_remove,
    189	.shutdown = qede_shutdown,
    190#ifdef CONFIG_QED_SRIOV
    191	.sriov_configure = qede_sriov_configure,
    192#endif
    193	.err_handler = &qede_err_handler,
    194};
    195
    196static struct qed_eth_cb_ops qede_ll_ops = {
    197	{
    198#ifdef CONFIG_RFS_ACCEL
    199		.arfs_filter_op = qede_arfs_filter_op,
    200#endif
    201		.link_update = qede_link_update,
    202		.schedule_recovery_handler = qede_schedule_recovery_handler,
    203		.schedule_hw_err_handler = qede_schedule_hw_err_handler,
    204		.get_generic_tlv_data = qede_get_generic_tlv_data,
    205		.get_protocol_tlv_data = qede_get_eth_tlv_data,
    206	},
    207	.force_mac = qede_force_mac,
    208	.ports_update = qede_udp_ports_update,
    209};
    210
    211static int qede_netdev_event(struct notifier_block *this, unsigned long event,
    212			     void *ptr)
    213{
    214	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
    215	struct ethtool_drvinfo drvinfo;
    216	struct qede_dev *edev;
    217
    218	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
    219		goto done;
    220
    221	/* Check whether this is a qede device */
    222	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
    223		goto done;
    224
    225	memset(&drvinfo, 0, sizeof(drvinfo));
    226	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
    227	if (strcmp(drvinfo.driver, "qede"))
    228		goto done;
    229	edev = netdev_priv(ndev);
    230
    231	switch (event) {
    232	case NETDEV_CHANGENAME:
    233		/* Notify qed of the name change */
    234		if (!edev->ops || !edev->ops->common)
    235			goto done;
    236		edev->ops->common->set_name(edev->cdev, edev->ndev->name);
    237		break;
    238	case NETDEV_CHANGEADDR:
    239		edev = netdev_priv(ndev);
    240		qede_rdma_event_changeaddr(edev);
    241		break;
    242	}
    243
    244done:
    245	return NOTIFY_DONE;
    246}
    247
    248static struct notifier_block qede_netdev_notifier = {
    249	.notifier_call = qede_netdev_event,
    250};
    251
    252static
    253int __init qede_init(void)
    254{
    255	int ret;
    256
    257	pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n");
    258
    259	qede_forced_speed_maps_init();
    260
    261	qed_ops = qed_get_eth_ops();
    262	if (!qed_ops) {
    263		pr_notice("Failed to get qed ethtool operations\n");
    264		return -EINVAL;
    265	}
    266
    267	/* Must register notifier before pci ops, since we might miss
    268	 * interface rename after pci probe and netdev registration.
    269	 */
    270	ret = register_netdevice_notifier(&qede_netdev_notifier);
    271	if (ret) {
    272		pr_notice("Failed to register netdevice_notifier\n");
    273		qed_put_eth_ops();
    274		return -EINVAL;
    275	}
    276
    277	ret = pci_register_driver(&qede_pci_driver);
    278	if (ret) {
    279		pr_notice("Failed to register driver\n");
    280		unregister_netdevice_notifier(&qede_netdev_notifier);
    281		qed_put_eth_ops();
    282		return -EINVAL;
    283	}
    284
    285	return 0;
    286}
    287
    288static void __exit qede_cleanup(void)
    289{
    290	if (debug & QED_LOG_INFO_MASK)
    291		pr_info("qede_cleanup called\n");
    292
    293	unregister_netdevice_notifier(&qede_netdev_notifier);
    294	pci_unregister_driver(&qede_pci_driver);
    295	qed_put_eth_ops();
    296}
    297
    298module_init(qede_init);
    299module_exit(qede_cleanup);
    300
    301static int qede_open(struct net_device *ndev);
    302static int qede_close(struct net_device *ndev);
    303
    304void qede_fill_by_demand_stats(struct qede_dev *edev)
    305{
    306	struct qede_stats_common *p_common = &edev->stats.common;
    307	struct qed_eth_stats stats;
    308
    309	edev->ops->get_vport_stats(edev->cdev, &stats);
    310
    311	p_common->no_buff_discards = stats.common.no_buff_discards;
    312	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
    313	p_common->ttl0_discard = stats.common.ttl0_discard;
    314	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
    315	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
    316	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
    317	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
    318	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
    319	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
    320	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
    321	p_common->mac_filter_discards = stats.common.mac_filter_discards;
    322	p_common->gft_filter_drop = stats.common.gft_filter_drop;
    323
    324	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
    325	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
    326	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
    327	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
    328	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
    329	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
    330	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
    331	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
    332	p_common->coalesced_events = stats.common.tpa_coalesced_events;
    333	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
    334	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
    335	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;
    336
    337	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
    338	p_common->rx_65_to_127_byte_packets =
    339	    stats.common.rx_65_to_127_byte_packets;
    340	p_common->rx_128_to_255_byte_packets =
    341	    stats.common.rx_128_to_255_byte_packets;
    342	p_common->rx_256_to_511_byte_packets =
    343	    stats.common.rx_256_to_511_byte_packets;
    344	p_common->rx_512_to_1023_byte_packets =
    345	    stats.common.rx_512_to_1023_byte_packets;
    346	p_common->rx_1024_to_1518_byte_packets =
    347	    stats.common.rx_1024_to_1518_byte_packets;
    348	p_common->rx_crc_errors = stats.common.rx_crc_errors;
    349	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
    350	p_common->rx_pause_frames = stats.common.rx_pause_frames;
    351	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
    352	p_common->rx_align_errors = stats.common.rx_align_errors;
    353	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
    354	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
    355	p_common->rx_jabbers = stats.common.rx_jabbers;
    356	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
    357	p_common->rx_fragments = stats.common.rx_fragments;
    358	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
    359	p_common->tx_65_to_127_byte_packets =
    360	    stats.common.tx_65_to_127_byte_packets;
    361	p_common->tx_128_to_255_byte_packets =
    362	    stats.common.tx_128_to_255_byte_packets;
    363	p_common->tx_256_to_511_byte_packets =
    364	    stats.common.tx_256_to_511_byte_packets;
    365	p_common->tx_512_to_1023_byte_packets =
    366	    stats.common.tx_512_to_1023_byte_packets;
    367	p_common->tx_1024_to_1518_byte_packets =
    368	    stats.common.tx_1024_to_1518_byte_packets;
    369	p_common->tx_pause_frames = stats.common.tx_pause_frames;
    370	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
    371	p_common->brb_truncates = stats.common.brb_truncates;
    372	p_common->brb_discards = stats.common.brb_discards;
    373	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
    374	p_common->link_change_count = stats.common.link_change_count;
    375	p_common->ptp_skip_txts = edev->ptp_skip_txts;
    376
    377	if (QEDE_IS_BB(edev)) {
    378		struct qede_stats_bb *p_bb = &edev->stats.bb;
    379
    380		p_bb->rx_1519_to_1522_byte_packets =
    381		    stats.bb.rx_1519_to_1522_byte_packets;
    382		p_bb->rx_1519_to_2047_byte_packets =
    383		    stats.bb.rx_1519_to_2047_byte_packets;
    384		p_bb->rx_2048_to_4095_byte_packets =
    385		    stats.bb.rx_2048_to_4095_byte_packets;
    386		p_bb->rx_4096_to_9216_byte_packets =
    387		    stats.bb.rx_4096_to_9216_byte_packets;
    388		p_bb->rx_9217_to_16383_byte_packets =
    389		    stats.bb.rx_9217_to_16383_byte_packets;
    390		p_bb->tx_1519_to_2047_byte_packets =
    391		    stats.bb.tx_1519_to_2047_byte_packets;
    392		p_bb->tx_2048_to_4095_byte_packets =
    393		    stats.bb.tx_2048_to_4095_byte_packets;
    394		p_bb->tx_4096_to_9216_byte_packets =
    395		    stats.bb.tx_4096_to_9216_byte_packets;
    396		p_bb->tx_9217_to_16383_byte_packets =
    397		    stats.bb.tx_9217_to_16383_byte_packets;
    398		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
    399		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
    400	} else {
    401		struct qede_stats_ah *p_ah = &edev->stats.ah;
    402
    403		p_ah->rx_1519_to_max_byte_packets =
    404		    stats.ah.rx_1519_to_max_byte_packets;
    405		p_ah->tx_1519_to_max_byte_packets =
    406		    stats.ah.tx_1519_to_max_byte_packets;
    407	}
    408}
    409
    410static void qede_get_stats64(struct net_device *dev,
    411			     struct rtnl_link_stats64 *stats)
    412{
    413	struct qede_dev *edev = netdev_priv(dev);
    414	struct qede_stats_common *p_common;
    415
    416	qede_fill_by_demand_stats(edev);
    417	p_common = &edev->stats.common;
    418
    419	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
    420			    p_common->rx_bcast_pkts;
    421	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
    422			    p_common->tx_bcast_pkts;
    423
    424	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
    425			  p_common->rx_bcast_bytes;
    426	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
    427			  p_common->tx_bcast_bytes;
    428
    429	stats->tx_errors = p_common->tx_err_drop_pkts;
    430	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
    431
    432	stats->rx_fifo_errors = p_common->no_buff_discards;
    433
    434	if (QEDE_IS_BB(edev))
    435		stats->collisions = edev->stats.bb.tx_total_collisions;
    436	stats->rx_crc_errors = p_common->rx_crc_errors;
    437	stats->rx_frame_errors = p_common->rx_align_errors;
    438}
    439
    440#ifdef CONFIG_QED_SRIOV
    441static int qede_get_vf_config(struct net_device *dev, int vfidx,
    442			      struct ifla_vf_info *ivi)
    443{
    444	struct qede_dev *edev = netdev_priv(dev);
    445
    446	if (!edev->ops)
    447		return -EINVAL;
    448
    449	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
    450}
    451
    452static int qede_set_vf_rate(struct net_device *dev, int vfidx,
    453			    int min_tx_rate, int max_tx_rate)
    454{
    455	struct qede_dev *edev = netdev_priv(dev);
    456
    457	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
    458					max_tx_rate);
    459}
    460
    461static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
    462{
    463	struct qede_dev *edev = netdev_priv(dev);
    464
    465	if (!edev->ops)
    466		return -EINVAL;
    467
    468	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
    469}
    470
    471static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
    472				  int link_state)
    473{
    474	struct qede_dev *edev = netdev_priv(dev);
    475
    476	if (!edev->ops)
    477		return -EINVAL;
    478
    479	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
    480}
    481
    482static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
    483{
    484	struct qede_dev *edev = netdev_priv(dev);
    485
    486	if (!edev->ops)
    487		return -EINVAL;
    488
    489	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
    490}
    491#endif
    492
    493static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
    494{
    495	struct qede_dev *edev = netdev_priv(dev);
    496
    497	if (!netif_running(dev))
    498		return -EAGAIN;
    499
    500	switch (cmd) {
    501	case SIOCSHWTSTAMP:
    502		return qede_ptp_hw_ts(edev, ifr);
    503	default:
    504		DP_VERBOSE(edev, QED_MSG_DEBUG,
    505			   "default IOCTL cmd 0x%x\n", cmd);
    506		return -EOPNOTSUPP;
    507	}
    508
    509	return 0;
    510}
    511
    512static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
    513{
    514	char *p_sb = (char *)fp->sb_info->sb_virt;
    515	u32 sb_size, i;
    516
    517	sb_size = sizeof(struct status_block);
    518
    519	for (i = 0; i < sb_size; i += 8)
    520		DP_NOTICE(edev,
    521			  "%02hhX %02hhX %02hhX %02hhX  %02hhX %02hhX %02hhX %02hhX\n",
    522			  p_sb[i], p_sb[i + 1], p_sb[i + 2], p_sb[i + 3],
    523			  p_sb[i + 4], p_sb[i + 5], p_sb[i + 6], p_sb[i + 7]);
    524}
    525
    526static void
    527qede_txq_fp_log_metadata(struct qede_dev *edev,
    528			 struct qede_fastpath *fp, struct qede_tx_queue *txq)
    529{
    530	struct qed_chain *p_chain = &txq->tx_pbl;
    531
    532	/* Dump txq/fp/sb ids etc. other metadata */
    533	DP_NOTICE(edev,
    534		  "fpid 0x%x sbid 0x%x txqid [0x%x] ndev_qid [0x%x] cos [0x%x] p_chain %p cap %d size %d jiffies %lu HZ 0x%x\n",
    535		  fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos,
    536		  p_chain, p_chain->capacity, p_chain->size, jiffies, HZ);
    537
    538	/* Dump all the relevant prod/cons indexes */
    539	DP_NOTICE(edev,
    540		  "hw cons %04x sw_tx_prod=0x%x, sw_tx_cons=0x%x, bd_prod 0x%x bd_cons 0x%x\n",
    541		  le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons,
    542		  qed_chain_get_prod_idx(p_chain), qed_chain_get_cons_idx(p_chain));
    543}
    544
    545static void
    546qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq)
    547{
    548	struct qed_sb_info_dbg sb_dbg;
    549	int rc;
    550
    551	/* sb info */
    552	qede_fp_sb_dump(edev, fp);
    553
    554	memset(&sb_dbg, 0, sizeof(sb_dbg));
    555	rc = edev->ops->common->get_sb_info(edev->cdev, fp->sb_info, (u16)fp->id, &sb_dbg);
    556
    557	DP_NOTICE(edev, "IGU: prod %08x cons %08x CAU Tx %04x\n",
    558		  sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]);
    559
    560	/* report to mfw */
    561	edev->ops->common->mfw_report(edev->cdev,
    562				      "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
    563				      txq->index, le16_to_cpu(*txq->hw_cons_ptr),
    564				      qed_chain_get_cons_idx(&txq->tx_pbl),
    565				      qed_chain_get_prod_idx(&txq->tx_pbl), jiffies);
    566	if (!rc)
    567		edev->ops->common->mfw_report(edev->cdev,
    568					      "Txq[%d]: SB[0x%04x] - IGU: prod %08x cons %08x CAU Tx %04x\n",
    569					      txq->index, fp->sb_info->igu_sb_id,
    570					      sb_dbg.igu_prod, sb_dbg.igu_cons,
    571					      sb_dbg.pi[TX_PI(txq->cos)]);
    572}
    573
    574static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
    575{
    576	struct qede_dev *edev = netdev_priv(dev);
    577	int i;
    578
    579	netif_carrier_off(dev);
    580	DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);
    581
    582	for_each_queue(i) {
    583		struct qede_tx_queue *txq;
    584		struct qede_fastpath *fp;
    585		int cos;
    586
    587		fp = &edev->fp_array[i];
    588		if (!(fp->type & QEDE_FASTPATH_TX))
    589			continue;
    590
    591		for_each_cos_in_txq(edev, cos) {
    592			txq = &fp->txq[cos];
    593
    594			/* Dump basic metadata for all queues */
    595			qede_txq_fp_log_metadata(edev, fp, txq);
    596
    597			if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
    598			    qed_chain_get_prod_idx(&txq->tx_pbl))
    599				qede_tx_log_print(edev, fp, txq);
    600		}
    601	}
    602
    603	if (IS_VF(edev))
    604		return;
    605
    606	if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
    607	    edev->state == QEDE_STATE_RECOVERY) {
    608		DP_INFO(edev,
    609			"Avoid handling a Tx timeout while another HW error is being handled\n");
    610		return;
    611	}
    612
    613	set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
    614	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
    615	schedule_delayed_work(&edev->sp_task, 0);
    616}
    617
    618static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
    619{
    620	struct qede_dev *edev = netdev_priv(ndev);
    621	int cos, count, offset;
    622
    623	if (num_tc > edev->dev_info.num_tc)
    624		return -EINVAL;
    625
    626	netdev_reset_tc(ndev);
    627	netdev_set_num_tc(ndev, num_tc);
    628
    629	for_each_cos_in_txq(edev, cos) {
    630		count = QEDE_TSS_COUNT(edev);
    631		offset = cos * QEDE_TSS_COUNT(edev);
    632		netdev_set_tc_queue(ndev, cos, count, offset);
    633	}
    634
    635	return 0;
    636}
    637
    638static int
    639qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
    640		__be16 proto)
    641{
    642	switch (f->command) {
    643	case FLOW_CLS_REPLACE:
    644		return qede_add_tc_flower_fltr(edev, proto, f);
    645	case FLOW_CLS_DESTROY:
    646		return qede_delete_flow_filter(edev, f->cookie);
    647	default:
    648		return -EOPNOTSUPP;
    649	}
    650}
    651
    652static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
    653				  void *cb_priv)
    654{
    655	struct flow_cls_offload *f;
    656	struct qede_dev *edev = cb_priv;
    657
    658	if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
    659		return -EOPNOTSUPP;
    660
    661	switch (type) {
    662	case TC_SETUP_CLSFLOWER:
    663		f = type_data;
    664		return qede_set_flower(edev, f, f->common.protocol);
    665	default:
    666		return -EOPNOTSUPP;
    667	}
    668}
    669
    670static LIST_HEAD(qede_block_cb_list);
    671
    672static int
    673qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
    674		      void *type_data)
    675{
    676	struct qede_dev *edev = netdev_priv(dev);
    677	struct tc_mqprio_qopt *mqprio;
    678
    679	switch (type) {
    680	case TC_SETUP_BLOCK:
    681		return flow_block_cb_setup_simple(type_data,
    682						  &qede_block_cb_list,
    683						  qede_setup_tc_block_cb,
    684						  edev, edev, true);
    685	case TC_SETUP_QDISC_MQPRIO:
    686		mqprio = type_data;
    687
    688		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
    689		return qede_setup_tc(dev, mqprio->num_tc);
    690	default:
    691		return -EOPNOTSUPP;
    692	}
    693}
    694
    695static const struct net_device_ops qede_netdev_ops = {
    696	.ndo_open		= qede_open,
    697	.ndo_stop		= qede_close,
    698	.ndo_start_xmit		= qede_start_xmit,
    699	.ndo_select_queue	= qede_select_queue,
    700	.ndo_set_rx_mode	= qede_set_rx_mode,
    701	.ndo_set_mac_address	= qede_set_mac_addr,
    702	.ndo_validate_addr	= eth_validate_addr,
    703	.ndo_change_mtu		= qede_change_mtu,
    704	.ndo_eth_ioctl		= qede_ioctl,
    705	.ndo_tx_timeout		= qede_tx_timeout,
    706#ifdef CONFIG_QED_SRIOV
    707	.ndo_set_vf_mac		= qede_set_vf_mac,
    708	.ndo_set_vf_vlan	= qede_set_vf_vlan,
    709	.ndo_set_vf_trust	= qede_set_vf_trust,
    710#endif
    711	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
    712	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
    713	.ndo_fix_features	= qede_fix_features,
    714	.ndo_set_features	= qede_set_features,
    715	.ndo_get_stats64	= qede_get_stats64,
    716#ifdef CONFIG_QED_SRIOV
    717	.ndo_set_vf_link_state	= qede_set_vf_link_state,
    718	.ndo_set_vf_spoofchk	= qede_set_vf_spoofchk,
    719	.ndo_get_vf_config	= qede_get_vf_config,
    720	.ndo_set_vf_rate	= qede_set_vf_rate,
    721#endif
    722	.ndo_features_check	= qede_features_check,
    723	.ndo_bpf		= qede_xdp,
    724#ifdef CONFIG_RFS_ACCEL
    725	.ndo_rx_flow_steer	= qede_rx_flow_steer,
    726#endif
    727	.ndo_xdp_xmit		= qede_xdp_transmit,
    728	.ndo_setup_tc		= qede_setup_tc_offload,
    729};
    730
    731static const struct net_device_ops qede_netdev_vf_ops = {
    732	.ndo_open		= qede_open,
    733	.ndo_stop		= qede_close,
    734	.ndo_start_xmit		= qede_start_xmit,
    735	.ndo_select_queue	= qede_select_queue,
    736	.ndo_set_rx_mode	= qede_set_rx_mode,
    737	.ndo_set_mac_address	= qede_set_mac_addr,
    738	.ndo_validate_addr	= eth_validate_addr,
    739	.ndo_change_mtu		= qede_change_mtu,
    740	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
    741	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
    742	.ndo_fix_features	= qede_fix_features,
    743	.ndo_set_features	= qede_set_features,
    744	.ndo_get_stats64	= qede_get_stats64,
    745	.ndo_features_check	= qede_features_check,
    746};
    747
    748static const struct net_device_ops qede_netdev_vf_xdp_ops = {
    749	.ndo_open		= qede_open,
    750	.ndo_stop		= qede_close,
    751	.ndo_start_xmit		= qede_start_xmit,
    752	.ndo_select_queue	= qede_select_queue,
    753	.ndo_set_rx_mode	= qede_set_rx_mode,
    754	.ndo_set_mac_address	= qede_set_mac_addr,
    755	.ndo_validate_addr	= eth_validate_addr,
    756	.ndo_change_mtu		= qede_change_mtu,
    757	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
    758	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
    759	.ndo_fix_features	= qede_fix_features,
    760	.ndo_set_features	= qede_set_features,
    761	.ndo_get_stats64	= qede_get_stats64,
    762	.ndo_features_check	= qede_features_check,
    763	.ndo_bpf		= qede_xdp,
    764	.ndo_xdp_xmit		= qede_xdp_transmit,
    765};
    766
    767/* -------------------------------------------------------------------------
    768 * START OF PROBE / REMOVE
    769 * -------------------------------------------------------------------------
    770 */
    771
    772static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
    773					    struct pci_dev *pdev,
    774					    struct qed_dev_eth_info *info,
    775					    u32 dp_module, u8 dp_level)
    776{
    777	struct net_device *ndev;
    778	struct qede_dev *edev;
    779
    780	ndev = alloc_etherdev_mqs(sizeof(*edev),
    781				  info->num_queues * info->num_tc,
    782				  info->num_queues);
    783	if (!ndev) {
    784		pr_err("etherdev allocation failed\n");
    785		return NULL;
    786	}
    787
    788	edev = netdev_priv(ndev);
    789	edev->ndev = ndev;
    790	edev->cdev = cdev;
    791	edev->pdev = pdev;
    792	edev->dp_module = dp_module;
    793	edev->dp_level = dp_level;
    794	edev->ops = qed_ops;
    795
    796	if (is_kdump_kernel()) {
    797		edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
    798		edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
    799	} else {
    800		edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
    801		edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
    802	}
    803
    804	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
    805		info->num_queues, info->num_queues);
    806
    807	SET_NETDEV_DEV(ndev, &pdev->dev);
    808
    809	memset(&edev->stats, 0, sizeof(edev->stats));
    810	memcpy(&edev->dev_info, info, sizeof(*info));
    811
    812	/* As ethtool doesn't have the ability to show WoL behavior as
     813	 * 'default', declare it enabled if the device supports it.
    814	 */
    815	if (edev->dev_info.common.wol_support)
    816		edev->wol_enabled = true;
    817
    818	INIT_LIST_HEAD(&edev->vlan_list);
    819
    820	return edev;
    821}
    822
    823static void qede_init_ndev(struct qede_dev *edev)
    824{
    825	struct net_device *ndev = edev->ndev;
    826	struct pci_dev *pdev = edev->pdev;
    827	bool udp_tunnel_enable = false;
    828	netdev_features_t hw_features;
    829
    830	pci_set_drvdata(pdev, ndev);
    831
    832	ndev->mem_start = edev->dev_info.common.pci_mem_start;
    833	ndev->base_addr = ndev->mem_start;
    834	ndev->mem_end = edev->dev_info.common.pci_mem_end;
    835	ndev->irq = edev->dev_info.common.pci_irq;
    836
    837	ndev->watchdog_timeo = TX_TIMEOUT;
    838
    839	if (IS_VF(edev)) {
    840		if (edev->dev_info.xdp_supported)
    841			ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
    842		else
    843			ndev->netdev_ops = &qede_netdev_vf_ops;
    844	} else {
    845		ndev->netdev_ops = &qede_netdev_ops;
    846	}
    847
    848	qede_set_ethtool_ops(ndev);
    849
    850	ndev->priv_flags |= IFF_UNICAST_FLT;
    851
     852	/* user-changeable features */
    853	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
    854		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
    855		      NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;
    856
    857	if (edev->dev_info.common.b_arfs_capable)
    858		hw_features |= NETIF_F_NTUPLE;
    859
    860	if (edev->dev_info.common.vxlan_enable ||
    861	    edev->dev_info.common.geneve_enable)
    862		udp_tunnel_enable = true;
    863
    864	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
    865		hw_features |= NETIF_F_TSO_ECN;
    866		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
    867					NETIF_F_SG | NETIF_F_TSO |
    868					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
    869					NETIF_F_RXCSUM;
    870	}
    871
    872	if (udp_tunnel_enable) {
    873		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
    874				NETIF_F_GSO_UDP_TUNNEL_CSUM);
    875		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
    876					  NETIF_F_GSO_UDP_TUNNEL_CSUM);
    877
    878		qede_set_udp_tunnels(edev);
    879	}
    880
    881	if (edev->dev_info.common.gre_enable) {
    882		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
    883		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
    884					  NETIF_F_GSO_GRE_CSUM);
    885	}
    886
    887	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
    888			      NETIF_F_HIGHDMA;
    889	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
    890			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
    891			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
    892
    893	ndev->hw_features = hw_features;
    894
    895	/* MTU range: 46 - 9600 */
    896	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
    897	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
    898
    899	/* Set network device HW mac */
    900	eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac);
    901
    902	ndev->mtu = edev->dev_info.common.mtu;
    903}
    904
    905/* This function converts from 32b param to two params of level and module
    906 * Input 32b decoding:
    907 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
    908 * 'happy' flow, e.g. memory allocation failed.
    909 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
    910 * and provide important parameters.
    911 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
     912 * module. VERBOSE prints are for tracking a specific flow at a low level.
    913 *
    914 * Notice that the level should be that of the lowest required logs.
    915 */
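/* A worked example of the decoding above: debug=0x40000000 (bit 30 set)
 * selects the INFO level, whereas debug=0x1 sets a per-module bit, so the
 * level becomes VERBOSE with only module bit 0 enabled; any set bit in
 * b29-b0 selects the VERBOSE level.
 */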
    916void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
    917{
    918	*p_dp_level = QED_LEVEL_NOTICE;
    919	*p_dp_module = 0;
    920
    921	if (debug & QED_LOG_VERBOSE_MASK) {
    922		*p_dp_level = QED_LEVEL_VERBOSE;
    923		*p_dp_module = (debug & 0x3FFFFFFF);
    924	} else if (debug & QED_LOG_INFO_MASK) {
    925		*p_dp_level = QED_LEVEL_INFO;
    926	} else if (debug & QED_LOG_NOTICE_MASK) {
    927		*p_dp_level = QED_LEVEL_NOTICE;
    928	}
    929}
    930
    931static void qede_free_fp_array(struct qede_dev *edev)
    932{
    933	if (edev->fp_array) {
    934		struct qede_fastpath *fp;
    935		int i;
    936
    937		for_each_queue(i) {
    938			fp = &edev->fp_array[i];
    939
    940			kfree(fp->sb_info);
    941			/* Handle mem alloc failure case where qede_init_fp
    942			 * didn't register xdp_rxq_info yet.
    943			 * Implicit only (fp->type & QEDE_FASTPATH_RX)
    944			 */
    945			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
    946				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
    947			kfree(fp->rxq);
    948			kfree(fp->xdp_tx);
    949			kfree(fp->txq);
    950		}
    951		kfree(edev->fp_array);
    952	}
    953
    954	edev->num_queues = 0;
    955	edev->fp_num_tx = 0;
    956	edev->fp_num_rx = 0;
    957}
    958
    959static int qede_alloc_fp_array(struct qede_dev *edev)
    960{
    961	u8 fp_combined, fp_rx = edev->fp_num_rx;
    962	struct qede_fastpath *fp;
    963	void *mem;
    964	int i;
    965
    966	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
    967				 sizeof(*edev->fp_array), GFP_KERNEL);
    968	if (!edev->fp_array) {
    969		DP_NOTICE(edev, "fp array allocation failed\n");
    970		goto err;
    971	}
    972
    973	mem = krealloc(edev->coal_entry, QEDE_QUEUE_CNT(edev) *
    974		       sizeof(*edev->coal_entry), GFP_KERNEL);
    975	if (!mem) {
    976		DP_ERR(edev, "coalesce entry allocation failed\n");
    977		kfree(edev->coal_entry);
    978		goto err;
    979	}
    980	edev->coal_entry = mem;
    981
    982	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
    983
    984	/* Allocate the FP elements for Rx queues followed by combined and then
    985	 * the Tx. This ordering should be maintained so that the respective
    986	 * queues (Rx or Tx) will be together in the fastpath array and the
    987	 * associated ids will be sequential.
    988	 */
    989	for_each_queue(i) {
    990		fp = &edev->fp_array[i];
    991
    992		fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
    993		if (!fp->sb_info) {
    994			DP_NOTICE(edev, "sb info struct allocation failed\n");
    995			goto err;
    996		}
    997
    998		if (fp_rx) {
    999			fp->type = QEDE_FASTPATH_RX;
   1000			fp_rx--;
   1001		} else if (fp_combined) {
   1002			fp->type = QEDE_FASTPATH_COMBINED;
   1003			fp_combined--;
   1004		} else {
   1005			fp->type = QEDE_FASTPATH_TX;
   1006		}
   1007
   1008		if (fp->type & QEDE_FASTPATH_TX) {
   1009			fp->txq = kcalloc(edev->dev_info.num_tc,
   1010					  sizeof(*fp->txq), GFP_KERNEL);
   1011			if (!fp->txq)
   1012				goto err;
   1013		}
   1014
   1015		if (fp->type & QEDE_FASTPATH_RX) {
   1016			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
   1017			if (!fp->rxq)
   1018				goto err;
   1019
   1020			if (edev->xdp_prog) {
   1021				fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
   1022						     GFP_KERNEL);
   1023				if (!fp->xdp_tx)
   1024					goto err;
   1025				fp->type |= QEDE_FASTPATH_XDP;
   1026			}
   1027		}
   1028	}
   1029
   1030	return 0;
   1031err:
   1032	qede_free_fp_array(edev);
   1033	return -ENOMEM;
   1034}
   1035
   1036/* The qede lock is used to protect driver state change and driver flows that
   1037 * are not reentrant.
   1038 */
   1039void __qede_lock(struct qede_dev *edev)
   1040{
   1041	mutex_lock(&edev->qede_lock);
   1042}
   1043
   1044void __qede_unlock(struct qede_dev *edev)
   1045{
   1046	mutex_unlock(&edev->qede_lock);
   1047}
   1048
   1049/* This version of the lock should be used when acquiring the RTNL lock is also
   1050 * needed in addition to the internal qede lock.
   1051 */
   1052static void qede_lock(struct qede_dev *edev)
   1053{
   1054	rtnl_lock();
   1055	__qede_lock(edev);
   1056}
   1057
   1058static void qede_unlock(struct qede_dev *edev)
   1059{
   1060	__qede_unlock(edev);
   1061	rtnl_unlock();
   1062}
   1063
   1064static void qede_sp_task(struct work_struct *work)
   1065{
   1066	struct qede_dev *edev = container_of(work, struct qede_dev,
   1067					     sp_task.work);
   1068
   1069	/* Disable execution of this deferred work once
    1070	 * qede removal is in progress; this stops any future
   1071	 * scheduling of sp_task.
   1072	 */
   1073	if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
   1074		return;
   1075
   1076	/* The locking scheme depends on the specific flag:
   1077	 * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
   1078	 * ensure that ongoing flows are ended and new ones are not started.
   1079	 * In other cases - only the internal qede lock should be acquired.
   1080	 */
   1081
   1082	if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
   1083#ifdef CONFIG_QED_SRIOV
   1084		/* SRIOV must be disabled outside the lock to avoid a deadlock.
   1085		 * The recovery of the active VFs is currently not supported.
   1086		 */
   1087		if (pci_num_vf(edev->pdev))
   1088			qede_sriov_configure(edev->pdev, 0);
   1089#endif
   1090		qede_lock(edev);
   1091		qede_recovery_handler(edev);
   1092		qede_unlock(edev);
   1093	}
   1094
   1095	__qede_lock(edev);
   1096
   1097	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
   1098		if (edev->state == QEDE_STATE_OPEN)
   1099			qede_config_rx_mode(edev->ndev);
   1100
   1101#ifdef CONFIG_RFS_ACCEL
   1102	if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
   1103		if (edev->state == QEDE_STATE_OPEN)
   1104			qede_process_arfs_filters(edev, false);
   1105	}
   1106#endif
   1107	if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
   1108		qede_generic_hw_err_handler(edev);
   1109	__qede_unlock(edev);
   1110
   1111	if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
   1112#ifdef CONFIG_QED_SRIOV
   1113		/* SRIOV must be disabled outside the lock to avoid a deadlock.
   1114		 * The recovery of the active VFs is currently not supported.
   1115		 */
   1116		if (pci_num_vf(edev->pdev))
   1117			qede_sriov_configure(edev->pdev, 0);
   1118#endif
   1119		edev->ops->common->recovery_process(edev->cdev);
   1120	}
   1121}
   1122
   1123static void qede_update_pf_params(struct qed_dev *cdev)
   1124{
   1125	struct qed_pf_params pf_params;
   1126	u16 num_cons;
   1127
   1128	/* 64 rx + 64 tx + 64 XDP */
   1129	memset(&pf_params, 0, sizeof(struct qed_pf_params));
   1130
   1131	/* 1 rx + 1 xdp + max tx cos */
   1132	num_cons = QED_MIN_L2_CONS;
   1133
   1134	pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;
   1135
   1136	/* Same for VFs - make sure they'll have sufficient connections
   1137	 * to support XDP Tx queues.
   1138	 */
   1139	pf_params.eth_pf_params.num_vf_cons = 48;
   1140
   1141	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
   1142	qed_ops->common->update_pf_params(cdev, &pf_params);
   1143}
   1144
   1145#define QEDE_FW_VER_STR_SIZE	80
   1146
   1147static void qede_log_probe(struct qede_dev *edev)
   1148{
   1149	struct qed_dev_info *p_dev_info = &edev->dev_info.common;
   1150	u8 buf[QEDE_FW_VER_STR_SIZE];
   1151	size_t left_size;
   1152
   1153	snprintf(buf, QEDE_FW_VER_STR_SIZE,
   1154		 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
   1155		 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
   1156		 p_dev_info->fw_eng,
   1157		 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
   1158		 QED_MFW_VERSION_3_OFFSET,
   1159		 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
   1160		 QED_MFW_VERSION_2_OFFSET,
   1161		 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
   1162		 QED_MFW_VERSION_1_OFFSET,
   1163		 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
   1164		 QED_MFW_VERSION_0_OFFSET);
   1165
   1166	left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
   1167	if (p_dev_info->mbi_version && left_size)
   1168		snprintf(buf + strlen(buf), left_size,
   1169			 " [MBI %d.%d.%d]",
   1170			 (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
   1171			 QED_MBI_VERSION_2_OFFSET,
   1172			 (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
   1173			 QED_MBI_VERSION_1_OFFSET,
   1174			 (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
   1175			 QED_MBI_VERSION_0_OFFSET);
   1176
   1177	pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
   1178		PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
   1179		buf, edev->ndev->name);
   1180}
   1181
   1182enum qede_probe_mode {
   1183	QEDE_PROBE_NORMAL,
   1184	QEDE_PROBE_RECOVERY,
   1185};
   1186
   1187static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
   1188			bool is_vf, enum qede_probe_mode mode)
   1189{
   1190	struct qed_probe_params probe_params;
   1191	struct qed_slowpath_params sp_params;
   1192	struct qed_dev_eth_info dev_info;
   1193	struct qede_dev *edev;
   1194	struct qed_dev *cdev;
   1195	int rc;
   1196
   1197	if (unlikely(dp_level & QED_LEVEL_INFO))
   1198		pr_notice("Starting qede probe\n");
   1199
   1200	memset(&probe_params, 0, sizeof(probe_params));
   1201	probe_params.protocol = QED_PROTOCOL_ETH;
   1202	probe_params.dp_module = dp_module;
   1203	probe_params.dp_level = dp_level;
   1204	probe_params.is_vf = is_vf;
   1205	probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY);
   1206	cdev = qed_ops->common->probe(pdev, &probe_params);
   1207	if (!cdev) {
   1208		rc = -ENODEV;
   1209		goto err0;
   1210	}
   1211
   1212	qede_update_pf_params(cdev);
   1213
   1214	/* Start the Slowpath-process */
   1215	memset(&sp_params, 0, sizeof(sp_params));
   1216	sp_params.int_mode = QED_INT_MODE_MSIX;
   1217	strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
   1218	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
   1219	if (rc) {
   1220		pr_notice("Cannot start slowpath\n");
   1221		goto err1;
   1222	}
   1223
   1224	/* Learn information crucial for qede to progress */
   1225	rc = qed_ops->fill_dev_info(cdev, &dev_info);
   1226	if (rc)
   1227		goto err2;
   1228
   1229	if (mode != QEDE_PROBE_RECOVERY) {
   1230		edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
   1231					   dp_level);
   1232		if (!edev) {
   1233			rc = -ENOMEM;
   1234			goto err2;
   1235		}
   1236
   1237		edev->devlink = qed_ops->common->devlink_register(cdev);
   1238		if (IS_ERR(edev->devlink)) {
   1239			DP_NOTICE(edev, "Cannot register devlink\n");
   1240			rc = PTR_ERR(edev->devlink);
   1241			edev->devlink = NULL;
   1242			goto err3;
   1243		}
   1244	} else {
   1245		struct net_device *ndev = pci_get_drvdata(pdev);
   1246		struct qed_devlink *qdl;
   1247
   1248		edev = netdev_priv(ndev);
   1249		qdl = devlink_priv(edev->devlink);
   1250		qdl->cdev = cdev;
   1251		edev->cdev = cdev;
   1252		memset(&edev->stats, 0, sizeof(edev->stats));
   1253		memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
   1254	}
   1255
   1256	if (is_vf)
   1257		set_bit(QEDE_FLAGS_IS_VF, &edev->flags);
   1258
   1259	qede_init_ndev(edev);
   1260
   1261	rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY));
   1262	if (rc)
   1263		goto err3;
   1264
   1265	if (mode != QEDE_PROBE_RECOVERY) {
   1266		/* Prepare the lock prior to the registration of the netdev,
   1267		 * as once it's registered we might reach flows requiring it
   1268		 * [it's even possible to reach a flow needing it directly
   1269		 * from there, although it's unlikely].
   1270		 */
   1271		INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
   1272		mutex_init(&edev->qede_lock);
   1273
   1274		rc = register_netdev(edev->ndev);
   1275		if (rc) {
   1276			DP_NOTICE(edev, "Cannot register net-device\n");
   1277			goto err4;
   1278		}
   1279	}
   1280
   1281	edev->ops->common->set_name(cdev, edev->ndev->name);
   1282
   1283	/* PTP not supported on VFs */
   1284	if (!is_vf)
   1285		qede_ptp_enable(edev);
   1286
   1287	edev->ops->register_ops(cdev, &qede_ll_ops, edev);
   1288
   1289#ifdef CONFIG_DCB
   1290	if (!IS_VF(edev))
   1291		qede_set_dcbnl_ops(edev->ndev);
   1292#endif
   1293
   1294	edev->rx_copybreak = QEDE_RX_HDR_SIZE;
   1295
   1296	qede_log_probe(edev);
   1297	return 0;
   1298
   1299err4:
   1300	qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
   1301err3:
   1302	if (mode != QEDE_PROBE_RECOVERY)
   1303		free_netdev(edev->ndev);
   1304	else
   1305		edev->cdev = NULL;
   1306err2:
   1307	qed_ops->common->slowpath_stop(cdev);
   1308err1:
   1309	qed_ops->common->remove(cdev);
   1310err0:
   1311	return rc;
   1312}
   1313
   1314static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
   1315{
   1316	bool is_vf = false;
   1317	u32 dp_module = 0;
   1318	u8 dp_level = 0;
   1319
   1320	switch ((enum qede_pci_private)id->driver_data) {
   1321	case QEDE_PRIVATE_VF:
   1322		if (debug & QED_LOG_VERBOSE_MASK)
   1323			dev_err(&pdev->dev, "Probing a VF\n");
   1324		is_vf = true;
   1325		break;
   1326	default:
   1327		if (debug & QED_LOG_VERBOSE_MASK)
   1328			dev_err(&pdev->dev, "Probing a PF\n");
   1329	}
   1330
   1331	qede_config_debug(debug, &dp_module, &dp_level);
   1332
   1333	return __qede_probe(pdev, dp_module, dp_level, is_vf,
   1334			    QEDE_PROBE_NORMAL);
   1335}
   1336
   1337enum qede_remove_mode {
   1338	QEDE_REMOVE_NORMAL,
   1339	QEDE_REMOVE_RECOVERY,
   1340};
   1341
   1342static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
   1343{
   1344	struct net_device *ndev = pci_get_drvdata(pdev);
   1345	struct qede_dev *edev;
   1346	struct qed_dev *cdev;
   1347
   1348	if (!ndev) {
   1349		dev_info(&pdev->dev, "Device has already been removed\n");
   1350		return;
   1351	}
   1352
   1353	edev = netdev_priv(ndev);
   1354	cdev = edev->cdev;
   1355
   1356	DP_INFO(edev, "Starting qede_remove\n");
   1357
   1358	qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
   1359
   1360	if (mode != QEDE_REMOVE_RECOVERY) {
   1361		set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
   1362		unregister_netdev(ndev);
   1363
   1364		cancel_delayed_work_sync(&edev->sp_task);
   1365
   1366		edev->ops->common->set_power_state(cdev, PCI_D0);
   1367
   1368		pci_set_drvdata(pdev, NULL);
   1369	}
   1370
   1371	qede_ptp_disable(edev);
   1372
   1373	/* Use global ops since we've freed edev */
   1374	qed_ops->common->slowpath_stop(cdev);
   1375	if (system_state == SYSTEM_POWER_OFF)
   1376		return;
   1377
   1378	if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) {
   1379		qed_ops->common->devlink_unregister(edev->devlink);
   1380		edev->devlink = NULL;
   1381	}
   1382	qed_ops->common->remove(cdev);
   1383	edev->cdev = NULL;
   1384
   1385	/* Since this can happen out-of-sync with other flows,
   1386	 * don't release the netdevice until after slowpath stop
   1387	 * has been called to guarantee various other contexts
   1388	 * [e.g., QED register callbacks] won't break anything when
   1389	 * accessing the netdevice.
   1390	 */
   1391	if (mode != QEDE_REMOVE_RECOVERY) {
   1392		kfree(edev->coal_entry);
   1393		free_netdev(ndev);
   1394	}
   1395
   1396	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
   1397}
   1398
   1399static void qede_remove(struct pci_dev *pdev)
   1400{
   1401	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
   1402}
   1403
   1404static void qede_shutdown(struct pci_dev *pdev)
   1405{
   1406	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
   1407}
   1408
   1409/* -------------------------------------------------------------------------
   1410 * START OF LOAD / UNLOAD
   1411 * -------------------------------------------------------------------------
   1412 */
   1413
   1414static int qede_set_num_queues(struct qede_dev *edev)
   1415{
   1416	int rc;
   1417	u16 rss_num;
   1418
    1419	/* Setup queues according to possible resources */
   1420	if (edev->req_queues)
   1421		rss_num = edev->req_queues;
   1422	else
   1423		rss_num = netif_get_num_default_rss_queues() *
   1424			  edev->dev_info.common.num_hwfns;
   1425
   1426	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
   1427
   1428	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
   1429	if (rc > 0) {
   1430		/* Managed to request interrupts for our queues */
   1431		edev->num_queues = rc;
   1432		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
   1433			QEDE_QUEUE_CNT(edev), rss_num);
   1434		rc = 0;
   1435	}
   1436
   1437	edev->fp_num_tx = edev->req_num_tx;
   1438	edev->fp_num_rx = edev->req_num_rx;
   1439
   1440	return rc;
   1441}
   1442
   1443static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
   1444			     u16 sb_id)
   1445{
   1446	if (sb_info->sb_virt) {
   1447		edev->ops->common->sb_release(edev->cdev, sb_info, sb_id,
   1448					      QED_SB_TYPE_L2_QUEUE);
   1449		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
   1450				  (void *)sb_info->sb_virt, sb_info->sb_phys);
   1451		memset(sb_info, 0, sizeof(*sb_info));
   1452	}
   1453}
   1454
   1455/* This function allocates fast-path status block memory */
   1456static int qede_alloc_mem_sb(struct qede_dev *edev,
   1457			     struct qed_sb_info *sb_info, u16 sb_id)
   1458{
   1459	struct status_block *sb_virt;
   1460	dma_addr_t sb_phys;
   1461	int rc;
   1462
   1463	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
   1464				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
   1465	if (!sb_virt) {
   1466		DP_ERR(edev, "Status block allocation failed\n");
   1467		return -ENOMEM;
   1468	}
   1469
   1470	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
   1471					sb_virt, sb_phys, sb_id,
   1472					QED_SB_TYPE_L2_QUEUE);
   1473	if (rc) {
   1474		DP_ERR(edev, "Status block initialization failed\n");
   1475		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
   1476				  sb_virt, sb_phys);
   1477		return rc;
   1478	}
   1479
   1480	return 0;
   1481}
   1482
   1483static void qede_free_rx_buffers(struct qede_dev *edev,
   1484				 struct qede_rx_queue *rxq)
   1485{
   1486	u16 i;
   1487
   1488	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
   1489		struct sw_rx_data *rx_buf;
   1490		struct page *data;
   1491
   1492		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
   1493		data = rx_buf->data;
   1494
   1495		dma_unmap_page(&edev->pdev->dev,
   1496			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
   1497
   1498		rx_buf->data = NULL;
   1499		__free_page(data);
   1500	}
   1501}
   1502
   1503static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
   1504{
   1505	/* Free rx buffers */
   1506	qede_free_rx_buffers(edev, rxq);
   1507
   1508	/* Free the parallel SW ring */
   1509	kfree(rxq->sw_rx_ring);
   1510
   1511	/* Free the real RQ ring used by FW */
   1512	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
   1513	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
   1514}
   1515
   1516static void qede_set_tpa_param(struct qede_rx_queue *rxq)
   1517{
   1518	int i;
   1519
   1520	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
   1521		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
   1522
   1523		tpa_info->state = QEDE_AGG_STATE_NONE;
   1524	}
   1525}
   1526
   1527/* This function allocates all memory needed per Rx queue */
   1528static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
   1529{
   1530	struct qed_chain_init_params params = {
   1531		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
   1532		.num_elems	= RX_RING_SIZE,
   1533	};
   1534	struct qed_dev *cdev = edev->cdev;
   1535	int i, rc, size;
   1536
   1537	rxq->num_rx_buffers = edev->q_num_rx_buffers;
   1538
   1539	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
   1540
   1541	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
   1542	size = rxq->rx_headroom +
   1543	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
   1544
    1545	/* Make sure that the headroom and payload fit in a single page */
   1546	if (rxq->rx_buf_size + size > PAGE_SIZE)
   1547		rxq->rx_buf_size = PAGE_SIZE - size;
   1548
   1549	/* Segment size to split a page in multiple equal parts,
   1550	 * unless XDP is used in which case we'd use the entire page.
   1551	 */
   1552	if (!edev->xdp_prog) {
   1553		size = size + rxq->rx_buf_size;
   1554		rxq->rx_buf_seg_size = roundup_pow_of_two(size);
   1555	} else {
   1556		rxq->rx_buf_seg_size = PAGE_SIZE;
   1557		edev->ndev->features &= ~NETIF_F_GRO_HW;
   1558	}
   1559
   1560	/* Allocate the parallel driver ring for Rx buffers */
   1561	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
   1562	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
   1563	if (!rxq->sw_rx_ring) {
   1564		DP_ERR(edev, "Rx buffers ring allocation failed\n");
   1565		rc = -ENOMEM;
   1566		goto err;
   1567	}
   1568
   1569	/* Allocate FW Rx ring  */
   1570	params.mode = QED_CHAIN_MODE_NEXT_PTR;
   1571	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
   1572	params.elem_size = sizeof(struct eth_rx_bd);
   1573
   1574	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, &params);
   1575	if (rc)
   1576		goto err;
   1577
   1578	/* Allocate FW completion ring */
   1579	params.mode = QED_CHAIN_MODE_PBL;
   1580	params.intended_use = QED_CHAIN_USE_TO_CONSUME;
   1581	params.elem_size = sizeof(union eth_rx_cqe);
   1582
   1583	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, &params);
   1584	if (rc)
   1585		goto err;
   1586
   1587	/* Allocate buffers for the Rx ring */
   1588	rxq->filled_buffers = 0;
   1589	for (i = 0; i < rxq->num_rx_buffers; i++) {
   1590		rc = qede_alloc_rx_buffer(rxq, false);
   1591		if (rc) {
   1592			DP_ERR(edev,
   1593			       "Rx buffers allocation failed at index %d\n", i);
   1594			goto err;
   1595		}
   1596	}
   1597
   1598	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
   1599	if (!edev->gro_disable)
   1600		qede_set_tpa_param(rxq);
   1601err:
   1602	return rc;
   1603}
   1604
   1605static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
   1606{
   1607	/* Free the parallel SW ring */
   1608	if (txq->is_xdp)
   1609		kfree(txq->sw_tx_ring.xdp);
   1610	else
   1611		kfree(txq->sw_tx_ring.skbs);
   1612
    1613	/* Free the real Tx ring used by FW */
   1614	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
   1615}
   1616
   1617/* This function allocates all memory needed per Tx queue */
   1618static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
   1619{
   1620	struct qed_chain_init_params params = {
   1621		.mode		= QED_CHAIN_MODE_PBL,
   1622		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
   1623		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
   1624		.num_elems	= edev->q_num_tx_buffers,
   1625		.elem_size	= sizeof(union eth_tx_bd_types),
   1626	};
   1627	int size, rc;
   1628
   1629	txq->num_tx_buffers = edev->q_num_tx_buffers;
   1630
   1631	/* Allocate the parallel driver ring for Tx buffers */
   1632	if (txq->is_xdp) {
   1633		size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
   1634		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
   1635		if (!txq->sw_tx_ring.xdp)
   1636			goto err;
   1637	} else {
   1638		size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
   1639		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
   1640		if (!txq->sw_tx_ring.skbs)
   1641			goto err;
   1642	}
   1643
   1644	rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, &params);
   1645	if (rc)
   1646		goto err;
   1647
   1648	return 0;
   1649
   1650err:
   1651	qede_free_mem_txq(edev, txq);
   1652	return -ENOMEM;
   1653}
   1654
   1655/* This function frees all memory of a single fp */
   1656static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
   1657{
   1658	qede_free_mem_sb(edev, fp->sb_info, fp->id);
   1659
   1660	if (fp->type & QEDE_FASTPATH_RX)
   1661		qede_free_mem_rxq(edev, fp->rxq);
   1662
   1663	if (fp->type & QEDE_FASTPATH_XDP)
   1664		qede_free_mem_txq(edev, fp->xdp_tx);
   1665
   1666	if (fp->type & QEDE_FASTPATH_TX) {
   1667		int cos;
   1668
   1669		for_each_cos_in_txq(edev, cos)
   1670			qede_free_mem_txq(edev, &fp->txq[cos]);
   1671	}
   1672}
   1673
    1674/* This function allocates all memory needed for a single fp (i.e. an entity
    1675 * which contains a status block, one rx queue and/or multiple per-TC tx queues).
    1676 */
   1677static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
   1678{
   1679	int rc = 0;
   1680
   1681	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
   1682	if (rc)
   1683		goto out;
   1684
   1685	if (fp->type & QEDE_FASTPATH_RX) {
   1686		rc = qede_alloc_mem_rxq(edev, fp->rxq);
   1687		if (rc)
   1688			goto out;
   1689	}
   1690
   1691	if (fp->type & QEDE_FASTPATH_XDP) {
   1692		rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
   1693		if (rc)
   1694			goto out;
   1695	}
   1696
   1697	if (fp->type & QEDE_FASTPATH_TX) {
   1698		int cos;
   1699
   1700		for_each_cos_in_txq(edev, cos) {
   1701			rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
   1702			if (rc)
   1703				goto out;
   1704		}
   1705	}
   1706
   1707out:
   1708	return rc;
   1709}
   1710
   1711static void qede_free_mem_load(struct qede_dev *edev)
   1712{
   1713	int i;
   1714
   1715	for_each_queue(i) {
   1716		struct qede_fastpath *fp = &edev->fp_array[i];
   1717
   1718		qede_free_mem_fp(edev, fp);
   1719	}
   1720}
   1721
   1722/* This function allocates all qede memory at NIC load. */
   1723static int qede_alloc_mem_load(struct qede_dev *edev)
   1724{
   1725	int rc = 0, queue_id;
   1726
   1727	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
   1728		struct qede_fastpath *fp = &edev->fp_array[queue_id];
   1729
   1730		rc = qede_alloc_mem_fp(edev, fp);
   1731		if (rc) {
   1732			DP_ERR(edev,
   1733			       "Failed to allocate memory for fastpath - rss id = %d\n",
   1734			       queue_id);
   1735			qede_free_mem_load(edev);
   1736			return rc;
   1737		}
   1738	}
   1739
   1740	return 0;
   1741}
   1742
   1743static void qede_empty_tx_queue(struct qede_dev *edev,
   1744				struct qede_tx_queue *txq)
   1745{
   1746	unsigned int pkts_compl = 0, bytes_compl = 0;
   1747	struct netdev_queue *netdev_txq;
   1748	int rc, len = 0;
   1749
   1750	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
   1751
   1752	while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
   1753	       qed_chain_get_prod_idx(&txq->tx_pbl)) {
   1754		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
   1755			   "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
   1756			   txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
   1757			   qed_chain_get_prod_idx(&txq->tx_pbl));
   1758
   1759		rc = qede_free_tx_pkt(edev, txq, &len);
   1760		if (rc) {
   1761			DP_NOTICE(edev,
   1762				  "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
   1763				  txq->index,
   1764				  qed_chain_get_cons_idx(&txq->tx_pbl),
   1765				  qed_chain_get_prod_idx(&txq->tx_pbl));
   1766			break;
   1767		}
   1768
   1769		bytes_compl += len;
   1770		pkts_compl++;
   1771		txq->sw_tx_cons++;
   1772	}
   1773
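        	/* Report the reclaimed packets/bytes to BQL (byte queue limits) so
        	 * the stack's accounting for this Tx queue stays consistent after
        	 * the queue has been emptied.
        	 */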
   1774	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
   1775}
   1776
   1777static void qede_empty_tx_queues(struct qede_dev *edev)
   1778{
   1779	int i;
   1780
   1781	for_each_queue(i)
   1782		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
   1783			int cos;
   1784
   1785			for_each_cos_in_txq(edev, cos) {
   1786				struct qede_fastpath *fp;
   1787
   1788				fp = &edev->fp_array[i];
   1789				qede_empty_tx_queue(edev,
   1790						    &fp->txq[cos]);
   1791			}
   1792		}
   1793}
   1794
   1795/* This function inits fp content and resets the SB, RXQ and TXQ structures */
   1796static void qede_init_fp(struct qede_dev *edev)
   1797{
   1798	int queue_id, rxq_index = 0, txq_index = 0;
   1799	struct qede_fastpath *fp;
   1800	bool init_xdp = false;
   1801
   1802	for_each_queue(queue_id) {
   1803		fp = &edev->fp_array[queue_id];
   1804
   1805		fp->edev = edev;
   1806		fp->id = queue_id;
   1807
   1808		if (fp->type & QEDE_FASTPATH_XDP) {
   1809			fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
   1810								rxq_index);
   1811			fp->xdp_tx->is_xdp = 1;
   1812
   1813			spin_lock_init(&fp->xdp_tx->xdp_tx_lock);
   1814			init_xdp = true;
   1815		}
   1816
   1817		if (fp->type & QEDE_FASTPATH_RX) {
   1818			fp->rxq->rxq_id = rxq_index++;
   1819
   1820			/* Determine how to map buffers for this queue */
   1821			if (fp->type & QEDE_FASTPATH_XDP)
   1822				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
   1823			else
   1824				fp->rxq->data_direction = DMA_FROM_DEVICE;
   1825			fp->rxq->dev = &edev->pdev->dev;
   1826
    1827			/* The driver has no error path from here */
   1828			WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
   1829						 fp->rxq->rxq_id, 0) < 0);
   1830
   1831			if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
   1832						       MEM_TYPE_PAGE_ORDER0,
   1833						       NULL)) {
   1834				DP_NOTICE(edev,
   1835					  "Failed to register XDP memory model\n");
   1836			}
   1837		}
   1838
   1839		if (fp->type & QEDE_FASTPATH_TX) {
   1840			int cos;
   1841
   1842			for_each_cos_in_txq(edev, cos) {
   1843				struct qede_tx_queue *txq = &fp->txq[cos];
   1844				u16 ndev_tx_id;
   1845
   1846				txq->cos = cos;
   1847				txq->index = txq_index;
   1848				ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
   1849				txq->ndev_txq_id = ndev_tx_id;
   1850
   1851				if (edev->dev_info.is_legacy)
   1852					txq->is_legacy = true;
   1853				txq->dev = &edev->pdev->dev;
   1854			}
   1855
   1856			txq_index++;
   1857		}
   1858
   1859		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
   1860			 edev->ndev->name, queue_id);
   1861	}
   1862
   1863	if (init_xdp) {
   1864		edev->total_xdp_queues = QEDE_RSS_COUNT(edev);
   1865		DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues);
   1866	}
   1867}
   1868
   1869static int qede_set_real_num_queues(struct qede_dev *edev)
   1870{
   1871	int rc = 0;
   1872
   1873	rc = netif_set_real_num_tx_queues(edev->ndev,
   1874					  QEDE_TSS_COUNT(edev) *
   1875					  edev->dev_info.num_tc);
   1876	if (rc) {
   1877		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
   1878		return rc;
   1879	}
   1880
   1881	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
   1882	if (rc) {
   1883		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
   1884		return rc;
   1885	}
   1886
   1887	return 0;
   1888}
   1889
   1890static void qede_napi_disable_remove(struct qede_dev *edev)
   1891{
   1892	int i;
   1893
   1894	for_each_queue(i) {
   1895		napi_disable(&edev->fp_array[i].napi);
   1896
   1897		netif_napi_del(&edev->fp_array[i].napi);
   1898	}
   1899}
   1900
   1901static void qede_napi_add_enable(struct qede_dev *edev)
   1902{
   1903	int i;
   1904
   1905	/* Add NAPI objects */
   1906	for_each_queue(i) {
   1907		netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
   1908			       qede_poll, NAPI_POLL_WEIGHT);
   1909		napi_enable(&edev->fp_array[i].napi);
   1910	}
   1911}
   1912
   1913static void qede_sync_free_irqs(struct qede_dev *edev)
   1914{
   1915	int i;
   1916
   1917	for (i = 0; i < edev->int_info.used_cnt; i++) {
   1918		if (edev->int_info.msix_cnt) {
   1919			free_irq(edev->int_info.msix[i].vector,
   1920				 &edev->fp_array[i]);
   1921		} else {
   1922			edev->ops->common->simd_handler_clean(edev->cdev, i);
   1923		}
   1924	}
   1925
   1926	edev->int_info.used_cnt = 0;
   1927	edev->int_info.msix_cnt = 0;
   1928}
   1929
   1930static int qede_req_msix_irqs(struct qede_dev *edev)
   1931{
   1932	int i, rc;
   1933
    1934	/* Make sure there are enough interrupts for the prepared RSS queues */
   1935	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
   1936		DP_ERR(edev,
   1937		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
   1938		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
   1939		return -EINVAL;
   1940	}
   1941
   1942	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
   1943#ifdef CONFIG_RFS_ACCEL
   1944		struct qede_fastpath *fp = &edev->fp_array[i];
   1945
   1946		if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
   1947			rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
   1948					      edev->int_info.msix[i].vector);
   1949			if (rc) {
   1950				DP_ERR(edev, "Failed to add CPU rmap\n");
   1951				qede_free_arfs(edev);
   1952			}
   1953		}
   1954#endif
   1955		rc = request_irq(edev->int_info.msix[i].vector,
   1956				 qede_msix_fp_int, 0, edev->fp_array[i].name,
   1957				 &edev->fp_array[i]);
   1958		if (rc) {
   1959			DP_ERR(edev, "Request fp %d irq failed\n", i);
   1960#ifdef CONFIG_RFS_ACCEL
   1961			if (edev->ndev->rx_cpu_rmap)
   1962				free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
   1963
   1964			edev->ndev->rx_cpu_rmap = NULL;
   1965#endif
   1966			qede_sync_free_irqs(edev);
   1967			return rc;
   1968		}
   1969		DP_VERBOSE(edev, NETIF_MSG_INTR,
   1970			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
   1971			   edev->fp_array[i].name, i,
   1972			   &edev->fp_array[i]);
   1973		edev->int_info.used_cnt++;
   1974	}
   1975
   1976	return 0;
   1977}
   1978
   1979static void qede_simd_fp_handler(void *cookie)
   1980{
   1981	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
   1982
   1983	napi_schedule_irqoff(&fp->napi);
   1984}
   1985
   1986static int qede_setup_irqs(struct qede_dev *edev)
   1987{
   1988	int i, rc = 0;
   1989
   1990	/* Learn Interrupt configuration */
   1991	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
   1992	if (rc)
   1993		return rc;
   1994
   1995	if (edev->int_info.msix_cnt) {
   1996		rc = qede_req_msix_irqs(edev);
   1997		if (rc)
   1998			return rc;
   1999		edev->ndev->irq = edev->int_info.msix[0].vector;
   2000	} else {
   2001		const struct qed_common_ops *ops;
   2002
    2003		/* qed should learn to receive the RSS ids and callbacks */
   2004		ops = edev->ops->common;
   2005		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
   2006			ops->simd_handler_config(edev->cdev,
   2007						 &edev->fp_array[i], i,
   2008						 qede_simd_fp_handler);
   2009		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
   2010	}
   2011	return 0;
   2012}
   2013
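        /* Poll until the Tx queue's SW consumer catches up with the producer.
         * Each iteration sleeps 1-2 ms and up to 1000 iterations are allowed,
         * i.e. roughly 1-2 seconds in total. If the queue is still stuck and
         * allow_drain is set, a drain is requested from the MCP and the wait is
         * retried once; otherwise -ENODEV is returned.
         */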
   2014static int qede_drain_txq(struct qede_dev *edev,
   2015			  struct qede_tx_queue *txq, bool allow_drain)
   2016{
   2017	int rc, cnt = 1000;
   2018
   2019	while (txq->sw_tx_cons != txq->sw_tx_prod) {
   2020		if (!cnt) {
   2021			if (allow_drain) {
   2022				DP_NOTICE(edev,
   2023					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
   2024					  txq->index);
   2025				rc = edev->ops->common->drain(edev->cdev);
   2026				if (rc)
   2027					return rc;
   2028				return qede_drain_txq(edev, txq, false);
   2029			}
   2030			DP_NOTICE(edev,
   2031				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
   2032				  txq->index, txq->sw_tx_prod,
   2033				  txq->sw_tx_cons);
   2034			return -ENODEV;
   2035		}
   2036		cnt--;
   2037		usleep_range(1000, 2000);
   2038		barrier();
   2039	}
   2040
   2041	/* FW finished processing, wait for HW to transmit all tx packets */
   2042	usleep_range(1000, 2000);
   2043
   2044	return 0;
   2045}
   2046
   2047static int qede_stop_txq(struct qede_dev *edev,
   2048			 struct qede_tx_queue *txq, int rss_id)
   2049{
   2050	/* delete doorbell from doorbell recovery mechanism */
   2051	edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
   2052					   &txq->tx_db);
   2053
   2054	return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
   2055}
   2056
   2057static int qede_stop_queues(struct qede_dev *edev)
   2058{
   2059	struct qed_update_vport_params *vport_update_params;
   2060	struct qed_dev *cdev = edev->cdev;
   2061	struct qede_fastpath *fp;
   2062	int rc, i;
   2063
   2064	/* Disable the vport */
   2065	vport_update_params = vzalloc(sizeof(*vport_update_params));
   2066	if (!vport_update_params)
   2067		return -ENOMEM;
   2068
   2069	vport_update_params->vport_id = 0;
   2070	vport_update_params->update_vport_active_flg = 1;
   2071	vport_update_params->vport_active_flg = 0;
   2072	vport_update_params->update_rss_flg = 0;
   2073
   2074	rc = edev->ops->vport_update(cdev, vport_update_params);
   2075	vfree(vport_update_params);
   2076
   2077	if (rc) {
   2078		DP_ERR(edev, "Failed to update vport\n");
   2079		return rc;
   2080	}
   2081
   2082	/* Flush Tx queues. If needed, request drain from MCP */
   2083	for_each_queue(i) {
   2084		fp = &edev->fp_array[i];
   2085
   2086		if (fp->type & QEDE_FASTPATH_TX) {
   2087			int cos;
   2088
   2089			for_each_cos_in_txq(edev, cos) {
   2090				rc = qede_drain_txq(edev, &fp->txq[cos], true);
   2091				if (rc)
   2092					return rc;
   2093			}
   2094		}
   2095
   2096		if (fp->type & QEDE_FASTPATH_XDP) {
   2097			rc = qede_drain_txq(edev, fp->xdp_tx, true);
   2098			if (rc)
   2099				return rc;
   2100		}
   2101	}
   2102
   2103	/* Stop all Queues in reverse order */
   2104	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
   2105		fp = &edev->fp_array[i];
   2106
   2107		/* Stop the Tx Queue(s) */
   2108		if (fp->type & QEDE_FASTPATH_TX) {
   2109			int cos;
   2110
   2111			for_each_cos_in_txq(edev, cos) {
   2112				rc = qede_stop_txq(edev, &fp->txq[cos], i);
   2113				if (rc)
   2114					return rc;
   2115			}
   2116		}
   2117
   2118		/* Stop the Rx Queue */
   2119		if (fp->type & QEDE_FASTPATH_RX) {
   2120			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
   2121			if (rc) {
   2122				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
   2123				return rc;
   2124			}
   2125		}
   2126
   2127		/* Stop the XDP forwarding queue */
   2128		if (fp->type & QEDE_FASTPATH_XDP) {
   2129			rc = qede_stop_txq(edev, fp->xdp_tx, i);
   2130			if (rc)
   2131				return rc;
   2132
   2133			bpf_prog_put(fp->rxq->xdp_prog);
   2134		}
   2135	}
   2136
   2137	/* Stop the vport */
   2138	rc = edev->ops->vport_stop(cdev, 0);
   2139	if (rc)
   2140		DP_ERR(edev, "Failed to stop VPORT\n");
   2141
   2142	return rc;
   2143}
   2144
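        /* Start a Tx queue (regular or XDP) in the FW/HW. The doorbell data is
         * pre-built here so that the fastpath only needs to update the producer
         * and write txq->tx_db to the doorbell address returned by qed.
         */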
   2145static int qede_start_txq(struct qede_dev *edev,
   2146			  struct qede_fastpath *fp,
   2147			  struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
   2148{
   2149	dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
   2150	u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
   2151	struct qed_queue_start_common_params params;
   2152	struct qed_txq_start_ret_params ret_params;
   2153	int rc;
   2154
   2155	memset(&params, 0, sizeof(params));
   2156	memset(&ret_params, 0, sizeof(ret_params));
   2157
    2158	/* Let the XDP queue share the queue-zone with one of the regular txqs.
    2159	 * We don't really care about its coalescing.
    2160	 */
   2161	if (txq->is_xdp)
   2162		params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
   2163	else
   2164		params.queue_id = txq->index;
   2165
   2166	params.p_sb = fp->sb_info;
   2167	params.sb_idx = sb_idx;
   2168	params.tc = txq->cos;
   2169
   2170	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
   2171				   page_cnt, &ret_params);
   2172	if (rc) {
   2173		DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
   2174		return rc;
   2175	}
   2176
   2177	txq->doorbell_addr = ret_params.p_doorbell;
   2178	txq->handle = ret_params.p_handle;
   2179
    2180	/* Determine the associated FW consumer address */
   2181	txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
   2182
   2183	/* Prepare the doorbell parameters */
   2184	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
   2185	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
   2186	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
   2187		  DQ_XCM_ETH_TX_BD_PROD_CMD);
   2188	txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
   2189
   2190	/* register doorbell with doorbell recovery mechanism */
   2191	rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
   2192						&txq->tx_db, DB_REC_WIDTH_32B,
   2193						DB_REC_KERNEL);
   2194
   2195	return rc;
   2196}
   2197
   2198static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
   2199{
   2200	int vlan_removal_en = 1;
   2201	struct qed_dev *cdev = edev->cdev;
   2202	struct qed_dev_info *qed_info = &edev->dev_info.common;
   2203	struct qed_update_vport_params *vport_update_params;
   2204	struct qed_queue_start_common_params q_params;
   2205	struct qed_start_vport_params start = {0};
   2206	int rc, i;
   2207
   2208	if (!edev->num_queues) {
   2209		DP_ERR(edev,
    2210		       "Cannot update V-PORT to active as there are no Rx queues\n");
   2211		return -EINVAL;
   2212	}
   2213
   2214	vport_update_params = vzalloc(sizeof(*vport_update_params));
   2215	if (!vport_update_params)
   2216		return -ENOMEM;
   2217
   2218	start.handle_ptp_pkts = !!(edev->ptp);
   2219	start.gro_enable = !edev->gro_disable;
   2220	start.mtu = edev->ndev->mtu;
   2221	start.vport_id = 0;
   2222	start.drop_ttl0 = true;
   2223	start.remove_inner_vlan = vlan_removal_en;
   2224	start.clear_stats = clear_stats;
   2225
   2226	rc = edev->ops->vport_start(cdev, &start);
   2227
   2228	if (rc) {
   2229		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
   2230		goto out;
   2231	}
   2232
   2233	DP_VERBOSE(edev, NETIF_MSG_IFUP,
   2234		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
   2235		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
   2236
   2237	for_each_queue(i) {
   2238		struct qede_fastpath *fp = &edev->fp_array[i];
   2239		dma_addr_t p_phys_table;
   2240		u32 page_cnt;
   2241
   2242		if (fp->type & QEDE_FASTPATH_RX) {
   2243			struct qed_rxq_start_ret_params ret_params;
   2244			struct qede_rx_queue *rxq = fp->rxq;
   2245			__le16 *val;
   2246
   2247			memset(&ret_params, 0, sizeof(ret_params));
   2248			memset(&q_params, 0, sizeof(q_params));
   2249			q_params.queue_id = rxq->rxq_id;
   2250			q_params.vport_id = 0;
   2251			q_params.p_sb = fp->sb_info;
   2252			q_params.sb_idx = RX_PI;
   2253
   2254			p_phys_table =
   2255			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
   2256			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
   2257
   2258			rc = edev->ops->q_rx_start(cdev, i, &q_params,
   2259						   rxq->rx_buf_size,
   2260						   rxq->rx_bd_ring.p_phys_addr,
   2261						   p_phys_table,
   2262						   page_cnt, &ret_params);
   2263			if (rc) {
   2264				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
   2265				       rc);
   2266				goto out;
   2267			}
   2268
   2269			/* Use the return parameters */
   2270			rxq->hw_rxq_prod_addr = ret_params.p_prod;
   2271			rxq->handle = ret_params.p_handle;
   2272
   2273			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
   2274			rxq->hw_cons_ptr = val;
   2275
   2276			qede_update_rx_prod(edev, rxq);
   2277		}
   2278
   2279		if (fp->type & QEDE_FASTPATH_XDP) {
   2280			rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
   2281			if (rc)
   2282				goto out;
   2283
   2284			bpf_prog_add(edev->xdp_prog, 1);
   2285			fp->rxq->xdp_prog = edev->xdp_prog;
   2286		}
   2287
   2288		if (fp->type & QEDE_FASTPATH_TX) {
   2289			int cos;
   2290
   2291			for_each_cos_in_txq(edev, cos) {
   2292				rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
   2293						    TX_PI(cos));
   2294				if (rc)
   2295					goto out;
   2296			}
   2297		}
   2298	}
   2299
   2300	/* Prepare and send the vport enable */
   2301	vport_update_params->vport_id = start.vport_id;
   2302	vport_update_params->update_vport_active_flg = 1;
   2303	vport_update_params->vport_active_flg = 1;
   2304
   2305	if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
   2306	    qed_info->tx_switching) {
   2307		vport_update_params->update_tx_switching_flg = 1;
   2308		vport_update_params->tx_switching_flg = 1;
   2309	}
   2310
   2311	qede_fill_rss_params(edev, &vport_update_params->rss_params,
   2312			     &vport_update_params->update_rss_flg);
   2313
   2314	rc = edev->ops->vport_update(cdev, vport_update_params);
   2315	if (rc)
   2316		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
   2317
   2318out:
   2319	vfree(vport_update_params);
   2320	return rc;
   2321}
   2322
   2323enum qede_unload_mode {
   2324	QEDE_UNLOAD_NORMAL,
   2325	QEDE_UNLOAD_RECOVERY,
   2326};
   2327
   2328static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
   2329			bool is_locked)
   2330{
   2331	struct qed_link_params link_params;
   2332	int rc;
   2333
   2334	DP_INFO(edev, "Starting qede unload\n");
   2335
   2336	if (!is_locked)
   2337		__qede_lock(edev);
   2338
   2339	clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
   2340
   2341	if (mode != QEDE_UNLOAD_RECOVERY)
   2342		edev->state = QEDE_STATE_CLOSED;
   2343
   2344	qede_rdma_dev_event_close(edev);
   2345
   2346	/* Close OS Tx */
   2347	netif_tx_disable(edev->ndev);
   2348	netif_carrier_off(edev->ndev);
   2349
   2350	if (mode != QEDE_UNLOAD_RECOVERY) {
   2351		/* Reset the link */
   2352		memset(&link_params, 0, sizeof(link_params));
   2353		link_params.link_up = false;
   2354		edev->ops->common->set_link(edev->cdev, &link_params);
   2355
   2356		rc = qede_stop_queues(edev);
   2357		if (rc) {
   2358#ifdef CONFIG_RFS_ACCEL
   2359			if (edev->dev_info.common.b_arfs_capable) {
   2360				qede_poll_for_freeing_arfs_filters(edev);
   2361				if (edev->ndev->rx_cpu_rmap)
   2362					free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
   2363
   2364				edev->ndev->rx_cpu_rmap = NULL;
   2365			}
   2366#endif
   2367			qede_sync_free_irqs(edev);
   2368			goto out;
   2369		}
   2370
   2371		DP_INFO(edev, "Stopped Queues\n");
   2372	}
   2373
   2374	qede_vlan_mark_nonconfigured(edev);
   2375	edev->ops->fastpath_stop(edev->cdev);
   2376
   2377	if (edev->dev_info.common.b_arfs_capable) {
   2378		qede_poll_for_freeing_arfs_filters(edev);
   2379		qede_free_arfs(edev);
   2380	}
   2381
   2382	/* Release the interrupts */
   2383	qede_sync_free_irqs(edev);
   2384	edev->ops->common->set_fp_int(edev->cdev, 0);
   2385
   2386	qede_napi_disable_remove(edev);
   2387
   2388	if (mode == QEDE_UNLOAD_RECOVERY)
   2389		qede_empty_tx_queues(edev);
   2390
   2391	qede_free_mem_load(edev);
   2392	qede_free_fp_array(edev);
   2393
   2394out:
   2395	if (!is_locked)
   2396		__qede_unlock(edev);
   2397
   2398	if (mode != QEDE_UNLOAD_RECOVERY)
   2399		DP_NOTICE(edev, "Link is down\n");
   2400
   2401	edev->ptp_skip_txts = 0;
   2402
   2403	DP_INFO(edev, "Ending qede unload\n");
   2404}
   2405
   2406enum qede_load_mode {
   2407	QEDE_LOAD_NORMAL,
   2408	QEDE_LOAD_RELOAD,
   2409	QEDE_LOAD_RECOVERY,
   2410};
   2411
   2412static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
   2413		     bool is_locked)
   2414{
   2415	struct qed_link_params link_params;
   2416	struct ethtool_coalesce coal = {};
   2417	u8 num_tc;
   2418	int rc, i;
   2419
   2420	DP_INFO(edev, "Starting qede load\n");
   2421
   2422	if (!is_locked)
   2423		__qede_lock(edev);
   2424
   2425	rc = qede_set_num_queues(edev);
   2426	if (rc)
   2427		goto out;
   2428
   2429	rc = qede_alloc_fp_array(edev);
   2430	if (rc)
   2431		goto out;
   2432
   2433	qede_init_fp(edev);
   2434
   2435	rc = qede_alloc_mem_load(edev);
   2436	if (rc)
   2437		goto err1;
   2438	DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
   2439		QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
   2440
   2441	rc = qede_set_real_num_queues(edev);
   2442	if (rc)
   2443		goto err2;
   2444
   2445	if (qede_alloc_arfs(edev)) {
   2446		edev->ndev->features &= ~NETIF_F_NTUPLE;
   2447		edev->dev_info.common.b_arfs_capable = false;
   2448	}
   2449
   2450	qede_napi_add_enable(edev);
   2451	DP_INFO(edev, "Napi added and enabled\n");
   2452
   2453	rc = qede_setup_irqs(edev);
   2454	if (rc)
   2455		goto err3;
   2456	DP_INFO(edev, "Setup IRQs succeeded\n");
   2457
   2458	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
   2459	if (rc)
   2460		goto err4;
   2461	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
   2462
   2463	num_tc = netdev_get_num_tc(edev->ndev);
   2464	num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
   2465	qede_setup_tc(edev->ndev, num_tc);
   2466
   2467	/* Program un-configured VLANs */
   2468	qede_configure_vlan_filters(edev);
   2469
   2470	set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
   2471
   2472	/* Ask for link-up using current configuration */
   2473	memset(&link_params, 0, sizeof(link_params));
   2474	link_params.link_up = true;
   2475	edev->ops->common->set_link(edev->cdev, &link_params);
   2476
   2477	edev->state = QEDE_STATE_OPEN;
   2478
   2479	coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
   2480	coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS;
   2481
   2482	for_each_queue(i) {
   2483		if (edev->coal_entry[i].isvalid) {
   2484			coal.rx_coalesce_usecs = edev->coal_entry[i].rxc;
   2485			coal.tx_coalesce_usecs = edev->coal_entry[i].txc;
   2486		}
   2487		__qede_unlock(edev);
   2488		qede_set_per_coalesce(edev->ndev, i, &coal);
   2489		__qede_lock(edev);
   2490	}
   2491	DP_INFO(edev, "Ending successfully qede load\n");
   2492
   2493	goto out;
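        /* The error labels below unwind in reverse order of the setup above:
         * IRQs, then NAPI, then queue memory, then the fastpath array.
         */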
   2494err4:
   2495	qede_sync_free_irqs(edev);
   2496err3:
   2497	qede_napi_disable_remove(edev);
   2498err2:
   2499	qede_free_mem_load(edev);
   2500err1:
   2501	edev->ops->common->set_fp_int(edev->cdev, 0);
   2502	qede_free_fp_array(edev);
   2503	edev->num_queues = 0;
   2504	edev->fp_num_tx = 0;
   2505	edev->fp_num_rx = 0;
   2506out:
   2507	if (!is_locked)
   2508		__qede_unlock(edev);
   2509
   2510	return rc;
   2511}
   2512
    2513/* 'func' should be able to run between unload and reload assuming the
    2514 * interface is actually running, or afterwards in case it's currently DOWN.
    2515 */
   2516void qede_reload(struct qede_dev *edev,
   2517		 struct qede_reload_args *args, bool is_locked)
   2518{
   2519	if (!is_locked)
   2520		__qede_lock(edev);
   2521
    2522	/* Since qede_lock is held, the internal state can't change even
    2523	 * if the netdev state starts transitioning. Check whether the current
    2524	 * internal configuration indicates the device is up, then reload.
    2525	 */
   2526	if (edev->state == QEDE_STATE_OPEN) {
   2527		qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
   2528		if (args)
   2529			args->func(edev, args);
   2530		qede_load(edev, QEDE_LOAD_RELOAD, true);
   2531
   2532		/* Since no one is going to do it for us, re-configure */
   2533		qede_config_rx_mode(edev->ndev);
   2534	} else if (args) {
   2535		args->func(edev, args);
   2536	}
   2537
   2538	if (!is_locked)
   2539		__qede_unlock(edev);
   2540}
   2541
   2542/* called with rtnl_lock */
   2543static int qede_open(struct net_device *ndev)
   2544{
   2545	struct qede_dev *edev = netdev_priv(ndev);
   2546	int rc;
   2547
   2548	netif_carrier_off(ndev);
   2549
   2550	edev->ops->common->set_power_state(edev->cdev, PCI_D0);
   2551
   2552	rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
   2553	if (rc)
   2554		return rc;
   2555
   2556	udp_tunnel_nic_reset_ntf(ndev);
   2557
   2558	edev->ops->common->update_drv_state(edev->cdev, true);
   2559
   2560	return 0;
   2561}
   2562
   2563static int qede_close(struct net_device *ndev)
   2564{
   2565	struct qede_dev *edev = netdev_priv(ndev);
   2566
   2567	qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
   2568
   2569	if (edev->cdev)
   2570		edev->ops->common->update_drv_state(edev->cdev, false);
   2571
   2572	return 0;
   2573}
   2574
   2575static void qede_link_update(void *dev, struct qed_link_output *link)
   2576{
   2577	struct qede_dev *edev = dev;
   2578
   2579	if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
   2580		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
   2581		return;
   2582	}
   2583
   2584	if (link->link_up) {
   2585		if (!netif_carrier_ok(edev->ndev)) {
   2586			DP_NOTICE(edev, "Link is up\n");
   2587			netif_tx_start_all_queues(edev->ndev);
   2588			netif_carrier_on(edev->ndev);
   2589			qede_rdma_dev_event_open(edev);
   2590		}
   2591	} else {
   2592		if (netif_carrier_ok(edev->ndev)) {
   2593			DP_NOTICE(edev, "Link is down\n");
   2594			netif_tx_disable(edev->ndev);
   2595			netif_carrier_off(edev->ndev);
   2596			qede_rdma_dev_event_close(edev);
   2597		}
   2598	}
   2599}
   2600
   2601static void qede_schedule_recovery_handler(void *dev)
   2602{
   2603	struct qede_dev *edev = dev;
   2604
   2605	if (edev->state == QEDE_STATE_RECOVERY) {
   2606		DP_NOTICE(edev,
   2607			  "Avoid scheduling a recovery handling since already in recovery state\n");
   2608		return;
   2609	}
   2610
   2611	set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
   2612	schedule_delayed_work(&edev->sp_task, 0);
   2613
   2614	DP_INFO(edev, "Scheduled a recovery handler\n");
   2615}
   2616
   2617static void qede_recovery_failed(struct qede_dev *edev)
   2618{
   2619	netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");
   2620
   2621	netif_device_detach(edev->ndev);
   2622
   2623	if (edev->cdev)
   2624		edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
   2625}
   2626
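        /* Recovery flow: run the qed recovery prolog, unload the interface if
         * it was open, remove and re-probe the device in recovery mode and, if
         * the interface was open, load it again and restore the Rx mode and
         * UDP tunnel configuration.
         */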
   2627static void qede_recovery_handler(struct qede_dev *edev)
   2628{
   2629	u32 curr_state = edev->state;
   2630	int rc;
   2631
   2632	DP_NOTICE(edev, "Starting a recovery process\n");
   2633
    2634	/* No need to acquire the qede_lock first since this is done by
    2635	 * qede_sp_task before calling this function.
    2636	 */
   2637	edev->state = QEDE_STATE_RECOVERY;
   2638
   2639	edev->ops->common->recovery_prolog(edev->cdev);
   2640
   2641	if (curr_state == QEDE_STATE_OPEN)
   2642		qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);
   2643
   2644	__qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);
   2645
   2646	rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
   2647			  IS_VF(edev), QEDE_PROBE_RECOVERY);
   2648	if (rc) {
   2649		edev->cdev = NULL;
   2650		goto err;
   2651	}
   2652
   2653	if (curr_state == QEDE_STATE_OPEN) {
   2654		rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
   2655		if (rc)
   2656			goto err;
   2657
   2658		qede_config_rx_mode(edev->ndev);
   2659		udp_tunnel_nic_reset_ntf(edev->ndev);
   2660	}
   2661
   2662	edev->state = curr_state;
   2663
   2664	DP_NOTICE(edev, "Recovery handling is done\n");
   2665
   2666	return;
   2667
   2668err:
   2669	qede_recovery_failed(edev);
   2670}
   2671
   2672static void qede_atomic_hw_err_handler(struct qede_dev *edev)
   2673{
   2674	struct qed_dev *cdev = edev->cdev;
   2675
   2676	DP_NOTICE(edev,
   2677		  "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
   2678		  edev->err_flags);
   2679
   2680	/* Get a call trace of the flow that led to the error */
   2681	WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));
   2682
   2683	/* Prevent HW attentions from being reasserted */
   2684	if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
   2685		edev->ops->common->attn_clr_enable(cdev, true);
   2686
   2687	DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
   2688}
   2689
   2690static void qede_generic_hw_err_handler(struct qede_dev *edev)
   2691{
   2692	DP_NOTICE(edev,
   2693		  "Generic sleepable HW error handling started - err_flags 0x%lx\n",
   2694		  edev->err_flags);
   2695
   2696	if (edev->devlink) {
   2697		DP_NOTICE(edev, "Reporting fatal error to devlink\n");
   2698		edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);
   2699	}
   2700
   2701	clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
   2702
   2703	DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
   2704}
   2705
   2706static void qede_set_hw_err_flags(struct qede_dev *edev,
   2707				  enum qed_hw_err_type err_type)
   2708{
   2709	unsigned long err_flags = 0;
   2710
   2711	switch (err_type) {
   2712	case QED_HW_ERR_DMAE_FAIL:
   2713		set_bit(QEDE_ERR_WARN, &err_flags);
   2714		fallthrough;
   2715	case QED_HW_ERR_MFW_RESP_FAIL:
   2716	case QED_HW_ERR_HW_ATTN:
   2717	case QED_HW_ERR_RAMROD_FAIL:
   2718	case QED_HW_ERR_FW_ASSERT:
   2719		set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
   2720		set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
    2721		/* Mark this error as recoverable and start recovery */
   2722		set_bit(QEDE_ERR_IS_RECOVERABLE, &err_flags);
   2723		break;
   2724
   2725	default:
   2726		DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
   2727		break;
   2728	}
   2729
   2730	edev->err_flags |= err_flags;
   2731}
   2732
   2733static void qede_schedule_hw_err_handler(void *dev,
   2734					 enum qed_hw_err_type err_type)
   2735{
   2736	struct qede_dev *edev = dev;
   2737
   2738	/* Fan failure cannot be masked by handling of another HW error or by a
   2739	 * concurrent recovery process.
   2740	 */
   2741	if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
   2742	     edev->state == QEDE_STATE_RECOVERY) &&
   2743	     err_type != QED_HW_ERR_FAN_FAIL) {
   2744		DP_INFO(edev,
   2745			"Avoid scheduling an error handling while another HW error is being handled\n");
   2746		return;
   2747	}
   2748
   2749	if (err_type >= QED_HW_ERR_LAST) {
   2750		DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
   2751		clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
   2752		return;
   2753	}
   2754
   2755	edev->last_err_type = err_type;
   2756	qede_set_hw_err_flags(edev, err_type);
   2757	qede_atomic_hw_err_handler(edev);
   2758	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
   2759	schedule_delayed_work(&edev->sp_task, 0);
   2760
    2761	DP_INFO(edev, "Scheduled an error handler [err_type %d]\n", err_type);
   2762}
   2763
   2764static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
   2765{
   2766	struct netdev_queue *netdev_txq;
   2767
   2768	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
   2769	if (netif_xmit_stopped(netdev_txq))
   2770		return true;
   2771
   2772	return false;
   2773}
   2774
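        /* Report generic TLV data to qed: checksum/LSO feature flags and up to
         * three MAC addresses (the primary address plus the first two UC
         * entries).
         */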
   2775static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
   2776{
   2777	struct qede_dev *edev = dev;
   2778	struct netdev_hw_addr *ha;
   2779	int i;
   2780
   2781	if (edev->ndev->features & NETIF_F_IP_CSUM)
   2782		data->feat_flags |= QED_TLV_IP_CSUM;
   2783	if (edev->ndev->features & NETIF_F_TSO)
   2784		data->feat_flags |= QED_TLV_LSO;
   2785
   2786	ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
   2787	eth_zero_addr(data->mac[1]);
   2788	eth_zero_addr(data->mac[2]);
   2789	/* Copy the first two UC macs */
   2790	netif_addr_lock_bh(edev->ndev);
   2791	i = 1;
   2792	netdev_for_each_uc_addr(ha, edev->ndev) {
   2793		ether_addr_copy(data->mac[i++], ha->addr);
   2794		if (i == QED_TLV_MAC_COUNT)
   2795			break;
   2796	}
   2797
   2798	netif_addr_unlock_bh(edev->ndev);
   2799}
   2800
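        /* Fill the Ethernet TLVs reported to the management firmware, including
         * LSO limits, promiscuous mode, descriptor counts and approximate
         * per-queue fill levels.
         */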
   2801static void qede_get_eth_tlv_data(void *dev, void *data)
   2802{
   2803	struct qed_mfw_tlv_eth *etlv = data;
   2804	struct qede_dev *edev = dev;
   2805	struct qede_fastpath *fp;
   2806	int i;
   2807
   2808	etlv->lso_maxoff_size = 0XFFFF;
   2809	etlv->lso_maxoff_size_set = true;
   2810	etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
   2811	etlv->lso_minseg_size_set = true;
   2812	etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
   2813	etlv->prom_mode_set = true;
   2814	etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
   2815	etlv->tx_descr_size_set = true;
   2816	etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
   2817	etlv->rx_descr_size_set = true;
   2818	etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
   2819	etlv->iov_offload_set = true;
   2820
    2821	/* Fill in information regarding the queues; this should be done under
    2822	 * the qede lock to guarantee they don't change beneath our feet.
    2823	 */
   2824	etlv->txqs_empty = true;
   2825	etlv->rxqs_empty = true;
   2826	etlv->num_txqs_full = 0;
   2827	etlv->num_rxqs_full = 0;
   2828
   2829	__qede_lock(edev);
   2830	for_each_queue(i) {
   2831		fp = &edev->fp_array[i];
   2832		if (fp->type & QEDE_FASTPATH_TX) {
   2833			struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);
   2834
   2835			if (txq->sw_tx_cons != txq->sw_tx_prod)
   2836				etlv->txqs_empty = false;
   2837			if (qede_is_txq_full(edev, txq))
   2838				etlv->num_txqs_full++;
   2839		}
   2840		if (fp->type & QEDE_FASTPATH_RX) {
   2841			if (qede_has_rx_work(fp->rxq))
   2842				etlv->rxqs_empty = false;
   2843
    2844			/* This one is a bit tricky; the firmware might stop
    2845			 * placing packets while the ring is not yet full.
    2846			 * Give an approximation.
    2847			 */
   2848			if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
   2849			    qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
   2850			    RX_RING_SIZE - 100)
   2851				etlv->num_rxqs_full++;
   2852		}
   2853	}
   2854	__qede_unlock(edev);
   2855
   2856	etlv->txqs_empty_set = true;
   2857	etlv->rxqs_empty_set = true;
   2858	etlv->num_txqs_full_set = true;
   2859	etlv->num_rxqs_full_set = true;
   2860}
   2861
   2862/**
   2863 * qede_io_error_detected(): Called when PCI error is detected
   2864 *
   2865 * @pdev: Pointer to PCI device
   2866 * @state: The current pci connection state
   2867 *
    2868 * Return: pci_ers_result_t.
   2869 *
   2870 * This function is called after a PCI bus error affecting
   2871 * this device has been detected.
   2872 */
   2873static pci_ers_result_t
   2874qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
   2875{
   2876	struct net_device *dev = pci_get_drvdata(pdev);
   2877	struct qede_dev *edev = netdev_priv(dev);
   2878
   2879	if (!edev)
   2880		return PCI_ERS_RESULT_NONE;
   2881
   2882	DP_NOTICE(edev, "IO error detected [%d]\n", state);
   2883
   2884	__qede_lock(edev);
   2885	if (edev->state == QEDE_STATE_RECOVERY) {
   2886		DP_NOTICE(edev, "Device already in the recovery state\n");
   2887		__qede_unlock(edev);
   2888		return PCI_ERS_RESULT_NONE;
   2889	}
   2890
   2891	/* PF handles the recovery of its VFs */
   2892	if (IS_VF(edev)) {
   2893		DP_VERBOSE(edev, QED_MSG_IOV,
   2894			   "VF recovery is handled by its PF\n");
   2895		__qede_unlock(edev);
   2896		return PCI_ERS_RESULT_RECOVERED;
   2897	}
   2898
   2899	/* Close OS Tx */
   2900	netif_tx_disable(edev->ndev);
   2901	netif_carrier_off(edev->ndev);
   2902
   2903	set_bit(QEDE_SP_AER, &edev->sp_flags);
   2904	schedule_delayed_work(&edev->sp_task, 0);
   2905
   2906	__qede_unlock(edev);
   2907
   2908	return PCI_ERS_RESULT_CAN_RECOVER;
   2909}