cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

aq_nic.c (42443B)


// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_nic.c: Definition of common code for NIC. */

#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_macsec.h"
#include "aq_main.h"
#include "aq_phy.h"
#include "aq_ptp.h"
#include "aq_filters.h"

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/pkt_cls.h>

static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");

static unsigned int aq_itr_tx;
module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");

static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");

static void aq_nic_update_ndev_stats(struct aq_nic_s *self);

static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
	static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	struct aq_rss_parameters *rss_params;
	int i = 0;

	rss_params = &cfg->aq_rss;

	rss_params->hash_secret_key_size = sizeof(rss_key);
	memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
	rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;

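	/* num_rss_queues is rounded down to a power of two by the caller
	 * (see aq_nic_cfg_update_num_vecs()), so the mask below distributes
	 * indirection table entries evenly across the RSS queues.
	 */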
	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}

/* Recalculate the number of vectors */
static void aq_nic_cfg_update_num_vecs(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
	cfg->vecs = min(cfg->vecs, num_online_cpus());
	if (self->irqvecs > AQ_HW_SERVICE_IRQS)
		cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
	/* cfg->vecs should be a power of 2 for RSS */
	cfg->vecs = rounddown_pow_of_two(cfg->vecs);

	if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
		if (cfg->tcs > 2)
			cfg->vecs = min(cfg->vecs, 4U);
	}

	if (cfg->vecs <= 4)
		cfg->tc_mode = AQ_TC_MODE_8TCS;
	else
		cfg->tc_mode = AQ_TC_MODE_4TCS;

	/* RSS rings */
	cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
	aq_nic_rss_init(self, cfg->num_rss_queues);
}

/* Checks hw_caps and 'corrects' aq_nic_cfg at runtime */
void aq_nic_cfg_start(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	int i;

	cfg->tcs = AQ_CFG_TCS_DEF;

	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

	cfg->itr = aq_itr;
	cfg->tx_itr = aq_itr_tx;
	cfg->rx_itr = aq_itr_rx;

	cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
	cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
	cfg->fc.req = AQ_CFG_FC_MODE;
	cfg->wol = AQ_CFG_WOL_MODES;

	cfg->mtu = AQ_CFG_MTU_DEF;
	cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
	cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

	cfg->is_lro = AQ_CFG_IS_LRO_DEF;
	cfg->is_ptp = true;

	/* descriptors */
	cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
	cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);

	aq_nic_cfg_update_num_vecs(self);

	cfg->irq_type = aq_pci_func_get_irq_type(self);

	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
	    (cfg->aq_hw_caps->vecs == 1U) ||
	    (cfg->vecs == 1U)) {
		cfg->is_rss = 0U;
		cfg->vecs = 1U;
	}

	/* Check if we have enough vectors allocated for
	 * the link status IRQ. If not, we'll learn the link
	 * state from the slower service task.
	 */
	if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
		cfg->link_irq_vec = cfg->vecs;
	else
		cfg->link_irq_vec = 0;

	cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
	cfg->features = cfg->aq_hw_caps->hw_features;
	cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX);
	cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX);
	cfg->is_vlan_force_promisc = true;

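	/* By default, map the eight 802.1p priorities evenly onto the
	 * configured traffic classes.
	 */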
	for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
		cfg->prio_tc_map[i] = cfg->tcs * i / 8;
}

static int aq_nic_update_link_status(struct aq_nic_s *self)
{
	int err = self->aq_fw_ops->update_link_status(self->aq_hw);
	u32 fc = 0;

	if (err)
		return err;

	if (self->aq_fw_ops->get_flow_control)
		self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
	self->aq_nic_cfg.fc.cur = fc;

	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
		netdev_info(self->ndev, "%s: link change old %d new %d\n",
			    AQ_CFG_DRV_NAME, self->link_status.mbps,
			    self->aq_hw->aq_link_status.mbps);
		aq_nic_update_interrupt_moderation_settings(self);

		if (self->aq_ptp) {
			aq_ptp_clock_init(self);
			aq_ptp_tm_offset_set(self,
					     self->aq_hw->aq_link_status.mbps);
			aq_ptp_link_change(self);
		}

		/* The driver has to update flow control settings on the RX
		 * block on any link event, so query the FW for the FC it
		 * negotiated.
		 */
		if (self->aq_hw_ops->hw_set_fc)
			self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
	}

	self->link_status = self->aq_hw->aq_link_status;
	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
		aq_utils_obj_set(&self->flags,
				 AQ_NIC_FLAG_STARTED);
		aq_utils_obj_clear(&self->flags,
				   AQ_NIC_LINK_DOWN);
		netif_carrier_on(self->ndev);
#if IS_ENABLED(CONFIG_MACSEC)
		aq_macsec_enable(self);
#endif
		if (self->aq_hw_ops->hw_tc_rate_limit_set)
			self->aq_hw_ops->hw_tc_rate_limit_set(self->aq_hw);

		netif_tx_wake_all_queues(self->ndev);
	}
	if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
		netif_carrier_off(self->ndev);
		netif_tx_disable(self->ndev);
		aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
	}

	return 0;
}

static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
{
	struct aq_nic_s *self = private;

	if (!self)
		return IRQ_NONE;

	aq_nic_update_link_status(self);

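	/* Re-arm the dedicated link-state interrupt vector. */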
	self->aq_hw_ops->hw_irq_enable(self->aq_hw,
				       BIT(self->aq_nic_cfg.link_irq_vec));

	return IRQ_HANDLED;
}

static void aq_nic_service_task(struct work_struct *work)
{
	struct aq_nic_s *self = container_of(work, struct aq_nic_s,
					     service_task);
	int err;

	aq_ptp_service_task(self);

	if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
		return;

	err = aq_nic_update_link_status(self);
	if (err)
		return;

#if IS_ENABLED(CONFIG_MACSEC)
	aq_macsec_work(self);
#endif

	mutex_lock(&self->fwreq_mutex);
	if (self->aq_fw_ops->update_stats)
		self->aq_fw_ops->update_stats(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);

	aq_nic_update_ndev_stats(self);
}

static void aq_nic_service_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, service_timer);

	mod_timer(&self->service_timer,
		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);

	aq_ndev_schedule_work(&self->service_task);
}

static void aq_nic_polling_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, polling_timer);
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_isr(i, (void *)aq_vec);

	mod_timer(&self->polling_timer, jiffies +
		  AQ_CFG_POLLING_TIMER_INTERVAL);
}

static int aq_nic_hw_prepare(struct aq_nic_s *self)
{
	int err = 0;

	err = self->aq_hw_ops->hw_soft_reset(self->aq_hw);
	if (err)
		goto exit;

	err = self->aq_hw_ops->hw_prepare(self->aq_hw, &self->aq_fw_ops);

exit:
	return err;
}

static bool aq_nic_is_valid_ether_addr(const u8 *addr)
{
	/* Some engineering samples of Aquantia NICs are provisioned with a
	 * partially populated MAC, which is still invalid.
	 */
	return !(addr[0] == 0 && addr[1] == 0 && addr[2] == 0);
}

int aq_nic_ndev_register(struct aq_nic_s *self)
{
	u8 addr[ETH_ALEN];
	int err = 0;

	if (!self->ndev) {
		err = -EINVAL;
		goto err_exit;
	}

	err = aq_nic_hw_prepare(self);
	if (err)
		goto err_exit;

#if IS_ENABLED(CONFIG_MACSEC)
	aq_macsec_init(self);
#endif

	if (platform_get_ethdev_address(&self->pdev->dev, self->ndev) != 0) {
		// If DT has none or an invalid one, ask the device for the MAC address
		mutex_lock(&self->fwreq_mutex);
		err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
		mutex_unlock(&self->fwreq_mutex);

		if (err)
			goto err_exit;

		if (is_valid_ether_addr(addr) &&
		    aq_nic_is_valid_ether_addr(addr)) {
			eth_hw_addr_set(self->ndev, addr);
		} else {
			netdev_warn(self->ndev, "MAC is invalid, will use random.");
			eth_hw_addr_random(self->ndev);
		}
	}

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
	{
		static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

		eth_hw_addr_set(self->ndev, mac_addr_permanent);
	}
#endif

	for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
	     self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] =
		    aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
		if (!self->aq_vec[self->aq_vecs]) {
			err = -ENOMEM;
			goto err_exit;
		}
	}

	netif_carrier_off(self->ndev);

	netif_tx_disable(self->ndev);

	err = register_netdev(self->ndev);
	if (err)
		goto err_exit;

err_exit:
#if IS_ENABLED(CONFIG_MACSEC)
	if (err)
		aq_macsec_free(self);
#endif
	return err;
}

void aq_nic_ndev_init(struct aq_nic_s *self)
{
	const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

	self->ndev->hw_features |= aq_hw_caps->hw_features;
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				     NETIF_F_RXHASH | NETIF_F_SG |
				     NETIF_F_LRO | NETIF_F_TSO | NETIF_F_TSO6;
	self->ndev->gso_partial_features = NETIF_F_GSO_UDP_L4;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	self->msg_enable = NETIF_MSG_DRV | NETIF_MSG_LINK;
	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
	self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
}

void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}

int aq_nic_init(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->power_state = AQ_HW_POWER_STATE_D0;
	mutex_lock(&self->fwreq_mutex);
	err = self->aq_hw_ops->hw_reset(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;
	/* Restore default settings */
	aq_nic_set_downshift(self, self->aq_nic_cfg.downshift_counter);
	aq_nic_set_media_detect(self, self->aq_nic_cfg.is_media_detect ?
				AQ_HW_MEDIA_DETECT_CNT : 0);

	err = self->aq_hw_ops->hw_init(self->aq_hw,
				       aq_nic_get_ndev(self)->dev_addr);
	if (err < 0)
		goto err_exit;

	if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ATLANTIC) &&
	    self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
		self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
		err = aq_phy_init(self->aq_hw);

		/* Disable PTP on NICs where it's known to cause datapath
		 * problems.
		 * Ideally this should have been done by PHY provisioning, but
		 * many units have already shipped with the PTP block enabled.
		 */
		if (self->aq_nic_cfg.aq_hw_caps->quirks & AQ_NIC_QUIRK_BAD_PTP)
			if (self->aq_hw->phy_id != HW_ATL_PHY_ID_MAX)
				aq_phy_disable_ptp(self->aq_hw);
	}

	for (i = 0U; i < self->aq_vecs; i++) {
		aq_vec = self->aq_vec[i];
		err = aq_vec_ring_alloc(aq_vec, self, i,
					aq_nic_get_cfg(self));
		if (err)
			goto err_exit;

		aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
	}

	if (aq_nic_get_cfg(self)->is_ptp) {
		err = aq_ptp_init(self, self->irqvecs - 1);
		if (err < 0)
			goto err_exit;

		err = aq_ptp_ring_alloc(self);
		if (err < 0)
			goto err_exit;

		err = aq_ptp_ring_init(self);
		if (err < 0)
			goto err_exit;
	}

	netif_carrier_off(self->ndev);

err_exit:
	return err;
}

int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	struct aq_nic_cfg_s *cfg;
	unsigned int i = 0U;
	int err = 0;

	cfg = aq_nic_get_cfg(self);

	err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						     self->mc_list.ar,
						     self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
						    self->packet_filter);
	if (err < 0)
		goto err_exit;

	for (i = 0U; self->aq_vecs > i; ++i) {
		aq_vec = self->aq_vec[i];
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = aq_ptp_ring_start(self);
	if (err < 0)
		goto err_exit;

	aq_nic_set_loopback(self);

	err = self->aq_hw_ops->hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = aq_nic_update_interrupt_moderation_settings(self);
	if (err)
		goto err_exit;

	INIT_WORK(&self->service_task, aq_nic_service_task);

	timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
	aq_nic_service_timer_cb(&self->service_timer);

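	/* In polling mode a timer drives the vector ISRs; otherwise request
	 * one interrupt per vector, plus the PTP and link-state lines where
	 * available.
	 */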
	if (cfg->is_polling) {
		timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		for (i = 0U; self->aq_vecs > i; ++i) {
			aq_vec = self->aq_vec[i];
			err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
						    aq_vec_isr, aq_vec,
						    aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		err = aq_ptp_irq_alloc(self);
		if (err < 0)
			goto err_exit;

		if (cfg->link_irq_vec) {
			int irqvec = pci_irq_vector(self->pdev,
						    cfg->link_irq_vec);
			err = request_threaded_irq(irqvec, NULL,
						   aq_linkstate_threaded_isr,
						   IRQF_SHARED | IRQF_ONESHOT,
						   self->ndev->name, self);
			if (err < 0)
				goto err_exit;
			self->msix_entry_mask |= (1 << cfg->link_irq_vec);
		}

		err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
						     AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	err = netif_set_real_num_tx_queues(self->ndev,
					   self->aq_vecs * cfg->tcs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev,
					   self->aq_vecs * cfg->tcs);
	if (err < 0)
		goto err_exit;

	for (i = 0; i < cfg->tcs; i++) {
		u16 offset = self->aq_vecs * i;

		netdev_set_tc_queue(self->ndev, i, self->aq_vecs, offset);
	}
	netif_tx_start_all_queues(self->ndev);

err_exit:
	return err;
}

static unsigned int aq_nic_map_xdp(struct aq_nic_s *self,
				   struct xdp_frame *xdpf,
				   struct aq_ring_s *ring)
{
	struct device *dev = aq_nic_get_dev(self);
	struct aq_ring_buff_s *first = NULL;
	unsigned int dx = ring->sw_tail;
	struct aq_ring_buff_s *dx_buff;
	struct skb_shared_info *sinfo;
	unsigned int frag_count = 0U;
	unsigned int nr_frags = 0U;
	unsigned int ret = 0U;
	u16 total_len;

	dx_buff = &ring->buff_ring[dx];
	dx_buff->flags = 0U;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	total_len = xdpf->len;
	dx_buff->len = total_len;
	if (xdp_frame_has_frags(xdpf)) {
		nr_frags = sinfo->nr_frags;
		total_len += sinfo->xdp_frags_size;
	}
	dx_buff->pa = dma_map_single(dev, xdpf->data, dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, dx_buff->pa)))
		goto exit;

	first = dx_buff;
	dx_buff->len_pkt = total_len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	for (; nr_frags--; ++frag_count) {
		skb_frag_t *frag = &sinfo->frags[frag_count];
		unsigned int frag_len = skb_frag_size(frag);
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;

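		/* Fragments larger than AQ_CFG_TX_FRAME_MAX are split across
		 * several descriptors.
		 */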
		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(dev, frag, buff_offset,
						   buff_size, DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(dev, frag_pa)))
				goto mapping_error;

			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= buff_size;
			buff_offset += buff_size;

			++ret;
		}
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = NULL;
	dx_buff->xdpf = xdpf;
	goto exit;

mapping_error:
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!dx_buff->pa)
			continue;
		if (unlikely(dx_buff->is_sop))
			dma_unmap_single(dev, dx_buff->pa, dx_buff->len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dx_buff->pa, dx_buff->len,
				       DMA_TO_DEVICE);
	}

exit:
	return ret;
}

unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
			    struct aq_ring_s *ring)
{
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
	struct device *dev = aq_nic_get_dev(self);
	struct aq_ring_buff_s *first = NULL;
	u8 ipver = ip_hdr(skb)->version;
	struct aq_ring_buff_s *dx_buff;
	bool need_context_tag = false;
	unsigned int frag_count = 0U;
	unsigned int ret = 0U;
	unsigned int dx;
	u8 l4proto = 0;

	if (ipver == 4)
		l4proto = ip_hdr(skb)->protocol;
	else if (ipver == 6)
		l4proto = ipv6_hdr(skb)->nexthdr;

	dx = ring->sw_tail;
	dx_buff = &ring->buff_ring[dx];
	dx_buff->flags = 0U;

	if (unlikely(skb_is_gso(skb))) {
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		if (l4proto == IPPROTO_TCP) {
			dx_buff->is_gso_tcp = 1U;
			dx_buff->len_l4 = tcp_hdrlen(skb);
		} else if (l4proto == IPPROTO_UDP) {
			dx_buff->is_gso_udp = 1U;
			dx_buff->len_l4 = sizeof(struct udphdr);
			/* UDP GSO: the hardware does not replace the packet
			 * length.
			 */
			udp_hdr(skb)->len = htons(dx_buff->mss +
						  dx_buff->len_l4);
		} else {
			WARN_ONCE(true, "Bad GSO mode");
			goto exit;
		}
		dx_buff->len_pkt = skb->len;
		dx_buff->len_l2 = ETH_HLEN;
		dx_buff->len_l3 = skb_network_header_len(skb);
		dx_buff->eop_index = 0xffffU;
		dx_buff->is_ipv6 = (ipver == 6);
		need_context_tag = true;
	}

	if (cfg->is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
		dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
		dx_buff->len_pkt = skb->len;
		dx_buff->is_vlan = 1U;
		need_context_tag = true;
	}

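	/* GSO and VLAN offload parameters travel in a separate context
	 * descriptor, which occupies one extra slot in the ring.
	 */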
	if (need_context_tag) {
		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];
		dx_buff->flags = 0U;
		++ret;
	}

	dx_buff->len = skb_headlen(skb);
	dx_buff->pa = dma_map_single(dev,
				     skb->data,
				     dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, dx_buff->pa))) {
		ret = 0;
		goto exit;
	}

	first = dx_buff;
	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol);
		dx_buff->is_tcp_cso = (l4proto == IPPROTO_TCP);
		dx_buff->is_udp_cso = (l4proto == IPPROTO_UDP);
	}

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);

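		/* As above, split fragments larger than AQ_CFG_TX_FRAME_MAX
		 * across several descriptors.
		 */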
		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(dev,
						   frag,
						   buff_offset,
						   buff_size,
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(dev,
						       frag_pa)))
				goto mapping_error;

			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= buff_size;
			buff_offset += buff_size;

			++ret;
		}
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	dx_buff->xdpf = NULL;
	goto exit;

mapping_error:
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) &&
		    !dx_buff->is_vlan && dx_buff->pa) {
			if (unlikely(dx_buff->is_sop)) {
				dma_unmap_single(dev,
						 dx_buff->pa,
						 dx_buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev,
					       dx_buff->pa,
					       dx_buff->len,
					       DMA_TO_DEVICE);
			}
		}
	}

exit:
	return ret;
}

int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
		     struct xdp_frame *xdpf)
{
	u16 queue_index = AQ_NIC_RING2QMAP(aq_nic, tx_ring->idx);
	struct net_device *ndev = aq_nic_get_ndev(aq_nic);
	struct skb_shared_info *sinfo;
	int cpu = smp_processor_id();
	int err = NETDEV_TX_BUSY;
	struct netdev_queue *nq;
	unsigned int frags = 1;

	if (xdp_frame_has_frags(xdpf)) {
		sinfo = xdp_get_shared_info_from_frame(xdpf);
		frags += sinfo->nr_frags;
	}

	if (frags > AQ_CFG_SKB_FRAGS_MAX)
		return err;

	nq = netdev_get_tx_queue(ndev, tx_ring->idx);
	__netif_tx_lock(nq, cpu);

	aq_ring_update_queue_state(tx_ring);

	/* The status update above may have stopped the queue; check that. */
	if (__netif_subqueue_stopped(aq_nic_get_ndev(aq_nic), queue_index))
		goto out;

	frags = aq_nic_map_xdp(aq_nic, xdpf, tx_ring);
	if (likely(frags))
		err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw, tx_ring,
							 frags);
out:
	__netif_tx_unlock(nq);

	return err;
}

int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
	unsigned int vec = skb->queue_mapping % cfg->vecs;
	unsigned int tc = skb->queue_mapping / cfg->vecs;
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	int err = NETDEV_TX_OK;

	frags = skb_shinfo(skb)->nr_frags + 1;

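	/* TX queues are laid out as cfg->tcs consecutive groups of cfg->vecs
	 * rings, so the (tc, vec) pair selects one ring.
	 */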
	ring = self->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(cfg, tc, vec)];

	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	aq_ring_update_queue_state(ring);

	if (cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	/* The status update above may have stopped the queue; check that. */
	if (__netif_subqueue_stopped(self->ndev,
				     AQ_NIC_RING2QMAP(self, ring->idx))) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	frags = aq_nic_map_skb(self, skb, ring);

	if (likely(frags)) {
		err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
						       ring, frags);
	} else {
		err = NETDEV_TX_BUSY;
	}

err_exit:
	return err;
}

int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}

int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
	int err = 0;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
	if (err < 0)
		goto err_exit;

	self->packet_filter = flags;

err_exit:
	return err;
}

int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
	const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	unsigned int packet_filter = ndev->flags;
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->mc_list.count = 0;
	if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_PROMISC;
	} else {
		netdev_for_each_uc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);
		}
	}

	cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
	if (cfg->is_mc_list_enabled) {
		if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
			packet_filter |= IFF_ALLMULTI;
		} else {
			netdev_for_each_mc_addr(ha, ndev) {
				ether_addr_copy(self->mc_list.ar[i++],
						ha->addr);
			}
		}
	}

	if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
		self->mc_list.count = i;
		err = hw_ops->hw_multicast_list_set(self->aq_hw,
						    self->mc_list.ar,
						    self->mc_list.count);
		if (err < 0)
			return err;
	}

	return aq_nic_set_packet_filter(self, packet_filter);
}

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	self->aq_nic_cfg.mtu = new_mtu;

	return 0;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}

int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
	u32 *regs_buff = p;
	int err = 0;

	if (unlikely(!self->aq_hw_ops->hw_get_regs))
		return -EOPNOTSUPP;

	regs->version = 1;

	err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
					   self->aq_nic_cfg.aq_hw_caps,
					   regs_buff);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	if (unlikely(!self->aq_hw_ops->hw_get_regs))
		return 0;

	return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}

u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	struct aq_vec_s *aq_vec = NULL;
	struct aq_stats_s *stats;
	unsigned int count = 0U;
	unsigned int i = 0U;
	unsigned int tc;

	if (self->aq_fw_ops->update_stats) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->update_stats(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}
	stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	if (!stats)
		goto err_exit;
   1032
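	/* The order of the entries below must stay in sync with the stat
	 * names reported via ethtool (see aq_ethtool.c).
	 */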
	data[i] = stats->uprc + stats->mprc + stats->bprc;
	data[++i] = stats->uprc;
	data[++i] = stats->mprc;
	data[++i] = stats->bprc;
	data[++i] = stats->erpt;
	data[++i] = stats->uptc + stats->mptc + stats->bptc;
	data[++i] = stats->uptc;
	data[++i] = stats->mptc;
	data[++i] = stats->bptc;
	data[++i] = stats->ubrc;
	data[++i] = stats->ubtc;
	data[++i] = stats->mbrc;
	data[++i] = stats->mbtc;
	data[++i] = stats->bbrc;
	data[++i] = stats->bbtc;
	if (stats->brc)
		data[++i] = stats->brc;
	else
		data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
	if (stats->btc)
		data[++i] = stats->btc;
	else
		data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
	data[++i] = stats->dma_pkt_rc;
	data[++i] = stats->dma_pkt_tc;
	data[++i] = stats->dma_oct_rc;
	data[++i] = stats->dma_oct_tc;
	data[++i] = stats->dpc;

	i++;

	data += i;

	for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
		for (i = 0U, aq_vec = self->aq_vec[0];
		     aq_vec && self->aq_vecs > i;
		     ++i, aq_vec = self->aq_vec[i]) {
			data += count;
			count = aq_vec_get_sw_stats(aq_vec, tc, data);
		}
	}

	data += count;

err_exit:
	return data;
}

static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
	struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
	struct net_device *ndev = self->ndev;

	ndev->stats.rx_packets = stats->dma_pkt_rc;
	ndev->stats.rx_bytes = stats->dma_oct_rc;
	ndev->stats.rx_errors = stats->erpr;
	ndev->stats.rx_dropped = stats->dpc;
	ndev->stats.tx_packets = stats->dma_pkt_tc;
	ndev->stats.tx_bytes = stats->dma_oct_tc;
	ndev->stats.tx_errors = stats->erpt;
	ndev->stats.multicast = stats->mprc;
}

void aq_nic_get_link_ksettings(struct aq_nic_s *self,
			       struct ethtool_link_ksettings *cmd)
{
	u32 lp_link_speed_msk;

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		cmd->base.port = PORT_FIBRE;
	else
		cmd->base.port = PORT_TP;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	if (self->link_status.mbps)
		cmd->base.duplex = self->link_status.full_duplex ?
				   DUPLEX_FULL : DUPLEX_HALF;
	cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2G5)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Half);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Half);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10baseT_Half);

	if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Pause);
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Asym_Pause);
	}

	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	if (self->aq_nic_cfg.is_autoneg)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     5000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2G5)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     2500baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Half);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Half);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10baseT_Half);

	if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Pause);

	/* Asym is advertised when either RX or TX is enabled, but not both */
	if (!!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_TX) ^
	    !!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX))
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
	lp_link_speed_msk = self->aq_hw->aq_link_status.lp_link_speed_msk;

	if (lp_link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     10000baseT_Full);

	if (lp_link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     5000baseT_Full);

	if (lp_link_speed_msk & AQ_NIC_RATE_2G5)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     2500baseT_Full);

	if (lp_link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     1000baseT_Full);

	if (lp_link_speed_msk & AQ_NIC_RATE_1G_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     1000baseT_Half);

	if (lp_link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     100baseT_Full);

	if (lp_link_speed_msk & AQ_NIC_RATE_100M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     100baseT_Half);

	if (lp_link_speed_msk & AQ_NIC_RATE_10M)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     10baseT_Full);

	if (lp_link_speed_msk & AQ_NIC_RATE_10M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     10baseT_Half);

	if (self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_RX)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     Pause);
	if (!!(self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_TX) ^
	    !!(self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_RX))
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     Asym_Pause);
}

int aq_nic_set_link_ksettings(struct aq_nic_s *self,
			      const struct ethtool_link_ksettings *cmd)
{
	int fduplex = (cmd->base.duplex == DUPLEX_FULL);
	u32 speed = cmd->base.speed;
	u32 rate = 0U;
	int err = 0;

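	/* Half duplex is only supported at rates up to 1G. */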
	if (!fduplex && speed > SPEED_1000) {
		err = -EINVAL;
		goto err_exit;
	}

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
		self->aq_nic_cfg.is_autoneg = true;
	} else {
		switch (speed) {
		case SPEED_10:
			rate = fduplex ? AQ_NIC_RATE_10M : AQ_NIC_RATE_10M_HALF;
			break;

		case SPEED_100:
			rate = fduplex ? AQ_NIC_RATE_100M
				       : AQ_NIC_RATE_100M_HALF;
			break;

		case SPEED_1000:
			rate = fduplex ? AQ_NIC_RATE_1G : AQ_NIC_RATE_1G_HALF;
			break;

		case SPEED_2500:
			rate = AQ_NIC_RATE_2G5;
			break;

		case SPEED_5000:
			rate = AQ_NIC_RATE_5G;
			break;

		case SPEED_10000:
			rate = AQ_NIC_RATE_10G;
			break;

		default:
			err = -EINVAL;
			goto err_exit;
		}
		if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
			err = -EINVAL;
			goto err_exit;
		}

		self->aq_nic_cfg.is_autoneg = false;
	}

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	self->aq_nic_cfg.link_speed_msk = rate;

err_exit:
	return err;
}

struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}

u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_get_fw_version(self->aq_hw);
}

int aq_nic_set_loopback(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (!self->aq_hw_ops->hw_set_loopback ||
	    !self->aq_fw_ops->set_phyloopback)
		return -EOPNOTSUPP;

	mutex_lock(&self->fwreq_mutex);
	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_DMA_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_DMA_SYS)));

	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PKT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PKT_SYS)));

	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_DMA_NET,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_DMA_NET)));

	self->aq_fw_ops->set_phyloopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PHYINT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PHYINT_SYS)));

	self->aq_fw_ops->set_phyloopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PHYEXT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PHYEXT_SYS)));
	mutex_unlock(&self->fwreq_mutex);

	return 0;
}

int aq_nic_stop(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	netif_tx_disable(self->ndev);
	netif_carrier_off(self->ndev);

	del_timer_sync(&self->service_timer);
	cancel_work_sync(&self->service_task);

	self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);
	else
		aq_pci_func_free_irqs(self);

	aq_ptp_irq_free(self);

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_stop(aq_vec);

	aq_ptp_ring_stop(self);

	return self->aq_hw_ops->hw_stop(self->aq_hw);
}

void aq_nic_set_power(struct aq_nic_s *self)
{
	if (self->power_state != AQ_HW_POWER_STATE_D0 ||
	    self->aq_hw->aq_nic_cfg->wol)
		if (likely(self->aq_fw_ops->set_power)) {
			mutex_lock(&self->fwreq_mutex);
			self->aq_fw_ops->set_power(self->aq_hw,
						   self->power_state,
						   self->ndev->dev_addr);
			mutex_unlock(&self->fwreq_mutex);
		}
}

void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U; i < self->aq_vecs; i++) {
		aq_vec = self->aq_vec[i];
		aq_vec_deinit(aq_vec);
		aq_vec_ring_free(aq_vec);
	}

	aq_ptp_unregister(self);
	aq_ptp_ring_deinit(self);
	aq_ptp_ring_free(self);
	aq_ptp_free(self);

	if (likely(self->aq_fw_ops->deinit) && link_down) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->deinit(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}

err_exit:;
}

void aq_nic_free_vectors(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = ARRAY_SIZE(self->aq_vec); i--;) {
		if (self->aq_vec[i]) {
			aq_vec_free(self->aq_vec[i]);
			self->aq_vec[i] = NULL;
		}
	}

err_exit:;
}

int aq_nic_realloc_vectors(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);

	aq_nic_free_vectors(self);

	for (self->aq_vecs = 0; self->aq_vecs < cfg->vecs; self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] = aq_vec_alloc(self, self->aq_vecs,
							   cfg);
		if (unlikely(!self->aq_vec[self->aq_vecs]))
			return -ENOMEM;
	}

	return 0;
}

void aq_nic_shutdown(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev)
		return;

	rtnl_lock();

	netif_device_detach(self->ndev);

	if (netif_running(self->ndev)) {
		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;
	}
	aq_nic_deinit(self, !self->aq_hw->aq_nic_cfg->wol);
	aq_nic_set_power(self);

err_exit:
	rtnl_unlock();
}

u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type)
{
	u8 location = 0xFF;
	u32 fltr_cnt;
	u32 n_bit;

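	/* Filter slots are handed out from the top of each range downwards. */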
	switch (type) {
	case aq_rx_filter_ethertype:
		location = AQ_RX_LAST_LOC_FETHERT - AQ_RX_FIRST_LOC_FETHERT -
			   self->aq_hw_rx_fltrs.fet_reserved_count;
		self->aq_hw_rx_fltrs.fet_reserved_count++;
		break;
	case aq_rx_filter_l3l4:
		fltr_cnt = AQ_RX_LAST_LOC_FL3L4 - AQ_RX_FIRST_LOC_FL3L4;
		n_bit = fltr_cnt - self->aq_hw_rx_fltrs.fl3l4.reserved_count;

		self->aq_hw_rx_fltrs.fl3l4.active_ipv4 |= BIT(n_bit);
		self->aq_hw_rx_fltrs.fl3l4.reserved_count++;
		location = n_bit;
		break;
	default:
		break;
	}

	return location;
}

void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
			   u32 location)
{
	switch (type) {
	case aq_rx_filter_ethertype:
		self->aq_hw_rx_fltrs.fet_reserved_count--;
		break;
	case aq_rx_filter_l3l4:
		self->aq_hw_rx_fltrs.fl3l4.reserved_count--;
		self->aq_hw_rx_fltrs.fl3l4.active_ipv4 &= ~BIT(location);
		break;
	default:
		break;
	}
}

int aq_nic_set_downshift(struct aq_nic_s *self, int val)
{
	int err = 0;
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (!self->aq_fw_ops->set_downshift)
		return -EOPNOTSUPP;

	if (val > 15) {
		netdev_err(self->ndev, "downshift counter should be <= 15\n");
		return -EINVAL;
	}
	cfg->downshift_counter = val;

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_downshift(self->aq_hw, cfg->downshift_counter);
	mutex_unlock(&self->fwreq_mutex);

	return err;
}

int aq_nic_set_media_detect(struct aq_nic_s *self, int val)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	int err = 0;

	if (!self->aq_fw_ops->set_media_detect)
		return -EOPNOTSUPP;

	if (val > 0 && val != AQ_HW_MEDIA_DETECT_CNT) {
		netdev_err(self->ndev, "EDPD on this device supports only the fixed value of %d\n",
			   AQ_HW_MEDIA_DETECT_CNT);
		return -EINVAL;
	}

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_media_detect(self->aq_hw, !!val);
	mutex_unlock(&self->fwreq_mutex);

	/* The msecs value plays no role - the configuration is always fixed
	 * in the PHY.
	 */
	if (!err)
		cfg->is_media_detect = !!val;

	return err;
}

int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	const unsigned int prev_vecs = cfg->vecs;
	bool ndev_running;
	int err = 0;
	int i;

	/* Nothing to do if the configuration is unchanged, or if this is a
	 * disable request (tcs == 0) and QoS is already disabled.
	 */
	if (tcs == cfg->tcs || (tcs == 0 && !cfg->is_qos))
		return 0;

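	/* The interface has to be brought down while the TC/vector layout
	 * changes; it is reopened below if it was running.
	 */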
	ndev_running = netif_running(self->ndev);
	if (ndev_running)
		dev_close(self->ndev);

	cfg->tcs = tcs;
	if (cfg->tcs == 0)
		cfg->tcs = 1;
	if (prio_tc_map)
		memcpy(cfg->prio_tc_map, prio_tc_map, sizeof(cfg->prio_tc_map));
	else
		for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
			cfg->prio_tc_map[i] = cfg->tcs * i / 8;

	cfg->is_qos = !!tcs;
	cfg->is_ptp = (cfg->tcs <= AQ_HW_PTP_TC);
	if (!cfg->is_ptp)
		netdev_warn(self->ndev, "%s\n",
			    "PTP is auto disabled due to requested TC count.");

	netdev_set_num_tc(self->ndev, cfg->tcs);

	/* Changing the number of TCs might change the number of vectors */
	aq_nic_cfg_update_num_vecs(self);
	if (prev_vecs != cfg->vecs) {
		err = aq_nic_realloc_vectors(self);
		if (err)
			goto err_exit;
	}

	if (ndev_running)
		err = dev_open(self->ndev, NULL);

err_exit:
	return err;
}

int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc,
			     const u32 max_rate)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (tc >= AQ_CFG_TCS_MAX)
		return -EINVAL;

	if (max_rate && max_rate < 10) {
		netdev_warn(self->ndev,
			"Setting %s to the minimum usable value of %dMbps.\n",
			"max rate", 10);
		cfg->tc_max_rate[tc] = 10;
	} else {
		cfg->tc_max_rate[tc] = max_rate;
	}

	return 0;
}

int aq_nic_setup_tc_min_rate(struct aq_nic_s *self, const unsigned int tc,
			     const u32 min_rate)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (tc >= AQ_CFG_TCS_MAX)
		return -EINVAL;

	if (min_rate)
		set_bit(tc, &cfg->tc_min_rate_msk);
	else
		clear_bit(tc, &cfg->tc_min_rate_msk);

	if (min_rate && min_rate < 20) {
		netdev_warn(self->ndev,
			"Setting %s to the minimum usable value of %dMbps.\n",
			"min rate", 20);
		cfg->tc_min_rate[tc] = 20;
	} else {
		cfg->tc_min_rate[tc] = min_rate;
	}

	return 0;
}