cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ixgbe_ethtool.c (105862B)


// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/* ethtool support for ixgbe */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"


#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)

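/*
 * For illustration: IXGBE_STAT(stats.gprc) expands to
 *
 *	IXGBE_STATS,
 *	sizeof(((struct ixgbe_adapter *)0)->stats.gprc),
 *	offsetof(struct ixgbe_adapter, stats.gprc)
 *
 * so each table entry below records where a counter lives and how wide
 * it is, and a stat can be fetched generically from its base pointer:
 *
 *	char *p = (char *)adapter + entry->stat_offset;
 *	u64 val = (entry->sizeof_stat == sizeof(u64)) ?
 *		  *(u64 *)p : *(u32 *)p;
 *
 * ixgbe_get_ethtool_stats() below does exactly this for every entry.
 */
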
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_length_errors", IXGBE_STAT(stats.rlec)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
	{"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
	{"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
	{"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
	{"tx_ipsec", IXGBE_STAT(tx_ipsec)},
	{"rx_ipsec", IXGBE_STAT(rx_ipsec)},
#ifdef IXGBE_FCOE
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};

/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically, so we
 * let IXGBE_NUM_RX_QUEUES evaluate to num_tx_queues. We do this because
 * there is no good way to get the maximum number of Rx queues with
 * CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues

#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
			(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
			/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)

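/*
 * Taken together, these macros fix the layout of the u64 data[] buffer
 * filled by ixgbe_get_ethtool_stats() further down:
 *
 *	data[0 .. IXGBE_GLOBAL_STATS_LEN - 1]	global and netdev stats
 *	2 * num_tx_queues entries		per-Tx-queue packets/bytes
 *	2 * IXGBE_NUM_RX_QUEUES entries		per-Rx-queue packets/bytes
 *	IXGBE_PB_STATS_LEN entries		per-packet-buffer XON/XOFF
 *
 * The string table emitted by ixgbe_get_strings() must match this
 * layout entry for entry.
 */
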
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)", "Eeprom test    (offline)",
	"Interrupt test (offline)", "Loopback test  (offline)",
	"Link test   (on/offline)"
};
#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)

static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBE_PRIV_FLAGS_LEGACY_RX	BIT(0)
	"legacy-rx",
#define IXGBE_PRIV_FLAGS_VF_IPSEC_EN	BIT(1)
	"vf-ipsec",
#define IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF	BIT(2)
	"mdd-disable-vf",
};

#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)

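/*
 * The BIT() positions above line up with the string order, which is how
 * ethtool toggles these flags by name.  Example usage from userspace
 * (the interface name is assumed):
 *
 *	# ethtool --show-priv-flags eth0
 *	# ethtool --set-priv-flags eth0 legacy-rx on
 */
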
#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)

static void ixgbe_set_supported_10gtypes(struct ixgbe_hw *hw,
					 struct ethtool_link_ksettings *cmd)
{
	if (!ixgbe_isbackplane(hw->phy.media_type)) {
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);
		return;
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_X550EM_X_KX4:
		ethtool_link_ksettings_add_link_mode
			(cmd, supported, 10000baseKX4_Full);
		break;
	case IXGBE_DEV_ID_82598_BX:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_X550EM_X_KR:
	case IXGBE_DEV_ID_X550EM_X_XFI:
		ethtool_link_ksettings_add_link_mode
			(cmd, supported, 10000baseKR_Full);
		break;
	default:
		ethtool_link_ksettings_add_link_mode
			(cmd, supported, 10000baseKX4_Full);
		ethtool_link_ksettings_add_link_mode
			(cmd, supported, 10000baseKR_Full);
		break;
	}
}

static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw,
					   struct ethtool_link_ksettings *cmd)
{
	if (!ixgbe_isbackplane(hw->phy.media_type)) {
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);
		return;
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_X550EM_X_KX4:
		ethtool_link_ksettings_add_link_mode
			(cmd, advertising, 10000baseKX4_Full);
		break;
	case IXGBE_DEV_ID_82598_BX:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_X550EM_X_KR:
	case IXGBE_DEV_ID_X550EM_X_XFI:
		ethtool_link_ksettings_add_link_mode
			(cmd, advertising, 10000baseKR_Full);
		break;
	default:
		ethtool_link_ksettings_add_link_mode
			(cmd, advertising, 10000baseKX4_Full);
		ethtool_link_ksettings_add_link_mode
			(cmd, advertising, 10000baseKR_Full);
		break;
	}
}

static int ixgbe_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed supported_link;
	bool autoneg = false;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) {
		ixgbe_set_supported_10gtypes(hw, cmd);
		ixgbe_set_advertising_10gtypes(hw, cmd);
	}
	if (supported_link & IXGBE_LINK_SPEED_5GB_FULL)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) {
		if (ixgbe_isbackplane(hw->phy.media_type)) {
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     1000baseKX_Full);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     1000baseKX_Full);
		} else {
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     1000baseT_Full);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     1000baseT_Full);
		}
	}
	if (supported_link & IXGBE_LINK_SPEED_100_FULL) {
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);
	}
	if (supported_link & IXGBE_LINK_SPEED_10_FULL) {
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10baseT_Full);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10baseT_Full);
	}

	/* set the advertised speeds */
	if (hw->phy.autoneg_advertised) {
		ethtool_link_ksettings_zero_link_mode(cmd, advertising);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     10baseT_Full);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     100baseT_Full);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			ixgbe_set_advertising_10gtypes(hw, cmd);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
			if (ethtool_link_ksettings_test_link_mode
				(cmd, supported, 1000baseKX_Full))
				ethtool_link_ksettings_add_link_mode
					(cmd, advertising, 1000baseKX_Full);
			else
				ethtool_link_ksettings_add_link_mode
					(cmd, advertising, 1000baseT_Full);
		}
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL)
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     5000baseT_Full);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     2500baseT_Full);
	} else {
		if (hw->phy.multispeed_fiber && !autoneg) {
			if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
				ethtool_link_ksettings_add_link_mode
					(cmd, advertising, 10000baseT_Full);
		}
	}

	if (autoneg) {
		ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else
		cmd->base.autoneg = AUTONEG_DISABLE;

	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_x550em_ext_t:
	case ixgbe_phy_fw:
	case ixgbe_phy_cu_unknown:
		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
		cmd->base.port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
		cmd->base.port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_qsfp_passive_unknown:
	case ixgbe_phy_qsfp_active_unknown:
	case ixgbe_phy_qsfp_intel:
	case ixgbe_phy_qsfp_unknown:
		/* SFP+ devices, further checking needed */
		switch (adapter->hw.phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     FIBRE);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     FIBRE);
			cmd->base.port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
		case ixgbe_sfp_type_1g_sx_core0:
		case ixgbe_sfp_type_1g_sx_core1:
		case ixgbe_sfp_type_1g_lx_core0:
		case ixgbe_sfp_type_1g_lx_core1:
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     FIBRE);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     FIBRE);
			cmd->base.port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     FIBRE);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     FIBRE);
			cmd->base.port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     TP);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     TP);
			cmd->base.port = PORT_TP;
			break;
		case ixgbe_sfp_type_unknown:
		default:
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     FIBRE);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     FIBRE);
			cmd->base.port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     FIBRE);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     FIBRE);
		cmd->base.port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     FIBRE);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     FIBRE);
		cmd->base.port = PORT_OTHER;
		break;
	}

	/* Indicate pause support */
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	switch (hw->fc.requested_mode) {
	case ixgbe_fc_full:
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
		break;
	case ixgbe_fc_rx_pause:
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);
		break;
	case ixgbe_fc_tx_pause:
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);
		break;
	default:
		ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause);
		ethtool_link_ksettings_del_link_mode(cmd, advertising,
						     Asym_Pause);
	}

	if (netif_carrier_ok(netdev)) {
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			cmd->base.speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			cmd->base.speed = SPEED_5000;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			cmd->base.speed = SPEED_2500;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			cmd->base.speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			cmd->base.speed = SPEED_100;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			cmd->base.speed = SPEED_10;
			break;
		default:
			break;
		}
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}

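/*
 * ixgbe_get_link_ksettings() above backs the plain "ethtool <dev>" query
 * (interface name assumed):
 *
 *	# ethtool eth0
 *
 * The supported/advertising bitmaps fill the "Supported/Advertised link
 * modes" lists, cmd->base.port selects the "Port:" line, and base.speed/
 * base.duplex are only meaningful while netif_carrier_ok() is true.
 */
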
static int ixgbe_set_link_ksettings(struct net_device *netdev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (!linkmode_subset(cmd->link_modes.advertising,
				     cmd->link_modes.supported))
			return -EINVAL;

		/* only allow one speed at a time if no autoneg */
		if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
			if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
								  10000baseT_Full) &&
			    ethtool_link_ksettings_test_link_mode(cmd, advertising,
								  1000baseT_Full))
				return -EINVAL;
		}

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
							  10000baseT_Full))
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;
		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
							  5000baseT_Full))
			advertised |= IXGBE_LINK_SPEED_5GB_FULL;
		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
							  2500baseT_Full))
			advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
							  1000baseT_Full))
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;

		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
							  100baseT_Full))
			advertised |= IXGBE_LINK_SPEED_100_FULL;

		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
							  10baseT_Full))
			advertised |= IXGBE_LINK_SPEED_10_FULL;

		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			usleep_range(1000, 2000);

		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true);
		}
		clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = cmd->base.speed;

		if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
		    (!ethtool_link_ksettings_test_link_mode(cmd, advertising,
							    10000baseT_Full)) ||
		    (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}

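/*
 * ixgbe_set_link_ksettings() above is reached via "ethtool -s".  For
 * copper and multispeed-fiber ports it reduces the requested advertising
 * mask to IXGBE_LINK_SPEED_* bits; other media only accept forced 10G
 * full duplex.  A sketch of both forms (interface name assumed; 0x1000
 * is the ADVERTISED_10000baseT_Full bit):
 *
 *	# ethtool -s eth0 advertise 0x1000
 *	# ethtool -s eth0 speed 10000 duplex full autoneg off
 */
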
static void ixgbe_get_pause_stats(struct net_device *netdev,
				  struct ethtool_pause_stats *stats)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw_stats *hwstats = &adapter->stats;

	stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
	stats->rx_pause_frames = hwstats->lxonrxc + hwstats->lxoffrxc;
}

static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (ixgbe_device_supports_autoneg_fc(hw) &&
	    !hw->fc.disable_fc_autoneg)
		pause->autoneg = 1;
	else
		pause->autoneg = 0;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does not support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	/* some devices do not support autoneg of link flow control */
	if ((pause->autoneg == AUTONEG_ENABLE) &&
	    !ixgbe_device_supports_autoneg_fc(hw))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if the thing changed then we'll update and use new autoneg */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}

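/*
 * Flow control is queried with "ethtool -a" and configured with
 * "ethtool -A".  For example (interface name assumed):
 *
 *	# ethtool -A eth0 autoneg off rx on tx on
 *
 * maps to fc.requested_mode = ixgbe_fc_full above, and the change takes
 * effect through ixgbe_reinit_locked()/ixgbe_reset().
 */
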
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}

static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN  1145
	return IXGBE_REGS_LEN * sizeof(u32);
}

#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_

static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	for (i = 0; i < 4; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
	for (i = 0; i < 8; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_x550em_a:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
			break;
		default:
			break;
		}
	}
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);

	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);

	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);

	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);

	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));

	/* DCB */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
		break;
	default:
		break;
	}

	for (i = 0; i < 8; i++)
		regs_buff[865 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */

	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
	regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
	regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
	regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
	regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);

	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);

	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);

	/* 82599 X540 specific registers  */
	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);

	/* 82599 X540 specific DCB registers  */
	regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
	for (i = 0; i < 4; i++)
		regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
	regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
					/* same as RTTQCNRM */
	regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
					/* same as RTTQCNRR */

	/* X540 specific DCB registers  */
	regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
	regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);

	/* Security config registers */
	regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
	regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
}

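/*
 * The snapshot built above is exposed through "ethtool -d", e.g.
 * (interface name assumed):
 *
 *	# ethtool -d eth0 raw on > regs.bin
 *
 * The dump is IXGBE_REGS_LEN (1145) u32 words, and regs->version packs
 * the MAC type, revision id and device id so userspace can decode it.
 */
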
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}

static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc_array(eeprom_len, sizeof(u16), GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
					     eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}

static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					  &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}

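/*
 * EEPROM contents are read with "ethtool -e" and written with
 * "ethtool -E".  Writes must supply the magic checked above: the PCI
 * vendor id in the low 16 bits and the device id in the high 16 bits.
 * A sketch, assuming an 82599 SFP+ NIC (8086:10fb) named eth0:
 *
 *	# ethtool -e eth0 offset 0 length 16
 *	# ethtool -E eth0 magic 0x10fb8086 offset 0x10 value 0xab
 */
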
static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));

	strlcpy(drvinfo->fw_version, adapter->eeprom_id,
		sizeof(drvinfo->fw_version));

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
}

static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring,
				struct kernel_ethtool_ringparam *kernel_ring,
				struct netlink_ext_ack *extack)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];

	ring->rx_max_pending = IXGBE_MAX_RXD;
	ring->tx_max_pending = IXGBE_MAX_TXD;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
}

static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_ring;
	int i, j, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_xdp_queues; i++)
			adapter->xdp_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->xdp_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
		  adapter->num_rx_queues);
	temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	ixgbe_down(adapter);

	/*
	 * Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			memcpy(&temp_ring[i], adapter->xdp_ring[j],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbe_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}
		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			ixgbe_free_tx_resources(adapter->xdp_ring[j]);

			memcpy(adapter->xdp_ring[j], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct ixgbe_ring));

			/* Clear copied XDP RX-queue info */
			memset(&temp_ring[i].xdp_rxq, 0,
			       sizeof(temp_ring[i].xdp_rxq));

			temp_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}

		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbe_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}

err_setup:
	ixgbe_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}

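/*
 * Ring sizes are read with "ethtool -g" and changed with "ethtool -G",
 * e.g. (interface name assumed):
 *
 *	# ethtool -G eth0 rx 4096 tx 4096
 *
 * Requested counts are clamped to the IXGBE_MIN/IXGBE_MAX descriptor
 * limits and aligned to the required multiple; while the interface is
 * up, new rings are allocated before the old ones are freed, so a
 * failed allocation leaves the existing rings intact.
 */
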
static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	case ETH_SS_PRIV_FLAGS:
		return IXGBE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}

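/*
 * The per-ring readout above relies on the u64_stats seqcount retry
 * idiom; a minimal sketch of the reader side, assuming a ring with a
 * syncp member:
 *
 *	unsigned int start;
 *	u64 packets, bytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&ring->syncp);
 *		packets = ring->stats.packets;
 *		bytes = ring->stats.bytes;
 *	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 *
 * On 64-bit kernels the begin/retry helpers compile away to plain
 * loads; on 32-bit they guard against torn reads of the 64-bit
 * counters.
 */
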
   1373static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
   1374			      u8 *data)
   1375{
   1376	unsigned int i;
   1377	u8 *p = data;
   1378
   1379	switch (stringset) {
   1380	case ETH_SS_TEST:
   1381		for (i = 0; i < IXGBE_TEST_LEN; i++)
   1382			ethtool_sprintf(&p, ixgbe_gstrings_test[i]);
   1383		break;
   1384	case ETH_SS_STATS:
   1385		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++)
   1386			ethtool_sprintf(&p,
   1387					ixgbe_gstrings_stats[i].stat_string);
   1388		for (i = 0; i < netdev->num_tx_queues; i++) {
   1389			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
   1390			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
   1391		}
   1392		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
   1393			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
   1394			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
   1395		}
   1396		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
   1397			ethtool_sprintf(&p, "tx_pb_%u_pxon", i);
   1398			ethtool_sprintf(&p, "tx_pb_%u_pxoff", i);
   1399		}
   1400		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
   1401			ethtool_sprintf(&p, "rx_pb_%u_pxon", i);
   1402			ethtool_sprintf(&p, "rx_pb_%u_pxoff", i);
   1403		}
   1404		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
   1405		break;
   1406	case ETH_SS_PRIV_FLAGS:
   1407		memcpy(data, ixgbe_priv_flags_strings,
   1408		       IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
   1409	}
   1410}
   1411
   1412static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
   1413{
   1414	struct ixgbe_hw *hw = &adapter->hw;
   1415	bool link_up;
   1416	u32 link_speed = 0;
   1417
   1418	if (ixgbe_removed(hw->hw_addr)) {
   1419		*data = 1;
   1420		return 1;
   1421	}
   1422	*data = 0;
   1423
   1424	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
   1425	if (link_up)
   1426		return *data;
   1427	else
   1428		*data = 1;
   1429	return *data;
   1430}
   1431
   1432/* ethtool register test data */
   1433struct ixgbe_reg_test {
   1434	u16 reg;
   1435	u8  array_len;
   1436	u8  test_type;
   1437	u32 mask;
   1438	u32 write;
   1439};
   1440
   1441/* In the hardware, registers are laid out either singly, in arrays
   1442 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
   1443 * most tests take place on arrays or single registers (handled
   1444 * as a single-element array) and special-case the tables.
   1445 * Table tests are always pattern tests.
   1446 *
   1447 * We also make provision for some required setup steps by specifying
   1448 * registers to be written without any read-back testing.
   1449 */
   1450
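        /* For example, an entry such as
         *	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }
         * exercises RDBAL(0)..RDBAL(3) at 0x40-byte strides: each test pattern
         * is written masked by "write" and is expected to read back as the
         * pattern masked by "write & mask" (see reg_pattern_test() below).
         */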
   1451#define PATTERN_TEST	1
   1452#define SET_READ_TEST	2
   1453#define WRITE_NO_TEST	3
   1454#define TABLE32_TEST	4
   1455#define TABLE64_TEST_LO	5
   1456#define TABLE64_TEST_HI	6
   1457
   1458/* default 82599 register test */
   1459static const struct ixgbe_reg_test reg_test_82599[] = {
   1460	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
   1461	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
   1462	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
   1463	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
   1464	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
   1465	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
   1466	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
   1467	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
   1468	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
   1469	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
   1470	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
   1471	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
   1472	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
   1473	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
   1474	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
   1475	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
   1476	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
   1477	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
   1478	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
   1479	{ .reg = 0 }
   1480};
   1481
   1482/* default 82598 register test */
   1483static const struct ixgbe_reg_test reg_test_82598[] = {
   1484	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
   1485	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
   1486	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
   1487	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
   1488	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
   1489	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
   1490	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
   1491	/* Enable all four RX queues before testing. */
   1492	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
   1493	/* RDH is read-only for 82598, only test RDT. */
   1494	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
   1495	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
   1496	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
   1497	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
   1498	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
   1499	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
   1500	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
   1501	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
   1502	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
   1503	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
   1504	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
   1505	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
   1506	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
   1507	{ .reg = 0 }
   1508};
   1509
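        /* Both test helpers below return true on *failure*, storing the
         * offset of the failing register in *data, and false on success; in
         * either case the register's previous value is restored before
         * returning.
         */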
   1510static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
   1511			     u32 mask, u32 write)
   1512{
   1513	u32 pat, val, before;
   1514	static const u32 test_pattern[] = {
   1515		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
   1516
   1517	if (ixgbe_removed(adapter->hw.hw_addr)) {
   1518		*data = 1;
   1519		return true;
   1520	}
   1521	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
   1522		before = ixgbe_read_reg(&adapter->hw, reg);
   1523		ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
   1524		val = ixgbe_read_reg(&adapter->hw, reg);
   1525		if (val != (test_pattern[pat] & write & mask)) {
   1526			e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
   1527			      reg, val, (test_pattern[pat] & write & mask));
   1528			*data = reg;
   1529			ixgbe_write_reg(&adapter->hw, reg, before);
   1530			return true;
   1531		}
   1532		ixgbe_write_reg(&adapter->hw, reg, before);
   1533	}
   1534	return false;
   1535}
   1536
   1537static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
   1538			      u32 mask, u32 write)
   1539{
   1540	u32 val, before;
   1541
   1542	if (ixgbe_removed(adapter->hw.hw_addr)) {
   1543		*data = 1;
   1544		return true;
   1545	}
   1546	before = ixgbe_read_reg(&adapter->hw, reg);
   1547	ixgbe_write_reg(&adapter->hw, reg, write & mask);
   1548	val = ixgbe_read_reg(&adapter->hw, reg);
   1549	if ((write & mask) != (val & mask)) {
   1550		e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
   1551		      reg, (val & mask), (write & mask));
   1552		*data = reg;
   1553		ixgbe_write_reg(&adapter->hw, reg, before);
   1554		return true;
   1555	}
   1556	ixgbe_write_reg(&adapter->hw, reg, before);
   1557	return false;
   1558}
   1559
   1560static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
   1561{
   1562	const struct ixgbe_reg_test *test;
   1563	u32 value, before, after;
   1564	u32 i, toggle;
   1565
   1566	if (ixgbe_removed(adapter->hw.hw_addr)) {
   1567		e_err(drv, "Adapter removed - register test blocked\n");
   1568		*data = 1;
   1569		return 1;
   1570	}
   1571	switch (adapter->hw.mac.type) {
   1572	case ixgbe_mac_82598EB:
   1573		toggle = 0x7FFFF3FF;
   1574		test = reg_test_82598;
   1575		break;
   1576	case ixgbe_mac_82599EB:
   1577	case ixgbe_mac_X540:
   1578	case ixgbe_mac_X550:
   1579	case ixgbe_mac_X550EM_x:
   1580	case ixgbe_mac_x550em_a:
   1581		toggle = 0x7FFFF30F;
   1582		test = reg_test_82599;
   1583		break;
   1584	default:
   1585		*data = 1;
   1586		return 1;
   1587	}
   1588
   1589	/*
   1590	 * Because the status register is such a special case,
   1591	 * we handle it separately from the rest of the register
   1592	 * tests.  Some bits are read-only, some toggle, and some
   1593	 * are writeable on newer MACs.
   1594	 */
   1595	before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
   1596	value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
   1597	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
   1598	after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
   1599	if (value != after) {
   1600		e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
   1601		      after, value);
   1602		*data = 1;
   1603		return 1;
   1604	}
   1605	/* restore previous status */
   1606	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);
   1607
   1608	/*
   1609	 * Perform the remainder of the register test, looping through
   1610	 * the test table until we either fail or reach the null entry.
   1611	 */
   1612	while (test->reg) {
   1613		for (i = 0; i < test->array_len; i++) {
   1614			bool b = false;
   1615
   1616			switch (test->test_type) {
   1617			case PATTERN_TEST:
   1618				b = reg_pattern_test(adapter, data,
   1619						     test->reg + (i * 0x40),
   1620						     test->mask,
   1621						     test->write);
   1622				break;
   1623			case SET_READ_TEST:
   1624				b = reg_set_and_check(adapter, data,
   1625						      test->reg + (i * 0x40),
   1626						      test->mask,
   1627						      test->write);
   1628				break;
   1629			case WRITE_NO_TEST:
   1630				ixgbe_write_reg(&adapter->hw,
   1631						test->reg + (i * 0x40),
   1632						test->write);
   1633				break;
   1634			case TABLE32_TEST:
   1635				b = reg_pattern_test(adapter, data,
   1636						     test->reg + (i * 4),
   1637						     test->mask,
   1638						     test->write);
   1639				break;
   1640			case TABLE64_TEST_LO:
   1641				b = reg_pattern_test(adapter, data,
   1642						     test->reg + (i * 8),
   1643						     test->mask,
   1644						     test->write);
   1645				break;
   1646			case TABLE64_TEST_HI:
   1647				b = reg_pattern_test(adapter, data,
   1648						     (test->reg + 4) + (i * 8),
   1649						     test->mask,
   1650						     test->write);
   1651				break;
   1652			}
   1653			if (b)
   1654				return 1;
   1655		}
   1656		test++;
   1657	}
   1658
   1659	*data = 0;
   1660	return 0;
   1661}
   1662
   1663static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
   1664{
   1665	struct ixgbe_hw *hw = &adapter->hw;
   1666	if (hw->eeprom.ops.validate_checksum(hw, NULL))
   1667		*data = 1;
   1668	else
   1669		*data = 0;
   1670	return *data;
   1671}
   1672
   1673static irqreturn_t ixgbe_test_intr(int irq, void *data)
   1674{
   1675	struct net_device *netdev = (struct net_device *) data;
   1676	struct ixgbe_adapter *adapter = netdev_priv(netdev);
   1677
   1678	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
   1679
   1680	return IRQ_HANDLED;
   1681}
   1682
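        /* ixgbe_intr_test() encodes its result in *data: 1 means request_irq()
         * failed, 3 means a masked interrupt cause still fired, 4 means an
         * unmasked cause failed to fire, and 5 means forcing the other causes
         * leaked past the mask; 0 means every tested cause bit behaved.  MSI-X
         * setups skip the test entirely.
         */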
   1683static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
   1684{
   1685	struct net_device *netdev = adapter->netdev;
   1686	u32 mask, i = 0, shared_int = true;
   1687	u32 irq = adapter->pdev->irq;
   1688
   1689	*data = 0;
   1690
   1691	/* Hook up test interrupt handler just for this test */
   1692	if (adapter->msix_entries) {
   1693		/* NOTE: we don't test MSI-X interrupts here, yet */
   1694		return 0;
   1695	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
   1696		shared_int = false;
   1697		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
   1698				netdev)) {
   1699			*data = 1;
   1700			return -1;
   1701		}
   1702	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
   1703				netdev->name, netdev)) {
   1704		shared_int = false;
   1705	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
   1706			       netdev->name, netdev)) {
   1707		*data = 1;
   1708		return -1;
   1709	}
   1710	e_info(hw, "testing %s interrupt\n", shared_int ?
   1711	       "shared" : "unshared");
   1712
   1713	/* Disable all the interrupts */
   1714	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
   1715	IXGBE_WRITE_FLUSH(&adapter->hw);
   1716	usleep_range(10000, 20000);
   1717
   1718	/* Test each interrupt */
   1719	for (; i < 10; i++) {
   1720		/* Interrupt to test */
   1721		mask = BIT(i);
   1722
   1723		if (!shared_int) {
   1724			/*
   1725			 * Disable the interrupts to be reported in
   1726			 * the cause register and then force the same
   1727			 * interrupt and see if one gets posted.  If
   1728			 * an interrupt was posted to the bus, the
   1729			 * test failed.
   1730			 */
   1731			adapter->test_icr = 0;
   1732			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
   1733					~mask & 0x00007FFF);
   1734			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
   1735					~mask & 0x00007FFF);
   1736			IXGBE_WRITE_FLUSH(&adapter->hw);
   1737			usleep_range(10000, 20000);
   1738
   1739			if (adapter->test_icr & mask) {
   1740				*data = 3;
   1741				break;
   1742			}
   1743		}
   1744
   1745		/*
   1746		 * Enable the interrupt to be reported in the cause
   1747		 * register and then force the same interrupt and see
   1748		 * if one gets posted.  If an interrupt was not posted
   1749		 * to the bus, the test failed.
   1750		 */
   1751		adapter->test_icr = 0;
   1752		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
   1753		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   1754		IXGBE_WRITE_FLUSH(&adapter->hw);
   1755		usleep_range(10000, 20000);
   1756
   1757		if (!(adapter->test_icr & mask)) {
   1758			*data = 4;
   1759			break;
   1760		}
   1761
   1762		if (!shared_int) {
   1763			/*
   1764			 * Disable the other interrupts to be reported in
   1765			 * the cause register and then force the other
   1766			 * interrupts and see if any get posted.  If
   1767			 * an interrupt was posted to the bus, the
   1768			 * test failed.
   1769			 */
   1770			adapter->test_icr = 0;
   1771			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
   1772					~mask & 0x00007FFF);
   1773			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
   1774					~mask & 0x00007FFF);
   1775			IXGBE_WRITE_FLUSH(&adapter->hw);
   1776			usleep_range(10000, 20000);
   1777
   1778			if (adapter->test_icr) {
   1779				*data = 5;
   1780				break;
   1781			}
   1782		}
   1783	}
   1784
   1785	/* Disable all the interrupts */
   1786	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
   1787	IXGBE_WRITE_FLUSH(&adapter->hw);
   1788	usleep_range(10000, 20000);
   1789
   1790	/* Unhook test interrupt handler */
   1791	free_irq(irq, netdev);
   1792
   1793	return *data;
   1794}
   1795
   1796static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
   1797{
    1798	/* Shut down the DMA engines now so they can be reinitialized later.
    1799	 * Since the test rings and the normally used rings should overlap on
    1800	 * queue 0, we can just use the standard disable Rx/Tx calls and they
    1801	 * will take care of disabling the test rings for us.
   1802	 */
   1803
   1804	/* first Rx */
   1805	ixgbe_disable_rx(adapter);
   1806
   1807	/* now Tx */
   1808	ixgbe_disable_tx(adapter);
   1809
   1810	ixgbe_reset(adapter);
   1811
   1812	ixgbe_free_tx_resources(&adapter->test_tx_ring);
   1813	ixgbe_free_rx_resources(&adapter->test_rx_ring);
   1814}
   1815
   1816static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
   1817{
   1818	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
   1819	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
   1820	struct ixgbe_hw *hw = &adapter->hw;
   1821	u32 rctl, reg_data;
   1822	int ret_val;
   1823	int err;
   1824
   1825	/* Setup Tx descriptor ring and Tx buffers */
   1826	tx_ring->count = IXGBE_DEFAULT_TXD;
   1827	tx_ring->queue_index = 0;
   1828	tx_ring->dev = &adapter->pdev->dev;
   1829	tx_ring->netdev = adapter->netdev;
   1830	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
   1831
   1832	err = ixgbe_setup_tx_resources(tx_ring);
   1833	if (err)
   1834		return 1;
   1835
   1836	switch (adapter->hw.mac.type) {
   1837	case ixgbe_mac_82599EB:
   1838	case ixgbe_mac_X540:
   1839	case ixgbe_mac_X550:
   1840	case ixgbe_mac_X550EM_x:
   1841	case ixgbe_mac_x550em_a:
   1842		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
   1843		reg_data |= IXGBE_DMATXCTL_TE;
   1844		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
   1845		break;
   1846	default:
   1847		break;
   1848	}
   1849
   1850	ixgbe_configure_tx_ring(adapter, tx_ring);
   1851
   1852	/* Setup Rx Descriptor ring and Rx buffers */
   1853	rx_ring->count = IXGBE_DEFAULT_RXD;
   1854	rx_ring->queue_index = 0;
   1855	rx_ring->dev = &adapter->pdev->dev;
   1856	rx_ring->netdev = adapter->netdev;
   1857	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
   1858
   1859	err = ixgbe_setup_rx_resources(adapter, rx_ring);
   1860	if (err) {
   1861		ret_val = 4;
   1862		goto err_nomem;
   1863	}
   1864
   1865	hw->mac.ops.disable_rx(hw);
   1866
   1867	ixgbe_configure_rx_ring(adapter, rx_ring);
   1868
   1869	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
   1870	rctl |= IXGBE_RXCTRL_DMBYPS;
   1871	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
   1872
   1873	hw->mac.ops.enable_rx(hw);
   1874
   1875	return 0;
   1876
   1877err_nomem:
   1878	ixgbe_free_desc_rings(adapter);
   1879	return ret_val;
   1880}
   1881
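        /* MAC loopback needs more than HLREG0.LPBK: FCTRL is opened up
         * (BAM/SBP/MPE) so looped-back frames are accepted regardless of
         * address filtering, and link is forced up (MACC.FLU on X540/X550
         * parts, AUTOC.FLU otherwise) so transmission can proceed without a
         * link partner.
         */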
   1882static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
   1883{
   1884	struct ixgbe_hw *hw = &adapter->hw;
   1885	u32 reg_data;
   1886
   1887
   1888	/* Setup MAC loopback */
   1889	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
   1890	reg_data |= IXGBE_HLREG0_LPBK;
   1891	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
   1892
   1893	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
   1894	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
   1895	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
   1896
    1897	/* X540 and X550 need to set the MACC.FLU bit to force link up */
   1898	switch (adapter->hw.mac.type) {
   1899	case ixgbe_mac_X540:
   1900	case ixgbe_mac_X550:
   1901	case ixgbe_mac_X550EM_x:
   1902	case ixgbe_mac_x550em_a:
   1903		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
   1904		reg_data |= IXGBE_MACC_FLU;
   1905		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
   1906		break;
   1907	default:
   1908		if (hw->mac.orig_autoc) {
   1909			reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
   1910			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
   1911		} else {
   1912			return 10;
   1913		}
   1914	}
   1915	IXGBE_WRITE_FLUSH(hw);
   1916	usleep_range(10000, 20000);
   1917
   1918	/* Disable Atlas Tx lanes; re-enabled in reset path */
   1919	if (hw->mac.type == ixgbe_mac_82598EB) {
   1920		u8 atlas;
   1921
   1922		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
   1923		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
   1924		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
   1925
   1926		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
   1927		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
   1928		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
   1929
   1930		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
   1931		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
   1932		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
   1933
   1934		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
   1935		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
   1936		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
   1937	}
   1938
   1939	return 0;
   1940}
   1941
   1942static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
   1943{
   1944	u32 reg_data;
   1945
   1946	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
   1947	reg_data &= ~IXGBE_HLREG0_LPBK;
   1948	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
   1949}
   1950
   1951static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
   1952				      unsigned int frame_size)
   1953{
   1954	memset(skb->data, 0xFF, frame_size);
   1955	frame_size >>= 1;
   1956	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
   1957	skb->data[frame_size + 10] = 0xBE;
   1958	skb->data[frame_size + 12] = 0xAF;
   1959}
   1960
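        /* The receive side only spot-checks the frame: byte 3 must still be
         * 0xFF from the first memset() and the 0xBE/0xAF markers planted at
         * frame_size/2 + 10 and + 12 must survive the loopback; a full compare
         * is not needed for this sanity check.
         */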
   1961static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
   1962				     unsigned int frame_size)
   1963{
   1964	unsigned char *data;
   1965	bool match = true;
   1966
   1967	frame_size >>= 1;
   1968
   1969	data = kmap(rx_buffer->page) + rx_buffer->page_offset;
   1970
   1971	if (data[3] != 0xFF ||
   1972	    data[frame_size + 10] != 0xBE ||
   1973	    data[frame_size + 12] != 0xAF)
   1974		match = false;
   1975
   1976	kunmap(rx_buffer->page);
   1977
   1978	return match;
   1979}
   1980
   1981static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
   1982				  struct ixgbe_ring *tx_ring,
   1983				  unsigned int size)
   1984{
   1985	union ixgbe_adv_rx_desc *rx_desc;
   1986	u16 rx_ntc, tx_ntc, count = 0;
   1987
   1988	/* initialize next to clean and descriptor values */
   1989	rx_ntc = rx_ring->next_to_clean;
   1990	tx_ntc = tx_ring->next_to_clean;
   1991	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
   1992
   1993	while (tx_ntc != tx_ring->next_to_use) {
   1994		union ixgbe_adv_tx_desc *tx_desc;
   1995		struct ixgbe_tx_buffer *tx_buffer;
   1996
   1997		tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);
   1998
    1999		/* if DD is not set, the transmit has not completed */
   2000		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
   2001			return count;
   2002
   2003		/* unmap buffer on Tx side */
   2004		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
   2005
   2006		/* Free all the Tx ring sk_buffs */
   2007		dev_kfree_skb_any(tx_buffer->skb);
   2008
   2009		/* unmap skb header data */
   2010		dma_unmap_single(tx_ring->dev,
   2011				 dma_unmap_addr(tx_buffer, dma),
   2012				 dma_unmap_len(tx_buffer, len),
   2013				 DMA_TO_DEVICE);
   2014		dma_unmap_len_set(tx_buffer, len, 0);
   2015
   2016		/* increment Tx next to clean counter */
   2017		tx_ntc++;
   2018		if (tx_ntc == tx_ring->count)
   2019			tx_ntc = 0;
   2020	}
   2021
   2022	while (rx_desc->wb.upper.length) {
   2023		struct ixgbe_rx_buffer *rx_buffer;
   2024
   2025		/* check Rx buffer */
   2026		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
   2027
   2028		/* sync Rx buffer for CPU read */
   2029		dma_sync_single_for_cpu(rx_ring->dev,
   2030					rx_buffer->dma,
   2031					ixgbe_rx_bufsz(rx_ring),
   2032					DMA_FROM_DEVICE);
   2033
    2034		/* verify contents of the Rx buffer */
   2035		if (ixgbe_check_lbtest_frame(rx_buffer, size))
   2036			count++;
   2037		else
   2038			break;
   2039
   2040		/* sync Rx buffer for device write */
   2041		dma_sync_single_for_device(rx_ring->dev,
   2042					   rx_buffer->dma,
   2043					   ixgbe_rx_bufsz(rx_ring),
   2044					   DMA_FROM_DEVICE);
   2045
   2046		/* increment Rx next to clean counter */
   2047		rx_ntc++;
   2048		if (rx_ntc == rx_ring->count)
   2049			rx_ntc = 0;
   2050
   2051		/* fetch next descriptor */
   2052		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
   2053	}
   2054
   2055	netdev_tx_reset_queue(txring_txq(tx_ring));
   2056
   2057	/* re-map buffers to ring, store next to clean values */
   2058	ixgbe_alloc_rx_buffers(rx_ring, count);
   2059	rx_ring->next_to_clean = rx_ntc;
   2060	tx_ring->next_to_clean = tx_ntc;
   2061
   2062	return count;
   2063}
   2064
   2065static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
   2066{
   2067	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
   2068	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
   2069	int i, j, lc, good_cnt, ret_val = 0;
   2070	unsigned int size = 1024;
   2071	netdev_tx_t tx_ret_val;
   2072	struct sk_buff *skb;
   2073	u32 flags_orig = adapter->flags;
   2074
   2075	/* DCB can modify the frames on Tx */
   2076	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
   2077
   2078	/* allocate test skb */
   2079	skb = alloc_skb(size, GFP_KERNEL);
   2080	if (!skb)
   2081		return 11;
   2082
   2083	/* place data into test skb */
   2084	ixgbe_create_lbtest_frame(skb, size);
   2085	skb_put(skb, size);
   2086
   2087	/*
    2088	 * Calculate the loop count based on the largest descriptor ring.
    2089	 * The idea is to wrap the largest ring a number of times using 64
    2090	 * send/receive pairs during each loop iteration.
   2091	 */
   2092
   2093	if (rx_ring->count <= tx_ring->count)
   2094		lc = ((tx_ring->count / 64) * 2) + 1;
   2095	else
   2096		lc = ((rx_ring->count / 64) * 2) + 1;
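        	/* With the default 512-entry test rings set up by
        	 * ixgbe_setup_desc_rings(), lc = (512 / 64) * 2 + 1 = 17,
        	 * each loop iteration sending and then reclaiming 64 frames.
        	 */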
   2097
   2098	for (j = 0; j <= lc; j++) {
   2099		/* reset count of good packets */
   2100		good_cnt = 0;
   2101
    2102		/* place 64 packets on the transmit queue */
   2103		for (i = 0; i < 64; i++) {
   2104			skb_get(skb);
   2105			tx_ret_val = ixgbe_xmit_frame_ring(skb,
   2106							   adapter,
   2107							   tx_ring);
   2108			if (tx_ret_val == NETDEV_TX_OK)
   2109				good_cnt++;
   2110		}
   2111
   2112		if (good_cnt != 64) {
   2113			ret_val = 12;
   2114			break;
   2115		}
   2116
   2117		/* allow 200 milliseconds for packets to go from Tx to Rx */
   2118		msleep(200);
   2119
   2120		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
   2121		if (good_cnt != 64) {
   2122			ret_val = 13;
   2123			break;
   2124		}
   2125	}
   2126
   2127	/* free the original skb */
   2128	kfree_skb(skb);
   2129	adapter->flags = flags_orig;
   2130
   2131	return ret_val;
   2132}
   2133
   2134static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
   2135{
   2136	*data = ixgbe_setup_desc_rings(adapter);
   2137	if (*data)
   2138		goto out;
   2139	*data = ixgbe_setup_loopback_test(adapter);
   2140	if (*data)
   2141		goto err_loopback;
   2142	*data = ixgbe_run_loopback_test(adapter);
   2143	ixgbe_loopback_cleanup(adapter);
   2144
   2145err_loopback:
   2146	ixgbe_free_desc_rings(adapter);
   2147out:
   2148	return *data;
   2149}
   2150
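        /* Self-test result layout, matching ixgbe_gstrings_test: data[0] is
         * the register test, data[1] the EEPROM test, data[2] the interrupt
         * test, data[3] the loopback test and data[4] the link test; non-zero
         * means the corresponding test failed.
         */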
   2151static void ixgbe_diag_test(struct net_device *netdev,
   2152			    struct ethtool_test *eth_test, u64 *data)
   2153{
   2154	struct ixgbe_adapter *adapter = netdev_priv(netdev);
   2155	bool if_running = netif_running(netdev);
   2156
   2157	if (ixgbe_removed(adapter->hw.hw_addr)) {
   2158		e_err(hw, "Adapter removed - test blocked\n");
   2159		data[0] = 1;
   2160		data[1] = 1;
   2161		data[2] = 1;
   2162		data[3] = 1;
   2163		data[4] = 1;
   2164		eth_test->flags |= ETH_TEST_FL_FAILED;
   2165		return;
   2166	}
   2167	set_bit(__IXGBE_TESTING, &adapter->state);
   2168	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
   2169		struct ixgbe_hw *hw = &adapter->hw;
   2170
   2171		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
   2172			int i;
   2173			for (i = 0; i < adapter->num_vfs; i++) {
   2174				if (adapter->vfinfo[i].clear_to_send) {
   2175					netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
   2176					data[0] = 1;
   2177					data[1] = 1;
   2178					data[2] = 1;
   2179					data[3] = 1;
   2180					data[4] = 1;
   2181					eth_test->flags |= ETH_TEST_FL_FAILED;
   2182					clear_bit(__IXGBE_TESTING,
   2183						  &adapter->state);
   2184					return;
   2185				}
   2186			}
   2187		}
   2188
   2189		/* Offline tests */
   2190		e_info(hw, "offline testing starting\n");
   2191
    2192		/* The link test is performed before the hardware reset so autoneg
    2193		 * doesn't interfere with the test result.
   2194		 */
   2195		if (ixgbe_link_test(adapter, &data[4]))
   2196			eth_test->flags |= ETH_TEST_FL_FAILED;
   2197
   2198		if (if_running)
   2199			/* indicate we're in test mode */
   2200			ixgbe_close(netdev);
   2201		else
   2202			ixgbe_reset(adapter);
   2203
   2204		e_info(hw, "register testing starting\n");
   2205		if (ixgbe_reg_test(adapter, &data[0]))
   2206			eth_test->flags |= ETH_TEST_FL_FAILED;
   2207
   2208		ixgbe_reset(adapter);
   2209		e_info(hw, "eeprom testing starting\n");
   2210		if (ixgbe_eeprom_test(adapter, &data[1]))
   2211			eth_test->flags |= ETH_TEST_FL_FAILED;
   2212
   2213		ixgbe_reset(adapter);
   2214		e_info(hw, "interrupt testing starting\n");
   2215		if (ixgbe_intr_test(adapter, &data[2]))
   2216			eth_test->flags |= ETH_TEST_FL_FAILED;
   2217
   2218		/* If SRIOV or VMDq is enabled then skip MAC
   2219		 * loopback diagnostic. */
   2220		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
   2221				      IXGBE_FLAG_VMDQ_ENABLED)) {
   2222			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
   2223			data[3] = 0;
   2224			goto skip_loopback;
   2225		}
   2226
   2227		ixgbe_reset(adapter);
   2228		e_info(hw, "loopback testing starting\n");
   2229		if (ixgbe_loopback_test(adapter, &data[3]))
   2230			eth_test->flags |= ETH_TEST_FL_FAILED;
   2231
   2232skip_loopback:
   2233		ixgbe_reset(adapter);
   2234
   2235		/* clear testing bit and return adapter to previous state */
   2236		clear_bit(__IXGBE_TESTING, &adapter->state);
   2237		if (if_running)
   2238			ixgbe_open(netdev);
   2239		else if (hw->mac.ops.disable_tx_laser)
   2240			hw->mac.ops.disable_tx_laser(hw);
   2241	} else {
   2242		e_info(hw, "online testing starting\n");
   2243
   2244		/* Online tests */
   2245		if (ixgbe_link_test(adapter, &data[4]))
   2246			eth_test->flags |= ETH_TEST_FL_FAILED;
   2247
   2248		/* Offline tests aren't run; pass by default */
   2249		data[0] = 0;
   2250		data[1] = 0;
   2251		data[2] = 0;
   2252		data[3] = 0;
   2253
   2254		clear_bit(__IXGBE_TESTING, &adapter->state);
   2255	}
   2256}
   2257
   2258static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
   2259			       struct ethtool_wolinfo *wol)
   2260{
   2261	struct ixgbe_hw *hw = &adapter->hw;
   2262	int retval = 0;
   2263
   2264	/* WOL not supported for all devices */
   2265	if (!ixgbe_wol_supported(adapter, hw->device_id,
   2266				 hw->subsystem_device_id)) {
   2267		retval = 1;
   2268		wol->supported = 0;
   2269	}
   2270
   2271	return retval;
   2272}
   2273
   2274static void ixgbe_get_wol(struct net_device *netdev,
   2275			  struct ethtool_wolinfo *wol)
   2276{
   2277	struct ixgbe_adapter *adapter = netdev_priv(netdev);
   2278
   2279	wol->supported = WAKE_UCAST | WAKE_MCAST |
   2280			 WAKE_BCAST | WAKE_MAGIC;
   2281	wol->wolopts = 0;
   2282
   2283	if (ixgbe_wol_exclusion(adapter, wol) ||
   2284	    !device_can_wakeup(&adapter->pdev->dev))
   2285		return;
   2286
   2287	if (adapter->wol & IXGBE_WUFC_EX)
   2288		wol->wolopts |= WAKE_UCAST;
   2289	if (adapter->wol & IXGBE_WUFC_MC)
   2290		wol->wolopts |= WAKE_MCAST;
   2291	if (adapter->wol & IXGBE_WUFC_BC)
   2292		wol->wolopts |= WAKE_BCAST;
   2293	if (adapter->wol & IXGBE_WUFC_MAG)
   2294		wol->wolopts |= WAKE_MAGIC;
   2295}
   2296
   2297static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
   2298{
   2299	struct ixgbe_adapter *adapter = netdev_priv(netdev);
   2300
   2301	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
   2302			    WAKE_FILTER))
   2303		return -EOPNOTSUPP;
   2304
   2305	if (ixgbe_wol_exclusion(adapter, wol))
   2306		return wol->wolopts ? -EOPNOTSUPP : 0;
   2307
   2308	adapter->wol = 0;
   2309
   2310	if (wol->wolopts & WAKE_UCAST)
   2311		adapter->wol |= IXGBE_WUFC_EX;
   2312	if (wol->wolopts & WAKE_MCAST)
   2313		adapter->wol |= IXGBE_WUFC_MC;
   2314	if (wol->wolopts & WAKE_BCAST)
   2315		adapter->wol |= IXGBE_WUFC_BC;
   2316	if (wol->wolopts & WAKE_MAGIC)
   2317		adapter->wol |= IXGBE_WUFC_MAG;
   2318
   2319	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
   2320
   2321	return 0;
   2322}
   2323
   2324static int ixgbe_nway_reset(struct net_device *netdev)
   2325{
   2326	struct ixgbe_adapter *adapter = netdev_priv(netdev);
   2327
   2328	if (netif_running(netdev))
   2329		ixgbe_reinit_locked(adapter);
   2330
   2331	return 0;
   2332}
   2333
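        /* Returning 2 from the ETHTOOL_ID_ACTIVE case asks the ethtool core
         * to drive the blinking itself, calling back with ETHTOOL_ID_ON/OFF
         * twice per second until the identify operation ends; the original
         * LEDCTL value is saved on ETHTOOL_ID_ACTIVE and restored on
         * ETHTOOL_ID_INACTIVE.
         */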
   2334static int ixgbe_set_phys_id(struct net_device *netdev,
   2335			     enum ethtool_phys_id_state state)
   2336{
   2337	struct ixgbe_adapter *adapter = netdev_priv(netdev);
   2338	struct ixgbe_hw *hw = &adapter->hw;
   2339
   2340	if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
   2341		return -EOPNOTSUPP;
   2342
   2343	switch (state) {
   2344	case ETHTOOL_ID_ACTIVE:
   2345		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
   2346		return 2;
   2347
   2348	case ETHTOOL_ID_ON:
   2349		hw->mac.ops.led_on(hw, hw->mac.led_link_act);
   2350		break;
   2351
   2352	case ETHTOOL_ID_OFF:
   2353		hw->mac.ops.led_off(hw, hw->mac.led_link_act);
   2354		break;
   2355
   2356	case ETHTOOL_ID_INACTIVE:
   2357		/* Restore LED settings */
   2358		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
   2359		break;
   2360	}
   2361
   2362	return 0;
   2363}
   2364
   2365static int ixgbe_get_coalesce(struct net_device *netdev,
   2366			      struct ethtool_coalesce *ec,
   2367			      struct kernel_ethtool_coalesce *kernel_coal,
   2368			      struct netlink_ext_ack *extack)
   2369{
   2370	struct ixgbe_adapter *adapter = netdev_priv(netdev);
   2371
   2372	/* only valid if in constant ITR mode */
   2373	if (adapter->rx_itr_setting <= 1)
   2374		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
   2375	else
   2376		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
   2377
   2378	/* if in mixed tx/rx queues per vector mode, report only rx settings */
   2379	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
   2380		return 0;
   2381
   2382	/* only valid if in constant ITR mode */
   2383	if (adapter->tx_itr_setting <= 1)
   2384		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
   2385	else
   2386		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
   2387
   2388	return 0;
   2389}
   2390
   2391/*
   2392 * this function must be called before setting the new value of
   2393 * rx_itr_setting
   2394 */
   2395static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
   2396{
   2397	struct net_device *netdev = adapter->netdev;
   2398
   2399	/* nothing to do if LRO or RSC are not enabled */
   2400	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
   2401	    !(netdev->features & NETIF_F_LRO))
   2402		return false;
   2403
   2404	/* check the feature flag value and enable RSC if necessary */
   2405	if (adapter->rx_itr_setting == 1 ||
   2406	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
   2407		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
   2408			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
   2409			e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
   2410			return true;
   2411		}
   2412	/* if interrupt rate is too high then disable RSC */
   2413	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
   2414		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
   2415		e_info(probe, "rx-usecs set too low, disabling RSC\n");
   2416		return true;
   2417	}
   2418	return false;
   2419}
   2420
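        /* ITR bookkeeping: an *_itr_setting of 0 or 1 is kept verbatim
         * (0 = unthrottled, 1 = dynamic ITR), while larger rx/tx-usecs values
         * are stored shifted left by 2 to match the EITR register layout used
         * by ixgbe_write_eitr(); ixgbe_get_coalesce() undoes the shift when
         * reporting.
         */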
   2421static int ixgbe_set_coalesce(struct net_device *netdev,
   2422			      struct ethtool_coalesce *ec,
   2423			      struct kernel_ethtool_coalesce *kernel_coal,
   2424			      struct netlink_ext_ack *extack)
   2425{
   2426	struct ixgbe_adapter *adapter = netdev_priv(netdev);
   2427	struct ixgbe_q_vector *q_vector;
   2428	int i;
   2429	u16 tx_itr_param, rx_itr_param, tx_itr_prev;
   2430	bool need_reset = false;
   2431
   2432	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
   2433		/* reject Tx specific changes in case of mixed RxTx vectors */
   2434		if (ec->tx_coalesce_usecs)
   2435			return -EINVAL;
   2436		tx_itr_prev = adapter->rx_itr_setting;
   2437	} else {
   2438		tx_itr_prev = adapter->tx_itr_setting;
   2439	}
   2440
   2441	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
   2442	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
   2443		return -EINVAL;
   2444
   2445	if (ec->rx_coalesce_usecs > 1)
   2446		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
   2447	else
   2448		adapter->rx_itr_setting = ec->rx_coalesce_usecs;
   2449
   2450	if (adapter->rx_itr_setting == 1)
   2451		rx_itr_param = IXGBE_20K_ITR;
   2452	else
   2453		rx_itr_param = adapter->rx_itr_setting;
   2454
   2455	if (ec->tx_coalesce_usecs > 1)
   2456		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
   2457	else
   2458		adapter->tx_itr_setting = ec->tx_coalesce_usecs;
   2459
   2460	if (adapter->tx_itr_setting == 1)
   2461		tx_itr_param = IXGBE_12K_ITR;
   2462	else
   2463		tx_itr_param = adapter->tx_itr_setting;
   2464
   2465	/* mixed Rx/Tx */
   2466	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
   2467		adapter->tx_itr_setting = adapter->rx_itr_setting;
   2468
   2469	/* detect ITR changes that require update of TXDCTL.WTHRESH */
   2470	if ((adapter->tx_itr_setting != 1) &&
   2471	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
   2472		if ((tx_itr_prev == 1) ||
   2473		    (tx_itr_prev >= IXGBE_100K_ITR))
   2474			need_reset = true;
   2475	} else {
   2476		if ((tx_itr_prev != 1) &&
   2477		    (tx_itr_prev < IXGBE_100K_ITR))
   2478			need_reset = true;
   2479	}
   2480
   2481	/* check the old value and enable RSC if necessary */
   2482	need_reset |= ixgbe_update_rsc(adapter);
   2483
   2484	for (i = 0; i < adapter->num_q_vectors; i++) {
   2485		q_vector = adapter->q_vector[i];
   2486		if (q_vector->tx.count && !q_vector->rx.count)
   2487			/* tx only */
   2488			q_vector->itr = tx_itr_param;
   2489		else
   2490			/* rx only or mixed */
   2491			q_vector->itr = rx_itr_param;
   2492		ixgbe_write_eitr(q_vector);
   2493	}
   2494
   2495	/*
    2496	 * Do the reset here at the end to make sure the EITR==0 case is
    2497	 * handled correctly w.r.t. stopping Tx and changing TXDCTL.WTHRESH;
    2498	 * it also locks in the RSC enable/disable change, which requires a reset.
   2499	 */
   2500	if (need_reset)
   2501		ixgbe_do_reset(netdev);
   2502
   2503	return 0;
   2504}
   2505
   2506static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
   2507					struct ethtool_rxnfc *cmd)
   2508{
   2509	union ixgbe_atr_input *mask = &adapter->fdir_mask;
   2510	struct ethtool_rx_flow_spec *fsp =
   2511		(struct ethtool_rx_flow_spec *)&cmd->fs;
   2512	struct hlist_node *node2;
   2513	struct ixgbe_fdir_filter *rule = NULL;
   2514
   2515	/* report total rule count */
   2516	cmd->data = (1024 << adapter->fdir_pballoc) - 2;
   2517
   2518	hlist_for_each_entry_safe(rule, node2,
   2519				  &adapter->fdir_filter_list, fdir_node) {
   2520		if (fsp->location <= rule->sw_idx)
   2521			break;
   2522	}
   2523
   2524	if (!rule || fsp->location != rule->sw_idx)
   2525		return -EINVAL;
   2526
   2527	/* fill out the flow spec entry */
   2528
   2529	/* set flow type field */
   2530	switch (rule->filter.formatted.flow_type) {
   2531	case IXGBE_ATR_FLOW_TYPE_TCPV4:
   2532		fsp->flow_type = TCP_V4_FLOW;
   2533		break;
   2534	case IXGBE_ATR_FLOW_TYPE_UDPV4:
   2535		fsp->flow_type = UDP_V4_FLOW;
   2536		break;
   2537	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
   2538		fsp->flow_type = SCTP_V4_FLOW;
   2539		break;
   2540	case IXGBE_ATR_FLOW_TYPE_IPV4:
   2541		fsp->flow_type = IP_USER_FLOW;
   2542		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
   2543		fsp->h_u.usr_ip4_spec.proto = 0;
   2544		fsp->m_u.usr_ip4_spec.proto = 0;
   2545		break;
   2546	default:
   2547		return -EINVAL;
   2548	}
   2549
   2550	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
   2551	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
   2552	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
   2553	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
   2554	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
   2555	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
   2556	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
   2557	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
   2558	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
   2559	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
   2560	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
   2561	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
   2562	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
   2563	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
   2564	fsp->flow_type |= FLOW_EXT;
   2565
   2566	/* record action */
   2567	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
   2568		fsp->ring_cookie = RX_CLS_FLOW_DISC;
   2569	else
   2570		fsp->ring_cookie = rule->action;
   2571
   2572	return 0;
   2573}
   2574
   2575static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
   2576				      struct ethtool_rxnfc *cmd,
   2577				      u32 *rule_locs)
   2578{
   2579	struct hlist_node *node2;
   2580	struct ixgbe_fdir_filter *rule;
   2581	int cnt = 0;
   2582
   2583	/* report total rule count */
   2584	cmd->data = (1024 << adapter->fdir_pballoc) - 2;
   2585
   2586	hlist_for_each_entry_safe(rule, node2,
   2587				  &adapter->fdir_filter_list, fdir_node) {
   2588		if (cnt == cmd->rule_cnt)
   2589			return -EMSGSIZE;
   2590		rule_locs[cnt] = rule->sw_idx;
   2591		cnt++;
   2592	}
   2593
   2594	cmd->rule_cnt = cnt;
   2595
   2596	return 0;
   2597}
   2598
   2599static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
   2600				   struct ethtool_rxnfc *cmd)
   2601{
   2602	cmd->data = 0;
   2603
   2604	/* Report default options for RSS on ixgbe */
   2605	switch (cmd->flow_type) {
   2606	case TCP_V4_FLOW:
   2607		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
   2608		fallthrough;
   2609	case UDP_V4_FLOW:
   2610		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
   2611			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
   2612		fallthrough;
   2613	case SCTP_V4_FLOW:
   2614	case AH_ESP_V4_FLOW:
   2615	case AH_V4_FLOW:
   2616	case ESP_V4_FLOW:
   2617	case IPV4_FLOW:
   2618		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
   2619		break;
   2620	case TCP_V6_FLOW:
   2621		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
   2622		fallthrough;
   2623	case UDP_V6_FLOW:
   2624		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
   2625			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
   2626		fallthrough;
   2627	case SCTP_V6_FLOW:
   2628	case AH_ESP_V6_FLOW:
   2629	case AH_V6_FLOW:
   2630	case ESP_V6_FLOW:
   2631	case IPV6_FLOW:
   2632		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
   2633		break;
   2634	default:
   2635		return -EINVAL;
   2636	}
   2637
   2638	return 0;
   2639}
   2640
   2641static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
   2642			   u32 *rule_locs)
   2643{
   2644	struct ixgbe_adapter *adapter = netdev_priv(dev);
   2645	int ret = -EOPNOTSUPP;
   2646
   2647	switch (cmd->cmd) {
   2648	case ETHTOOL_GRXRINGS:
   2649		cmd->data = adapter->num_rx_queues;
   2650		ret = 0;
   2651		break;
   2652	case ETHTOOL_GRXCLSRLCNT:
   2653		cmd->rule_cnt = adapter->fdir_filter_count;
   2654		ret = 0;
   2655		break;
   2656	case ETHTOOL_GRXCLSRULE:
   2657		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
   2658		break;
   2659	case ETHTOOL_GRXCLSRLALL:
   2660		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
   2661		break;
   2662	case ETHTOOL_GRXFH:
   2663		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
   2664		break;
   2665	default:
   2666		break;
   2667	}
   2668
   2669	return ret;
   2670}
   2671
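        /* The fdir filter list is kept sorted by sw_idx: the walk below finds
         * the insertion point (parent) and any existing rule at the same
         * index, which is then replaced.  Passing a NULL input deletes the
         * entry at sw_idx, erasing the hardware filter as well.
         */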
   2672int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
   2673				    struct ixgbe_fdir_filter *input,
   2674				    u16 sw_idx)
   2675{
   2676	struct ixgbe_hw *hw = &adapter->hw;
   2677	struct hlist_node *node2;
   2678	struct ixgbe_fdir_filter *rule, *parent;
   2679	int err = -EINVAL;
   2680
   2681	parent = NULL;
   2682	rule = NULL;
   2683
   2684	hlist_for_each_entry_safe(rule, node2,
   2685				  &adapter->fdir_filter_list, fdir_node) {
   2686		/* hash found, or no matching entry */
   2687		if (rule->sw_idx >= sw_idx)
   2688			break;
   2689		parent = rule;
   2690	}
   2691
    2692	/* if there is an old rule occupying our place, remove it */
   2693	if (rule && (rule->sw_idx == sw_idx)) {
   2694		if (!input || (rule->filter.formatted.bkt_hash !=
   2695			       input->filter.formatted.bkt_hash)) {
   2696			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
   2697								&rule->filter,
   2698								sw_idx);
   2699		}
   2700
   2701		hlist_del(&rule->fdir_node);
   2702		kfree(rule);
   2703		adapter->fdir_filter_count--;
   2704	}
   2705
   2706	/*
    2707	 * If there is no input this was a delete; err is 0 if a rule was
    2708	 * successfully found and removed from the list, else -EINVAL.
   2709	 */
   2710	if (!input)
   2711		return err;
   2712
   2713	/* initialize node and set software index */
   2714	INIT_HLIST_NODE(&input->fdir_node);
   2715
   2716	/* add filter to the list */
   2717	if (parent)
   2718		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
   2719	else
   2720		hlist_add_head(&input->fdir_node,
   2721			       &adapter->fdir_filter_list);
   2722
   2723	/* update counts */
   2724	adapter->fdir_filter_count++;
   2725
   2726	return 0;
   2727}
   2728
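        /* Note the return convention: 1 when the flow spec maps onto an ATR
         * flow type, 0 when it cannot be expressed, so the caller turns a
         * zero return into -EINVAL.
         */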
   2729static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
   2730				       u8 *flow_type)
   2731{
   2732	switch (fsp->flow_type & ~FLOW_EXT) {
   2733	case TCP_V4_FLOW:
   2734		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
   2735		break;
   2736	case UDP_V4_FLOW:
   2737		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
   2738		break;
   2739	case SCTP_V4_FLOW:
   2740		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
   2741		break;
   2742	case IP_USER_FLOW:
   2743		switch (fsp->h_u.usr_ip4_spec.proto) {
   2744		case IPPROTO_TCP:
   2745			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
   2746			break;
   2747		case IPPROTO_UDP:
   2748			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
   2749			break;
   2750		case IPPROTO_SCTP:
   2751			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
   2752			break;
   2753		case 0:
   2754			if (!fsp->m_u.usr_ip4_spec.proto) {
   2755				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
   2756				break;
   2757			}
   2758			fallthrough;
   2759		default:
   2760			return 0;
   2761		}
   2762		break;
   2763	default:
   2764		return 0;
   2765	}
   2766
   2767	return 1;
   2768}
   2769
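        /* These filters are normally driven from userspace through ethtool's
         * ntuple interface, for example (device name and values illustrative):
         *
         *	ethtool -N eth0 flow-type tcp4 dst-ip 192.168.10.2 \
         *		dst-port 80 action 4 loc 1
         *
         * which arrives here as an ETHTOOL_SRXCLSRLINS request carrying a
         * TCP_V4_FLOW spec with ring_cookie 4 and location 1.
         */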
   2770static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
   2771					struct ethtool_rxnfc *cmd)
   2772{
   2773	struct ethtool_rx_flow_spec *fsp =
   2774		(struct ethtool_rx_flow_spec *)&cmd->fs;
   2775	struct ixgbe_hw *hw = &adapter->hw;
   2776	struct ixgbe_fdir_filter *input;
   2777	union ixgbe_atr_input mask;
   2778	u8 queue;
   2779	int err;
   2780
   2781	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
   2782		return -EOPNOTSUPP;
   2783
    2784	/* ring_cookie is masked into a set of queues and ixgbe pools, or
    2785	 * we use the drop index.
    2786	 */
   2787	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
   2788		queue = IXGBE_FDIR_DROP_QUEUE;
   2789	} else {
   2790		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
   2791		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
   2792
   2793		if (!vf && (ring >= adapter->num_rx_queues))
   2794			return -EINVAL;
   2795		else if (vf &&
   2796			 ((vf > adapter->num_vfs) ||
   2797			   ring >= adapter->num_rx_queues_per_pool))
   2798			return -EINVAL;
   2799
   2800		/* Map the ring onto the absolute queue index */
   2801		if (!vf)
   2802			queue = adapter->rx_ring[ring]->reg_idx;
   2803		else
   2804			queue = ((vf - 1) *
   2805				adapter->num_rx_queues_per_pool) + ring;
   2806	}
   2807
   2808	/* Don't allow indexes to exist outside of available space */
   2809	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
   2810		e_err(drv, "Location out of range\n");
   2811		return -EINVAL;
   2812	}
   2813
   2814	input = kzalloc(sizeof(*input), GFP_ATOMIC);
   2815	if (!input)
   2816		return -ENOMEM;
   2817
   2818	memset(&mask, 0, sizeof(union ixgbe_atr_input));
   2819
   2820	/* set SW index */
   2821	input->sw_idx = fsp->location;
   2822
   2823	/* record flow type */
   2824	if (!ixgbe_flowspec_to_flow_type(fsp,
   2825					 &input->filter.formatted.flow_type)) {
   2826		e_err(drv, "Unrecognized flow type\n");
   2827		goto err_out;
   2828	}
   2829
   2830	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
   2831				   IXGBE_ATR_L4TYPE_MASK;
   2832
   2833	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
   2834		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
   2835
   2836	/* Copy input into formatted structures */
   2837	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
   2838	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
   2839	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
   2840	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
   2841	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
   2842	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
   2843	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
   2844	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
   2845
   2846	if (fsp->flow_type & FLOW_EXT) {
   2847		input->filter.formatted.vm_pool =
   2848				(unsigned char)ntohl(fsp->h_ext.data[1]);
   2849		mask.formatted.vm_pool =
   2850				(unsigned char)ntohl(fsp->m_ext.data[1]);
   2851		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
   2852		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
   2853		input->filter.formatted.flex_bytes =
   2854						fsp->h_ext.vlan_etype;
   2855		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
   2856	}
   2857
   2858	/* determine if we need to drop or route the packet */
   2859	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
   2860		input->action = IXGBE_FDIR_DROP_QUEUE;
   2861	else
   2862		input->action = fsp->ring_cookie;
   2863
   2864	spin_lock(&adapter->fdir_perfect_lock);
   2865
   2866	if (hlist_empty(&adapter->fdir_filter_list)) {
   2867		/* save mask and program input mask into HW */
   2868		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
   2869		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
   2870		if (err) {
   2871			e_err(drv, "Error writing mask\n");
   2872			goto err_out_w_lock;
   2873		}
   2874	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
   2875		e_err(drv, "Only one mask supported per port\n");
   2876		goto err_out_w_lock;
   2877	}
   2878
   2879	/* apply mask and compute/store hash */
   2880	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
   2881
   2882	/* program filters to filter memory */
   2883	err = ixgbe_fdir_write_perfect_filter_82599(hw,
   2884				&input->filter, input->sw_idx, queue);
   2885	if (err)
   2886		goto err_out_w_lock;
   2887
   2888	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
   2889
   2890	spin_unlock(&adapter->fdir_perfect_lock);
   2891
   2892	return err;
   2893err_out_w_lock:
   2894	spin_unlock(&adapter->fdir_perfect_lock);
   2895err_out:
   2896	kfree(input);
   2897	return -EINVAL;
   2898}
   2899
   2900static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
   2901					struct ethtool_rxnfc *cmd)
   2902{
   2903	struct ethtool_rx_flow_spec *fsp =
   2904		(struct ethtool_rx_flow_spec *)&cmd->fs;
   2905	int err;
   2906
   2907	spin_lock(&adapter->fdir_perfect_lock);
   2908	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
   2909	spin_unlock(&adapter->fdir_perfect_lock);
   2910
   2911	return err;
   2912}
   2913
   2914#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
   2915		       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
   2916static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
   2917				  struct ethtool_rxnfc *nfc)
   2918{
   2919	u32 flags2 = adapter->flags2;
   2920
   2921	/*
   2922	 * RSS does not support anything other than hashing
   2923	 * to queues on src and dst IPs and ports
   2924	 */
   2925	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
   2926			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
   2927		return -EINVAL;
   2928
   2929	switch (nfc->flow_type) {
   2930	case TCP_V4_FLOW:
   2931	case TCP_V6_FLOW:
   2932		if (!(nfc->data & RXH_IP_SRC) ||
   2933		    !(nfc->data & RXH_IP_DST) ||
   2934		    !(nfc->data & RXH_L4_B_0_1) ||
   2935		    !(nfc->data & RXH_L4_B_2_3))
   2936			return -EINVAL;
   2937		break;
   2938	case UDP_V4_FLOW:
   2939		if (!(nfc->data & RXH_IP_SRC) ||
   2940		    !(nfc->data & RXH_IP_DST))
   2941			return -EINVAL;
   2942		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
   2943		case 0:
   2944			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
   2945			break;
   2946		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
   2947			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
   2948			break;
   2949		default:
   2950			return -EINVAL;
   2951		}
   2952		break;
   2953	case UDP_V6_FLOW:
   2954		if (!(nfc->data & RXH_IP_SRC) ||
   2955		    !(nfc->data & RXH_IP_DST))
   2956			return -EINVAL;
   2957		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
   2958		case 0:
   2959			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
   2960			break;
   2961		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
   2962			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
   2963			break;
   2964		default:
   2965			return -EINVAL;
   2966		}
   2967		break;
   2968	case AH_ESP_V4_FLOW:
   2969	case AH_V4_FLOW:
   2970	case ESP_V4_FLOW:
   2971	case SCTP_V4_FLOW:
   2972	case AH_ESP_V6_FLOW:
   2973	case AH_V6_FLOW:
   2974	case ESP_V6_FLOW:
   2975	case SCTP_V6_FLOW:
   2976		if (!(nfc->data & RXH_IP_SRC) ||
   2977		    !(nfc->data & RXH_IP_DST) ||
   2978		    (nfc->data & RXH_L4_B_0_1) ||
   2979		    (nfc->data & RXH_L4_B_2_3))
   2980			return -EINVAL;
   2981		break;
   2982	default:
   2983		return -EINVAL;
   2984	}
   2985
   2986	/* if we changed something we need to update flags */
   2987	if (flags2 != adapter->flags2) {
   2988		struct ixgbe_hw *hw = &adapter->hw;
   2989		u32 mrqc;
   2990		unsigned int pf_pool = adapter->num_vfs;
   2991
   2992		if ((hw->mac.type >= ixgbe_mac_X550) &&
   2993		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
   2994			mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
   2995		else
   2996			mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
   2997
   2998		if ((flags2 & UDP_RSS_FLAGS) &&
   2999		    !(adapter->flags2 & UDP_RSS_FLAGS))
   3000			e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
   3001
   3002		adapter->flags2 = flags2;
   3003
   3004		/* Perform hash on these packet types */
   3005		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
   3006		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
   3007		      | IXGBE_MRQC_RSS_FIELD_IPV6
   3008		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
   3009
   3010		mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
   3011			  IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
   3012
   3013		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
   3014			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
   3015
   3016		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
   3017			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
   3018
   3019		if ((hw->mac.type >= ixgbe_mac_X550) &&
   3020		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
   3021			IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
   3022		else
   3023			IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
   3024	}
   3025
   3026	return 0;
   3027}
   3028
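        /* This maps onto ethtool's rx-flow-hash control, for example (device
         * name illustrative):
         *
         *	ethtool -N eth0 rx-flow-hash udp4 sdfn
         *
         * which enables 4-tuple hashing for IPv4 UDP (sets
         * IXGBE_FLAG2_RSS_FIELD_IPV4_UDP), while "sd" drops back to hashing
         * the addresses only.
         */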
   3029static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
   3030{
   3031	struct ixgbe_adapter *adapter = netdev_priv(dev);
   3032	int ret = -EOPNOTSUPP;
   3033
   3034	switch (cmd->cmd) {
   3035	case ETHTOOL_SRXCLSRLINS:
   3036		ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
   3037		break;
   3038	case ETHTOOL_SRXCLSRLDEL:
   3039		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
   3040		break;
   3041	case ETHTOOL_SRXFH:
   3042		ret = ixgbe_set_rss_hash_opt(adapter, cmd);
   3043		break;
   3044	default:
   3045		break;
   3046	}
   3047
   3048	return ret;
   3049}
   3050
   3051static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
   3052{
   3053	if (adapter->hw.mac.type < ixgbe_mac_X550)
   3054		return 16;
   3055	else
   3056		return 64;
   3057}
   3058
   3059static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
   3060{
   3061	return IXGBE_RSS_KEY_SIZE;
   3062}
   3063
   3064static u32 ixgbe_rss_indir_size(struct net_device *netdev)
   3065{
   3066	struct ixgbe_adapter *adapter = netdev_priv(netdev);
   3067
   3068	return ixgbe_rss_indir_tbl_entries(adapter);
   3069}
   3070
   3071static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
   3072{
   3073	int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
   3074	u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;
   3075
   3076	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
   3077		rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;
   3078
   3079	for (i = 0; i < reta_size; i++)
   3080		indir[i] = adapter->rss_indir_tbl[i] & rss_m;
   3081}
   3082
   3083static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
   3084			  u8 *hfunc)
   3085{
   3086	struct ixgbe_adapter *adapter = netdev_priv(netdev);
   3087
   3088	if (hfunc)
   3089		*hfunc = ETH_RSS_HASH_TOP;
   3090
   3091	if (indir)
   3092		ixgbe_get_reta(adapter, indir);
   3093
   3094	if (key)
   3095		memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));
   3096
   3097	return 0;
   3098}
   3099
static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;
	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);

	if (hfunc)
		return -EINVAL;

	/* Fill out the redirection table */
	if (indir) {
		int max_queues = min_t(int, adapter->num_rx_queues,
				       ixgbe_rss_indir_tbl_max(adapter));

		/* Allow at least 2 queues w/ SR-IOV. */
		if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
		    (max_queues < 2))
			max_queues = 2;

		/* Verify user input. */
		for (i = 0; i < reta_entries; i++)
			if (indir[i] >= max_queues)
				return -EINVAL;

		for (i = 0; i < reta_entries; i++)
			adapter->rss_indir_tbl[i] = indir[i];

		ixgbe_store_reta(adapter);
	}

	/* Fill out the rss hash key */
	if (key) {
		memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
		ixgbe_store_key(adapter);
	}

	return 0;
}

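/*
 * Report hardware timestamping capabilities ("ethtool -T <iface>").
 * X550 and newer can timestamp all RX packets; X540/82599 only PTP
 * event packets; anything older falls back to software timestamping.
 */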
static int ixgbe_get_ts_info(struct net_device *dev,
			     struct ethtool_ts_info *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* we always support timestamping disabled */
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB:
		info->rx_filters |=
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
		break;
	default:
		return ethtool_op_get_ts_info(dev, info);
	}

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;

	if (adapter->ptp_clock)
		info->phc_index = ptp_clock_index(adapter->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types =
		BIT(HWTSTAMP_TX_OFF) |
		BIT(HWTSTAMP_TX_ON);

	return 0;
}

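/*
 * Upper bound on combined channels: 1 without MSI-X, the RSS queue
 * mask + 1 under SR-IOV, fixed per-TC limits under DCB,
 * IXGBE_MAX_FDIR_INDICES when ATR sampling is on, otherwise the RSS
 * maximum; always capped at the number of online CPUs.
 */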
static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
{
	unsigned int max_combined;
	u8 tcs = adapter->hw_tcs;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* We only support one q_vector without MSI-X */
		max_combined = 1;
	} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		/* Limit value based on the queue mask */
		max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
	} else if (tcs > 1) {
		/* For DCB report channels per traffic class */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			/* 8 TC w/ 4 queues per TC */
			max_combined = 4;
		} else if (tcs > 4) {
			/* 8 TC w/ 8 queues per TC */
			max_combined = 8;
		} else {
			/* 4 TC w/ 16 queues per TC */
			max_combined = 16;
		}
	} else if (adapter->atr_sample_rate) {
		/* support up to 64 queues with ATR */
		max_combined = IXGBE_MAX_FDIR_INDICES;
	} else {
		/* support up to 16 queues with RSS */
		max_combined = ixgbe_max_rss_indices(adapter);
	}

	return min_t(int, max_combined, num_online_cpus());
}

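/*
 * "ethtool -l <iface>": report the channel limits above plus the
 * currently active combined count (RSS, or Flow Director when ATR is
 * in use and neither SR-IOV nor DCB is active).
 */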
static void ixgbe_get_channels(struct net_device *dev,
			       struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = ixgbe_max_channels(adapter);

	/* report info for other vector */
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		ch->max_other = NON_Q_VECTORS;
		ch->other_count = NON_Q_VECTORS;
	}

	/* record RSS queues */
	ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;

	/* nothing else to report if RSS is disabled */
	if (ch->combined_count == 1)
		return;

	/* we do not support ATR queueing if SR-IOV is enabled */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		return;

	/* same thing goes for being DCB enabled */
	if (adapter->hw_tcs > 1)
		return;

	/* if ATR is disabled we can exit */
	if (!adapter->atr_sample_rate)
		return;

	/* report flow director queues as maximum channels */
	ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
}

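/*
 * "ethtool -L <iface> combined <n>": only symmetric (combined) channels
 * are accepted.  The new count becomes the Flow Director limit, is
 * capped for RSS (and FCoE), and takes effect through a setup_tc pass.
 */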
static int ixgbe_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	unsigned int count = ch->combined_count;
	u8 max_rss_indices = ixgbe_max_rss_indices(adapter);

	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > ixgbe_max_channels(adapter))
		return -EINVAL;

	/* update feature limits from largest to smallest supported values */
	adapter->ring_feature[RING_F_FDIR].limit = count;

	/* cap RSS limit */
	if (count > max_rss_indices)
		count = max_rss_indices;
	adapter->ring_feature[RING_F_RSS].limit = count;

#ifdef IXGBE_FCOE
	/* cap FCoE limit at 8 */
	if (count > IXGBE_FCRETA_SIZE)
		count = IXGBE_FCRETA_SIZE;
	adapter->ring_feature[RING_F_FCOE].limit = count;

#endif
	/* use setup TC to update any traffic class queue mapping */
	return ixgbe_setup_tc(dev, adapter->hw_tcs);
}

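/*
 * "ethtool -m <iface>": probe the SFP+ module EEPROM to decide whether
 * it only answers to plain SFF-8079 or also implements the SFF-8472
 * diagnostic page.
 */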
static int ixgbe_get_module_info(struct net_device *dev,
				       struct ethtool_modinfo *modinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	s32 status;
	u8 sff8472_rev, addr_mode;
	bool page_swap = false;

	if (hw->phy.type == ixgbe_phy_fw)
		return -ENXIO;

	/* Check whether we support SFF-8472 or not */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_COMP,
					     &sff8472_rev);
	if (status)
		return -EIO;

	/* Check the addressing mode (a page change is not supported) */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_SWAP,
					     &addr_mode);
	if (status)
		return -EIO;

	if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
		e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
		page_swap = true;
	}

	if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap ||
	    !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) {
		/* We have an SFP, but it does not support SFF-8472 */
		modinfo->type = ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have an SFP which supports a revision of SFF-8472. */
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	}

	return 0;
}

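/*
 * Read @ee->len bytes of module EEPROM starting at @ee->offset, one
 * byte per I2C transaction; offsets beyond the SFF-8079 page go through
 * the SFF-8472 read op.  Bails out with -EBUSY while an SFP (re)init
 * is in progress.
 */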
static int ixgbe_get_module_eeprom(struct net_device *dev,
					 struct ethtool_eeprom *ee,
					 u8 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
	u8 databyte = 0xFF;
	int i = 0;

	if (ee->len == 0)
		return -EINVAL;

	if (hw->phy.type == ixgbe_phy_fw)
		return -ENXIO;

	for (i = ee->offset; i < ee->offset + ee->len; i++) {
		/* I2C reads can take a long time */
		if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			return -EBUSY;

		if (i < ETH_MODULE_SFF_8079_LEN)
			status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
		else
			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);

		if (status)
			return -EIO;

		data[i - ee->offset] = databyte;
	}

	return 0;
}

static const struct {
	ixgbe_link_speed mac_speed;
	u32 supported;
} ixgbe_ls_map[] = {
	{ IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
	{ IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
	{ IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
	{ IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
	{ IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
};

static const struct {
	u32 lp_advertised;
	u32 mac_speed;
} ixgbe_lp_map[] = {
	{ FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
	{ FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
	{ FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
	{ FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
	{ FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
	{ FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full },
};

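/*
 * EEE state for firmware-controlled PHYs: a FW_PHY_ACT_UD_2 request
 * returns the link partner's EEE advertisement; the returned word and
 * our own supported/advertised speeds are translated via the mapping
 * tables above.  EEE is considered active when both sides advertise a
 * common speed.
 */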
static int
ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
{
	u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
	struct ixgbe_hw *hw = &adapter->hw;
	s32 rc;
	u16 i;

	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
	if (rc)
		return rc;

	edata->lp_advertised = 0;
	for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
		if (info[0] & ixgbe_lp_map[i].lp_advertised)
			edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
	}

	edata->supported = 0;
	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
		if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
			edata->supported |= ixgbe_ls_map[i].supported;
	}

	edata->advertised = 0;
	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
		if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
			edata->advertised |= ixgbe_ls_map[i].supported;
	}

	edata->eee_enabled = !!edata->advertised;
	edata->tx_lpi_enabled = edata->eee_enabled;
	if (edata->advertised & edata->lp_advertised)
		edata->eee_active = true;

	return 0;
}

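/*
 * "ethtool --show-eee <iface>": EEE state is only reported on
 * EEE-capable parts whose PHY is firmware-managed (ixgbe_phy_fw);
 * everything else gets -EOPNOTSUPP.
 */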
static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
		return -EOPNOTSUPP;

	if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
		return ixgbe_get_eee_fw(adapter, edata);

	return -EOPNOTSUPP;
}

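/*
 * "ethtool --set-eee <iface> eee on|off": only toggling EEE as a whole
 * is supported; attempts to change tx-lpi, the LPI timer, or the
 * advertised speed set while disabling are rejected, and any effective
 * change triggers a link reset.
 */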
static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ethtool_eee eee_data;
	s32 ret_val;

	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
		return -EOPNOTSUPP;

	memset(&eee_data, 0, sizeof(struct ethtool_eee));

	ret_val = ixgbe_get_eee(netdev, &eee_data);
	if (ret_val)
		return ret_val;

	if (eee_data.eee_enabled && !edata->eee_enabled) {
		if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
			e_err(drv, "Setting EEE tx-lpi is not supported\n");
			return -EINVAL;
		}

		if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
			e_err(drv,
			      "Setting EEE Tx LPI timer is not supported\n");
			return -EINVAL;
		}

		if (eee_data.advertised != edata->advertised) {
			e_err(drv,
			      "Setting EEE advertised speeds is not supported\n");
			return -EINVAL;
		}
	}

	if (eee_data.eee_enabled != edata->eee_enabled) {
		if (edata->eee_enabled) {
			adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
			hw->phy.eee_speeds_advertised =
						   hw->phy.eee_speeds_supported;
		} else {
			adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
			hw->phy.eee_speeds_advertised = 0;
		}

		/* reset link */
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}

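/*
 * "ethtool --show-priv-flags <iface>": report which of the driver's
 * private flags (legacy RX, VF IPsec offload, automatic VF disable)
 * are currently set in adapter->flags2.
 */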
static u32 ixgbe_get_priv_flags(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 priv_flags = 0;

	if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
		priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;

	if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)
		priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN;

	if (adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF)
		priv_flags |= IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF;

	return priv_flags;
}

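/*
 * "ethtool --set-priv-flags <iface> <flag> on|off".  Automatic VF
 * disable is only honoured on 82599 (where enabling it also clears the
 * per-VF primary abort counters); any effective change reinitializes a
 * running interface.
 */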
static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned int flags2 = adapter->flags2;
	unsigned int i;

	flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
	if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
		flags2 |= IXGBE_FLAG2_RX_LEGACY;

	flags2 &= ~IXGBE_FLAG2_VF_IPSEC_ENABLED;
	if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN)
		flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED;

	flags2 &= ~IXGBE_FLAG2_AUTO_DISABLE_VF;
	if (priv_flags & IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF) {
		if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			/* Reset primary abort counter */
			for (i = 0; i < adapter->num_vfs; i++)
				adapter->vfinfo[i].primary_abort_count = 0;

			flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
		} else {
			e_info(probe,
			       "Cannot set private flags: Operation not supported\n");
			return -EOPNOTSUPP;
		}
	}

	if (flags2 != adapter->flags2) {
		adapter->flags2 = flags2;

		/* reset interface to repopulate queues */
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
	}

	return 0;
}

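/*
 * The ethtool_ops vector wired up below; only microsecond-based
 * interrupt coalescing parameters are accepted
 * (supported_coalesce_params = ETHTOOL_COALESCE_USECS).
 */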
static const struct ethtool_ops ixgbe_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.get_drvinfo            = ixgbe_get_drvinfo,
	.get_regs_len           = ixgbe_get_regs_len,
	.get_regs               = ixgbe_get_regs,
	.get_wol                = ixgbe_get_wol,
	.set_wol                = ixgbe_set_wol,
	.nway_reset             = ixgbe_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = ixgbe_get_eeprom_len,
	.get_eeprom             = ixgbe_get_eeprom,
	.set_eeprom             = ixgbe_set_eeprom,
	.get_ringparam          = ixgbe_get_ringparam,
	.set_ringparam          = ixgbe_set_ringparam,
	.get_pause_stats	= ixgbe_get_pause_stats,
	.get_pauseparam         = ixgbe_get_pauseparam,
	.set_pauseparam         = ixgbe_set_pauseparam,
	.get_msglevel           = ixgbe_get_msglevel,
	.set_msglevel           = ixgbe_set_msglevel,
	.self_test              = ixgbe_diag_test,
	.get_strings            = ixgbe_get_strings,
	.set_phys_id            = ixgbe_set_phys_id,
	.get_sset_count         = ixgbe_get_sset_count,
	.get_ethtool_stats      = ixgbe_get_ethtool_stats,
	.get_coalesce           = ixgbe_get_coalesce,
	.set_coalesce           = ixgbe_set_coalesce,
	.get_rxnfc		= ixgbe_get_rxnfc,
	.set_rxnfc		= ixgbe_set_rxnfc,
	.get_rxfh_indir_size	= ixgbe_rss_indir_size,
	.get_rxfh_key_size	= ixgbe_get_rxfh_key_size,
	.get_rxfh		= ixgbe_get_rxfh,
	.set_rxfh		= ixgbe_set_rxfh,
	.get_eee		= ixgbe_get_eee,
	.set_eee		= ixgbe_set_eee,
	.get_channels		= ixgbe_get_channels,
	.set_channels		= ixgbe_set_channels,
	.get_priv_flags		= ixgbe_get_priv_flags,
	.set_priv_flags		= ixgbe_set_priv_flags,
	.get_ts_info		= ixgbe_get_ts_info,
	.get_module_info	= ixgbe_get_module_info,
	.get_module_eeprom	= ixgbe_get_module_eeprom,
	.get_link_ksettings     = ixgbe_get_link_ksettings,
	.set_link_ksettings     = ixgbe_set_link_ksettings,
};

void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbe_ethtool_ops;
}