cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ethtool.c (65617B)


      1// SPDX-License-Identifier: GPL-2.0
      2/* Copyright(c) 1999 - 2018 Intel Corporation. */
      3
      4/* ethtool support for e1000 */
      5
      6#include <linux/netdevice.h>
      7#include <linux/interrupt.h>
      8#include <linux/ethtool.h>
      9#include <linux/pci.h>
     10#include <linux/slab.h>
     11#include <linux/delay.h>
     12#include <linux/vmalloc.h>
     13#include <linux/pm_runtime.h>
     14
     15#include "e1000.h"
     16
     17enum { NETDEV_STATS, E1000_STATS };
     18
     19struct e1000_stats {
     20	char stat_string[ETH_GSTRING_LEN];
     21	int type;
     22	int sizeof_stat;
     23	int stat_offset;
     24};
     25
     26static const char e1000e_priv_flags_strings[][ETH_GSTRING_LEN] = {
     27#define E1000E_PRIV_FLAGS_S0IX_ENABLED	BIT(0)
     28	"s0ix-enabled",
     29};
     30
     31#define E1000E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(e1000e_priv_flags_strings)
     32
     33#define E1000_STAT(str, m) { \
     34		.stat_string = str, \
     35		.type = E1000_STATS, \
     36		.sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
     37		.stat_offset = offsetof(struct e1000_adapter, m) }
     38#define E1000_NETDEV_STAT(str, m) { \
     39		.stat_string = str, \
     40		.type = NETDEV_STATS, \
     41		.sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
     42		.stat_offset = offsetof(struct rtnl_link_stats64, m) }
     43
     44static const struct e1000_stats e1000_gstrings_stats[] = {
     45	E1000_STAT("rx_packets", stats.gprc),
     46	E1000_STAT("tx_packets", stats.gptc),
     47	E1000_STAT("rx_bytes", stats.gorc),
     48	E1000_STAT("tx_bytes", stats.gotc),
     49	E1000_STAT("rx_broadcast", stats.bprc),
     50	E1000_STAT("tx_broadcast", stats.bptc),
     51	E1000_STAT("rx_multicast", stats.mprc),
     52	E1000_STAT("tx_multicast", stats.mptc),
     53	E1000_NETDEV_STAT("rx_errors", rx_errors),
     54	E1000_NETDEV_STAT("tx_errors", tx_errors),
     55	E1000_NETDEV_STAT("tx_dropped", tx_dropped),
     56	E1000_STAT("multicast", stats.mprc),
     57	E1000_STAT("collisions", stats.colc),
     58	E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
     59	E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
     60	E1000_STAT("rx_crc_errors", stats.crcerrs),
     61	E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
     62	E1000_STAT("rx_no_buffer_count", stats.rnbc),
     63	E1000_STAT("rx_missed_errors", stats.mpc),
     64	E1000_STAT("tx_aborted_errors", stats.ecol),
     65	E1000_STAT("tx_carrier_errors", stats.tncrs),
     66	E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
     67	E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
     68	E1000_STAT("tx_window_errors", stats.latecol),
     69	E1000_STAT("tx_abort_late_coll", stats.latecol),
     70	E1000_STAT("tx_deferred_ok", stats.dc),
     71	E1000_STAT("tx_single_coll_ok", stats.scc),
     72	E1000_STAT("tx_multi_coll_ok", stats.mcc),
     73	E1000_STAT("tx_timeout_count", tx_timeout_count),
     74	E1000_STAT("tx_restart_queue", restart_queue),
     75	E1000_STAT("rx_long_length_errors", stats.roc),
     76	E1000_STAT("rx_short_length_errors", stats.ruc),
     77	E1000_STAT("rx_align_errors", stats.algnerrc),
     78	E1000_STAT("tx_tcp_seg_good", stats.tsctc),
     79	E1000_STAT("tx_tcp_seg_failed", stats.tsctfc),
     80	E1000_STAT("rx_flow_control_xon", stats.xonrxc),
     81	E1000_STAT("rx_flow_control_xoff", stats.xoffrxc),
     82	E1000_STAT("tx_flow_control_xon", stats.xontxc),
     83	E1000_STAT("tx_flow_control_xoff", stats.xofftxc),
     84	E1000_STAT("rx_csum_offload_good", hw_csum_good),
     85	E1000_STAT("rx_csum_offload_errors", hw_csum_err),
     86	E1000_STAT("rx_header_split", rx_hdr_split),
     87	E1000_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
     88	E1000_STAT("tx_smbus", stats.mgptc),
     89	E1000_STAT("rx_smbus", stats.mgprc),
     90	E1000_STAT("dropped_smbus", stats.mgpdc),
     91	E1000_STAT("rx_dma_failed", rx_dma_failed),
     92	E1000_STAT("tx_dma_failed", tx_dma_failed),
     93	E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
     94	E1000_STAT("uncorr_ecc_errors", uncorr_errors),
     95	E1000_STAT("corr_ecc_errors", corr_errors),
     96	E1000_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
     97	E1000_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
     98};
     99
    100#define E1000_GLOBAL_STATS_LEN	ARRAY_SIZE(e1000_gstrings_stats)
    101#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN)
    102static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
    103	"Register test  (offline)", "Eeprom test    (offline)",
    104	"Interrupt test (offline)", "Loopback test  (offline)",
    105	"Link test   (on/offline)"
    106};
    107
    108#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
    109
    110static int e1000_get_link_ksettings(struct net_device *netdev,
    111				    struct ethtool_link_ksettings *cmd)
    112{
    113	struct e1000_adapter *adapter = netdev_priv(netdev);
    114	struct e1000_hw *hw = &adapter->hw;
    115	u32 speed, supported, advertising;
    116
    117	if (hw->phy.media_type == e1000_media_type_copper) {
    118		supported = (SUPPORTED_10baseT_Half |
    119			     SUPPORTED_10baseT_Full |
    120			     SUPPORTED_100baseT_Half |
    121			     SUPPORTED_100baseT_Full |
    122			     SUPPORTED_1000baseT_Full |
    123			     SUPPORTED_Autoneg |
    124			     SUPPORTED_TP);
    125		if (hw->phy.type == e1000_phy_ife)
    126			supported &= ~SUPPORTED_1000baseT_Full;
    127		advertising = ADVERTISED_TP;
    128
    129		if (hw->mac.autoneg == 1) {
    130			advertising |= ADVERTISED_Autoneg;
    131			/* the e1000 autoneg seems to match ethtool nicely */
    132			advertising |= hw->phy.autoneg_advertised;
    133		}
    134
    135		cmd->base.port = PORT_TP;
    136		cmd->base.phy_address = hw->phy.addr;
    137	} else {
    138		supported   = (SUPPORTED_1000baseT_Full |
    139			       SUPPORTED_FIBRE |
    140			       SUPPORTED_Autoneg);
    141
    142		advertising = (ADVERTISED_1000baseT_Full |
    143			       ADVERTISED_FIBRE |
    144			       ADVERTISED_Autoneg);
    145
    146		cmd->base.port = PORT_FIBRE;
    147	}
    148
    149	speed = SPEED_UNKNOWN;
    150	cmd->base.duplex = DUPLEX_UNKNOWN;
    151
    152	if (netif_running(netdev)) {
    153		if (netif_carrier_ok(netdev)) {
    154			speed = adapter->link_speed;
    155			cmd->base.duplex = adapter->link_duplex - 1;
    156		}
    157	} else if (!pm_runtime_suspended(netdev->dev.parent)) {
    158		u32 status = er32(STATUS);
    159
    160		if (status & E1000_STATUS_LU) {
    161			if (status & E1000_STATUS_SPEED_1000)
    162				speed = SPEED_1000;
    163			else if (status & E1000_STATUS_SPEED_100)
    164				speed = SPEED_100;
    165			else
    166				speed = SPEED_10;
    167
    168			if (status & E1000_STATUS_FD)
    169				cmd->base.duplex = DUPLEX_FULL;
    170			else
    171				cmd->base.duplex = DUPLEX_HALF;
    172		}
    173	}
    174
    175	cmd->base.speed = speed;
    176	cmd->base.autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
    177			 hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
    178
    179	/* MDI-X => 2; MDI =>1; Invalid =>0 */
    180	if ((hw->phy.media_type == e1000_media_type_copper) &&
    181	    netif_carrier_ok(netdev))
    182		cmd->base.eth_tp_mdix = hw->phy.is_mdix ?
    183			ETH_TP_MDI_X : ETH_TP_MDI;
    184	else
    185		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
    186
    187	if (hw->phy.mdix == AUTO_ALL_MODES)
    188		cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
    189	else
    190		cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix;
    191
    192	if (hw->phy.media_type != e1000_media_type_copper)
    193		cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
    194
    195	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
    196						supported);
    197	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
    198						advertising);
    199
    200	return 0;
    201}
    202
    203static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
    204{
    205	struct e1000_mac_info *mac = &adapter->hw.mac;
    206
    207	mac->autoneg = 0;
    208
    209	/* Make sure dplx is at most 1 bit and lsb of speed is not set
    210	 * for the switch() below to work
    211	 */
    212	if ((spd & 1) || (dplx & ~1))
    213		goto err_inval;
    214
     215	/* Fiber NICs only allow 1 Gbps Full duplex */
    216	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
    217	    (spd != SPEED_1000) && (dplx != DUPLEX_FULL)) {
    218		goto err_inval;
    219	}
    220
    221	switch (spd + dplx) {
    222	case SPEED_10 + DUPLEX_HALF:
    223		mac->forced_speed_duplex = ADVERTISE_10_HALF;
    224		break;
    225	case SPEED_10 + DUPLEX_FULL:
    226		mac->forced_speed_duplex = ADVERTISE_10_FULL;
    227		break;
    228	case SPEED_100 + DUPLEX_HALF:
    229		mac->forced_speed_duplex = ADVERTISE_100_HALF;
    230		break;
    231	case SPEED_100 + DUPLEX_FULL:
    232		mac->forced_speed_duplex = ADVERTISE_100_FULL;
    233		break;
    234	case SPEED_1000 + DUPLEX_FULL:
    235		if (adapter->hw.phy.media_type == e1000_media_type_copper) {
    236			mac->autoneg = 1;
    237			adapter->hw.phy.autoneg_advertised =
    238				ADVERTISE_1000_FULL;
    239		} else {
    240			mac->forced_speed_duplex = ADVERTISE_1000_FULL;
    241		}
    242		break;
    243	case SPEED_1000 + DUPLEX_HALF:	/* not supported */
    244	default:
    245		goto err_inval;
    246	}
    247
    248	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
    249	adapter->hw.phy.mdix = AUTO_ALL_MODES;
    250
    251	return 0;
    252
    253err_inval:
    254	e_err("Unsupported Speed/Duplex configuration\n");
    255	return -EINVAL;
    256}
    257
    258static int e1000_set_link_ksettings(struct net_device *netdev,
    259				    const struct ethtool_link_ksettings *cmd)
    260{
    261	struct e1000_adapter *adapter = netdev_priv(netdev);
    262	struct e1000_hw *hw = &adapter->hw;
    263	int ret_val = 0;
    264	u32 advertising;
    265
    266	ethtool_convert_link_mode_to_legacy_u32(&advertising,
    267						cmd->link_modes.advertising);
    268
    269	pm_runtime_get_sync(netdev->dev.parent);
    270
    271	/* When SoL/IDER sessions are active, autoneg/speed/duplex
    272	 * cannot be changed
    273	 */
    274	if (hw->phy.ops.check_reset_block &&
    275	    hw->phy.ops.check_reset_block(hw)) {
    276		e_err("Cannot change link characteristics when SoL/IDER is active.\n");
    277		ret_val = -EINVAL;
    278		goto out;
    279	}
    280
    281	/* MDI setting is only allowed when autoneg enabled because
    282	 * some hardware doesn't allow MDI setting when speed or
    283	 * duplex is forced.
    284	 */
    285	if (cmd->base.eth_tp_mdix_ctrl) {
    286		if (hw->phy.media_type != e1000_media_type_copper) {
    287			ret_val = -EOPNOTSUPP;
    288			goto out;
    289		}
    290
    291		if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
    292		    (cmd->base.autoneg != AUTONEG_ENABLE)) {
    293			e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
    294			ret_val = -EINVAL;
    295			goto out;
    296		}
    297	}
    298
    299	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
    300		usleep_range(1000, 2000);
    301
    302	if (cmd->base.autoneg == AUTONEG_ENABLE) {
    303		hw->mac.autoneg = 1;
    304		if (hw->phy.media_type == e1000_media_type_fiber)
    305			hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
    306			    ADVERTISED_FIBRE | ADVERTISED_Autoneg;
    307		else
    308			hw->phy.autoneg_advertised = advertising |
    309			    ADVERTISED_TP | ADVERTISED_Autoneg;
    310		advertising = hw->phy.autoneg_advertised;
    311		if (adapter->fc_autoneg)
    312			hw->fc.requested_mode = e1000_fc_default;
    313	} else {
    314		u32 speed = cmd->base.speed;
    315		/* calling this overrides forced MDI setting */
    316		if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) {
    317			ret_val = -EINVAL;
    318			goto out;
    319		}
    320	}
    321
    322	/* MDI-X => 2; MDI => 1; Auto => 3 */
    323	if (cmd->base.eth_tp_mdix_ctrl) {
    324		/* fix up the value for auto (3 => 0) as zero is mapped
    325		 * internally to auto
    326		 */
    327		if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
    328			hw->phy.mdix = AUTO_ALL_MODES;
    329		else
    330			hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl;
    331	}
    332
    333	/* reset the link */
    334	if (netif_running(adapter->netdev)) {
    335		e1000e_down(adapter, true);
    336		e1000e_up(adapter);
    337	} else {
    338		e1000e_reset(adapter);
    339	}
    340
    341out:
    342	pm_runtime_put_sync(netdev->dev.parent);
    343	clear_bit(__E1000_RESETTING, &adapter->state);
    344	return ret_val;
    345}
    346
    347static void e1000_get_pauseparam(struct net_device *netdev,
    348				 struct ethtool_pauseparam *pause)
    349{
    350	struct e1000_adapter *adapter = netdev_priv(netdev);
    351	struct e1000_hw *hw = &adapter->hw;
    352
    353	pause->autoneg =
    354	    (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
    355
    356	if (hw->fc.current_mode == e1000_fc_rx_pause) {
    357		pause->rx_pause = 1;
    358	} else if (hw->fc.current_mode == e1000_fc_tx_pause) {
    359		pause->tx_pause = 1;
    360	} else if (hw->fc.current_mode == e1000_fc_full) {
    361		pause->rx_pause = 1;
    362		pause->tx_pause = 1;
    363	}
    364}
    365
    366static int e1000_set_pauseparam(struct net_device *netdev,
    367				struct ethtool_pauseparam *pause)
    368{
    369	struct e1000_adapter *adapter = netdev_priv(netdev);
    370	struct e1000_hw *hw = &adapter->hw;
    371	int retval = 0;
    372
    373	adapter->fc_autoneg = pause->autoneg;
    374
    375	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
    376		usleep_range(1000, 2000);
    377
    378	pm_runtime_get_sync(netdev->dev.parent);
    379
    380	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
    381		hw->fc.requested_mode = e1000_fc_default;
    382		if (netif_running(adapter->netdev)) {
    383			e1000e_down(adapter, true);
    384			e1000e_up(adapter);
    385		} else {
    386			e1000e_reset(adapter);
    387		}
    388	} else {
    389		if (pause->rx_pause && pause->tx_pause)
    390			hw->fc.requested_mode = e1000_fc_full;
    391		else if (pause->rx_pause && !pause->tx_pause)
    392			hw->fc.requested_mode = e1000_fc_rx_pause;
    393		else if (!pause->rx_pause && pause->tx_pause)
    394			hw->fc.requested_mode = e1000_fc_tx_pause;
    395		else if (!pause->rx_pause && !pause->tx_pause)
    396			hw->fc.requested_mode = e1000_fc_none;
    397
    398		hw->fc.current_mode = hw->fc.requested_mode;
    399
    400		if (hw->phy.media_type == e1000_media_type_fiber) {
    401			retval = hw->mac.ops.setup_link(hw);
    402			/* implicit goto out */
    403		} else {
    404			retval = e1000e_force_mac_fc(hw);
    405			if (retval)
    406				goto out;
    407			e1000e_set_fc_watermarks(hw);
    408		}
    409	}
    410
    411out:
    412	pm_runtime_put_sync(netdev->dev.parent);
    413	clear_bit(__E1000_RESETTING, &adapter->state);
    414	return retval;
    415}
    416
    417static u32 e1000_get_msglevel(struct net_device *netdev)
    418{
    419	struct e1000_adapter *adapter = netdev_priv(netdev);
    420	return adapter->msg_enable;
    421}
    422
    423static void e1000_set_msglevel(struct net_device *netdev, u32 data)
    424{
    425	struct e1000_adapter *adapter = netdev_priv(netdev);
    426	adapter->msg_enable = data;
    427}
    428
    429static int e1000_get_regs_len(struct net_device __always_unused *netdev)
    430{
    431#define E1000_REGS_LEN 32	/* overestimate */
    432	return E1000_REGS_LEN * sizeof(u32);
    433}
    434
    435static void e1000_get_regs(struct net_device *netdev,
    436			   struct ethtool_regs *regs, void *p)
    437{
    438	struct e1000_adapter *adapter = netdev_priv(netdev);
    439	struct e1000_hw *hw = &adapter->hw;
    440	u32 *regs_buff = p;
    441	u16 phy_data;
    442
    443	pm_runtime_get_sync(netdev->dev.parent);
    444
    445	memset(p, 0, E1000_REGS_LEN * sizeof(u32));
    446
    447	regs->version = (1u << 24) |
    448			(adapter->pdev->revision << 16) |
    449			adapter->pdev->device;
    450
    451	regs_buff[0] = er32(CTRL);
    452	regs_buff[1] = er32(STATUS);
    453
    454	regs_buff[2] = er32(RCTL);
    455	regs_buff[3] = er32(RDLEN(0));
    456	regs_buff[4] = er32(RDH(0));
    457	regs_buff[5] = er32(RDT(0));
    458	regs_buff[6] = er32(RDTR);
    459
    460	regs_buff[7] = er32(TCTL);
    461	regs_buff[8] = er32(TDLEN(0));
    462	regs_buff[9] = er32(TDH(0));
    463	regs_buff[10] = er32(TDT(0));
    464	regs_buff[11] = er32(TIDV);
    465
    466	regs_buff[12] = adapter->hw.phy.type;	/* PHY type (IGP=1, M88=0) */
    467
    468	/* ethtool doesn't use anything past this point, so all this
    469	 * code is likely legacy junk for apps that may or may not exist
    470	 */
    471	if (hw->phy.type == e1000_phy_m88) {
    472		e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
    473		regs_buff[13] = (u32)phy_data; /* cable length */
    474		regs_buff[14] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
    475		regs_buff[15] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
    476		regs_buff[16] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
    477		e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
    478		regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
    479		regs_buff[18] = regs_buff[13]; /* cable polarity */
    480		regs_buff[19] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
    481		regs_buff[20] = regs_buff[17]; /* polarity correction */
    482		/* phy receive errors */
    483		regs_buff[22] = adapter->phy_stats.receive_errors;
    484		regs_buff[23] = regs_buff[13]; /* mdix mode */
    485	}
    486	regs_buff[21] = 0;	/* was idle_errors */
    487	e1e_rphy(hw, MII_STAT1000, &phy_data);
    488	regs_buff[24] = (u32)phy_data;	/* phy local receiver status */
    489	regs_buff[25] = regs_buff[24];	/* phy remote receiver status */
    490
    491	pm_runtime_put_sync(netdev->dev.parent);
    492}
    493
    494static int e1000_get_eeprom_len(struct net_device *netdev)
    495{
    496	struct e1000_adapter *adapter = netdev_priv(netdev);
    497	return adapter->hw.nvm.word_size * 2;
    498}
    499
    500static int e1000_get_eeprom(struct net_device *netdev,
    501			    struct ethtool_eeprom *eeprom, u8 *bytes)
    502{
    503	struct e1000_adapter *adapter = netdev_priv(netdev);
    504	struct e1000_hw *hw = &adapter->hw;
    505	u16 *eeprom_buff;
    506	int first_word;
    507	int last_word;
    508	int ret_val = 0;
    509	u16 i;
    510
    511	if (eeprom->len == 0)
    512		return -EINVAL;
    513
    514	eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16);
    515
    516	first_word = eeprom->offset >> 1;
    517	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
    518
    519	eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
    520				    GFP_KERNEL);
    521	if (!eeprom_buff)
    522		return -ENOMEM;
    523
    524	pm_runtime_get_sync(netdev->dev.parent);
    525
    526	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
    527		ret_val = e1000_read_nvm(hw, first_word,
    528					 last_word - first_word + 1,
    529					 eeprom_buff);
    530	} else {
    531		for (i = 0; i < last_word - first_word + 1; i++) {
    532			ret_val = e1000_read_nvm(hw, first_word + i, 1,
    533						 &eeprom_buff[i]);
    534			if (ret_val)
    535				break;
    536		}
    537	}
    538
    539	pm_runtime_put_sync(netdev->dev.parent);
    540
    541	if (ret_val) {
    542		/* a read error occurred, throw away the result */
    543		memset(eeprom_buff, 0xff, sizeof(u16) *
    544		       (last_word - first_word + 1));
    545	} else {
    546		/* Device's eeprom is always little-endian, word addressable */
    547		for (i = 0; i < last_word - first_word + 1; i++)
    548			le16_to_cpus(&eeprom_buff[i]);
    549	}
    550
    551	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
    552	kfree(eeprom_buff);
    553
    554	return ret_val;
    555}
    556
    557static int e1000_set_eeprom(struct net_device *netdev,
    558			    struct ethtool_eeprom *eeprom, u8 *bytes)
    559{
    560	struct e1000_adapter *adapter = netdev_priv(netdev);
    561	struct e1000_hw *hw = &adapter->hw;
    562	u16 *eeprom_buff;
    563	void *ptr;
    564	int max_len;
    565	int first_word;
    566	int last_word;
    567	int ret_val = 0;
    568	u16 i;
    569
    570	if (eeprom->len == 0)
    571		return -EOPNOTSUPP;
    572
    573	if (eeprom->magic !=
    574	    (adapter->pdev->vendor | (adapter->pdev->device << 16)))
    575		return -EFAULT;
    576
    577	if (adapter->flags & FLAG_READ_ONLY_NVM)
    578		return -EINVAL;
    579
    580	max_len = hw->nvm.word_size * 2;
    581
    582	first_word = eeprom->offset >> 1;
    583	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
    584	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
    585	if (!eeprom_buff)
    586		return -ENOMEM;
    587
    588	ptr = (void *)eeprom_buff;
    589
    590	pm_runtime_get_sync(netdev->dev.parent);
    591
    592	if (eeprom->offset & 1) {
    593		/* need read/modify/write of first changed EEPROM word */
    594		/* only the second byte of the word is being modified */
    595		ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]);
    596		ptr++;
    597	}
    598	if (((eeprom->offset + eeprom->len) & 1) && (!ret_val))
    599		/* need read/modify/write of last changed EEPROM word */
    600		/* only the first byte of the word is being modified */
    601		ret_val = e1000_read_nvm(hw, last_word, 1,
    602					 &eeprom_buff[last_word - first_word]);
    603
    604	if (ret_val)
    605		goto out;
    606
    607	/* Device's eeprom is always little-endian, word addressable */
    608	for (i = 0; i < last_word - first_word + 1; i++)
    609		le16_to_cpus(&eeprom_buff[i]);
    610
    611	memcpy(ptr, bytes, eeprom->len);
    612
    613	for (i = 0; i < last_word - first_word + 1; i++)
    614		cpu_to_le16s(&eeprom_buff[i]);
    615
    616	ret_val = e1000_write_nvm(hw, first_word,
    617				  last_word - first_word + 1, eeprom_buff);
    618
    619	if (ret_val)
    620		goto out;
    621
    622	/* Update the checksum over the first part of the EEPROM if needed
    623	 * and flush shadow RAM for applicable controllers
    624	 */
    625	if ((first_word <= NVM_CHECKSUM_REG) ||
    626	    (hw->mac.type == e1000_82583) ||
    627	    (hw->mac.type == e1000_82574) ||
    628	    (hw->mac.type == e1000_82573))
    629		ret_val = e1000e_update_nvm_checksum(hw);
    630
    631out:
    632	pm_runtime_put_sync(netdev->dev.parent);
    633	kfree(eeprom_buff);
    634	return ret_val;
    635}
    636
    637static void e1000_get_drvinfo(struct net_device *netdev,
    638			      struct ethtool_drvinfo *drvinfo)
    639{
    640	struct e1000_adapter *adapter = netdev_priv(netdev);
    641
    642	strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
    643
    644	/* EEPROM image version # is reported as firmware version # for
    645	 * PCI-E controllers
    646	 */
    647	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
    648		 "%d.%d-%d",
    649		 (adapter->eeprom_vers & 0xF000) >> 12,
    650		 (adapter->eeprom_vers & 0x0FF0) >> 4,
    651		 (adapter->eeprom_vers & 0x000F));
    652
    653	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
    654		sizeof(drvinfo->bus_info));
    655}
    656
    657static void e1000_get_ringparam(struct net_device *netdev,
    658				struct ethtool_ringparam *ring,
    659				struct kernel_ethtool_ringparam *kernel_ring,
    660				struct netlink_ext_ack *extack)
    661{
    662	struct e1000_adapter *adapter = netdev_priv(netdev);
    663
    664	ring->rx_max_pending = E1000_MAX_RXD;
    665	ring->tx_max_pending = E1000_MAX_TXD;
    666	ring->rx_pending = adapter->rx_ring_count;
    667	ring->tx_pending = adapter->tx_ring_count;
    668}
    669
    670static int e1000_set_ringparam(struct net_device *netdev,
    671			       struct ethtool_ringparam *ring,
    672			       struct kernel_ethtool_ringparam *kernel_ring,
    673			       struct netlink_ext_ack *extack)
    674{
    675	struct e1000_adapter *adapter = netdev_priv(netdev);
    676	struct e1000_ring *temp_tx = NULL, *temp_rx = NULL;
    677	int err = 0, size = sizeof(struct e1000_ring);
    678	bool set_tx = false, set_rx = false;
    679	u16 new_rx_count, new_tx_count;
    680
    681	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
    682		return -EINVAL;
    683
    684	new_rx_count = clamp_t(u32, ring->rx_pending, E1000_MIN_RXD,
    685			       E1000_MAX_RXD);
    686	new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
    687
    688	new_tx_count = clamp_t(u32, ring->tx_pending, E1000_MIN_TXD,
    689			       E1000_MAX_TXD);
    690	new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
    691
    692	if ((new_tx_count == adapter->tx_ring_count) &&
    693	    (new_rx_count == adapter->rx_ring_count))
    694		/* nothing to do */
    695		return 0;
    696
    697	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
    698		usleep_range(1000, 2000);
    699
    700	if (!netif_running(adapter->netdev)) {
    701		/* Set counts now and allocate resources during open() */
    702		adapter->tx_ring->count = new_tx_count;
    703		adapter->rx_ring->count = new_rx_count;
    704		adapter->tx_ring_count = new_tx_count;
    705		adapter->rx_ring_count = new_rx_count;
    706		goto clear_reset;
    707	}
    708
    709	set_tx = (new_tx_count != adapter->tx_ring_count);
    710	set_rx = (new_rx_count != adapter->rx_ring_count);
    711
    712	/* Allocate temporary storage for ring updates */
    713	if (set_tx) {
    714		temp_tx = vmalloc(size);
    715		if (!temp_tx) {
    716			err = -ENOMEM;
    717			goto free_temp;
    718		}
    719	}
    720	if (set_rx) {
    721		temp_rx = vmalloc(size);
    722		if (!temp_rx) {
    723			err = -ENOMEM;
    724			goto free_temp;
    725		}
    726	}
    727
    728	pm_runtime_get_sync(netdev->dev.parent);
    729
    730	e1000e_down(adapter, true);
    731
    732	/* We can't just free everything and then setup again, because the
    733	 * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring
    734	 * structs.  First, attempt to allocate new resources...
    735	 */
    736	if (set_tx) {
    737		memcpy(temp_tx, adapter->tx_ring, size);
    738		temp_tx->count = new_tx_count;
    739		err = e1000e_setup_tx_resources(temp_tx);
    740		if (err)
    741			goto err_setup;
    742	}
    743	if (set_rx) {
    744		memcpy(temp_rx, adapter->rx_ring, size);
    745		temp_rx->count = new_rx_count;
    746		err = e1000e_setup_rx_resources(temp_rx);
    747		if (err)
    748			goto err_setup_rx;
    749	}
    750
    751	/* ...then free the old resources and copy back any new ring data */
    752	if (set_tx) {
    753		e1000e_free_tx_resources(adapter->tx_ring);
    754		memcpy(adapter->tx_ring, temp_tx, size);
    755		adapter->tx_ring_count = new_tx_count;
    756	}
    757	if (set_rx) {
    758		e1000e_free_rx_resources(adapter->rx_ring);
    759		memcpy(adapter->rx_ring, temp_rx, size);
    760		adapter->rx_ring_count = new_rx_count;
    761	}
    762
    763err_setup_rx:
    764	if (err && set_tx)
    765		e1000e_free_tx_resources(temp_tx);
    766err_setup:
    767	e1000e_up(adapter);
    768	pm_runtime_put_sync(netdev->dev.parent);
    769free_temp:
    770	vfree(temp_tx);
    771	vfree(temp_rx);
    772clear_reset:
    773	clear_bit(__E1000_RESETTING, &adapter->state);
    774	return err;
    775}
    776
    777static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
    778			     int reg, int offset, u32 mask, u32 write)
    779{
    780	u32 pat, val;
    781	static const u32 test[] = {
    782		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
    783	};
    784	for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
    785		E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
    786				      (test[pat] & write));
    787		val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
    788		if (val != (test[pat] & write & mask)) {
    789			e_err("pattern test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
    790			      reg + (offset << 2), val,
    791			      (test[pat] & write & mask));
    792			*data = reg;
    793			return true;
    794		}
    795	}
    796	return false;
    797}
    798
    799static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
    800			      int reg, u32 mask, u32 write)
    801{
    802	u32 val;
    803
    804	__ew32(&adapter->hw, reg, write & mask);
    805	val = __er32(&adapter->hw, reg);
    806	if ((write & mask) != (val & mask)) {
    807		e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
    808		      reg, (val & mask), (write & mask));
    809		*data = reg;
    810		return true;
    811	}
    812	return false;
    813}
    814
    815#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write)                       \
    816	do {                                                                   \
    817		if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
    818			return 1;                                              \
    819	} while (0)
    820#define REG_PATTERN_TEST(reg, mask, write)                                     \
    821	REG_PATTERN_TEST_ARRAY(reg, 0, mask, write)
    822
    823#define REG_SET_AND_CHECK(reg, mask, write)                                    \
    824	do {                                                                   \
    825		if (reg_set_and_check(adapter, data, reg, mask, write))        \
    826			return 1;                                              \
    827	} while (0)
    828
    829static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
    830{
    831	struct e1000_hw *hw = &adapter->hw;
    832	struct e1000_mac_info *mac = &adapter->hw.mac;
    833	u32 value;
    834	u32 before;
    835	u32 after;
    836	u32 i;
    837	u32 toggle;
    838	u32 mask;
    839	u32 wlock_mac = 0;
    840
    841	/* The status register is Read Only, so a write should fail.
    842	 * Some bits that get toggled are ignored.  There are several bits
    843	 * on newer hardware that are r/w.
    844	 */
    845	switch (mac->type) {
    846	case e1000_82571:
    847	case e1000_82572:
    848	case e1000_80003es2lan:
    849		toggle = 0x7FFFF3FF;
    850		break;
    851	default:
    852		toggle = 0x7FFFF033;
    853		break;
    854	}
    855
    856	before = er32(STATUS);
    857	value = (er32(STATUS) & toggle);
    858	ew32(STATUS, toggle);
    859	after = er32(STATUS) & toggle;
    860	if (value != after) {
    861		e_err("failed STATUS register test got: 0x%08X expected: 0x%08X\n",
    862		      after, value);
    863		*data = 1;
    864		return 1;
    865	}
    866	/* restore previous status */
    867	ew32(STATUS, before);
    868
    869	if (!(adapter->flags & FLAG_IS_ICH)) {
    870		REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
    871		REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF);
    872		REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF);
    873		REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF);
    874	}
    875
    876	REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
    877	REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
    878	REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF);
    879	REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF);
    880	REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF);
    881	REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
    882	REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
    883	REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
    884	REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
    885	REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF);
    886
    887	REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);
    888
    889	before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE);
    890	REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB);
    891	REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);
    892
    893	REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
    894	REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
    895	if (!(adapter->flags & FLAG_IS_ICH))
    896		REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
    897	REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
    898	REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
    899	mask = 0x8003FFFF;
    900	switch (mac->type) {
    901	case e1000_ich10lan:
    902	case e1000_pchlan:
    903	case e1000_pch2lan:
    904	case e1000_pch_lpt:
    905	case e1000_pch_spt:
    906	case e1000_pch_cnp:
    907	case e1000_pch_tgp:
    908	case e1000_pch_adp:
    909	case e1000_pch_mtp:
    910	case e1000_pch_lnp:
    911		mask |= BIT(18);
    912		break;
    913	default:
    914		break;
    915	}
    916
    917	if (mac->type >= e1000_pch_lpt)
    918		wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
    919		    E1000_FWSM_WLOCK_MAC_SHIFT;
    920
    921	for (i = 0; i < mac->rar_entry_count; i++) {
    922		if (mac->type >= e1000_pch_lpt) {
    923			/* Cannot test write-protected SHRAL[n] registers */
    924			if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
    925				continue;
    926
    927			/* SHRAH[9] different than the others */
    928			if (i == 10)
    929				mask |= BIT(30);
    930			else
    931				mask &= ~BIT(30);
    932		}
    933		if (mac->type == e1000_pch2lan) {
    934			/* SHRAH[0,1,2] different than previous */
    935			if (i == 1)
    936				mask &= 0xFFF4FFFF;
    937			/* SHRAH[3] different than SHRAH[0,1,2] */
    938			if (i == 4)
    939				mask |= BIT(30);
    940			/* RAR[1-6] owned by management engine - skipping */
    941			if (i > 0)
    942				i += 6;
    943		}
    944
    945		REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
    946				       0xFFFFFFFF);
    947		/* reset index to actual value */
    948		if ((mac->type == e1000_pch2lan) && (i > 6))
    949			i -= 6;
    950	}
    951
    952	for (i = 0; i < mac->mta_reg_count; i++)
    953		REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
    954
    955	*data = 0;
    956
    957	return 0;
    958}
    959
    960static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
    961{
    962	u16 temp;
    963	u16 checksum = 0;
    964	u16 i;
    965
    966	*data = 0;
    967	/* Read and add up the contents of the EEPROM */
    968	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
    969		if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
    970			*data = 1;
    971			return *data;
    972		}
    973		checksum += temp;
    974	}
    975
    976	/* If Checksum is not Correct return error else test passed */
    977	if ((checksum != (u16)NVM_SUM) && !(*data))
    978		*data = 2;
    979
    980	return *data;
    981}
    982
    983static irqreturn_t e1000_test_intr(int __always_unused irq, void *data)
    984{
    985	struct net_device *netdev = (struct net_device *)data;
    986	struct e1000_adapter *adapter = netdev_priv(netdev);
    987	struct e1000_hw *hw = &adapter->hw;
    988
    989	adapter->test_icr |= er32(ICR);
    990
    991	return IRQ_HANDLED;
    992}
    993
    994static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
    995{
    996	struct net_device *netdev = adapter->netdev;
    997	struct e1000_hw *hw = &adapter->hw;
    998	u32 mask;
    999	u32 shared_int = 1;
   1000	u32 irq = adapter->pdev->irq;
   1001	int i;
   1002	int ret_val = 0;
   1003	int int_mode = E1000E_INT_MODE_LEGACY;
   1004
   1005	*data = 0;
   1006
   1007	/* NOTE: we don't test MSI/MSI-X interrupts here, yet */
   1008	if (adapter->int_mode == E1000E_INT_MODE_MSIX) {
   1009		int_mode = adapter->int_mode;
   1010		e1000e_reset_interrupt_capability(adapter);
   1011		adapter->int_mode = E1000E_INT_MODE_LEGACY;
   1012		e1000e_set_interrupt_capability(adapter);
   1013	}
   1014	/* Hook up test interrupt handler just for this test */
   1015	if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
   1016			 netdev)) {
   1017		shared_int = 0;
   1018	} else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, netdev->name,
   1019			       netdev)) {
   1020		*data = 1;
   1021		ret_val = -1;
   1022		goto out;
   1023	}
   1024	e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
   1025
   1026	/* Disable all the interrupts */
   1027	ew32(IMC, 0xFFFFFFFF);
   1028	e1e_flush();
   1029	usleep_range(10000, 11000);
   1030
   1031	/* Test each interrupt */
   1032	for (i = 0; i < 10; i++) {
   1033		/* Interrupt to test */
   1034		mask = BIT(i);
   1035
   1036		if (adapter->flags & FLAG_IS_ICH) {
   1037			switch (mask) {
   1038			case E1000_ICR_RXSEQ:
   1039				continue;
   1040			case 0x00000100:
   1041				if (adapter->hw.mac.type == e1000_ich8lan ||
   1042				    adapter->hw.mac.type == e1000_ich9lan)
   1043					continue;
   1044				break;
   1045			default:
   1046				break;
   1047			}
   1048		}
   1049
   1050		if (!shared_int) {
   1051			/* Disable the interrupt to be reported in
   1052			 * the cause register and then force the same
   1053			 * interrupt and see if one gets posted.  If
   1054			 * an interrupt was posted to the bus, the
   1055			 * test failed.
   1056			 */
   1057			adapter->test_icr = 0;
   1058			ew32(IMC, mask);
   1059			ew32(ICS, mask);
   1060			e1e_flush();
   1061			usleep_range(10000, 11000);
   1062
   1063			if (adapter->test_icr & mask) {
   1064				*data = 3;
   1065				break;
   1066			}
   1067		}
   1068
   1069		/* Enable the interrupt to be reported in
   1070		 * the cause register and then force the same
   1071		 * interrupt and see if one gets posted.  If
   1072		 * an interrupt was not posted to the bus, the
   1073		 * test failed.
   1074		 */
   1075		adapter->test_icr = 0;
   1076		ew32(IMS, mask);
   1077		ew32(ICS, mask);
   1078		e1e_flush();
   1079		usleep_range(10000, 11000);
   1080
   1081		if (!(adapter->test_icr & mask)) {
   1082			*data = 4;
   1083			break;
   1084		}
   1085
   1086		if (!shared_int) {
   1087			/* Disable the other interrupts to be reported in
   1088			 * the cause register and then force the other
   1089			 * interrupts and see if any get posted.  If
   1090			 * an interrupt was posted to the bus, the
   1091			 * test failed.
   1092			 */
   1093			adapter->test_icr = 0;
   1094			ew32(IMC, ~mask & 0x00007FFF);
   1095			ew32(ICS, ~mask & 0x00007FFF);
   1096			e1e_flush();
   1097			usleep_range(10000, 11000);
   1098
   1099			if (adapter->test_icr) {
   1100				*data = 5;
   1101				break;
   1102			}
   1103		}
   1104	}
   1105
   1106	/* Disable all the interrupts */
   1107	ew32(IMC, 0xFFFFFFFF);
   1108	e1e_flush();
   1109	usleep_range(10000, 11000);
   1110
   1111	/* Unhook test interrupt handler */
   1112	free_irq(irq, netdev);
   1113
   1114out:
   1115	if (int_mode == E1000E_INT_MODE_MSIX) {
   1116		e1000e_reset_interrupt_capability(adapter);
   1117		adapter->int_mode = int_mode;
   1118		e1000e_set_interrupt_capability(adapter);
   1119	}
   1120
   1121	return ret_val;
   1122}
   1123
   1124static void e1000_free_desc_rings(struct e1000_adapter *adapter)
   1125{
   1126	struct e1000_ring *tx_ring = &adapter->test_tx_ring;
   1127	struct e1000_ring *rx_ring = &adapter->test_rx_ring;
   1128	struct pci_dev *pdev = adapter->pdev;
   1129	struct e1000_buffer *buffer_info;
   1130	int i;
   1131
   1132	if (tx_ring->desc && tx_ring->buffer_info) {
   1133		for (i = 0; i < tx_ring->count; i++) {
   1134			buffer_info = &tx_ring->buffer_info[i];
   1135
   1136			if (buffer_info->dma)
   1137				dma_unmap_single(&pdev->dev,
   1138						 buffer_info->dma,
   1139						 buffer_info->length,
   1140						 DMA_TO_DEVICE);
   1141			dev_kfree_skb(buffer_info->skb);
   1142		}
   1143	}
   1144
   1145	if (rx_ring->desc && rx_ring->buffer_info) {
   1146		for (i = 0; i < rx_ring->count; i++) {
   1147			buffer_info = &rx_ring->buffer_info[i];
   1148
   1149			if (buffer_info->dma)
   1150				dma_unmap_single(&pdev->dev,
   1151						 buffer_info->dma,
   1152						 2048, DMA_FROM_DEVICE);
   1153			dev_kfree_skb(buffer_info->skb);
   1154		}
   1155	}
   1156
   1157	if (tx_ring->desc) {
   1158		dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
   1159				  tx_ring->dma);
   1160		tx_ring->desc = NULL;
   1161	}
   1162	if (rx_ring->desc) {
   1163		dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
   1164				  rx_ring->dma);
   1165		rx_ring->desc = NULL;
   1166	}
   1167
   1168	kfree(tx_ring->buffer_info);
   1169	tx_ring->buffer_info = NULL;
   1170	kfree(rx_ring->buffer_info);
   1171	rx_ring->buffer_info = NULL;
   1172}
   1173
   1174static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
   1175{
   1176	struct e1000_ring *tx_ring = &adapter->test_tx_ring;
   1177	struct e1000_ring *rx_ring = &adapter->test_rx_ring;
   1178	struct pci_dev *pdev = adapter->pdev;
   1179	struct e1000_hw *hw = &adapter->hw;
   1180	u32 rctl;
   1181	int i;
   1182	int ret_val;
   1183
   1184	/* Setup Tx descriptor ring and Tx buffers */
   1185
   1186	if (!tx_ring->count)
   1187		tx_ring->count = E1000_DEFAULT_TXD;
   1188
   1189	tx_ring->buffer_info = kcalloc(tx_ring->count,
   1190				       sizeof(struct e1000_buffer), GFP_KERNEL);
   1191	if (!tx_ring->buffer_info) {
   1192		ret_val = 1;
   1193		goto err_nomem;
   1194	}
   1195
   1196	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
   1197	tx_ring->size = ALIGN(tx_ring->size, 4096);
   1198	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
   1199					   &tx_ring->dma, GFP_KERNEL);
   1200	if (!tx_ring->desc) {
   1201		ret_val = 2;
   1202		goto err_nomem;
   1203	}
   1204	tx_ring->next_to_use = 0;
   1205	tx_ring->next_to_clean = 0;
   1206
   1207	ew32(TDBAL(0), ((u64)tx_ring->dma & 0x00000000FFFFFFFF));
   1208	ew32(TDBAH(0), ((u64)tx_ring->dma >> 32));
   1209	ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc));
   1210	ew32(TDH(0), 0);
   1211	ew32(TDT(0), 0);
   1212	ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
   1213	     E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
   1214	     E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
   1215
   1216	for (i = 0; i < tx_ring->count; i++) {
   1217		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
   1218		struct sk_buff *skb;
   1219		unsigned int skb_size = 1024;
   1220
   1221		skb = alloc_skb(skb_size, GFP_KERNEL);
   1222		if (!skb) {
   1223			ret_val = 3;
   1224			goto err_nomem;
   1225		}
   1226		skb_put(skb, skb_size);
   1227		tx_ring->buffer_info[i].skb = skb;
   1228		tx_ring->buffer_info[i].length = skb->len;
   1229		tx_ring->buffer_info[i].dma =
   1230		    dma_map_single(&pdev->dev, skb->data, skb->len,
   1231				   DMA_TO_DEVICE);
   1232		if (dma_mapping_error(&pdev->dev,
   1233				      tx_ring->buffer_info[i].dma)) {
   1234			ret_val = 4;
   1235			goto err_nomem;
   1236		}
   1237		tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
   1238		tx_desc->lower.data = cpu_to_le32(skb->len);
   1239		tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
   1240						   E1000_TXD_CMD_IFCS |
   1241						   E1000_TXD_CMD_RS);
   1242		tx_desc->upper.data = 0;
   1243	}
   1244
   1245	/* Setup Rx descriptor ring and Rx buffers */
   1246
   1247	if (!rx_ring->count)
   1248		rx_ring->count = E1000_DEFAULT_RXD;
   1249
   1250	rx_ring->buffer_info = kcalloc(rx_ring->count,
   1251				       sizeof(struct e1000_buffer), GFP_KERNEL);
   1252	if (!rx_ring->buffer_info) {
   1253		ret_val = 5;
   1254		goto err_nomem;
   1255	}
   1256
   1257	rx_ring->size = rx_ring->count * sizeof(union e1000_rx_desc_extended);
   1258	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
   1259					   &rx_ring->dma, GFP_KERNEL);
   1260	if (!rx_ring->desc) {
   1261		ret_val = 6;
   1262		goto err_nomem;
   1263	}
   1264	rx_ring->next_to_use = 0;
   1265	rx_ring->next_to_clean = 0;
   1266
   1267	rctl = er32(RCTL);
   1268	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
   1269		ew32(RCTL, rctl & ~E1000_RCTL_EN);
   1270	ew32(RDBAL(0), ((u64)rx_ring->dma & 0xFFFFFFFF));
   1271	ew32(RDBAH(0), ((u64)rx_ring->dma >> 32));
   1272	ew32(RDLEN(0), rx_ring->size);
   1273	ew32(RDH(0), 0);
   1274	ew32(RDT(0), 0);
   1275	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
   1276	    E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
   1277	    E1000_RCTL_SBP | E1000_RCTL_SECRC |
   1278	    E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
   1279	    (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
   1280	ew32(RCTL, rctl);
   1281
   1282	for (i = 0; i < rx_ring->count; i++) {
   1283		union e1000_rx_desc_extended *rx_desc;
   1284		struct sk_buff *skb;
   1285
   1286		skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL);
   1287		if (!skb) {
   1288			ret_val = 7;
   1289			goto err_nomem;
   1290		}
   1291		skb_reserve(skb, NET_IP_ALIGN);
   1292		rx_ring->buffer_info[i].skb = skb;
   1293		rx_ring->buffer_info[i].dma =
   1294		    dma_map_single(&pdev->dev, skb->data, 2048,
   1295				   DMA_FROM_DEVICE);
   1296		if (dma_mapping_error(&pdev->dev,
   1297				      rx_ring->buffer_info[i].dma)) {
   1298			ret_val = 8;
   1299			goto err_nomem;
   1300		}
   1301		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
   1302		rx_desc->read.buffer_addr =
   1303		    cpu_to_le64(rx_ring->buffer_info[i].dma);
   1304		memset(skb->data, 0x00, skb->len);
   1305	}
   1306
   1307	return 0;
   1308
   1309err_nomem:
   1310	e1000_free_desc_rings(adapter);
   1311	return ret_val;
   1312}
   1313
   1314static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
   1315{
   1316	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
   1317	e1e_wphy(&adapter->hw, 29, 0x001F);
   1318	e1e_wphy(&adapter->hw, 30, 0x8FFC);
   1319	e1e_wphy(&adapter->hw, 29, 0x001A);
   1320	e1e_wphy(&adapter->hw, 30, 0x8FF0);
   1321}
   1322
   1323static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
   1324{
   1325	struct e1000_hw *hw = &adapter->hw;
   1326	u32 ctrl_reg = 0;
   1327	u16 phy_reg = 0;
   1328	s32 ret_val = 0;
   1329
   1330	hw->mac.autoneg = 0;
   1331
   1332	if (hw->phy.type == e1000_phy_ife) {
   1333		/* force 100, set loopback */
   1334		e1e_wphy(hw, MII_BMCR, 0x6100);
   1335
   1336		/* Now set up the MAC to the same speed/duplex as the PHY. */
   1337		ctrl_reg = er32(CTRL);
   1338		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
   1339		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
   1340			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
   1341			     E1000_CTRL_SPD_100 |/* Force Speed to 100 */
   1342			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
   1343
   1344		ew32(CTRL, ctrl_reg);
   1345		e1e_flush();
   1346		usleep_range(500, 1000);
   1347
   1348		return 0;
   1349	}
   1350
   1351	/* Specific PHY configuration for loopback */
   1352	switch (hw->phy.type) {
   1353	case e1000_phy_m88:
   1354		/* Auto-MDI/MDIX Off */
   1355		e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
   1356		/* reset to update Auto-MDI/MDIX */
   1357		e1e_wphy(hw, MII_BMCR, 0x9140);
   1358		/* autoneg off */
   1359		e1e_wphy(hw, MII_BMCR, 0x8140);
   1360		break;
   1361	case e1000_phy_gg82563:
   1362		e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
   1363		break;
   1364	case e1000_phy_bm:
   1365		/* Set Default MAC Interface speed to 1GB */
   1366		e1e_rphy(hw, PHY_REG(2, 21), &phy_reg);
   1367		phy_reg &= ~0x0007;
   1368		phy_reg |= 0x006;
   1369		e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
   1370		/* Assert SW reset for above settings to take effect */
   1371		hw->phy.ops.commit(hw);
   1372		usleep_range(1000, 2000);
   1373		/* Force Full Duplex */
   1374		e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
   1375		e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C);
   1376		/* Set Link Up (in force link) */
   1377		e1e_rphy(hw, PHY_REG(776, 16), &phy_reg);
   1378		e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040);
   1379		/* Force Link */
   1380		e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
   1381		e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040);
   1382		/* Set Early Link Enable */
   1383		e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
   1384		e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400);
   1385		break;
   1386	case e1000_phy_82577:
   1387	case e1000_phy_82578:
   1388		/* Workaround: K1 must be disabled for stable 1Gbps operation */
   1389		ret_val = hw->phy.ops.acquire(hw);
   1390		if (ret_val) {
   1391			e_err("Cannot setup 1Gbps loopback.\n");
   1392			return ret_val;
   1393		}
   1394		e1000_configure_k1_ich8lan(hw, false);
   1395		hw->phy.ops.release(hw);
   1396		break;
   1397	case e1000_phy_82579:
   1398		/* Disable PHY energy detect power down */
   1399		e1e_rphy(hw, PHY_REG(0, 21), &phy_reg);
   1400		e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~BIT(3));
   1401		/* Disable full chip energy detect */
   1402		e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
   1403		e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
   1404		/* Enable loopback on the PHY */
   1405		e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001);
   1406		break;
   1407	default:
   1408		break;
   1409	}
   1410
   1411	/* force 1000, set loopback */
   1412	e1e_wphy(hw, MII_BMCR, 0x4140);
   1413	msleep(250);
   1414
   1415	/* Now set up the MAC to the same speed/duplex as the PHY. */
   1416	ctrl_reg = er32(CTRL);
   1417	ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
   1418	ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
   1419		     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
   1420		     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
   1421		     E1000_CTRL_FD);	 /* Force Duplex to FULL */
   1422
   1423	if (adapter->flags & FLAG_IS_ICH)
   1424		ctrl_reg |= E1000_CTRL_SLU;	/* Set Link Up */
   1425
   1426	if (hw->phy.media_type == e1000_media_type_copper &&
   1427	    hw->phy.type == e1000_phy_m88) {
   1428		ctrl_reg |= E1000_CTRL_ILOS;	/* Invert Loss of Signal */
   1429	} else {
   1430		/* Set the ILOS bit on the fiber Nic if half duplex link is
   1431		 * detected.
   1432		 */
   1433		if ((er32(STATUS) & E1000_STATUS_FD) == 0)
   1434			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
   1435	}
   1436
   1437	ew32(CTRL, ctrl_reg);
   1438
   1439	/* Disable the receiver on the PHY so when a cable is plugged in, the
   1440	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
   1441	 */
   1442	if (hw->phy.type == e1000_phy_m88)
   1443		e1000_phy_disable_receiver(adapter);
   1444
   1445	usleep_range(500, 1000);
   1446
   1447	return 0;
   1448}
   1449
   1450static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
   1451{
   1452	struct e1000_hw *hw = &adapter->hw;
   1453	u32 ctrl = er32(CTRL);
   1454	int link;
   1455
   1456	/* special requirements for 82571/82572 fiber adapters */
   1457
   1458	/* jump through hoops to make sure link is up because serdes
   1459	 * link is hardwired up
   1460	 */
   1461	ctrl |= E1000_CTRL_SLU;
   1462	ew32(CTRL, ctrl);
   1463
   1464	/* disable autoneg */
   1465	ctrl = er32(TXCW);
   1466	ctrl &= ~BIT(31);
   1467	ew32(TXCW, ctrl);
   1468
   1469	link = (er32(STATUS) & E1000_STATUS_LU);
   1470
   1471	if (!link) {
   1472		/* set invert loss of signal */
   1473		ctrl = er32(CTRL);
   1474		ctrl |= E1000_CTRL_ILOS;
   1475		ew32(CTRL, ctrl);
   1476	}
   1477
   1478	/* special write to serdes control register to enable SerDes analog
   1479	 * loopback
   1480	 */
   1481	ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK);
   1482	e1e_flush();
   1483	usleep_range(10000, 11000);
   1484
   1485	return 0;
   1486}
   1487
   1488/* only call this for fiber/serdes connections to es2lan */
   1489static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
   1490{
   1491	struct e1000_hw *hw = &adapter->hw;
   1492	u32 ctrlext = er32(CTRL_EXT);
   1493	u32 ctrl = er32(CTRL);
   1494
   1495	/* save CTRL_EXT to restore later, reuse an empty variable (unused
   1496	 * on mac_type 80003es2lan)
   1497	 */
   1498	adapter->tx_fifo_head = ctrlext;
   1499
   1500	/* clear the serdes mode bits, putting the device into mac loopback */
   1501	ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
   1502	ew32(CTRL_EXT, ctrlext);
   1503
   1504	/* force speed to 1000/FD, link up */
   1505	ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
   1506	ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX |
   1507		 E1000_CTRL_SPD_1000 | E1000_CTRL_FD);
   1508	ew32(CTRL, ctrl);
   1509
   1510	/* set mac loopback */
   1511	ctrl = er32(RCTL);
   1512	ctrl |= E1000_RCTL_LBM_MAC;
   1513	ew32(RCTL, ctrl);
   1514
   1515	/* set testing mode parameters (no need to reset later) */
   1516#define KMRNCTRLSTA_OPMODE (0x1F << 16)
   1517#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582
   1518	ew32(KMRNCTRLSTA,
   1519	     (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII));
   1520
   1521	return 0;
   1522}
   1523
   1524static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
   1525{
   1526	struct e1000_hw *hw = &adapter->hw;
   1527	u32 rctl, fext_nvm11, tarc0;
   1528
   1529	if (hw->mac.type >= e1000_pch_spt) {
   1530		fext_nvm11 = er32(FEXTNVM11);
   1531		fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
   1532		ew32(FEXTNVM11, fext_nvm11);
   1533		tarc0 = er32(TARC(0));
   1534		/* clear bits 28 & 29 (control of MULR concurrent requests) */
   1535		tarc0 &= 0xcfffffff;
   1536		/* set bit 29 (value of MULR requests is now 2) */
   1537		tarc0 |= 0x20000000;
   1538		ew32(TARC(0), tarc0);
   1539	}
   1540	if (hw->phy.media_type == e1000_media_type_fiber ||
   1541	    hw->phy.media_type == e1000_media_type_internal_serdes) {
   1542		switch (hw->mac.type) {
   1543		case e1000_80003es2lan:
   1544			return e1000_set_es2lan_mac_loopback(adapter);
   1545		case e1000_82571:
   1546		case e1000_82572:
   1547			return e1000_set_82571_fiber_loopback(adapter);
   1548		default:
   1549			rctl = er32(RCTL);
   1550			rctl |= E1000_RCTL_LBM_TCVR;
   1551			ew32(RCTL, rctl);
   1552			return 0;
   1553		}
   1554	} else if (hw->phy.media_type == e1000_media_type_copper) {
   1555		return e1000_integrated_phy_loopback(adapter);
   1556	}
   1557
   1558	return 7;
   1559}
   1560
   1561static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
   1562{
   1563	struct e1000_hw *hw = &adapter->hw;
   1564	u32 rctl, fext_nvm11, tarc0;
   1565	u16 phy_reg;
   1566
   1567	rctl = er32(RCTL);
   1568	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
   1569	ew32(RCTL, rctl);
   1570
   1571	switch (hw->mac.type) {
   1572	case e1000_pch_spt:
   1573	case e1000_pch_cnp:
   1574	case e1000_pch_tgp:
   1575	case e1000_pch_adp:
   1576	case e1000_pch_mtp:
   1577	case e1000_pch_lnp:
   1578		fext_nvm11 = er32(FEXTNVM11);
   1579		fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
   1580		ew32(FEXTNVM11, fext_nvm11);
   1581		tarc0 = er32(TARC(0));
   1582		/* clear bits 28 & 29 (control of MULR concurrent requests) */
   1583		/* set bit 29 (value of MULR requests is now 0) */
   1584		tarc0 &= 0xcfffffff;
   1585		ew32(TARC(0), tarc0);
   1586		fallthrough;
   1587	case e1000_80003es2lan:
   1588		if (hw->phy.media_type == e1000_media_type_fiber ||
   1589		    hw->phy.media_type == e1000_media_type_internal_serdes) {
   1590			/* restore CTRL_EXT, stealing space from tx_fifo_head */
   1591			ew32(CTRL_EXT, adapter->tx_fifo_head);
   1592			adapter->tx_fifo_head = 0;
   1593		}
   1594		fallthrough;
   1595	case e1000_82571:
   1596	case e1000_82572:
   1597		if (hw->phy.media_type == e1000_media_type_fiber ||
   1598		    hw->phy.media_type == e1000_media_type_internal_serdes) {
   1599			ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
   1600			e1e_flush();
   1601			usleep_range(10000, 11000);
   1602			break;
   1603		}
   1604		fallthrough;
   1605	default:
   1606		hw->mac.autoneg = 1;
   1607		if (hw->phy.type == e1000_phy_gg82563)
   1608			e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180);
   1609		e1e_rphy(hw, MII_BMCR, &phy_reg);
   1610		if (phy_reg & BMCR_LOOPBACK) {
   1611			phy_reg &= ~BMCR_LOOPBACK;
   1612			e1e_wphy(hw, MII_BMCR, phy_reg);
   1613			if (hw->phy.ops.commit)
   1614				hw->phy.ops.commit(hw);
   1615		}
   1616		break;
   1617	}
   1618}
   1619
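       /* The loopback test frame is 0xFF in its first half and 0xAA in most of
        * its second half, with 0xBE/0xAF marker bytes written near the middle;
        * e1000_check_lbtest_frame() below looks for those markers to decide
        * whether a received frame made it around the loop intact.
        */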
   1620static void e1000_create_lbtest_frame(struct sk_buff *skb,
   1621				      unsigned int frame_size)
   1622{
   1623	memset(skb->data, 0xFF, frame_size);
   1624	frame_size &= ~1;
   1625	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
   1626	skb->data[frame_size / 2 + 10] = 0xBE;
   1627	skb->data[frame_size / 2 + 12] = 0xAF;
   1628}
   1629
   1630static int e1000_check_lbtest_frame(struct sk_buff *skb,
   1631				    unsigned int frame_size)
   1632{
   1633	frame_size &= ~1;
   1634	if (*(skb->data + 3) == 0xFF)
   1635		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
   1636		    (*(skb->data + frame_size / 2 + 12) == 0xAF))
   1637			return 0;
   1638	return 13;
   1639}
   1640
   1641static int e1000_run_loopback_test(struct e1000_adapter *adapter)
   1642{
   1643	struct e1000_ring *tx_ring = &adapter->test_tx_ring;
   1644	struct e1000_ring *rx_ring = &adapter->test_rx_ring;
   1645	struct pci_dev *pdev = adapter->pdev;
   1646	struct e1000_hw *hw = &adapter->hw;
   1647	struct e1000_buffer *buffer_info;
   1648	int i, j, k, l;
   1649	int lc;
   1650	int good_cnt;
   1651	int ret_val = 0;
   1652	unsigned long time;
   1653
   1654	ew32(RDT(0), rx_ring->count - 1);
   1655
   1656	/* Calculate the loop count based on the largest descriptor ring
   1657	 * The idea is to wrap the largest ring a number of times using 64
   1658	 * send/receive pairs during each loop
   1659	 */
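       	/* For example, if both test rings held 256 descriptors (a count
       	 * assumed here purely for illustration), lc would work out to
       	 * ((256 / 64) * 2) + 1 = 9 batches of 64 frames each.
       	 */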
   1660
   1661	if (rx_ring->count <= tx_ring->count)
   1662		lc = ((tx_ring->count / 64) * 2) + 1;
   1663	else
   1664		lc = ((rx_ring->count / 64) * 2) + 1;
   1665
   1666	k = 0;
   1667	l = 0;
   1668	/* loop count loop */
   1669	for (j = 0; j <= lc; j++) {
   1670		/* send the packets */
   1671		for (i = 0; i < 64; i++) {
   1672			buffer_info = &tx_ring->buffer_info[k];
   1673
   1674			e1000_create_lbtest_frame(buffer_info->skb, 1024);
   1675			dma_sync_single_for_device(&pdev->dev,
   1676						   buffer_info->dma,
   1677						   buffer_info->length,
   1678						   DMA_TO_DEVICE);
   1679			k++;
   1680			if (k == tx_ring->count)
   1681				k = 0;
   1682		}
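       		/* hand the batch to hardware by advancing the Tx tail, then
       		 * give the MAC loopback path time to return the frames
       		 */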
   1683		ew32(TDT(0), k);
   1684		e1e_flush();
   1685		msleep(200);
   1686		time = jiffies;	/* set the start time for the receive */
   1687		good_cnt = 0;
   1688		/* receive the sent packets */
   1689		do {
   1690			buffer_info = &rx_ring->buffer_info[l];
   1691
   1692			dma_sync_single_for_cpu(&pdev->dev,
   1693						buffer_info->dma, 2048,
   1694						DMA_FROM_DEVICE);
   1695
   1696			ret_val = e1000_check_lbtest_frame(buffer_info->skb,
   1697							   1024);
   1698			if (!ret_val)
   1699				good_cnt++;
   1700			l++;
   1701			if (l == rx_ring->count)
   1702				l = 0;
   1703			/* time + 20 jiffies (200 msecs on HZ=100 2.4 kernels)
   1704			 * is more than enough to complete the receives; if
   1705			 * exceeded, break out and report an error
   1706			 */
   1707		} while ((good_cnt < 64) && !time_after(jiffies, time + 20));
   1708		if (good_cnt != 64) {
   1709			ret_val = 13;	/* 13 = mis-compare error code */
   1710			break;
   1711		}
   1712		if (time_after(jiffies, time + 20)) {
   1713			ret_val = 14;	/* 14 = receive timeout error code */
   1714			break;
   1715		}
   1716	}
   1717	return ret_val;
   1718}
   1719
   1720static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
   1721{
   1722	struct e1000_hw *hw = &adapter->hw;
   1723
   1724	/* PHY loopback cannot be performed if SoL/IDER sessions are active */
   1725	if (hw->phy.ops.check_reset_block &&
   1726	    hw->phy.ops.check_reset_block(hw)) {
   1727		e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
   1728		*data = 0;
   1729		goto out;
   1730	}
   1731
   1732	*data = e1000_setup_desc_rings(adapter);
   1733	if (*data)
   1734		goto out;
   1735
   1736	*data = e1000_setup_loopback_test(adapter);
   1737	if (*data)
   1738		goto err_loopback;
   1739
   1740	*data = e1000_run_loopback_test(adapter);
   1741	e1000_loopback_cleanup(adapter);
   1742
   1743err_loopback:
   1744	e1000_free_desc_rings(adapter);
   1745out:
   1746	return *data;
   1747}
   1748
   1749static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
   1750{
   1751	struct e1000_hw *hw = &adapter->hw;
   1752
   1753	*data = 0;
   1754	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
   1755		int i = 0;
   1756
   1757		hw->mac.serdes_has_link = false;
   1758
   1759		/* On some blade server designs, link establishment
   1760		 * could take as long as 2-3 minutes
   1761		 */
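       		/* (the loop below polls for roughly 3750 * 20 ms, i.e. about
       		 * 75 seconds, plus whatever slack msleep() adds)
       		 */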
   1762		do {
   1763			hw->mac.ops.check_for_link(hw);
   1764			if (hw->mac.serdes_has_link)
   1765				return *data;
   1766			msleep(20);
   1767		} while (i++ < 3750);
   1768
   1769		*data = 1;
   1770	} else {
   1771		hw->mac.ops.check_for_link(hw);
   1772		if (hw->mac.autoneg)
   1773			/* On some Phy/switch combinations, link establishment
   1774			 * can take a few seconds more than expected.
   1775			 */
   1776			msleep_interruptible(5000);
   1777
   1778		if (!(er32(STATUS) & E1000_STATUS_LU))
   1779			*data = 1;
   1780	}
   1781	return *data;
   1782}
   1783
   1784static int e1000e_get_sset_count(struct net_device __always_unused *netdev,
   1785				 int sset)
   1786{
   1787	switch (sset) {
   1788	case ETH_SS_TEST:
   1789		return E1000_TEST_LEN;
   1790	case ETH_SS_STATS:
   1791		return E1000_STATS_LEN;
   1792	case ETH_SS_PRIV_FLAGS:
   1793		return E1000E_PRIV_FLAGS_STR_LEN;
   1794	default:
   1795		return -EOPNOTSUPP;
   1796	}
   1797}
   1798
   1799static void e1000_diag_test(struct net_device *netdev,
   1800			    struct ethtool_test *eth_test, u64 *data)
   1801{
   1802	struct e1000_adapter *adapter = netdev_priv(netdev);
   1803	u16 autoneg_advertised;
   1804	u8 forced_speed_duplex;
   1805	u8 autoneg;
   1806	bool if_running = netif_running(netdev);
   1807
   1808	pm_runtime_get_sync(netdev->dev.parent);
   1809
   1810	set_bit(__E1000_TESTING, &adapter->state);
   1811
   1812	if (!if_running) {
   1813		/* Get control of and reset hardware */
   1814		if (adapter->flags & FLAG_HAS_AMT)
   1815			e1000e_get_hw_control(adapter);
   1816
   1817		e1000e_power_up_phy(adapter);
   1818
   1819		adapter->hw.phy.autoneg_wait_to_complete = 1;
   1820		e1000e_reset(adapter);
   1821		adapter->hw.phy.autoneg_wait_to_complete = 0;
   1822	}
   1823
   1824	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
   1825		/* Offline tests */
   1826
   1827		/* save speed, duplex, autoneg settings */
   1828		autoneg_advertised = adapter->hw.phy.autoneg_advertised;
   1829		forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
   1830		autoneg = adapter->hw.mac.autoneg;
   1831
   1832		e_info("offline testing starting\n");
   1833
   1834		if (if_running)
   1835			/* indicate we're in test mode */
   1836			e1000e_close(netdev);
   1837
   1838		if (e1000_reg_test(adapter, &data[0]))
   1839			eth_test->flags |= ETH_TEST_FL_FAILED;
   1840
   1841		e1000e_reset(adapter);
   1842		if (e1000_eeprom_test(adapter, &data[1]))
   1843			eth_test->flags |= ETH_TEST_FL_FAILED;
   1844
   1845		e1000e_reset(adapter);
   1846		if (e1000_intr_test(adapter, &data[2]))
   1847			eth_test->flags |= ETH_TEST_FL_FAILED;
   1848
   1849		e1000e_reset(adapter);
   1850		if (e1000_loopback_test(adapter, &data[3]))
   1851			eth_test->flags |= ETH_TEST_FL_FAILED;
   1852
   1853		/* force a wait here until autoneg completes or times out */
   1854		adapter->hw.phy.autoneg_wait_to_complete = 1;
   1855		e1000e_reset(adapter);
   1856		adapter->hw.phy.autoneg_wait_to_complete = 0;
   1857
   1858		if (e1000_link_test(adapter, &data[4]))
   1859			eth_test->flags |= ETH_TEST_FL_FAILED;
   1860
   1861		/* restore speed, duplex, autoneg settings */
   1862		adapter->hw.phy.autoneg_advertised = autoneg_advertised;
   1863		adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
   1864		adapter->hw.mac.autoneg = autoneg;
   1865		e1000e_reset(adapter);
   1866
   1867		clear_bit(__E1000_TESTING, &adapter->state);
   1868		if (if_running)
   1869			e1000e_open(netdev);
   1870	} else {
   1871		/* Online tests */
   1872
   1873		e_info("online testing starting\n");
   1874
   1875		/* register, eeprom, intr and loopback tests not run online */
   1876		data[0] = 0;
   1877		data[1] = 0;
   1878		data[2] = 0;
   1879		data[3] = 0;
   1880
   1881		if (e1000_link_test(adapter, &data[4]))
   1882			eth_test->flags |= ETH_TEST_FL_FAILED;
   1883
   1884		clear_bit(__E1000_TESTING, &adapter->state);
   1885	}
   1886
   1887	if (!if_running) {
   1888		e1000e_reset(adapter);
   1889
   1890		if (adapter->flags & FLAG_HAS_AMT)
   1891			e1000e_release_hw_control(adapter);
   1892	}
   1893
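       	/* assumption: give the link time to recover after the resets above */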
   1894	msleep_interruptible(4 * 1000);
   1895
   1896	pm_runtime_put_sync(netdev->dev.parent);
   1897}
   1898
   1899static void e1000_get_wol(struct net_device *netdev,
   1900			  struct ethtool_wolinfo *wol)
   1901{
   1902	struct e1000_adapter *adapter = netdev_priv(netdev);
   1903
   1904	wol->supported = 0;
   1905	wol->wolopts = 0;
   1906
   1907	if (!(adapter->flags & FLAG_HAS_WOL) ||
   1908	    !device_can_wakeup(&adapter->pdev->dev))
   1909		return;
   1910
   1911	wol->supported = WAKE_UCAST | WAKE_MCAST |
   1912	    WAKE_BCAST | WAKE_MAGIC | WAKE_PHY;
   1913
   1914	/* apply any specific unsupported masks here */
   1915	if (adapter->flags & FLAG_NO_WAKE_UCAST) {
   1916		wol->supported &= ~WAKE_UCAST;
   1917
   1918		if (adapter->wol & E1000_WUFC_EX)
   1919			e_err("Interface does not support directed (unicast) frame wake-up packets\n");
   1920	}
   1921
   1922	if (adapter->wol & E1000_WUFC_EX)
   1923		wol->wolopts |= WAKE_UCAST;
   1924	if (adapter->wol & E1000_WUFC_MC)
   1925		wol->wolopts |= WAKE_MCAST;
   1926	if (adapter->wol & E1000_WUFC_BC)
   1927		wol->wolopts |= WAKE_BCAST;
   1928	if (adapter->wol & E1000_WUFC_MAG)
   1929		wol->wolopts |= WAKE_MAGIC;
   1930	if (adapter->wol & E1000_WUFC_LNKC)
   1931		wol->wolopts |= WAKE_PHY;
   1932}
   1933
   1934static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
   1935{
   1936	struct e1000_adapter *adapter = netdev_priv(netdev);
   1937
   1938	if (!(adapter->flags & FLAG_HAS_WOL) ||
   1939	    !device_can_wakeup(&adapter->pdev->dev) ||
   1940	    (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
   1941			      WAKE_MAGIC | WAKE_PHY)))
   1942		return -EOPNOTSUPP;
   1943
   1944	/* these settings will always override what we currently have */
   1945	adapter->wol = 0;
   1946
   1947	if (wol->wolopts & WAKE_UCAST)
   1948		adapter->wol |= E1000_WUFC_EX;
   1949	if (wol->wolopts & WAKE_MCAST)
   1950		adapter->wol |= E1000_WUFC_MC;
   1951	if (wol->wolopts & WAKE_BCAST)
   1952		adapter->wol |= E1000_WUFC_BC;
   1953	if (wol->wolopts & WAKE_MAGIC)
   1954		adapter->wol |= E1000_WUFC_MAG;
   1955	if (wol->wolopts & WAKE_PHY)
   1956		adapter->wol |= E1000_WUFC_LNKC;
   1957
   1958	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
   1959
   1960	return 0;
   1961}
   1962
   1963static int e1000_set_phys_id(struct net_device *netdev,
   1964			     enum ethtool_phys_id_state state)
   1965{
   1966	struct e1000_adapter *adapter = netdev_priv(netdev);
   1967	struct e1000_hw *hw = &adapter->hw;
   1968
   1969	switch (state) {
   1970	case ETHTOOL_ID_ACTIVE:
   1971		pm_runtime_get_sync(netdev->dev.parent);
   1972
   1973		if (!hw->mac.ops.blink_led)
   1974			return 2;	/* cycle on/off twice per second */
   1975
   1976		hw->mac.ops.blink_led(hw);
   1977		break;
   1978
   1979	case ETHTOOL_ID_INACTIVE:
   1980		if (hw->phy.type == e1000_phy_ife)
   1981			e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
   1982		hw->mac.ops.led_off(hw);
   1983		hw->mac.ops.cleanup_led(hw);
   1984		pm_runtime_put_sync(netdev->dev.parent);
   1985		break;
   1986
   1987	case ETHTOOL_ID_ON:
   1988		hw->mac.ops.led_on(hw);
   1989		break;
   1990
   1991	case ETHTOOL_ID_OFF:
   1992		hw->mac.ops.led_off(hw);
   1993		break;
   1994	}
   1995
   1996	return 0;
   1997}
   1998
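       /* rx_coalesce_usecs values 0-4 are treated as ITR mode selectors rather
        * than real microsecond values (0 disables interrupt throttling, 1 and 3
        * select the dynamic modes, 4 the simplified mode; 2 is rejected).
        * Larger values request a fixed interrupt rate of
        * 1000000 / rx_coalesce_usecs interrupts per second, which is what the
        * conversions below implement.  A typical request from user space would
        * be "ethtool -C <iface> rx-usecs 100", i.e. 10000 interrupts per second.
        */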
   1999static int e1000_get_coalesce(struct net_device *netdev,
   2000			      struct ethtool_coalesce *ec,
   2001			      struct kernel_ethtool_coalesce *kernel_coal,
   2002			      struct netlink_ext_ack *extack)
   2003{
   2004	struct e1000_adapter *adapter = netdev_priv(netdev);
   2005
   2006	if (adapter->itr_setting <= 4)
   2007		ec->rx_coalesce_usecs = adapter->itr_setting;
   2008	else
   2009		ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
   2010
   2011	return 0;
   2012}
   2013
   2014static int e1000_set_coalesce(struct net_device *netdev,
   2015			      struct ethtool_coalesce *ec,
   2016			      struct kernel_ethtool_coalesce *kernel_coal,
   2017			      struct netlink_ext_ack *extack)
   2018{
   2019	struct e1000_adapter *adapter = netdev_priv(netdev);
   2020
   2021	if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
   2022	    ((ec->rx_coalesce_usecs > 4) &&
   2023	     (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
   2024	    (ec->rx_coalesce_usecs == 2))
   2025		return -EINVAL;
   2026
   2027	if (ec->rx_coalesce_usecs == 4) {
   2028		adapter->itr_setting = 4;
   2029		adapter->itr = adapter->itr_setting;
   2030	} else if (ec->rx_coalesce_usecs <= 3) {
   2031		adapter->itr = 20000;
   2032		adapter->itr_setting = ec->rx_coalesce_usecs;
   2033	} else {
   2034		adapter->itr = (1000000 / ec->rx_coalesce_usecs);
   2035		adapter->itr_setting = adapter->itr & ~3;
   2036	}
   2037
   2038	pm_runtime_get_sync(netdev->dev.parent);
   2039
   2040	if (adapter->itr_setting != 0)
   2041		e1000e_write_itr(adapter, adapter->itr);
   2042	else
   2043		e1000e_write_itr(adapter, 0);
   2044
   2045	pm_runtime_put_sync(netdev->dev.parent);
   2046
   2047	return 0;
   2048}
   2049
   2050static int e1000_nway_reset(struct net_device *netdev)
   2051{
   2052	struct e1000_adapter *adapter = netdev_priv(netdev);
   2053
   2054	if (!netif_running(netdev))
   2055		return -EAGAIN;
   2056
   2057	if (!adapter->hw.mac.autoneg)
   2058		return -EINVAL;
   2059
   2060	pm_runtime_get_sync(netdev->dev.parent);
   2061	e1000e_reinit_locked(adapter);
   2062	pm_runtime_put_sync(netdev->dev.parent);
   2063
   2064	return 0;
   2065}
   2066
   2067static void e1000_get_ethtool_stats(struct net_device *netdev,
   2068				    struct ethtool_stats __always_unused *stats,
   2069				    u64 *data)
   2070{
   2071	struct e1000_adapter *adapter = netdev_priv(netdev);
   2072	struct rtnl_link_stats64 net_stats;
   2073	int i;
   2074	char *p = NULL;
   2075
   2076	pm_runtime_get_sync(netdev->dev.parent);
   2077
   2078	dev_get_stats(netdev, &net_stats);
   2079
   2080	pm_runtime_put_sync(netdev->dev.parent);
   2081
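       	/* Each exported value comes either from the rtnl_link_stats64
       	 * snapshot or from the adapter structure, at the offset recorded in
       	 * e1000_gstrings_stats[]; sizeof_stat decides whether the field is
       	 * read back as a u64 or a u32.
       	 */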
   2082	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
   2083		switch (e1000_gstrings_stats[i].type) {
   2084		case NETDEV_STATS:
   2085			p = (char *)&net_stats +
   2086			    e1000_gstrings_stats[i].stat_offset;
   2087			break;
   2088		case E1000_STATS:
   2089			p = (char *)adapter +
   2090			    e1000_gstrings_stats[i].stat_offset;
   2091			break;
   2092		default:
   2093			data[i] = 0;
   2094			continue;
   2095		}
   2096
   2097		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
   2098			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
   2099	}
   2100}
   2101
   2102static void e1000_get_strings(struct net_device __always_unused *netdev,
   2103			      u32 stringset, u8 *data)
   2104{
   2105	u8 *p = data;
   2106	int i;
   2107
   2108	switch (stringset) {
   2109	case ETH_SS_TEST:
   2110		memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
   2111		break;
   2112	case ETH_SS_STATS:
   2113		for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
   2114			memcpy(p, e1000_gstrings_stats[i].stat_string,
   2115			       ETH_GSTRING_LEN);
   2116			p += ETH_GSTRING_LEN;
   2117		}
   2118		break;
   2119	case ETH_SS_PRIV_FLAGS:
   2120		memcpy(data, e1000e_priv_flags_strings,
   2121		       E1000E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
   2122		break;
   2123	}
   2124}
   2125
   2126static int e1000_get_rxnfc(struct net_device *netdev,
   2127			   struct ethtool_rxnfc *info,
   2128			   u32 __always_unused *rule_locs)
   2129{
   2130	info->data = 0;
   2131
   2132	switch (info->cmd) {
   2133	case ETHTOOL_GRXFH: {
   2134		struct e1000_adapter *adapter = netdev_priv(netdev);
   2135		struct e1000_hw *hw = &adapter->hw;
   2136		u32 mrqc;
   2137
   2138		pm_runtime_get_sync(netdev->dev.parent);
   2139		mrqc = er32(MRQC);
   2140		pm_runtime_put_sync(netdev->dev.parent);
   2141
   2142		if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
   2143			return 0;
   2144
   2145		switch (info->flow_type) {
   2146		case TCP_V4_FLOW:
   2147			if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
   2148				info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
   2149			fallthrough;
   2150		case UDP_V4_FLOW:
   2151		case SCTP_V4_FLOW:
   2152		case AH_ESP_V4_FLOW:
   2153		case IPV4_FLOW:
   2154			if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
   2155				info->data |= RXH_IP_SRC | RXH_IP_DST;
   2156			break;
   2157		case TCP_V6_FLOW:
   2158			if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
   2159				info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
   2160			fallthrough;
   2161		case UDP_V6_FLOW:
   2162		case SCTP_V6_FLOW:
   2163		case AH_ESP_V6_FLOW:
   2164		case IPV6_FLOW:
   2165			if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
   2166				info->data |= RXH_IP_SRC | RXH_IP_DST;
   2167			break;
   2168		default:
   2169			break;
   2170		}
   2171		return 0;
   2172	}
   2173	default:
   2174		return -EOPNOTSUPP;
   2175	}
   2176}
   2177
   2178static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
   2179{
   2180	struct e1000_adapter *adapter = netdev_priv(netdev);
   2181	struct e1000_hw *hw = &adapter->hw;
   2182	u16 cap_addr, lpa_addr, pcs_stat_addr, phy_data;
   2183	u32 ret_val;
   2184
   2185	if (!(adapter->flags2 & FLAG2_HAS_EEE))
   2186		return -EOPNOTSUPP;
   2187
   2188	switch (hw->phy.type) {
   2189	case e1000_phy_82579:
   2190		cap_addr = I82579_EEE_CAPABILITY;
   2191		lpa_addr = I82579_EEE_LP_ABILITY;
   2192		pcs_stat_addr = I82579_EEE_PCS_STATUS;
   2193		break;
   2194	case e1000_phy_i217:
   2195		cap_addr = I217_EEE_CAPABILITY;
   2196		lpa_addr = I217_EEE_LP_ABILITY;
   2197		pcs_stat_addr = I217_EEE_PCS_STATUS;
   2198		break;
   2199	default:
   2200		return -EOPNOTSUPP;
   2201	}
   2202
   2203	pm_runtime_get_sync(netdev->dev.parent);
   2204
   2205	ret_val = hw->phy.ops.acquire(hw);
   2206	if (ret_val) {
   2207		pm_runtime_put_sync(netdev->dev.parent);
   2208		return -EBUSY;
   2209	}
   2210
   2211	/* EEE Capability */
   2212	ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
   2213	if (ret_val)
   2214		goto release;
   2215	edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data);
   2216
   2217	/* EEE Advertised */
   2218	edata->advertised = mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
   2219
   2220	/* EEE Link Partner Advertised */
   2221	ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data);
   2222	if (ret_val)
   2223		goto release;
   2224	edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
   2225
   2226	/* EEE PCS Status */
   2227	ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data);
   2228	if (ret_val)
   2229		goto release;
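       	/* assumption: the shift lines the 82579 PCS status bits up with the
       	 * I217-style layout that the LPI_RCVD masks below expect
       	 */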
   2230	if (hw->phy.type == e1000_phy_82579)
   2231		phy_data <<= 8;
   2232
   2233	/* Result of the EEE auto-negotiation - no register reports the
   2234	 * status of the EEE negotiation, so make a best guess based on
   2235	 * whether Tx or Rx LPI indications have been received.
   2236	 */
   2237	if (phy_data & (E1000_EEE_TX_LPI_RCVD | E1000_EEE_RX_LPI_RCVD))
   2238		edata->eee_active = true;
   2239
   2240	edata->eee_enabled = !hw->dev_spec.ich8lan.eee_disable;
   2241	edata->tx_lpi_enabled = true;
   2242	edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT;
   2243
   2244release:
   2245	hw->phy.ops.release(hw);
   2246	if (ret_val)
   2247		ret_val = -ENODATA;
   2248
   2249	pm_runtime_put_sync(netdev->dev.parent);
   2250
   2251	return ret_val;
   2252}
   2253
   2254static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
   2255{
   2256	struct e1000_adapter *adapter = netdev_priv(netdev);
   2257	struct e1000_hw *hw = &adapter->hw;
   2258	struct ethtool_eee eee_curr;
   2259	s32 ret_val;
   2260
   2261	ret_val = e1000e_get_eee(netdev, &eee_curr);
   2262	if (ret_val)
   2263		return ret_val;
   2264
   2265	if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
   2266		e_err("Setting EEE tx-lpi is not supported\n");
   2267		return -EINVAL;
   2268	}
   2269
   2270	if (eee_curr.tx_lpi_timer != edata->tx_lpi_timer) {
   2271		e_err("Setting EEE Tx LPI timer is not supported\n");
   2272		return -EINVAL;
   2273	}
   2274
   2275	if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
   2276		e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n");
   2277		return -EINVAL;
   2278	}
   2279
   2280	adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
   2281
   2282	hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
   2283
   2284	pm_runtime_get_sync(netdev->dev.parent);
   2285
   2286	/* reset the link */
   2287	if (netif_running(netdev))
   2288		e1000e_reinit_locked(adapter);
   2289	else
   2290		e1000e_reset(adapter);
   2291
   2292	pm_runtime_put_sync(netdev->dev.parent);
   2293
   2294	return 0;
   2295}
   2296
   2297static int e1000e_get_ts_info(struct net_device *netdev,
   2298			      struct ethtool_ts_info *info)
   2299{
   2300	struct e1000_adapter *adapter = netdev_priv(netdev);
   2301
   2302	ethtool_op_get_ts_info(netdev, info);
   2303
   2304	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
   2305		return 0;
   2306
   2307	info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
   2308				  SOF_TIMESTAMPING_RX_HARDWARE |
   2309				  SOF_TIMESTAMPING_RAW_HARDWARE);
   2310
   2311	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
   2312
   2313	info->rx_filters = (BIT(HWTSTAMP_FILTER_NONE) |
   2314			    BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
   2315			    BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
   2316			    BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
   2317			    BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
   2318			    BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
   2319			    BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
   2320			    BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
   2321			    BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
   2322			    BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
   2323			    BIT(HWTSTAMP_FILTER_ALL));
   2324
   2325	if (adapter->ptp_clock)
   2326		info->phc_index = ptp_clock_index(adapter->ptp_clock);
   2327
   2328	return 0;
   2329}
   2330
   2331static u32 e1000e_get_priv_flags(struct net_device *netdev)
   2332{
   2333	struct e1000_adapter *adapter = netdev_priv(netdev);
   2334	u32 priv_flags = 0;
   2335
   2336	if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
   2337		priv_flags |= E1000E_PRIV_FLAGS_S0IX_ENABLED;
   2338
   2339	return priv_flags;
   2340}
   2341
   2342static int e1000e_set_priv_flags(struct net_device *netdev, u32 priv_flags)
   2343{
   2344	struct e1000_adapter *adapter = netdev_priv(netdev);
   2345	unsigned int flags2 = adapter->flags2;
   2346
   2347	flags2 &= ~FLAG2_ENABLE_S0IX_FLOWS;
   2348	if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) {
   2349		struct e1000_hw *hw = &adapter->hw;
   2350
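       		/* S0ix flows are only wired up for CNP and later PCH MACs */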
   2351		if (hw->mac.type < e1000_pch_cnp)
   2352			return -EINVAL;
   2353		flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
   2354	}
   2355
   2356	if (flags2 != adapter->flags2)
   2357		adapter->flags2 = flags2;
   2358
   2359	return 0;
   2360}
   2361
   2362static const struct ethtool_ops e1000_ethtool_ops = {
   2363	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
   2364	.get_drvinfo		= e1000_get_drvinfo,
   2365	.get_regs_len		= e1000_get_regs_len,
   2366	.get_regs		= e1000_get_regs,
   2367	.get_wol		= e1000_get_wol,
   2368	.set_wol		= e1000_set_wol,
   2369	.get_msglevel		= e1000_get_msglevel,
   2370	.set_msglevel		= e1000_set_msglevel,
   2371	.nway_reset		= e1000_nway_reset,
   2372	.get_link		= ethtool_op_get_link,
   2373	.get_eeprom_len		= e1000_get_eeprom_len,
   2374	.get_eeprom		= e1000_get_eeprom,
   2375	.set_eeprom		= e1000_set_eeprom,
   2376	.get_ringparam		= e1000_get_ringparam,
   2377	.set_ringparam		= e1000_set_ringparam,
   2378	.get_pauseparam		= e1000_get_pauseparam,
   2379	.set_pauseparam		= e1000_set_pauseparam,
   2380	.self_test		= e1000_diag_test,
   2381	.get_strings		= e1000_get_strings,
   2382	.set_phys_id		= e1000_set_phys_id,
   2383	.get_ethtool_stats	= e1000_get_ethtool_stats,
   2384	.get_sset_count		= e1000e_get_sset_count,
   2385	.get_coalesce		= e1000_get_coalesce,
   2386	.set_coalesce		= e1000_set_coalesce,
   2387	.get_rxnfc		= e1000_get_rxnfc,
   2388	.get_ts_info		= e1000e_get_ts_info,
   2389	.get_eee		= e1000e_get_eee,
   2390	.set_eee		= e1000e_set_eee,
   2391	.get_link_ksettings	= e1000_get_link_ksettings,
   2392	.set_link_ksettings	= e1000_set_link_ksettings,
   2393	.get_priv_flags		= e1000e_get_priv_flags,
   2394	.set_priv_flags		= e1000e_set_priv_flags,
   2395};
   2396
   2397void e1000e_set_ethtool_ops(struct net_device *netdev)
   2398{
   2399	netdev->ethtool_ops = &e1000_ethtool_ops;
   2400}