cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ethtool.c (27373B)


// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/* ethtool support for ixgbevf */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>

#include "ixgbevf.h"

#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBEVF_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBEVF_STAT(_name, _stat) { \
	.stat_string = _name, \
	.type = IXGBEVF_STATS, \
	.sizeof_stat = sizeof_field(struct ixgbevf_adapter, _stat), \
	.stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
}

#define IXGBEVF_NETDEV_STAT(_net_stat) { \
	.stat_string = #_net_stat, \
	.type = NETDEV_STATS, \
	.sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \
	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
}

static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
	IXGBEVF_NETDEV_STAT(rx_packets),
	IXGBEVF_NETDEV_STAT(tx_packets),
	IXGBEVF_NETDEV_STAT(rx_bytes),
	IXGBEVF_NETDEV_STAT(tx_bytes),
	IXGBEVF_STAT("tx_busy", tx_busy),
	IXGBEVF_STAT("tx_restart_queue", restart_queue),
	IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
	IXGBEVF_NETDEV_STAT(multicast),
	IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
	IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
	IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
	IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
	IXGBEVF_STAT("tx_ipsec", tx_ipsec),
	IXGBEVF_STAT("rx_ipsec", rx_ipsec),
};

#define IXGBEVF_QUEUE_STATS_LEN ( \
	(((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
	 ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_xdp_queues + \
	 ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
	 (sizeof(struct ixgbevf_stats) / sizeof(u64)))
#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)

#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)",
	"Link test   (on/offline)"
};

#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)

static const char ixgbevf_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBEVF_PRIV_FLAGS_LEGACY_RX	BIT(0)
	"legacy-rx",
};

#define IXGBEVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbevf_priv_flags_strings)

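/* The VF has no PHY to manage; report a fixed 10GbE full-duplex
 * capability and echo whatever link speed the PF last reported.
 */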
static int ixgbevf_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
	cmd->base.autoneg = AUTONEG_DISABLE;
	cmd->base.port = -1;

	if (adapter->link_up) {
		__u32 speed = SPEED_10000;

		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			speed = SPEED_100;
			break;
		}

		cmd->base.speed = speed;
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}

static u32 ixgbevf_get_msglevel(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)

static int ixgbevf_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 45
	return IXGBE_REGS_LEN * sizeof(u32);
}

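/* Snapshot the VF-visible registers (general, interrupt, Rx DMA and Tx
 * queue registers) into the caller-supplied buffer for "ethtool -d".
 */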
static void ixgbevf_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs,
			     void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u32 regs_len = ixgbevf_get_regs_len(netdev);
	u8 i;

	memset(p, 0, regs_len);

	/* generate a number suitable for ethtool's register version */
	regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR
	 */
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);

	/* Receive DMA */
	for (i = 0; i < 2; i++)
		regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));

	/* Receive */
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);

	/* Transmit */
	for (i = 0; i < 2; i++)
		regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
}

static void ixgbevf_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *drvinfo)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN;
}

static void ixgbevf_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = IXGBEVF_MAX_RXD;
	ring->tx_max_pending = IXGBEVF_MAX_TXD;
	ring->rx_pending = adapter->rx_ring_count;
	ring->tx_pending = adapter->tx_ring_count;
}

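/* Resize the descriptor rings.  When the interface is down only the
 * ring counts are updated; otherwise replacement rings are allocated
 * up front and swapped in around a brief down/up, so an allocation
 * failure leaves the original rings untouched.
 */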
static int ixgbevf_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
	u32 new_rx_count, new_tx_count;
	int i, j, err = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
	new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
	new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	/* if nothing to do return success */
	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count))
		return 0;

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_xdp_queues; i++)
			adapter->xdp_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->xdp_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	if (new_tx_count != adapter->tx_ring_count) {
		tx_ring = vmalloc(array_size(sizeof(*tx_ring),
					     adapter->num_tx_queues +
						adapter->num_xdp_queues));
		if (!tx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			/* clone ring and setup updated count */
			tx_ring[i] = *adapter->tx_ring[i];
			tx_ring[i].count = new_tx_count;
			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_tx_resources(&tx_ring[i]);
				}

				vfree(tx_ring);
				tx_ring = NULL;

				goto clear_reset;
			}
		}

		for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
			/* clone ring and setup updated count */
			tx_ring[i] = *adapter->xdp_ring[j];
			tx_ring[i].count = new_tx_count;
			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_tx_resources(&tx_ring[i]);
				}

				vfree(tx_ring);
				tx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	if (new_rx_count != adapter->rx_ring_count) {
		rx_ring = vmalloc(array_size(sizeof(*rx_ring),
					     adapter->num_rx_queues));
		if (!rx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			/* clone ring and setup updated count */
			rx_ring[i] = *adapter->rx_ring[i];

			/* Clear copied XDP RX-queue info */
			memset(&rx_ring[i].xdp_rxq, 0,
			       sizeof(rx_ring[i].xdp_rxq));

			rx_ring[i].count = new_rx_count;
			err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_rx_resources(&rx_ring[i]);
				}

				vfree(rx_ring);
				rx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	/* bring interface down to prepare for update */
	ixgbevf_down(adapter);

	/* Tx */
	if (tx_ring) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
			*adapter->tx_ring[i] = tx_ring[i];
		}
		adapter->tx_ring_count = new_tx_count;

		for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
			ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
			*adapter->xdp_ring[j] = tx_ring[i];
		}
		adapter->xdp_ring_count = new_tx_count;

		vfree(tx_ring);
		tx_ring = NULL;
	}

	/* Rx */
	if (rx_ring) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
			*adapter->rx_ring[i] = rx_ring[i];
		}
		adapter->rx_ring_count = new_rx_count;

		vfree(rx_ring);
		rx_ring = NULL;
	}

	/* restore interface using new values */
	ixgbevf_up(adapter);

clear_reset:
	/* free Tx resources if Rx error is encountered */
	if (tx_ring) {
		for (i = 0;
		     i < adapter->num_tx_queues + adapter->num_xdp_queues; i++)
			ixgbevf_free_tx_resources(&tx_ring[i]);
		vfree(tx_ring);
	}

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
	return err;
}

static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
{
	switch (stringset) {
	case ETH_SS_TEST:
		return IXGBEVF_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBEVF_STATS_LEN;
	case ETH_SS_PRIV_FLAGS:
		return IXGBEVF_PRIV_FLAGS_STR_LEN;
	default:
		return -EINVAL;
	}
}

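/* Fill the stats array in the same order as the string table: global
 * stats first, then packets/bytes for each Tx, XDP and Rx queue.
 * Per-ring counters are read under u64_stats fetch/retry so 32-bit
 * readers see consistent values.
 */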
static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbevf_ring *ring;
	int i, j;
	char *p;

	ixgbevf_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
		switch (ixgbevf_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *)net_stats +
					ixgbevf_gstrings_stats[i].stat_offset;
			break;
		case IXGBEVF_STATS:
			p = (char *)adapter +
					ixgbevf_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	/* populate Tx queue data */
	for (j = 0; j < adapter->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	/* populate XDP queue data */
	for (j = 0; j < adapter->num_xdp_queues; j++) {
		ring = adapter->xdp_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	/* populate Rx queue data */
	for (j = 0; j < adapter->num_rx_queues; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}
}

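/* Emit the string tables; ETH_SS_STATS must match the ordering used by
 * ixgbevf_get_ethtool_stats() above.
 */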
static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *ixgbe_gstrings_test,
		       IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_xdp_queues; i++) {
			sprintf(p, "xdp_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "xdp_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_rx_queues; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, ixgbevf_priv_flags_strings,
		       IXGBEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
		break;
	}
}

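/* Self-test helper: returns non-zero (failure) when the PF reports the
 * link as down.
 */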
static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;
	*data = 0;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (!link_up)
		*data = 1;

	return *data;
}

/* ethtool register test data */
struct ixgbevf_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6

/* default VF register test */
static const struct ixgbevf_reg_test reg_test_vf[] = {
	{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ .reg = 0 }
};

static const u32 register_test_patterns[] = {
	0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};

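/* Write each test pattern (masked by @write) to @reg, read it back and
 * compare against @mask; the original register value is restored before
 * returning.  Returns true on mismatch and records the failing register
 * in *data.
 */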
static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data,
			     int reg, u32 mask, u32 write)
{
	u32 pat, val, before;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {
		before = ixgbevf_read_reg(&adapter->hw, reg);
		ixgbe_write_reg(&adapter->hw, reg,
				register_test_patterns[pat] & write);
		val = ixgbevf_read_reg(&adapter->hw, reg);
		if (val != (register_test_patterns[pat] & write & mask)) {
			hw_dbg(&adapter->hw,
			       "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			       reg, val,
			       register_test_patterns[pat] & write & mask);
			*data = reg;
			ixgbe_write_reg(&adapter->hw, reg, before);
			return true;
		}
		ixgbe_write_reg(&adapter->hw, reg, before);
	}
	return false;
}

static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data,
			      int reg, u32 mask, u32 write)
{
	u32 val, before;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	before = ixgbevf_read_reg(&adapter->hw, reg);
	ixgbe_write_reg(&adapter->hw, reg, write & mask);
	val = ixgbevf_read_reg(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		       reg, (val & mask), write & mask);
		*data = reg;
		ixgbe_write_reg(&adapter->hw, reg, before);
		return true;
	}
	ixgbe_write_reg(&adapter->hw, reg, before);
	return false;
}

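/* Walk reg_test_vf until the null terminator, applying the appropriate
 * pattern or set/check test to each register in the entry's array.
 */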
static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	const struct ixgbevf_reg_test *test;
	u32 i;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		dev_err(&adapter->pdev->dev,
			"Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	test = reg_test_vf;

	/* Perform the register test, looping through the test table
	 * until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     test->reg + 4 + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			if (b)
				return 1;
		}
		test++;
	}

	*data = 0;
	return *data;
}

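/* ethtool -t entry point.  Offline mode runs the link test and then the
 * register test around a device reset (closing and reopening a running
 * interface); online mode only checks link state.  data[0] holds the
 * register test result, data[1] the link test result.
 */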
static void ixgbevf_diag_test(struct net_device *netdev,
			      struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		dev_err(&adapter->pdev->dev,
			"Adapter removed - test blocked\n");
		data[0] = 1;
		data[1] = 1;
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
	set_bit(__IXGBEVF_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		hw_dbg(&adapter->hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			ixgbevf_close(netdev);
		else
			ixgbevf_reset(adapter);

		hw_dbg(&adapter->hw, "register testing starting\n");
		if (ixgbevf_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbevf_reset(adapter);

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
		if (if_running)
			ixgbevf_open(netdev);
	} else {
		hw_dbg(&adapter->hw, "online testing starting\n");
		/* Online tests */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
	}
	msleep_interruptible(4 * 1000);
}

static int ixgbevf_nway_reset(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}

static int ixgbevf_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed Tx/Rx queues per vector mode, report only Rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}

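/* Update interrupt moderation.  An itr_setting of 1 keeps the driver's
 * dynamic defaults (IXGBE_20K_ITR for Rx, IXGBE_12K_ITR for Tx); larger
 * values store the requested microseconds shifted by 2 to match EITR
 * register granularity, and every queue vector is reprogrammed.
 */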
static int ixgbevf_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_q_vector *q_vector;
	int num_vectors, i;
	u16 tx_itr_param, rx_itr_param;

	/* don't accept Tx specific changes if we've got mixed RxTx vectors */
	if (adapter->q_vector[0]->tx.count &&
	    adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_12K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < num_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* Tx only */
			q_vector->itr = tx_itr_param;
		else
			/* Rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbevf_write_eitr(q_vector);
	}

	return 0;
}

static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			     u32 *rules __always_unused)
{
	struct ixgbevf_adapter *adapter = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = adapter->num_rx_queues;
		return 0;
	default:
		hw_dbg(&adapter->hw, "Command parameters not supported\n");
		return -EOPNOTSUPP;
	}
}

static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
		return IXGBEVF_X550_VFRETA_SIZE;

	return IXGBEVF_82599_RETA_SIZE;
}

static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
{
	return IXGBEVF_RSS_HASH_KEY_SIZE;
}

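/* On X550-class VFs the RSS key and indirection table are cached in the
 * adapter structure; older devices must fetch them from the PF over the
 * mailbox under mbx_lock.
 */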
static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			    u8 *hfunc)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
		if (key)
			memcpy(key, adapter->rss_key,
			       ixgbevf_get_rxfh_key_size(netdev));

		if (indir) {
			int i;

			for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
				indir[i] = adapter->rss_indir_tbl[i];
		}
	} else {
		/* If neither indirection table nor hash key was requested
		 *  - just return a success avoiding taking any locks.
		 */
		if (!indir && !key)
			return 0;

		spin_lock_bh(&adapter->mbx_lock);
		if (indir)
			err = ixgbevf_get_reta_locked(&adapter->hw, indir,
						      adapter->num_rx_queues);

		if (!err && key)
			err = ixgbevf_get_rss_key_locked(&adapter->hw, key);

		spin_unlock_bh(&adapter->mbx_lock);
	}

	return err;
}

static u32 ixgbevf_get_priv_flags(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 priv_flags = 0;

	if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
		priv_flags |= IXGBEVF_PRIV_FLAGS_LEGACY_RX;

	return priv_flags;
}

static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int flags = adapter->flags;

	flags &= ~IXGBEVF_FLAGS_LEGACY_RX;
	if (priv_flags & IXGBEVF_PRIV_FLAGS_LEGACY_RX)
		flags |= IXGBEVF_FLAGS_LEGACY_RX;

	if (flags != adapter->flags) {
		adapter->flags = flags;

		/* reset interface to repopulate queues */
		if (netif_running(netdev))
			ixgbevf_reinit_locked(adapter);
	}

	return 0;
}

static const struct ethtool_ops ixgbevf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.get_drvinfo		= ixgbevf_get_drvinfo,
	.get_regs_len		= ixgbevf_get_regs_len,
	.get_regs		= ixgbevf_get_regs,
	.nway_reset		= ixgbevf_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= ixgbevf_get_ringparam,
	.set_ringparam		= ixgbevf_set_ringparam,
	.get_msglevel		= ixgbevf_get_msglevel,
	.set_msglevel		= ixgbevf_set_msglevel,
	.self_test		= ixgbevf_diag_test,
	.get_sset_count		= ixgbevf_get_sset_count,
	.get_strings		= ixgbevf_get_strings,
	.get_ethtool_stats	= ixgbevf_get_ethtool_stats,
	.get_coalesce		= ixgbevf_get_coalesce,
	.set_coalesce		= ixgbevf_set_coalesce,
	.get_rxnfc		= ixgbevf_get_rxnfc,
	.get_rxfh_indir_size	= ixgbevf_get_rxfh_indir_size,
	.get_rxfh_key_size	= ixgbevf_get_rxfh_key_size,
	.get_rxfh		= ixgbevf_get_rxfh,
	.get_link_ksettings	= ixgbevf_get_link_ksettings,
	.get_priv_flags		= ixgbevf_get_priv_flags,
	.set_priv_flags		= ixgbevf_set_priv_flags,
};

void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbevf_ethtool_ops;
}