cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

nicvf_ethtool.c (23734B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright (C) 2015 Cavium, Inc.
      4 */
      5
      6/* ETHTOOL Support for VNIC_VF Device */
      7
      8#include <linux/ethtool.h>
      9#include <linux/pci.h>
     10#include <linux/net_tstamp.h>
     11
     12#include "nic_reg.h"
     13#include "nic.h"
     14#include "nicvf_queues.h"
     15#include "q_struct.h"
     16#include "thunder_bgx.h"
     17#include "../common/cavium_ptp.h"
     18
     19#define DRV_NAME	"nicvf"
     20
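/* Describes one ethtool statistic: its display name and the u64 slot
 * index of the corresponding field inside the stats structure.
 */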
     21struct nicvf_stat {
     22	char name[ETH_GSTRING_LEN];
     23	unsigned int index;
     24};
     25
     26#define NICVF_HW_STAT(stat) { \
     27	.name = #stat, \
     28	.index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \
     29}
     30
     31#define NICVF_DRV_STAT(stat) { \
     32	.name = #stat, \
     33	.index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \
     34}
     35
     36static const struct nicvf_stat nicvf_hw_stats[] = {
     37	NICVF_HW_STAT(rx_bytes),
     38	NICVF_HW_STAT(rx_frames),
     39	NICVF_HW_STAT(rx_ucast_frames),
     40	NICVF_HW_STAT(rx_bcast_frames),
     41	NICVF_HW_STAT(rx_mcast_frames),
     42	NICVF_HW_STAT(rx_drops),
     43	NICVF_HW_STAT(rx_drop_red),
     44	NICVF_HW_STAT(rx_drop_red_bytes),
     45	NICVF_HW_STAT(rx_drop_overrun),
     46	NICVF_HW_STAT(rx_drop_overrun_bytes),
     47	NICVF_HW_STAT(rx_drop_bcast),
     48	NICVF_HW_STAT(rx_drop_mcast),
     49	NICVF_HW_STAT(rx_drop_l3_bcast),
     50	NICVF_HW_STAT(rx_drop_l3_mcast),
     51	NICVF_HW_STAT(rx_fcs_errors),
     52	NICVF_HW_STAT(rx_l2_errors),
     53	NICVF_HW_STAT(tx_bytes),
     54	NICVF_HW_STAT(tx_frames),
     55	NICVF_HW_STAT(tx_ucast_frames),
     56	NICVF_HW_STAT(tx_bcast_frames),
     57	NICVF_HW_STAT(tx_mcast_frames),
     58	NICVF_HW_STAT(tx_drops),
     59};
     60
     61static const struct nicvf_stat nicvf_drv_stats[] = {
     62	NICVF_DRV_STAT(rx_bgx_truncated_pkts),
     63	NICVF_DRV_STAT(rx_jabber_errs),
     64	NICVF_DRV_STAT(rx_fcs_errs),
     65	NICVF_DRV_STAT(rx_bgx_errs),
     66	NICVF_DRV_STAT(rx_prel2_errs),
     67	NICVF_DRV_STAT(rx_l2_hdr_malformed),
     68	NICVF_DRV_STAT(rx_oversize),
     69	NICVF_DRV_STAT(rx_undersize),
     70	NICVF_DRV_STAT(rx_l2_len_mismatch),
     71	NICVF_DRV_STAT(rx_l2_pclp),
     72	NICVF_DRV_STAT(rx_ip_ver_errs),
     73	NICVF_DRV_STAT(rx_ip_csum_errs),
     74	NICVF_DRV_STAT(rx_ip_hdr_malformed),
     75	NICVF_DRV_STAT(rx_ip_payload_malformed),
     76	NICVF_DRV_STAT(rx_ip_ttl_errs),
     77	NICVF_DRV_STAT(rx_l3_pclp),
     78	NICVF_DRV_STAT(rx_l4_malformed),
     79	NICVF_DRV_STAT(rx_l4_csum_errs),
     80	NICVF_DRV_STAT(rx_udp_len_errs),
     81	NICVF_DRV_STAT(rx_l4_port_errs),
     82	NICVF_DRV_STAT(rx_tcp_flag_errs),
     83	NICVF_DRV_STAT(rx_tcp_offset_errs),
     84	NICVF_DRV_STAT(rx_l4_pclp),
     85	NICVF_DRV_STAT(rx_truncated_pkts),
     86
     87	NICVF_DRV_STAT(tx_desc_fault),
     88	NICVF_DRV_STAT(tx_hdr_cons_err),
     89	NICVF_DRV_STAT(tx_subdesc_err),
     90	NICVF_DRV_STAT(tx_max_size_exceeded),
     91	NICVF_DRV_STAT(tx_imm_size_oflow),
     92	NICVF_DRV_STAT(tx_data_seq_err),
     93	NICVF_DRV_STAT(tx_mem_seq_err),
     94	NICVF_DRV_STAT(tx_lock_viol),
     95	NICVF_DRV_STAT(tx_data_fault),
     96	NICVF_DRV_STAT(tx_tstmp_conflict),
     97	NICVF_DRV_STAT(tx_tstmp_timeout),
     98	NICVF_DRV_STAT(tx_mem_fault),
     99	NICVF_DRV_STAT(tx_csum_overlap),
    100	NICVF_DRV_STAT(tx_csum_overflow),
    101
    102	NICVF_DRV_STAT(tx_tso),
    103	NICVF_DRV_STAT(tx_timeout),
    104	NICVF_DRV_STAT(txq_stop),
    105	NICVF_DRV_STAT(txq_wake),
    106	NICVF_DRV_STAT(rcv_buffer_alloc_failures),
    107	NICVF_DRV_STAT(page_alloc),
    108};
    109
    110static const struct nicvf_stat nicvf_queue_stats[] = {
    111	{ "bytes", 0 },
    112	{ "frames", 1 },
    113};
    114
    115static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats);
    116static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats);
    117static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats);
    118
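/* Report link state, speed, duplex and supported/advertised modes based
 * on the current BGX link parameters (nic->speed, nic->duplex, nic->mac_type).
 */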
    119static int nicvf_get_link_ksettings(struct net_device *netdev,
    120				    struct ethtool_link_ksettings *cmd)
    121{
    122	struct nicvf *nic = netdev_priv(netdev);
    123	u32 supported, advertising;
    124
    125	supported = 0;
    126	advertising = 0;
    127
    128	if (!nic->link_up) {
    129		cmd->base.duplex = DUPLEX_UNKNOWN;
    130		cmd->base.speed = SPEED_UNKNOWN;
    131		return 0;
    132	}
    133
    134	switch (nic->speed) {
    135	case SPEED_1000:
    136		cmd->base.port = PORT_MII | PORT_TP;
    137		cmd->base.autoneg = AUTONEG_ENABLE;
    138		supported |= SUPPORTED_MII | SUPPORTED_TP;
    139		supported |= SUPPORTED_1000baseT_Full |
    140				  SUPPORTED_1000baseT_Half |
    141				  SUPPORTED_100baseT_Full  |
    142				  SUPPORTED_100baseT_Half  |
    143				  SUPPORTED_10baseT_Full   |
    144				  SUPPORTED_10baseT_Half;
    145		supported |= SUPPORTED_Autoneg;
    146		advertising |= ADVERTISED_1000baseT_Full |
    147				    ADVERTISED_1000baseT_Half |
    148				    ADVERTISED_100baseT_Full  |
    149				    ADVERTISED_100baseT_Half  |
    150				    ADVERTISED_10baseT_Full   |
    151				    ADVERTISED_10baseT_Half;
    152		break;
    153	case SPEED_10000:
    154		if (nic->mac_type == BGX_MODE_RXAUI) {
    155			cmd->base.port = PORT_TP;
    156			supported |= SUPPORTED_TP;
    157		} else {
    158			cmd->base.port = PORT_FIBRE;
    159			supported |= SUPPORTED_FIBRE;
    160		}
    161		cmd->base.autoneg = AUTONEG_DISABLE;
    162		supported |= SUPPORTED_10000baseT_Full;
    163		break;
    164	case SPEED_40000:
    165		cmd->base.port = PORT_FIBRE;
    166		cmd->base.autoneg = AUTONEG_DISABLE;
    167		supported |= SUPPORTED_FIBRE;
    168		supported |= SUPPORTED_40000baseCR4_Full;
    169		break;
    170	}
    171	cmd->base.duplex = nic->duplex;
    172	cmd->base.speed = nic->speed;
    173
    174	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
    175						supported);
    176	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
    177						advertising);
    178
    179	return 0;
    180}
    181
    182static u32 nicvf_get_link(struct net_device *netdev)
    183{
    184	struct nicvf *nic = netdev_priv(netdev);
    185
    186	return nic->link_up;
    187}
    188
    189static void nicvf_get_drvinfo(struct net_device *netdev,
    190			      struct ethtool_drvinfo *info)
    191{
    192	struct nicvf *nic = netdev_priv(netdev);
    193
    194	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
    195	strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
    196}
    197
    198static u32 nicvf_get_msglevel(struct net_device *netdev)
    199{
    200	struct nicvf *nic = netdev_priv(netdev);
    201
    202	return nic->msg_enable;
    203}
    204
    205static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
    206{
    207	struct nicvf *nic = netdev_priv(netdev);
    208
    209	nic->msg_enable = lvl;
    210}
    211
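/* Emit "rxqN: <stat>" / "txqN: <stat>" strings for one queue set; qset
 * offsets the queue numbering so secondary Qsets continue after the primary.
 */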
    212static void nicvf_get_qset_strings(struct nicvf *nic, u8 **data, int qset)
    213{
    214	int stats, qidx;
    215	int start_qidx = qset * MAX_RCV_QUEUES_PER_QS;
    216
    217	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
    218		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
    219			sprintf(*data, "rxq%d: %s", qidx + start_qidx,
    220				nicvf_queue_stats[stats].name);
    221			*data += ETH_GSTRING_LEN;
    222		}
    223	}
    224
    225	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
    226		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
    227			sprintf(*data, "txq%d: %s", qidx + start_qidx,
    228				nicvf_queue_stats[stats].name);
    229			*data += ETH_GSTRING_LEN;
    230		}
    231	}
    232}
    233
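/* ETH_SS_STATS string table: HW stats, driver stats, per-queue stats for
 * the primary and all secondary Qsets, then BGX RX/TX statistics.
 */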
    234static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
    235{
    236	struct nicvf *nic = netdev_priv(netdev);
    237	int stats;
    238	int sqs;
    239
    240	if (sset != ETH_SS_STATS)
    241		return;
    242
    243	for (stats = 0; stats < nicvf_n_hw_stats; stats++) {
    244		memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN);
    245		data += ETH_GSTRING_LEN;
    246	}
    247
    248	for (stats = 0; stats < nicvf_n_drv_stats; stats++) {
    249		memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN);
    250		data += ETH_GSTRING_LEN;
    251	}
    252
    253	nicvf_get_qset_strings(nic, &data, 0);
    254
    255	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
    256		if (!nic->snicvf[sqs])
    257			continue;
    258		nicvf_get_qset_strings(nic->snicvf[sqs], &data, sqs + 1);
    259	}
    260
    261	for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
    262		sprintf(data, "bgx_rxstat%d: ", stats);
    263		data += ETH_GSTRING_LEN;
    264	}
    265
    266	for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) {
    267		sprintf(data, "bgx_txstat%d: ", stats);
    268		data += ETH_GSTRING_LEN;
    269	}
    270}
    271
    272static int nicvf_get_sset_count(struct net_device *netdev, int sset)
    273{
    274	struct nicvf *nic = netdev_priv(netdev);
    275	int qstats_count;
    276	int sqs;
    277
    278	if (sset != ETH_SS_STATS)
    279		return -EINVAL;
    280
    281	qstats_count = nicvf_n_queue_stats *
    282		       (nic->qs->rq_cnt + nic->qs->sq_cnt);
    283	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
    284		struct nicvf *snic;
    285
    286		snic = nic->snicvf[sqs];
    287		if (!snic)
    288			continue;
    289		qstats_count += nicvf_n_queue_stats *
    290				(snic->qs->rq_cnt + snic->qs->sq_cnt);
    291	}
    292
    293	return nicvf_n_hw_stats + nicvf_n_drv_stats +
    294		qstats_count +
    295		BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
    296}
    297
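/* Copy the per-RQ and per-SQ counters of one queue set into the stats buffer. */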
    298static void nicvf_get_qset_stats(struct nicvf *nic,
    299				 struct ethtool_stats *stats, u64 **data)
    300{
    301	int stat, qidx;
    302
    303	if (!nic)
    304		return;
    305
    306	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
    307		nicvf_update_rq_stats(nic, qidx);
    308		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
    309			*((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)
    310					[nicvf_queue_stats[stat].index];
    311	}
    312
    313	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
    314		nicvf_update_sq_stats(nic, qidx);
    315		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
    316			*((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats)
    317					[nicvf_queue_stats[stat].index];
    318	}
    319}
    320
    321static void nicvf_get_ethtool_stats(struct net_device *netdev,
    322				    struct ethtool_stats *stats, u64 *data)
    323{
    324	struct nicvf *nic = netdev_priv(netdev);
    325	int stat, tmp_stats;
    326	int sqs, cpu;
    327
    328	nicvf_update_stats(nic);
    329
    330	/* Update LMAC stats */
    331	nicvf_update_lmac_stats(nic);
    332
    333	for (stat = 0; stat < nicvf_n_hw_stats; stat++)
    334		*(data++) = ((u64 *)&nic->hw_stats)
    335				[nicvf_hw_stats[stat].index];
    336	for (stat = 0; stat < nicvf_n_drv_stats; stat++) {
    337		tmp_stats = 0;
    338		for_each_possible_cpu(cpu)
    339			tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
    340				     [nicvf_drv_stats[stat].index];
    341		*(data++) = tmp_stats;
    342	}
    343
    344	nicvf_get_qset_stats(nic, stats, &data);
    345
    346	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
    347		if (!nic->snicvf[sqs])
    348			continue;
    349		nicvf_get_qset_stats(nic->snicvf[sqs], stats, &data);
    350	}
    351
    352	for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
    353		*(data++) = nic->bgx_stats.rx_stats[stat];
    354	for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
    355		*(data++) = nic->bgx_stats.tx_stats[stat];
    356}
    357
    358static int nicvf_get_regs_len(struct net_device *dev)
    359{
    360	return sizeof(u64) * NIC_VF_REG_COUNT;
    361}
    362
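/* Dump VF CSRs: VNIC config, mailbox, interrupt, RSS, Tx/Rx statistics and
 * all per-queue (CQ/RQ/SQ/RBDR) registers.
 */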
    363static void nicvf_get_regs(struct net_device *dev,
    364			   struct ethtool_regs *regs, void *reg)
    365{
    366	struct nicvf *nic = netdev_priv(dev);
    367	u64 *p = (u64 *)reg;
    368	u64 reg_offset;
    369	int mbox, key, stat, q;
    370	int i = 0;
    371
    372	regs->version = 0;
     373	memset(p, 0, NIC_VF_REG_COUNT * sizeof(u64));
    374
    375	p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG);
    376	/* Mailbox registers */
    377	for (mbox = 0; mbox < NIC_PF_VF_MAILBOX_SIZE; mbox++)
    378		p[i++] = nicvf_reg_read(nic,
    379					NIC_VF_PF_MAILBOX_0_1 | (mbox << 3));
    380
    381	p[i++] = nicvf_reg_read(nic, NIC_VF_INT);
    382	p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S);
    383	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C);
    384	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
    385	p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
    386
    387	for (key = 0; key < RSS_HASH_KEY_SIZE; key++)
    388		p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3));
    389
    390	/* Tx/Rx statistics */
    391	for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++)
    392		p[i++] = nicvf_reg_read(nic,
    393					NIC_VNIC_TX_STAT_0_4 | (stat << 3));
    394
     395	for (stat = 0; stat < RX_STATS_ENUM_LAST; stat++)
    396		p[i++] = nicvf_reg_read(nic,
    397					NIC_VNIC_RX_STAT_0_13 | (stat << 3));
    398
    399	p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG);
    400
    401	/* All completion queue's registers */
    402	for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
    403		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
    404		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
    405		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
    406		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
    407		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
    408		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
    409		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
    410		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
    411		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
    412		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
    413	}
    414
    415	/* All receive queue's registers */
    416	for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
    417		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
    418		p[i++] = nicvf_queue_reg_read(nic,
    419						  NIC_QSET_RQ_0_7_STAT_0_1, q);
    420		reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3);
    421		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
    422	}
    423
    424	for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
    425		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
    426		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
    427		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
    428		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
    429		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
    430		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
    431		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
    432		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
    433		/* Padding, was NIC_QSET_SQ_0_7_CNM_CHG, which
    434		 * produces bus errors when read
    435		 */
    436		p[i++] = 0;
    437		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
    438		reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
    439		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
    440	}
    441
    442	for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
    443		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
    444		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
    445		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
    446		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
    447		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
    448		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
    449		p[i++] = nicvf_queue_reg_read(nic,
    450					      NIC_QSET_RBDR_0_1_STATUS0, q);
    451		p[i++] = nicvf_queue_reg_read(nic,
    452					      NIC_QSET_RBDR_0_1_STATUS1, q);
    453		reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS;
    454		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
    455	}
    456}
    457
    458static int nicvf_get_coalesce(struct net_device *netdev,
    459			      struct ethtool_coalesce *cmd,
    460			      struct kernel_ethtool_coalesce *kernel_coal,
    461			      struct netlink_ext_ack *extack)
    462{
    463	struct nicvf *nic = netdev_priv(netdev);
    464
    465	cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs;
    466	return 0;
    467}
    468
    469static void nicvf_get_ringparam(struct net_device *netdev,
    470				struct ethtool_ringparam *ring,
    471				struct kernel_ethtool_ringparam *kernel_ring,
    472				struct netlink_ext_ack *extack)
    473{
    474	struct nicvf *nic = netdev_priv(netdev);
    475	struct queue_set *qs = nic->qs;
    476
    477	ring->rx_max_pending = MAX_CMP_QUEUE_LEN;
    478	ring->rx_pending = qs->cq_len;
    479	ring->tx_max_pending = MAX_SND_QUEUE_LEN;
    480	ring->tx_pending = qs->sq_len;
    481}
    482
    483static int nicvf_set_ringparam(struct net_device *netdev,
    484			       struct ethtool_ringparam *ring,
    485			       struct kernel_ethtool_ringparam *kernel_ring,
    486			       struct netlink_ext_ack *extack)
    487{
    488	struct nicvf *nic = netdev_priv(netdev);
    489	struct queue_set *qs = nic->qs;
    490	u32 rx_count, tx_count;
    491
    492	/* Due to HW errata this is not supported on T88 pass 1.x silicon */
    493	if (pass1_silicon(nic->pdev))
    494		return -EINVAL;
    495
    496	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
    497		return -EINVAL;
    498
    499	tx_count = clamp_t(u32, ring->tx_pending,
    500			   MIN_SND_QUEUE_LEN, MAX_SND_QUEUE_LEN);
    501	rx_count = clamp_t(u32, ring->rx_pending,
    502			   MIN_CMP_QUEUE_LEN, MAX_CMP_QUEUE_LEN);
    503
    504	if ((tx_count == qs->sq_len) && (rx_count == qs->cq_len))
    505		return 0;
    506
    507	/* Permitted lengths are 1K, 2K, 4K, 8K, 16K, 32K, 64K */
    508	qs->sq_len = rounddown_pow_of_two(tx_count);
    509	qs->cq_len = rounddown_pow_of_two(rx_count);
    510
    511	if (netif_running(netdev)) {
    512		nicvf_stop(netdev);
    513		nicvf_open(netdev);
    514	}
    515
    516	return 0;
    517}
    518
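/* Report which packet header fields feed the RSS hash for a given flow type. */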
    519static int nicvf_get_rss_hash_opts(struct nicvf *nic,
    520				   struct ethtool_rxnfc *info)
    521{
    522	info->data = 0;
    523
    524	switch (info->flow_type) {
    525	case TCP_V4_FLOW:
    526	case TCP_V6_FLOW:
    527	case UDP_V4_FLOW:
    528	case UDP_V6_FLOW:
    529	case SCTP_V4_FLOW:
    530	case SCTP_V6_FLOW:
    531		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
    532		fallthrough;
    533	case IPV4_FLOW:
    534	case IPV6_FLOW:
    535		info->data |= RXH_IP_SRC | RXH_IP_DST;
    536		break;
    537	default:
    538		return -EINVAL;
    539	}
    540
    541	return 0;
    542}
    543
    544static int nicvf_get_rxnfc(struct net_device *dev,
    545			   struct ethtool_rxnfc *info, u32 *rules)
    546{
    547	struct nicvf *nic = netdev_priv(dev);
    548	int ret = -EOPNOTSUPP;
    549
    550	switch (info->cmd) {
    551	case ETHTOOL_GRXRINGS:
    552		info->data = nic->rx_queues;
    553		ret = 0;
    554		break;
    555	case ETHTOOL_GRXFH:
    556		return nicvf_get_rss_hash_opts(nic, info);
    557	default:
    558		break;
    559	}
    560	return ret;
    561}
    562
    563static int nicvf_set_rss_hash_opts(struct nicvf *nic,
    564				   struct ethtool_rxnfc *info)
    565{
    566	struct nicvf_rss_info *rss = &nic->rss_info;
    567	u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
    568
    569	if (!rss->enable)
    570		netdev_err(nic->netdev,
    571			   "RSS is disabled, hash cannot be set\n");
    572
    573	netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
    574		    info->flow_type, info->data);
    575
    576	if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
    577		return -EINVAL;
    578
    579	switch (info->flow_type) {
    580	case TCP_V4_FLOW:
    581	case TCP_V6_FLOW:
    582		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
    583		case 0:
    584			rss_cfg &= ~(1ULL << RSS_HASH_TCP);
    585			break;
    586		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
    587			rss_cfg |= (1ULL << RSS_HASH_TCP);
    588			break;
    589		default:
    590			return -EINVAL;
    591		}
    592		break;
    593	case UDP_V4_FLOW:
    594	case UDP_V6_FLOW:
    595		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
    596		case 0:
    597			rss_cfg &= ~(1ULL << RSS_HASH_UDP);
    598			break;
    599		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
    600			rss_cfg |= (1ULL << RSS_HASH_UDP);
    601			break;
    602		default:
    603			return -EINVAL;
    604		}
    605		break;
    606	case SCTP_V4_FLOW:
    607	case SCTP_V6_FLOW:
    608		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
    609		case 0:
    610			rss_cfg &= ~(1ULL << RSS_HASH_L4ETC);
    611			break;
    612		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
    613			rss_cfg |= (1ULL << RSS_HASH_L4ETC);
    614			break;
    615		default:
    616			return -EINVAL;
    617		}
    618		break;
    619	case IPV4_FLOW:
    620	case IPV6_FLOW:
    621		rss_cfg = RSS_HASH_IP;
    622		break;
    623	default:
    624		return -EINVAL;
    625	}
    626
    627	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg);
    628	return 0;
    629}
    630
    631static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
    632{
    633	struct nicvf *nic = netdev_priv(dev);
    634
    635	switch (info->cmd) {
    636	case ETHTOOL_SRXFH:
    637		return nicvf_set_rss_hash_opts(nic, info);
    638	default:
    639		break;
    640	}
    641	return -EOPNOTSUPP;
    642}
    643
    644static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
    645{
    646	return RSS_HASH_KEY_SIZE * sizeof(u64);
    647}
    648
    649static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
    650{
    651	struct nicvf *nic = netdev_priv(dev);
    652
    653	return nic->rss_info.rss_size;
    654}
    655
    656static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
    657			  u8 *hfunc)
    658{
    659	struct nicvf *nic = netdev_priv(dev);
    660	struct nicvf_rss_info *rss = &nic->rss_info;
    661	int idx;
    662
    663	if (indir) {
    664		for (idx = 0; idx < rss->rss_size; idx++)
    665			indir[idx] = rss->ind_tbl[idx];
    666	}
    667
    668	if (hkey)
    669		memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
    670
    671	if (hfunc)
    672		*hfunc = ETH_RSS_HASH_TOP;
    673
    674	return 0;
    675}
    676
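/* Update the RSS indirection table and/or hash key and program them into HW. */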
    677static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
    678			  const u8 *hkey, const u8 hfunc)
    679{
    680	struct nicvf *nic = netdev_priv(dev);
    681	struct nicvf_rss_info *rss = &nic->rss_info;
    682	int idx;
    683
    684	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
    685		return -EOPNOTSUPP;
    686
    687	if (!rss->enable) {
    688		netdev_err(nic->netdev,
    689			   "RSS is disabled, cannot change settings\n");
    690		return -EIO;
    691	}
    692
    693	if (indir) {
    694		for (idx = 0; idx < rss->rss_size; idx++)
    695			rss->ind_tbl[idx] = indir[idx];
    696	}
    697
    698	if (hkey) {
    699		memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
    700		nicvf_set_rss_key(nic);
    701	}
    702
    703	nicvf_config_rss(nic);
    704	return 0;
    705}
    706
     707/* Get the number of queues the device supports and the current queue count */
    708static void nicvf_get_channels(struct net_device *dev,
    709			       struct ethtool_channels *channel)
    710{
    711	struct nicvf *nic = netdev_priv(dev);
    712
    713	memset(channel, 0, sizeof(*channel));
    714
    715	channel->max_rx = nic->max_queues;
    716	channel->max_tx = nic->max_queues;
    717
    718	channel->rx_count = nic->rx_queues;
    719	channel->tx_count = nic->tx_queues;
    720}
    721
     722/* Set the number of Tx/Rx queues to be used */
    723static int nicvf_set_channels(struct net_device *dev,
    724			      struct ethtool_channels *channel)
    725{
    726	struct nicvf *nic = netdev_priv(dev);
    727	int err = 0;
    728	bool if_up = netif_running(dev);
    729	u8 cqcount, txq_count;
    730
    731	if (!channel->rx_count || !channel->tx_count)
    732		return -EINVAL;
    733	if (channel->rx_count > nic->max_queues)
    734		return -EINVAL;
    735	if (channel->tx_count > nic->max_queues)
    736		return -EINVAL;
    737
    738	if (nic->xdp_prog &&
    739	    ((channel->tx_count + channel->rx_count) > nic->max_queues)) {
    740		netdev_err(nic->netdev,
    741			   "XDP mode, RXQs + TXQs > Max %d\n",
    742			   nic->max_queues);
    743		return -EINVAL;
    744	}
    745
    746	if (if_up)
    747		nicvf_stop(dev);
    748
    749	nic->rx_queues = channel->rx_count;
    750	nic->tx_queues = channel->tx_count;
    751	if (!nic->xdp_prog)
    752		nic->xdp_tx_queues = 0;
    753	else
    754		nic->xdp_tx_queues = channel->rx_count;
    755
    756	txq_count = nic->xdp_tx_queues + nic->tx_queues;
    757	cqcount = max(nic->rx_queues, txq_count);
    758
    759	if (cqcount > MAX_CMP_QUEUES_PER_QS) {
    760		nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS);
    761		nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
    762	} else {
    763		nic->sqs_count = 0;
    764	}
    765
    766	nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
    767	nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
    768	nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
    769
    770	err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues);
    771	if (err)
    772		return err;
    773
    774	if (if_up)
    775		nicvf_open(dev);
    776
    777	netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
    778		    nic->tx_queues, nic->rx_queues);
    779
    780	return err;
    781}
    782
    783static void nicvf_get_pauseparam(struct net_device *dev,
    784				 struct ethtool_pauseparam *pause)
    785{
    786	struct nicvf *nic = netdev_priv(dev);
    787	union nic_mbx mbx = {};
    788
    789	/* Supported only for 10G/40G interfaces */
    790	if ((nic->mac_type == BGX_MODE_SGMII) ||
    791	    (nic->mac_type == BGX_MODE_QSGMII) ||
    792	    (nic->mac_type == BGX_MODE_RGMII))
    793		return;
    794
    795	mbx.pfc.msg = NIC_MBOX_MSG_PFC;
    796	mbx.pfc.get = 1;
    797	if (!nicvf_send_msg_to_pf(nic, &mbx)) {
    798		pause->autoneg = nic->pfc.autoneg;
    799		pause->rx_pause = nic->pfc.fc_rx;
    800		pause->tx_pause = nic->pfc.fc_tx;
    801	}
    802}
    803
    804static int nicvf_set_pauseparam(struct net_device *dev,
    805				struct ethtool_pauseparam *pause)
    806{
    807	struct nicvf *nic = netdev_priv(dev);
    808	union nic_mbx mbx = {};
    809
    810	/* Supported only for 10G/40G interfaces */
    811	if ((nic->mac_type == BGX_MODE_SGMII) ||
    812	    (nic->mac_type == BGX_MODE_QSGMII) ||
    813	    (nic->mac_type == BGX_MODE_RGMII))
    814		return -EOPNOTSUPP;
    815
    816	if (pause->autoneg)
    817		return -EOPNOTSUPP;
    818
    819	mbx.pfc.msg = NIC_MBOX_MSG_PFC;
    820	mbx.pfc.get = 0;
    821	mbx.pfc.fc_rx = pause->rx_pause;
    822	mbx.pfc.fc_tx = pause->tx_pause;
    823	if (nicvf_send_msg_to_pf(nic, &mbx))
    824		return -EAGAIN;
    825
    826	nic->pfc.fc_rx = pause->rx_pause;
    827	nic->pfc.fc_tx = pause->tx_pause;
    828
    829	return 0;
    830}
    831
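/* Advertise hardware timestamping capabilities when the Cavium PTP clock is
 * available; otherwise fall back to the generic software-only capabilities.
 */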
    832static int nicvf_get_ts_info(struct net_device *netdev,
    833			     struct ethtool_ts_info *info)
    834{
    835	struct nicvf *nic = netdev_priv(netdev);
    836
    837	if (!nic->ptp_clock)
    838		return ethtool_op_get_ts_info(netdev, info);
    839
    840	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
    841				SOF_TIMESTAMPING_RX_SOFTWARE |
    842				SOF_TIMESTAMPING_SOFTWARE |
    843				SOF_TIMESTAMPING_TX_HARDWARE |
    844				SOF_TIMESTAMPING_RX_HARDWARE |
    845				SOF_TIMESTAMPING_RAW_HARDWARE;
    846
    847	info->phc_index = cavium_ptp_clock_index(nic->ptp_clock);
    848
    849	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
    850
    851	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
    852			   (1 << HWTSTAMP_FILTER_ALL);
    853
    854	return 0;
    855}
    856
    857static const struct ethtool_ops nicvf_ethtool_ops = {
    858	.get_link		= nicvf_get_link,
    859	.get_drvinfo		= nicvf_get_drvinfo,
    860	.get_msglevel		= nicvf_get_msglevel,
    861	.set_msglevel		= nicvf_set_msglevel,
    862	.get_strings		= nicvf_get_strings,
    863	.get_sset_count		= nicvf_get_sset_count,
    864	.get_ethtool_stats	= nicvf_get_ethtool_stats,
    865	.get_regs_len		= nicvf_get_regs_len,
    866	.get_regs		= nicvf_get_regs,
    867	.get_coalesce		= nicvf_get_coalesce,
    868	.get_ringparam		= nicvf_get_ringparam,
    869	.set_ringparam		= nicvf_set_ringparam,
    870	.get_rxnfc		= nicvf_get_rxnfc,
    871	.set_rxnfc		= nicvf_set_rxnfc,
    872	.get_rxfh_key_size	= nicvf_get_rxfh_key_size,
    873	.get_rxfh_indir_size	= nicvf_get_rxfh_indir_size,
    874	.get_rxfh		= nicvf_get_rxfh,
    875	.set_rxfh		= nicvf_set_rxfh,
    876	.get_channels		= nicvf_get_channels,
    877	.set_channels		= nicvf_set_channels,
    878	.get_pauseparam         = nicvf_get_pauseparam,
    879	.set_pauseparam         = nicvf_set_pauseparam,
    880	.get_ts_info		= nicvf_get_ts_info,
    881	.get_link_ksettings	= nicvf_get_link_ksettings,
    882};
    883
    884void nicvf_set_ethtool_ops(struct net_device *netdev)
    885{
    886	netdev->ethtool_ops = &nicvf_ethtool_ops;
    887}