cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

gve_ethtool.c (18428B)


// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"

static void gve_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *info)
{
	struct gve_priv *priv = netdev_priv(netdev);

	strscpy(info->driver, "gve", sizeof(info->driver));
	strscpy(info->version, gve_version_str, sizeof(info->version));
	strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}

static void gve_set_msglevel(struct net_device *netdev, u32 value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	priv->msg_enable = value;
}

static u32 gve_get_msglevel(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);

	return priv->msg_enable;
}

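/* Stat name tables exported via ethtool -S. The per-queue rx/tx names
 * contain a %u placeholder that gve_get_strings() fills in with the
 * queue index, so every queue gets its own set of counters.
 */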
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
	"rx_dropped", "tx_dropped", "tx_timeouts",
	"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
	"interface_up_cnt", "interface_down_cnt", "reset_cnt",
	"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};

static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]",
	"rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
	"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
	"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
	"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
};

static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
	"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
	"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
	"tx_dma_mapping_error[%u]",
};

static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
	"adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
	"adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
	"adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
	"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
	"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
	"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
	"adminq_report_stats_cnt", "adminq_report_link_speed_cnt"
};

static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
	"report-stats",
};

#define GVE_MAIN_STATS_LEN  ARRAY_SIZE(gve_gstrings_main_stats)
#define GVE_ADMINQ_STATS_LEN  ARRAY_SIZE(gve_gstrings_adminq_stats)
#define NUM_GVE_TX_CNTS	ARRAY_SIZE(gve_gstrings_tx_stats)
#define NUM_GVE_RX_CNTS	ARRAY_SIZE(gve_gstrings_rx_stats)
#define GVE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(gve_gstrings_priv_flags)

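/* Emit stat/flag names for the ethtool core. The ordering here must match
 * the order in which gve_get_ethtool_stats() writes values: main stats,
 * then per-RX-queue stats, then per-TX-queue stats, then adminq stats.
 */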
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct gve_priv *priv = netdev_priv(netdev);
	char *s = (char *)data;
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(s, *gve_gstrings_main_stats,
		       sizeof(gve_gstrings_main_stats));
		s += sizeof(gve_gstrings_main_stats);

		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
			for (j = 0; j < NUM_GVE_RX_CNTS; j++) {
				snprintf(s, ETH_GSTRING_LEN,
					 gve_gstrings_rx_stats[j], i);
				s += ETH_GSTRING_LEN;
			}
		}

		for (i = 0; i < priv->tx_cfg.num_queues; i++) {
			for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
				snprintf(s, ETH_GSTRING_LEN,
					 gve_gstrings_tx_stats[j], i);
				s += ETH_GSTRING_LEN;
			}
		}

		memcpy(s, *gve_gstrings_adminq_stats,
		       sizeof(gve_gstrings_adminq_stats));
		s += sizeof(gve_gstrings_adminq_stats);
		break;

	case ETH_SS_PRIV_FLAGS:
		memcpy(s, *gve_gstrings_priv_flags,
		       sizeof(gve_gstrings_priv_flags));
		s += sizeof(gve_gstrings_priv_flags);
		break;

	default:
		break;
	}
}

static int gve_get_sset_count(struct net_device *netdev, int sset)
{
	struct gve_priv *priv = netdev_priv(netdev);

	switch (sset) {
	case ETH_SS_STATS:
		return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
		       (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
		       (priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS);
	case ETH_SS_PRIV_FLAGS:
		return GVE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

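/* Fill the u64 stats array handed to us by the ethtool core. Per-ring
 * counters are read under the ring's u64_stats seqcount and retried on a
 * concurrent writer, so 64-bit values stay consistent on 32-bit hosts.
 * Counters labelled "from NIC" come from the shared stats report buffer,
 * which the device fills in response to the report_stats adminq command.
 */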
static void
gve_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats, u64 *data)
{
	u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail,
		tmp_rx_buf_alloc_fail, tmp_rx_desc_err_dropped_pkt,
		tmp_tx_pkts, tmp_tx_bytes;
	u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
		rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, tx_dropped;
	int stats_idx, base_stats_idx, max_stats_idx;
	struct stats *report_stats;
	int *rx_qid_to_stats_idx;
	int *tx_qid_to_stats_idx;
	struct gve_priv *priv;
	bool skip_nic_stats;
	unsigned int start;
	int ring;
	int i, j;

	ASSERT_RTNL();

	priv = netdev_priv(netdev);
	report_stats = priv->stats_report->stats;
	rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
					    sizeof(int), GFP_KERNEL);
	if (!rx_qid_to_stats_idx)
		return;
	tx_qid_to_stats_idx = kmalloc_array(priv->tx_cfg.num_queues,
					    sizeof(int), GFP_KERNEL);
	if (!tx_qid_to_stats_idx) {
		kfree(rx_qid_to_stats_idx);
		return;
	}
	for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0,
	     rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0;
	     ring < priv->rx_cfg.num_queues; ring++) {
		if (priv->rx) {
			do {
				struct gve_rx_ring *rx = &priv->rx[ring];

				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_pkts = rx->rpackets;
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			rx_pkts += tmp_rx_pkts;
			rx_bytes += tmp_rx_bytes;
			rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
			rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
			rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
		}
	}
	for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
	     ring < priv->tx_cfg.num_queues; ring++) {
		if (priv->tx) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_pkts = priv->tx[ring].pkt_done;
				tmp_tx_bytes = priv->tx[ring].bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			tx_pkts += tmp_tx_pkts;
			tx_bytes += tmp_tx_bytes;
			tx_dropped += priv->tx[ring].dropped_pkt;
		}
	}

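	/* Main stats: the writes below must stay in the same order as
	 * gve_gstrings_main_stats[] above.
	 */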
	i = 0;
	data[i++] = rx_pkts;
	data[i++] = tx_pkts;
	data[i++] = rx_bytes;
	data[i++] = tx_bytes;
	/* total rx dropped packets */
	data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
		    rx_desc_err_dropped_pkt;
	data[i++] = tx_dropped;
	data[i++] = priv->tx_timeo_cnt;
	data[i++] = rx_skb_alloc_fail;
	data[i++] = rx_buf_alloc_fail;
	data[i++] = rx_desc_err_dropped_pkt;
	data[i++] = priv->interface_up_cnt;
	data[i++] = priv->interface_down_cnt;
	data[i++] = priv->reset_cnt;
	data[i++] = priv->page_alloc_fail;
	data[i++] = priv->dma_mapping_error;
	data[i++] = priv->stats_report_trigger_cnt;
	i = GVE_MAIN_STATS_LEN;

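	/* The stats report is laid out as driver-written tx/rx regions
	 * followed by a NIC-written rx region and then a NIC-written tx
	 * region, each with a fixed number of entries per queue. A zero
	 * stat_name means the NIC has not written its region yet, in which
	 * case those slots are skipped rather than reported.
	 */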
	/* For rx cross-reporting stats, start from nic rx stats in report */
	base_stats_idx = GVE_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
		GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
	max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
		base_stats_idx;
	/* Preprocess the stats report for rx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
		stats_idx += NIC_RX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		rx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk RX rings */
	if (priv->rx) {
		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
			struct gve_rx_ring *rx = &priv->rx[ring];

			data[i++] = rx->fill_cnt;
			data[i++] = rx->cnt;
			data[i++] = rx->fill_cnt - rx->cnt;
			do {
				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			data[i++] = tmp_rx_bytes;
			data[i++] = rx->rx_cont_packet_cnt;
			data[i++] = rx->rx_frag_flip_cnt;
			data[i++] = rx->rx_frag_copy_cnt;
			/* rx dropped packets */
			data[i++] = tmp_rx_skb_alloc_fail +
				tmp_rx_buf_alloc_fail +
				tmp_rx_desc_err_dropped_pkt;
			data[i++] = rx->rx_copybreak_pkt;
			data[i++] = rx->rx_copied_pkt;
			/* stats from NIC */
			if (skip_nic_stats) {
				/* skip NIC rx stats */
				i += NIC_RX_STATS_REPORT_NUM;
				continue;
			}
			for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
				u64 value =
				be64_to_cpu(report_stats[rx_qid_to_stats_idx[ring] + j].value);

				data[i++] = value;
			}
		}
	} else {
		i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
	}

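	/* The NIC tx region follows the NIC rx region in the report; build
	 * the same queue-id-to-index map before walking the TX rings.
	 */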
	/* For tx cross-reporting stats, start from nic tx stats in report */
	base_stats_idx = max_stats_idx;
	max_stats_idx = NIC_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
		max_stats_idx;
	/* Preprocess the stats report for tx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
		stats_idx += NIC_TX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		tx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk TX rings */
	if (priv->tx) {
		for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
			struct gve_tx_ring *tx = &priv->tx[ring];

			if (gve_is_gqi(priv)) {
				data[i++] = tx->req;
				data[i++] = tx->done;
				data[i++] = tx->req - tx->done;
			} else {
				/* DQO doesn't currently support
				 * posted/completed descriptor counts.
				 */
				data[i++] = 0;
				data[i++] = 0;
				data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head;
			}
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_bytes = tx->bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			data[i++] = tmp_tx_bytes;
			data[i++] = tx->wake_queue;
			data[i++] = tx->stop_queue;
			data[i++] = gve_tx_load_event_counter(priv, tx);
			data[i++] = tx->dma_mapping_error;
			/* stats from NIC */
			if (skip_nic_stats) {
				/* skip NIC tx stats */
				i += NIC_TX_STATS_REPORT_NUM;
				continue;
			}
			for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
				u64 value =
				be64_to_cpu(report_stats[tx_qid_to_stats_idx[ring] + j].value);
				data[i++] = value;
			}
		}
	} else {
		i += priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS;
	}

	kfree(rx_qid_to_stats_idx);
	kfree(tx_qid_to_stats_idx);
	/* AQ Stats */
	data[i++] = priv->adminq_prod_cnt;
	data[i++] = priv->adminq_cmd_fail;
	data[i++] = priv->adminq_timeouts;
	data[i++] = priv->adminq_describe_device_cnt;
	data[i++] = priv->adminq_cfg_device_resources_cnt;
	data[i++] = priv->adminq_register_page_list_cnt;
	data[i++] = priv->adminq_unregister_page_list_cnt;
	data[i++] = priv->adminq_create_tx_queue_cnt;
	data[i++] = priv->adminq_create_rx_queue_cnt;
	data[i++] = priv->adminq_destroy_tx_queue_cnt;
	data[i++] = priv->adminq_destroy_rx_queue_cnt;
	data[i++] = priv->adminq_dcfg_device_resources_cnt;
	data[i++] = priv->adminq_set_driver_parameter_cnt;
	data[i++] = priv->adminq_report_stats_cnt;
	data[i++] = priv->adminq_report_link_speed_cnt;
}

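/* Channels map one-to-one onto the driver's rx/tx queue configs; no
 * combined or "other" vectors are exposed. With the carrier down a new
 * count is simply recorded and used on the next open; otherwise the
 * change is handed to gve_adjust_queues() to reconfigure the live device.
 */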
static void gve_get_channels(struct net_device *netdev,
			     struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->max_rx = priv->rx_cfg.max_queues;
	cmd->max_tx = priv->tx_cfg.max_queues;
	cmd->max_other = 0;
	cmd->max_combined = 0;
	cmd->rx_count = priv->rx_cfg.num_queues;
	cmd->tx_count = priv->tx_cfg.num_queues;
	cmd->other_count = 0;
	cmd->combined_count = 0;
}

static int gve_set_channels(struct net_device *netdev,
			    struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	struct gve_queue_config new_tx_cfg = priv->tx_cfg;
	struct gve_queue_config new_rx_cfg = priv->rx_cfg;
	struct ethtool_channels old_settings;
	int new_tx = cmd->tx_count;
	int new_rx = cmd->rx_count;

	gve_get_channels(netdev, &old_settings);

	/* Changing combined is not allowed */
	if (cmd->combined_count != old_settings.combined_count)
		return -EINVAL;

	if (!new_rx || !new_tx)
		return -EINVAL;

	if (!netif_carrier_ok(netdev)) {
		priv->tx_cfg.num_queues = new_tx;
		priv->rx_cfg.num_queues = new_rx;
		return 0;
	}

	new_tx_cfg.num_queues = new_tx;
	new_rx_cfg.num_queues = new_rx;

	return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
}

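/* Ring sizes are fixed by the device description in this version of the
 * driver, so current and max pending are reported as the same value and
 * there is no set_ringparam hook.
 */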
static void gve_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *cmd,
			      struct kernel_ethtool_ringparam *kernel_cmd,
			      struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->rx_max_pending = priv->rx_desc_cnt;
	cmd->tx_max_pending = priv->tx_desc_cnt;
	cmd->rx_pending = priv->rx_desc_cnt;
	cmd->tx_pending = priv->tx_desc_cnt;
}

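/* ethtool --reset: only a full reset (ETH_RESET_ALL) is supported; it is
 * serviced by gve_reset() and the flags are cleared to signal completion.
 */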
static int gve_user_reset(struct net_device *netdev, u32 *flags)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (*flags == ETH_RESET_ALL) {
		*flags = 0;
		return gve_reset(priv, true);
	}

	return -EOPNOTSUPP;
}

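/* ETHTOOL_RX_COPYBREAK sets the largest packet the rx path will copy into
 * a freshly allocated skb instead of handing up the receive buffer itself.
 * The ceiling is half a page for the GQI queue format and the DQO data
 * buffer size otherwise.
 */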
static int gve_get_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna, void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)value = priv->rx_copybreak;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int gve_set_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna,
			   const void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 len;

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
	{
		u32 max_copybreak = gve_is_gqi(priv) ?
			(PAGE_SIZE / 2) : priv->data_buffer_size_dqo;

		len = *(u32 *)value;
		if (len > max_copybreak)
			return -EINVAL;
		priv->rx_copybreak = len;
		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}

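/* A single private flag, "report-stats" (bit 0), controls whether the
 * driver periodically asks the device to refresh the shared stats report
 * consumed by gve_get_ethtool_stats() above.
 */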
static u32 gve_get_priv_flags(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 ret_flags = 0;

	/* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */
	if (priv->ethtool_flags & BIT(0))
		ret_flags |= BIT(0);
	return ret_flags;
}

static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u64 ori_flags, new_flags;

	ori_flags = READ_ONCE(priv->ethtool_flags);
	new_flags = ori_flags;

	/* Only one priv flag exists: report-stats (BIT(0)) */
	if (flags & BIT(0))
		new_flags |= BIT(0);
	else
		new_flags &= ~(BIT(0));
	priv->ethtool_flags = new_flags;
	/* start report-stats timer when user turns report stats on. */
	if (flags & BIT(0)) {
		mod_timer(&priv->stats_report_timer,
			  round_jiffies(jiffies +
					msecs_to_jiffies(priv->stats_report_timer_period)));
	}
	/* Zero off gve stats when report-stats is turned off and
	 * delete the report stats timer.
	 */
	if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
		int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
			priv->tx_cfg.num_queues;
		int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
			priv->rx_cfg.num_queues;

		memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
				   sizeof(struct stats));
		del_timer_sync(&priv->stats_report_timer);
	}
	return 0;
}

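/* Report the link speed as last returned by the report_link_speed adminq
 * command; only base.speed is filled in here.
 */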
static int gve_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = gve_adminq_report_link_speed(priv);

	cmd->base.speed = priv->link_speed;
	return err;
}

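/* Interrupt coalescing is only configurable for the DQO queue format; on
 * GQI both handlers return -EOPNOTSUPP. New values are bounded by
 * GVE_MAX_ITR_INTERVAL_DQO and pushed to every affected notify block.
 */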
static int gve_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_ec,
			    struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (gve_is_gqi(priv))
		return -EOPNOTSUPP;
	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
	ec->rx_coalesce_usecs = priv->rx_coalesce_usecs;

	return 0;
}

static int gve_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_ec,
			    struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 tx_usecs_orig = priv->tx_coalesce_usecs;
	u32 rx_usecs_orig = priv->rx_coalesce_usecs;
	int idx;

	if (gve_is_gqi(priv))
		return -EOPNOTSUPP;

	if (ec->tx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO ||
	    ec->rx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO)
		return -EINVAL;
	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;

	if (tx_usecs_orig != priv->tx_coalesce_usecs) {
		for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
			int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->tx_coalesce_usecs);
		}
	}

	if (rx_usecs_orig != priv->rx_coalesce_usecs) {
		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
			int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->rx_coalesce_usecs);
		}
	}

	return 0;
}

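/* ethtool_ops table for gve; presumably hooked up to the netdev during
 * probe (netdev->ethtool_ops = &gve_ethtool_ops in gve_main.c).
 */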
const struct ethtool_ops gve_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.get_drvinfo = gve_get_drvinfo,
	.get_strings = gve_get_strings,
	.get_sset_count = gve_get_sset_count,
	.get_ethtool_stats = gve_get_ethtool_stats,
	.set_msglevel = gve_set_msglevel,
	.get_msglevel = gve_get_msglevel,
	.set_channels = gve_set_channels,
	.get_channels = gve_get_channels,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gve_get_coalesce,
	.set_coalesce = gve_set_coalesce,
	.get_ringparam = gve_get_ringparam,
	.reset = gve_user_reset,
	.get_tunable = gve_get_tunable,
	.set_tunable = gve_set_tunable,
	.get_priv_flags = gve_get_priv_flags,
	.set_priv_flags = gve_set_priv_flags,
	.get_link_ksettings = gve_get_link_ksettings
};