cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

otx2_ethtool.c (37833B)


      1// SPDX-License-Identifier: GPL-2.0
      2/* Marvell RVU Ethernet driver
      3 *
      4 * Copyright (C) 2020 Marvell.
      5 *
      6 */
      7
      8#include <linux/pci.h>
      9#include <linux/ethtool.h>
     10#include <linux/stddef.h>
     11#include <linux/etherdevice.h>
     12#include <linux/log2.h>
     13#include <linux/net_tstamp.h>
     14#include <linux/linkmode.h>
     15
     16#include "otx2_common.h"
     17#include "otx2_ptp.h"
     18
     19#define DRV_NAME	"rvu-nicpf"
     20#define DRV_VF_NAME	"rvu-nicvf"
     21
/* Pairs an ethtool stat name with the slot index of the corresponding
 * counter inside its stats structure (u64 slots for device stats,
 * atomic_t slots for driver stats — see the OTX2_*_STAT() macros).
 */
struct otx2_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};
     26
/* HW device stats: builds an otx2_stat entry whose .index is the
 * member's position within struct otx2_dev_stats, in u64 units,
 * so the struct can later be indexed as a u64 array.
 */
#define OTX2_DEV_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
}
     32
/* Selects between the "supported" and "advertised" link-mode sets.
 * NOTE(review): no consumer is visible in this chunk — presumably used
 * by the ksettings helpers further down the file.
 */
enum link_mode {
	OTX2_MODE_SUPPORTED,
	OTX2_MODE_ADVERTISED
};
     37
/* Hardware device counters exposed via ethtool -S; each entry maps a
 * name to its u64 slot in struct otx2_dev_stats.
 */
static const struct otx2_stat otx2_dev_stats[] = {
	OTX2_DEV_STAT(rx_ucast_frames),
	OTX2_DEV_STAT(rx_bcast_frames),
	OTX2_DEV_STAT(rx_mcast_frames),

	OTX2_DEV_STAT(tx_ucast_frames),
	OTX2_DEV_STAT(tx_bcast_frames),
	OTX2_DEV_STAT(tx_mcast_frames),
};
     47
/* Driver level stats: builds an otx2_stat entry whose .index is the
 * member's position within struct otx2_drv_stats, in atomic_t units.
 */
#define OTX2_DRV_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct otx2_drv_stats, stat) / sizeof(atomic_t), \
}
     53
/* Software (driver-maintained) error counters, kept as atomic_t and
 * read with atomic_read() in otx2_get_ethtool_stats().
 */
static const struct otx2_stat otx2_drv_stats[] = {
	OTX2_DRV_STAT(rx_fcs_errs),
	OTX2_DRV_STAT(rx_oversize_errs),
	OTX2_DRV_STAT(rx_undersize_errs),
	OTX2_DRV_STAT(rx_csum_errs),
	OTX2_DRV_STAT(rx_len_errs),
	OTX2_DRV_STAT(rx_other_errs),
};
     62
/* Per-queue counters; indices 0/1 select the u64 slot inside each
 * queue's stats struct (see otx2_get_qset_stats()).
 */
static const struct otx2_stat otx2_queue_stats[] = {
	{ "bytes", 0 },
	{ "frames", 1 },
};
     67
/* Table sizes, used both for string emission and value emission so the
 * two always stay in sync.
 */
static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
     71
     72static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf);
     73
     74static void otx2_get_drvinfo(struct net_device *netdev,
     75			     struct ethtool_drvinfo *info)
     76{
     77	struct otx2_nic *pfvf = netdev_priv(netdev);
     78
     79	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
     80	strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
     81}
     82
     83static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
     84{
     85	int start_qidx = qset * pfvf->hw.rx_queues;
     86	int qidx, stats;
     87
     88	for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
     89		for (stats = 0; stats < otx2_n_queue_stats; stats++) {
     90			sprintf(*data, "rxq%d: %s", qidx + start_qidx,
     91				otx2_queue_stats[stats].name);
     92			*data += ETH_GSTRING_LEN;
     93		}
     94	}
     95	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
     96		for (stats = 0; stats < otx2_n_queue_stats; stats++) {
     97			sprintf(*data, "txq%d: %s", qidx + start_qidx,
     98				otx2_queue_stats[stats].name);
     99			*data += ETH_GSTRING_LEN;
    100		}
    101	}
    102}
    103
/* ethtool -S string table.  The emission order here must stay in sync
 * with both the count returned by otx2_get_sset_count() and the value
 * order produced by otx2_get_ethtool_stats().
 */
static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int stats;

	/* Only the statistics string set is supported */
	if (sset != ETH_SS_STATS)
		return;

	for (stats = 0; stats < otx2_n_dev_stats; stats++) {
		memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < otx2_n_drv_stats; stats++) {
		memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	otx2_get_qset_strings(pfvf, &data, 0);

	/* CGX MAC counters exist only on silicon without RPM */
	if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
		for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
			sprintf(data, "cgx_rxstat%d: ", stats);
			data += ETH_GSTRING_LEN;
		}

		for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
			sprintf(data, "cgx_txstat%d: ", stats);
			data += ETH_GSTRING_LEN;
		}
	}

	/* Trailing fixed entries: reset count plus the two FEC counters */
	strcpy(data, "reset_count");
	data += ETH_GSTRING_LEN;
	sprintf(data, "Fec Corrected Errors: ");
	data += ETH_GSTRING_LEN;
	sprintf(data, "Fec Uncorrected Errors: ");
	data += ETH_GSTRING_LEN;
}
    143
/* Copy per-queue bytes/frames counters into the ethtool value buffer,
 * advancing the caller's cursor.  Queues whose stats cannot be read
 * emit zeroes so the values stay aligned with the string table.
 * The @stats argument is unused but kept for the caller's convenience.
 */
static void otx2_get_qset_stats(struct otx2_nic *pfvf,
				struct ethtool_stats *stats, u64 **data)
{
	int stat, qidx;

	if (!pfvf)
		return;
	/* Rx queues first — order must match otx2_get_qset_strings() */
	for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
		if (!otx2_update_rq_stats(pfvf, qidx)) {
			for (stat = 0; stat < otx2_n_queue_stats; stat++)
				*((*data)++) = 0;
			continue;
		}
		/* The stats struct is addressed as a flat u64 array using
		 * the slot index recorded in otx2_queue_stats[].
		 */
		for (stat = 0; stat < otx2_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats)
				[otx2_queue_stats[stat].index];
	}

	/* Then Tx queues, same zero-fill fallback on read failure */
	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
		if (!otx2_update_sq_stats(pfvf, qidx)) {
			for (stat = 0; stat < otx2_n_queue_stats; stat++)
				*((*data)++) = 0;
			continue;
		}
		for (stat = 0; stat < otx2_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats)
				[otx2_queue_stats[stat].index];
	}
}
    173
    174static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf)
    175{
    176	struct msg_req *req;
    177	int rc = -ENOMEM;
    178
    179	mutex_lock(&pfvf->mbox.lock);
    180	req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox);
    181	if (!req)
    182		goto end;
    183
    184	if (!otx2_sync_mbox_msg(&pfvf->mbox))
    185		rc = 0;
    186end:
    187	mutex_unlock(&pfvf->mbox.lock);
    188	return rc;
    189}
    190
/* Get device and per queue statistics.
 * Value order must match the names emitted by otx2_get_strings().
 */
static void otx2_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	u64 fec_corr_blks, fec_uncorr_blks;
	struct cgx_fw_data *rsp;
	int stat;

	/* HW device counters, indexed as a flat u64 array */
	otx2_get_dev_stats(pfvf);
	for (stat = 0; stat < otx2_n_dev_stats; stat++)
		*(data++) = ((u64 *)&pfvf->hw.dev_stats)
				[otx2_dev_stats[stat].index];

	/* Driver counters are atomic_t, read atomically */
	for (stat = 0; stat < otx2_n_drv_stats; stat++)
		*(data++) = atomic_read(&((atomic_t *)&pfvf->hw.drv_stats)
						[otx2_drv_stats[stat].index]);

	otx2_get_qset_stats(pfvf, stats, &data);

	/* CGX MAC counters exist only on silicon without RPM */
	if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
		otx2_update_lmac_stats(pfvf);
		for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
			*(data++) = pfvf->hw.cgx_rx_stats[stat];
		for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
			*(data++) = pfvf->hw.cgx_tx_stats[stat];
	}

	*(data++) = pfvf->reset_count;

	/* Default to the MAC-side FEC counters; overridden below if the
	 * PHY provides its own FEC statistics.
	 */
	fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
	fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks;

	rsp = otx2_get_fwdata(pfvf);
	if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
	    !otx2_get_phy_fec_stats(pfvf)) {
		/* Fetch fwdata again because it's been recently populated with
		 * latest PHY FEC stats.
		 */
		rsp = otx2_get_fwdata(pfvf);
		if (!IS_ERR(rsp)) {
			struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;

			/* BaseR counts blocks, RS counts codewords */
			if (pfvf->linfo.fec == OTX2_FEC_BASER) {
				fec_corr_blks   = p->brfec_corr_blks;
				fec_uncorr_blks = p->brfec_uncorr_blks;
			} else {
				fec_corr_blks   = p->rsfec_corr_cws;
				fec_uncorr_blks = p->rsfec_uncorr_cws;
			}
		}
	}

	*(data++) = fec_corr_blks;
	*(data++) = fec_uncorr_blks;
}
    247
    248static int otx2_get_sset_count(struct net_device *netdev, int sset)
    249{
    250	struct otx2_nic *pfvf = netdev_priv(netdev);
    251	int qstats_count, mac_stats = 0;
    252
    253	if (sset != ETH_SS_STATS)
    254		return -EINVAL;
    255
    256	qstats_count = otx2_n_queue_stats *
    257		       (pfvf->hw.rx_queues + pfvf->hw.tx_queues);
    258	if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag))
    259		mac_stats = CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT;
    260	otx2_update_lmac_fec_stats(pfvf);
    261
    262	return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
    263	       mac_stats + OTX2_FEC_STATS_CNT + 1;
    264}
    265
    266/* Get no of queues device supports and current queue count */
    267static void otx2_get_channels(struct net_device *dev,
    268			      struct ethtool_channels *channel)
    269{
    270	struct otx2_nic *pfvf = netdev_priv(dev);
    271
    272	channel->max_rx = pfvf->hw.max_queues;
    273	channel->max_tx = pfvf->hw.max_queues;
    274
    275	channel->rx_count = pfvf->hw.rx_queues;
    276	channel->tx_count = pfvf->hw.tx_queues;
    277}
    278
/* Set no of Tx, Rx queues to be used.
 * The interface is stopped, queue counts are reprogrammed, and the
 * interface is reopened if it was running.
 */
static int otx2_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	bool if_up = netif_running(dev);
	int err = 0;

	if (!channel->rx_count || !channel->tx_count)
		return -EINVAL;

	/* Rx queues referenced by a TC police action must not be resized */
	if (bitmap_weight(&pfvf->rq_bmap, pfvf->hw.rx_queues) > 1) {
		netdev_err(dev,
			   "Receive queues are in use by TC police action\n");
		return -EINVAL;
	}

	if (if_up)
		dev->netdev_ops->ndo_stop(dev);

	err = otx2_set_real_num_queues(dev, channel->tx_count,
				       channel->rx_count);
	/* NOTE(review): on failure the interface is left down even if it
	 * was running on entry — confirm whether ndo_open should be
	 * retried here.
	 */
	if (err)
		return err;

	pfvf->hw.rx_queues = channel->rx_count;
	pfvf->hw.tx_queues = channel->tx_count;
	/* One completion queue per Rx and per Tx queue */
	pfvf->qset.cq_cnt = pfvf->hw.tx_queues +  pfvf->hw.rx_queues;

	if (if_up)
		err = dev->netdev_ops->ndo_open(dev);

	netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
		    pfvf->hw.tx_queues, pfvf->hw.rx_queues);

	return err;
}
    316
    317static void otx2_get_pauseparam(struct net_device *netdev,
    318				struct ethtool_pauseparam *pause)
    319{
    320	struct otx2_nic *pfvf = netdev_priv(netdev);
    321	struct cgx_pause_frm_cfg *req, *rsp;
    322
    323	if (is_otx2_lbkvf(pfvf->pdev))
    324		return;
    325
    326	req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
    327	if (!req)
    328		return;
    329
    330	if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
    331		rsp = (struct cgx_pause_frm_cfg *)
    332		       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
    333		pause->rx_pause = rsp->rx_pause;
    334		pause->tx_pause = rsp->tx_pause;
    335	}
    336}
    337
    338static int otx2_set_pauseparam(struct net_device *netdev,
    339			       struct ethtool_pauseparam *pause)
    340{
    341	struct otx2_nic *pfvf = netdev_priv(netdev);
    342
    343	if (pause->autoneg)
    344		return -EOPNOTSUPP;
    345
    346	if (is_otx2_lbkvf(pfvf->pdev))
    347		return -EOPNOTSUPP;
    348
    349	if (pause->rx_pause)
    350		pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
    351	else
    352		pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
    353
    354	if (pause->tx_pause)
    355		pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
    356	else
    357		pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
    358
    359	return otx2_config_pause_frm(pfvf);
    360}
    361
    362static void otx2_get_ringparam(struct net_device *netdev,
    363			       struct ethtool_ringparam *ring,
    364			       struct kernel_ethtool_ringparam *kernel_ring,
    365			       struct netlink_ext_ack *extack)
    366{
    367	struct otx2_nic *pfvf = netdev_priv(netdev);
    368	struct otx2_qset *qs = &pfvf->qset;
    369
    370	ring->rx_max_pending = Q_COUNT(Q_SIZE_MAX);
    371	ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256);
    372	ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
    373	ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
    374	kernel_ring->rx_buf_len = pfvf->hw.rbuf_len;
    375	kernel_ring->cqe_size = pfvf->hw.xqe_size;
    376}
    377
/* ethtool -G: resize the Rx/Tx rings, receive buffer length, and
 * completion-entry size.  Requested counts are rounded to the nearest
 * supported queue size; the interface is restarted if it was running.
 */
static int otx2_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	u32 rx_buf_len = kernel_ring->rx_buf_len;
	u32 old_rx_buf_len = pfvf->hw.rbuf_len;
	u32 xqe_size = kernel_ring->cqe_size;
	bool if_up = netif_running(netdev);
	struct otx2_qset *qs = &pfvf->qset;
	u32 rx_count, tx_count;

	/* Mini/jumbo ring classes are not supported by this hardware */
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	/* Hardware supports max size of 32k for a receive buffer
	 * and 1536 is typical ethernet frame size.
	 */
	if (rx_buf_len && (rx_buf_len < 1536 || rx_buf_len > 32768)) {
		netdev_err(netdev,
			   "Receive buffer range is 1536 - 32768");
		return -EINVAL;
	}

	if (xqe_size != 128 && xqe_size != 512) {
		netdev_err(netdev,
			   "Completion event size must be 128 or 512");
		return -EINVAL;
	}

	/* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M  */
	rx_count = ring->rx_pending;
	/* On some silicon variants a skid or reserved CQEs are
	 * needed to avoid CQ overflow.
	 */
	if (rx_count < pfvf->hw.rq_skid)
		rx_count =  pfvf->hw.rq_skid;
	rx_count = Q_COUNT(Q_SIZE(rx_count, 3));

	/* Due pipelining impact minimum 2000 unused SQ CQE's
	 * need to be maintained to avoid CQ overflow, hence the
	 * minimum 4K size.
	 */
	tx_count = clamp_t(u32, ring->tx_pending,
			   Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX));
	tx_count = Q_COUNT(Q_SIZE(tx_count, 3));

	/* Nothing changed after rounding — avoid a pointless restart */
	if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt &&
	    rx_buf_len == old_rx_buf_len && xqe_size == pfvf->hw.xqe_size)
		return 0;

	if (if_up)
		netdev->netdev_ops->ndo_stop(netdev);

	/* Assigned to the nearest possible exponent. */
	qs->sqe_cnt = tx_count;
	qs->rqe_cnt = rx_count;

	pfvf->hw.rbuf_len = rx_buf_len;
	pfvf->hw.xqe_size = xqe_size;

	if (if_up)
		return netdev->netdev_ops->ndo_open(netdev);

	return 0;
}
    445
    446static int otx2_get_coalesce(struct net_device *netdev,
    447			     struct ethtool_coalesce *cmd,
    448			     struct kernel_ethtool_coalesce *kernel_coal,
    449			     struct netlink_ext_ack *extack)
    450{
    451	struct otx2_nic *pfvf = netdev_priv(netdev);
    452	struct otx2_hw *hw = &pfvf->hw;
    453
    454	cmd->rx_coalesce_usecs = hw->cq_time_wait;
    455	cmd->rx_max_coalesced_frames = hw->cq_ecount_wait;
    456	cmd->tx_coalesce_usecs = hw->cq_time_wait;
    457	cmd->tx_max_coalesced_frames = hw->cq_ecount_wait;
    458	if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
    459			OTX2_FLAG_ADPTV_INT_COAL_ENABLED) {
    460		cmd->use_adaptive_rx_coalesce = 1;
    461		cmd->use_adaptive_tx_coalesce = 1;
    462	} else {
    463		cmd->use_adaptive_rx_coalesce = 0;
    464		cmd->use_adaptive_tx_coalesce = 0;
    465	}
    466
    467	return 0;
    468}
    469
/* ethtool -C: program interrupt coalescing.  Because Rx and Tx share a
 * CQ, the code detects which direction the user changed and, when both
 * changed, takes the minimum.  Toggling adaptive coalescing off resets
 * the timer/count thresholds to their defaults.
 */
static int otx2_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_hw *hw = &pfvf->hw;
	u8 priv_coalesce_status;
	int qidx;

	/* Zero frame counts would disable event generation — ignore */
	if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
		return 0;

	if (ec->use_adaptive_rx_coalesce != ec->use_adaptive_tx_coalesce) {
		netdev_err(netdev,
			   "adaptive-rx should be same as adaptive-tx");
		return -EINVAL;
	}

	/* Check and update coalesce status */
	if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
			OTX2_FLAG_ADPTV_INT_COAL_ENABLED) {
		priv_coalesce_status = 1;
		if (!ec->use_adaptive_rx_coalesce)
			pfvf->flags &= ~OTX2_FLAG_ADPTV_INT_COAL_ENABLED;
	} else {
		priv_coalesce_status = 0;
		if (ec->use_adaptive_rx_coalesce)
			pfvf->flags |= OTX2_FLAG_ADPTV_INT_COAL_ENABLED;
	}

	/* 'cq_time_wait' is 8bit and is in multiple of 100ns,
	 * so clamp the user given value to the range of 1 to 25usec.
	 */
	ec->rx_coalesce_usecs = clamp_t(u32, ec->rx_coalesce_usecs,
					1, CQ_TIMER_THRESH_MAX);
	ec->tx_coalesce_usecs = clamp_t(u32, ec->tx_coalesce_usecs,
					1, CQ_TIMER_THRESH_MAX);

	/* Rx and Tx are mapped to same CQ, check which one
	 * is changed, if both then choose the min.
	 */
	if (hw->cq_time_wait == ec->rx_coalesce_usecs)
		hw->cq_time_wait = ec->tx_coalesce_usecs;
	else if (hw->cq_time_wait == ec->tx_coalesce_usecs)
		hw->cq_time_wait = ec->rx_coalesce_usecs;
	else
		hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs,
					 ec->tx_coalesce_usecs);

	/* Max ecount_wait supported is 16bit,
	 * so clamp the user given value to the range of 1 to 64k.
	 */
	ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames,
					      1, NAPI_POLL_WEIGHT);
	ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames,
					      1, NAPI_POLL_WEIGHT);

	/* Rx and Tx are mapped to same CQ, check which one
	 * is changed, if both then choose the min.
	 */
	if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames)
		hw->cq_ecount_wait = ec->tx_max_coalesced_frames;
	else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames)
		hw->cq_ecount_wait = ec->rx_max_coalesced_frames;
	else
		hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames,
					   ec->tx_max_coalesced_frames);

	/* Reset 'cq_time_wait' and 'cq_ecount_wait' to
	 * default values if coalesce status changed from
	 * 'on' to 'off'.
	 */
	if (priv_coalesce_status &&
	    ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) !=
	     OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
		hw->cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
		hw->cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
	}

	/* Push new thresholds to every completion interrupt */
	if (netif_running(netdev)) {
		for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++)
			otx2_config_irq_coalescing(pfvf, qidx);
	}

	return 0;
}
    557
/* ETHTOOL_GRXFH: translate the NIX flowkey configuration into the
 * ethtool RXH_* bitmask for the requested flow type.
 */
static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
				  struct ethtool_rxnfc *nfc)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;

	/* No IP hashing configured at all — report nothing */
	if (!(rss->flowkey_cfg &
	    (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)))
		return 0;

	/* Mimimum is IPv4 and IPv6, SIP/DIP */
	nfc->data = RXH_IP_SRC | RXH_IP_DST;
	if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_VLAN)
		nfc->data |= RXH_VLAN;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_TCP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_UDP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_SCTP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_ESP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	/* Plain IP / AH / ESP flow types hash on SIP/DIP only */
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
    607
/* ETHTOOL_SRXFH: translate the requested RXH_* bitmask for one flow
 * type into NIX flowkey bits and program them.  Per-L4-protocol config
 * applies to both v4 and v6 at once, and each protocol is all-or-
 * nothing: either 4-tuple (SIP/DIP + ports) or 2-tuple (SIP/DIP).
 */
static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
				  struct ethtool_rxnfc *nfc)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3;
	u32 rss_cfg = rss->flowkey_cfg;

	if (!rss->enable) {
		netdev_err(pfvf->netdev,
			   "RSS is disabled, cannot change settings\n");
		return -EIO;
	}

	/* Mimimum is IPv4 and IPv6, SIP/DIP */
	if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	if (nfc->data & RXH_VLAN)
		rss_cfg |=  NIX_FLOW_KEY_TYPE_VLAN;
	else
		rss_cfg &= ~NIX_FLOW_KEY_TYPE_VLAN;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* Different config for v4 and v6 is not supported.
		 * Both of them have to be either 4-tuple or 2-tuple.
		 */
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_TCP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= NIX_FLOW_KEY_TYPE_TCP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= NIX_FLOW_KEY_TYPE_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_SCTP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= NIX_FLOW_KEY_TYPE_SCTP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
		switch (nfc->data & rxh_l4) {
		case 0:
			/* Disabling ESP/AH hashing re-enables VLAN and
			 * IPv4-proto hashing in its place.
			 */
			rss_cfg &= ~(NIX_FLOW_KEY_TYPE_ESP |
				     NIX_FLOW_KEY_TYPE_AH);
			rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN |
				   NIX_FLOW_KEY_TYPE_IPV4_PROTO;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			/* If VLAN hashing is also requested for ESP then do not
			 * allow because of hardware 40 bytes flow key limit.
			 */
			if (rss_cfg & NIX_FLOW_KEY_TYPE_VLAN) {
				netdev_err(pfvf->netdev,
					   "RSS hash of ESP or AH with VLAN is not supported\n");
				return -EOPNOTSUPP;
			}

			rss_cfg |= NIX_FLOW_KEY_TYPE_ESP | NIX_FLOW_KEY_TYPE_AH;
			/* Disable IPv4 proto hashing since IPv6 SA+DA(32 bytes)
			 * and ESP SPI+sequence(8 bytes) uses hardware maximum
			 * limit of 40 byte flow key.
			 */
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_IPV4_PROTO;
			break;
		default:
			return -EINVAL;
		}
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		/* Plain IP resets the flowkey to SIP/DIP only */
		rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
		break;
	default:
		return -EINVAL;
	}

	rss->flowkey_cfg = rss_cfg;
	otx2_set_flowkey_cfg(pfvf);
	return 0;
}
    715
    716static int otx2_get_rxnfc(struct net_device *dev,
    717			  struct ethtool_rxnfc *nfc, u32 *rules)
    718{
    719	bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
    720	struct otx2_nic *pfvf = netdev_priv(dev);
    721	int ret = -EOPNOTSUPP;
    722
    723	switch (nfc->cmd) {
    724	case ETHTOOL_GRXRINGS:
    725		nfc->data = pfvf->hw.rx_queues;
    726		ret = 0;
    727		break;
    728	case ETHTOOL_GRXCLSRLCNT:
    729		if (netif_running(dev) && ntuple) {
    730			nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
    731			ret = 0;
    732		}
    733		break;
    734	case ETHTOOL_GRXCLSRULE:
    735		if (netif_running(dev) && ntuple)
    736			ret = otx2_get_flow(pfvf, nfc,  nfc->fs.location);
    737		break;
    738	case ETHTOOL_GRXCLSRLALL:
    739		if (netif_running(dev) && ntuple)
    740			ret = otx2_get_all_flows(pfvf, nfc, rules);
    741		break;
    742	case ETHTOOL_GRXFH:
    743		return otx2_get_rss_hash_opts(pfvf, nfc);
    744	default:
    745		break;
    746	}
    747	return ret;
    748}
    749
    750static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
    751{
    752	bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
    753	struct otx2_nic *pfvf = netdev_priv(dev);
    754	int ret = -EOPNOTSUPP;
    755
    756	switch (nfc->cmd) {
    757	case ETHTOOL_SRXFH:
    758		ret = otx2_set_rss_hash_opts(pfvf, nfc);
    759		break;
    760	case ETHTOOL_SRXCLSRLINS:
    761		if (netif_running(dev) && ntuple)
    762			ret = otx2_add_flow(pfvf, nfc);
    763		break;
    764	case ETHTOOL_SRXCLSRLDEL:
    765		if (netif_running(dev) && ntuple)
    766			ret = otx2_remove_flow(pfvf, nfc->fs.location);
    767		break;
    768	default:
    769		break;
    770	}
    771
    772	return ret;
    773}
    774
    775static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
    776{
    777	struct otx2_nic *pfvf = netdev_priv(netdev);
    778	struct otx2_rss_info *rss;
    779
    780	rss = &pfvf->hw.rss_info;
    781
    782	return sizeof(rss->key);
    783}
    784
    785static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
    786{
    787	return  MAX_RSS_INDIR_TBL_SIZE;
    788}
    789
    790static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id)
    791{
    792	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
    793
    794	otx2_rss_ctx_flow_del(pfvf, ctx_id);
    795	kfree(rss->rss_ctx[ctx_id]);
    796	rss->rss_ctx[ctx_id] = NULL;
    797
    798	return 0;
    799}
    800
    801static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
    802			       u32 *rss_context)
    803{
    804	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
    805	u8 ctx;
    806
    807	for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) {
    808		if (!rss->rss_ctx[ctx])
    809			break;
    810	}
    811	if (ctx == MAX_RSS_GROUPS)
    812		return -EINVAL;
    813
    814	rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL);
    815	if (!rss->rss_ctx[ctx])
    816		return -ENOMEM;
    817	*rss_context = ctx;
    818
    819	return 0;
    820}
    821
/* RSS context configuration: update the (global) hash key, delete a
 * context, allocate a new one, and/or rewrite a context's indirection
 * table, then program the hardware table.
 */
static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
				 const u8 *hkey, const u8 hfunc,
				 u32 *rss_context, bool delete)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	struct otx2_rss_ctx *rss_ctx;
	struct otx2_rss_info *rss;
	int ret, idx;

	/* Only the default (Toeplitz-top) hash function is supported */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
	    *rss_context >= MAX_RSS_GROUPS)
		return -EINVAL;

	rss = &pfvf->hw.rss_info;

	if (!rss->enable) {
		netdev_err(dev, "RSS is disabled, cannot change settings\n");
		return -EIO;
	}

	/* The hash key is shared by all contexts */
	if (hkey) {
		memcpy(rss->key, hkey, sizeof(rss->key));
		otx2_set_rss_key(pfvf);
	}
	if (delete)
		return otx2_rss_ctx_delete(pfvf, *rss_context);

	if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
		ret = otx2_rss_ctx_create(pfvf, rss_context);
		if (ret)
			return ret;
	}
	/* NOTE(review): rss_ctx is not NULL-checked — this relies on the
	 * ethtool core only passing ids of previously allocated contexts;
	 * confirm before reusing this path elsewhere.
	 */
	if (indir) {
		rss_ctx = rss->rss_ctx[*rss_context];
		for (idx = 0; idx < rss->rss_size; idx++)
			rss_ctx->ind_tbl[idx] = indir[idx];
	}
	otx2_set_rss_table(pfvf, *rss_context);

	return 0;
}
    867
    868static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
    869				 u8 *hkey, u8 *hfunc, u32 rss_context)
    870{
    871	struct otx2_nic *pfvf = netdev_priv(dev);
    872	struct otx2_rss_ctx *rss_ctx;
    873	struct otx2_rss_info *rss;
    874	int idx, rx_queues;
    875
    876	rss = &pfvf->hw.rss_info;
    877
    878	if (hfunc)
    879		*hfunc = ETH_RSS_HASH_TOP;
    880
    881	if (!indir)
    882		return 0;
    883
    884	if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) {
    885		rx_queues = pfvf->hw.rx_queues;
    886		for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
    887			indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues);
    888		return 0;
    889	}
    890	if (rss_context >= MAX_RSS_GROUPS)
    891		return -ENOENT;
    892
    893	rss_ctx = rss->rss_ctx[rss_context];
    894	if (!rss_ctx)
    895		return -ENOENT;
    896
    897	if (indir) {
    898		for (idx = 0; idx < rss->rss_size; idx++)
    899			indir[idx] = rss_ctx->ind_tbl[idx];
    900	}
    901	if (hkey)
    902		memcpy(hkey, rss->key, sizeof(rss->key));
    903
    904	return 0;
    905}
    906
    907/* Get RSS configuration */
    908static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
    909			 u8 *hkey, u8 *hfunc)
    910{
    911	return otx2_get_rxfh_context(dev, indir, hkey, hfunc,
    912				     DEFAULT_RSS_CONTEXT_GROUP);
    913}
    914
    915/* Configure RSS table and hash key */
    916static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
    917			 const u8 *hkey, const u8 hfunc)
    918{
    919
    920	u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
    921
    922	return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0);
    923}
    924
    925static u32 otx2_get_msglevel(struct net_device *netdev)
    926{
    927	struct otx2_nic *pfvf = netdev_priv(netdev);
    928
    929	return pfvf->msg_enable;
    930}
    931
    932static void otx2_set_msglevel(struct net_device *netdev, u32 val)
    933{
    934	struct otx2_nic *pfvf = netdev_priv(netdev);
    935
    936	pfvf->msg_enable = val;
    937}
    938
    939static u32 otx2_get_link(struct net_device *netdev)
    940{
    941	struct otx2_nic *pfvf = netdev_priv(netdev);
    942
    943	/* LBK link is internal and always UP */
    944	if (is_otx2_lbkvf(pfvf->pdev))
    945		return 1;
    946	return pfvf->linfo.link_up;
    947}
    948
    949static int otx2_get_ts_info(struct net_device *netdev,
    950			    struct ethtool_ts_info *info)
    951{
    952	struct otx2_nic *pfvf = netdev_priv(netdev);
    953
    954	if (!pfvf->ptp)
    955		return ethtool_op_get_ts_info(netdev, info);
    956
    957	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
    958				SOF_TIMESTAMPING_RX_SOFTWARE |
    959				SOF_TIMESTAMPING_SOFTWARE |
    960				SOF_TIMESTAMPING_TX_HARDWARE |
    961				SOF_TIMESTAMPING_RX_HARDWARE |
    962				SOF_TIMESTAMPING_RAW_HARDWARE;
    963
    964	info->phc_index = otx2_ptp_clock_index(pfvf);
    965
    966	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
    967
    968	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
    969			   (1 << HWTSTAMP_FILTER_ALL);
    970
    971	return 0;
    972}
    973
    974static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf)
    975{
    976	struct cgx_fw_data *rsp = NULL;
    977	struct msg_req *req;
    978	int err = 0;
    979
    980	mutex_lock(&pfvf->mbox.lock);
    981	req = otx2_mbox_alloc_msg_cgx_get_aux_link_info(&pfvf->mbox);
    982	if (!req) {
    983		mutex_unlock(&pfvf->mbox.lock);
    984		return ERR_PTR(-ENOMEM);
    985	}
    986
    987	err = otx2_sync_mbox_msg(&pfvf->mbox);
    988	if (!err) {
    989		rsp = (struct cgx_fw_data *)
    990			otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
    991	} else {
    992		rsp = ERR_PTR(err);
    993	}
    994
    995	mutex_unlock(&pfvf->mbox.lock);
    996	return rsp;
    997}
    998
    999static int otx2_get_fecparam(struct net_device *netdev,
   1000			     struct ethtool_fecparam *fecparam)
   1001{
   1002	struct otx2_nic *pfvf = netdev_priv(netdev);
   1003	struct cgx_fw_data *rsp;
   1004	const int fec[] = {
   1005		ETHTOOL_FEC_OFF,
   1006		ETHTOOL_FEC_BASER,
   1007		ETHTOOL_FEC_RS,
   1008		ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS};
   1009#define FEC_MAX_INDEX 4
   1010	if (pfvf->linfo.fec < FEC_MAX_INDEX)
   1011		fecparam->active_fec = fec[pfvf->linfo.fec];
   1012
   1013	rsp = otx2_get_fwdata(pfvf);
   1014	if (IS_ERR(rsp))
   1015		return PTR_ERR(rsp);
   1016
   1017	if (rsp->fwdata.supported_fec < FEC_MAX_INDEX) {
   1018		if (!rsp->fwdata.supported_fec)
   1019			fecparam->fec = ETHTOOL_FEC_NONE;
   1020		else
   1021			fecparam->fec = fec[rsp->fwdata.supported_fec];
   1022	}
   1023	return 0;
   1024}
   1025
   1026static int otx2_set_fecparam(struct net_device *netdev,
   1027			     struct ethtool_fecparam *fecparam)
   1028{
   1029	struct otx2_nic *pfvf = netdev_priv(netdev);
   1030	struct mbox *mbox = &pfvf->mbox;
   1031	struct fec_mode *req, *rsp;
   1032	int err = 0, fec = 0;
   1033
   1034	switch (fecparam->fec) {
   1035	/* Firmware does not support AUTO mode consider it as FEC_OFF */
   1036	case ETHTOOL_FEC_OFF:
   1037	case ETHTOOL_FEC_AUTO:
   1038		fec = OTX2_FEC_OFF;
   1039		break;
   1040	case ETHTOOL_FEC_RS:
   1041		fec = OTX2_FEC_RS;
   1042		break;
   1043	case ETHTOOL_FEC_BASER:
   1044		fec = OTX2_FEC_BASER;
   1045		break;
   1046	default:
   1047		netdev_warn(pfvf->netdev, "Unsupported FEC mode: %d",
   1048			    fecparam->fec);
   1049		return -EINVAL;
   1050	}
   1051
   1052	if (fec == pfvf->linfo.fec)
   1053		return 0;
   1054
   1055	mutex_lock(&mbox->lock);
   1056	req = otx2_mbox_alloc_msg_cgx_set_fec_param(&pfvf->mbox);
   1057	if (!req) {
   1058		err = -ENOMEM;
   1059		goto end;
   1060	}
   1061	req->fec = fec;
   1062	err = otx2_sync_mbox_msg(&pfvf->mbox);
   1063	if (err)
   1064		goto end;
   1065
   1066	rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
   1067						   0, &req->hdr);
   1068	if (rsp->fec >= 0)
   1069		pfvf->linfo.fec = rsp->fec;
   1070	else
   1071		err = rsp->fec;
   1072end:
   1073	mutex_unlock(&mbox->lock);
   1074	return err;
   1075}
   1076
   1077static void otx2_get_fec_info(u64 index, int req_mode,
   1078			      struct ethtool_link_ksettings *link_ksettings)
   1079{
   1080	__ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_fec_modes) = { 0, };
   1081
   1082	switch (index) {
   1083	case OTX2_FEC_NONE:
   1084		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
   1085				 otx2_fec_modes);
   1086		break;
   1087	case OTX2_FEC_BASER:
   1088		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
   1089				 otx2_fec_modes);
   1090		break;
   1091	case OTX2_FEC_RS:
   1092		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
   1093				 otx2_fec_modes);
   1094		break;
   1095	case OTX2_FEC_BASER | OTX2_FEC_RS:
   1096		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
   1097				 otx2_fec_modes);
   1098		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
   1099				 otx2_fec_modes);
   1100		break;
   1101	}
   1102
   1103	/* Add fec modes to existing modes */
   1104	if (req_mode == OTX2_MODE_ADVERTISED)
   1105		linkmode_or(link_ksettings->link_modes.advertising,
   1106			    link_ksettings->link_modes.advertising,
   1107			    otx2_fec_modes);
   1108	else
   1109		linkmode_or(link_ksettings->link_modes.supported,
   1110			    link_ksettings->link_modes.supported,
   1111			    otx2_fec_modes);
   1112}
   1113
   1114static void otx2_get_link_mode_info(u64 link_mode_bmap,
   1115				    bool req_mode,
   1116				    struct ethtool_link_ksettings
   1117				    *link_ksettings)
   1118{
   1119	__ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, };
   1120	const int otx2_sgmii_features[6] = {
   1121		ETHTOOL_LINK_MODE_10baseT_Half_BIT,
   1122		ETHTOOL_LINK_MODE_10baseT_Full_BIT,
   1123		ETHTOOL_LINK_MODE_100baseT_Half_BIT,
   1124		ETHTOOL_LINK_MODE_100baseT_Full_BIT,
   1125		ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
   1126		ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
   1127	};
   1128	/* CGX link modes to Ethtool link mode mapping */
   1129	const int cgx_link_mode[27] = {
   1130		0, /* SGMII  Mode */
   1131		ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
   1132		ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
   1133		ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
   1134		ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
   1135		ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
   1136		0,
   1137		ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
   1138		0,
   1139		0,
   1140		ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
   1141		ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
   1142		ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
   1143		ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
   1144		ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
   1145		ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
   1146		0,
   1147		ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
   1148		0,
   1149		ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
   1150		ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
   1151		ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
   1152		0,
   1153		ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
   1154		ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
   1155		ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
   1156		ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
   1157	};
   1158	u8 bit;
   1159
   1160	for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) {
   1161		/* SGMII mode is set */
   1162		if (bit == 0)
   1163			linkmode_set_bit_array(otx2_sgmii_features,
   1164					       ARRAY_SIZE(otx2_sgmii_features),
   1165					       otx2_link_modes);
   1166		else
   1167			linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes);
   1168	}
   1169
   1170	if (req_mode == OTX2_MODE_ADVERTISED)
   1171		linkmode_copy(link_ksettings->link_modes.advertising,
   1172			      otx2_link_modes);
   1173	else
   1174		linkmode_copy(link_ksettings->link_modes.supported,
   1175			      otx2_link_modes);
   1176}
   1177
   1178static int otx2_get_link_ksettings(struct net_device *netdev,
   1179				   struct ethtool_link_ksettings *cmd)
   1180{
   1181	struct otx2_nic *pfvf = netdev_priv(netdev);
   1182	struct cgx_fw_data *rsp = NULL;
   1183
   1184	cmd->base.duplex  = pfvf->linfo.full_duplex;
   1185	cmd->base.speed   = pfvf->linfo.speed;
   1186	cmd->base.autoneg = pfvf->linfo.an;
   1187
   1188	rsp = otx2_get_fwdata(pfvf);
   1189	if (IS_ERR(rsp))
   1190		return PTR_ERR(rsp);
   1191
   1192	if (rsp->fwdata.supported_an)
   1193		ethtool_link_ksettings_add_link_mode(cmd,
   1194						     supported,
   1195						     Autoneg);
   1196
   1197	otx2_get_link_mode_info(rsp->fwdata.advertised_link_modes,
   1198				OTX2_MODE_ADVERTISED, cmd);
   1199	otx2_get_fec_info(rsp->fwdata.advertised_fec,
   1200			  OTX2_MODE_ADVERTISED, cmd);
   1201	otx2_get_link_mode_info(rsp->fwdata.supported_link_modes,
   1202				OTX2_MODE_SUPPORTED, cmd);
   1203	otx2_get_fec_info(rsp->fwdata.supported_fec,
   1204			  OTX2_MODE_SUPPORTED, cmd);
   1205	return 0;
   1206}
   1207
   1208static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd,
   1209				     u64 *mode)
   1210{
   1211	u32 bit_pos;
   1212
   1213	/* Firmware does not support requesting multiple advertised modes
   1214	 * return first set bit
   1215	 */
   1216	bit_pos = find_first_bit(cmd->link_modes.advertising,
   1217				 __ETHTOOL_LINK_MODE_MASK_NBITS);
   1218	if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS)
   1219		*mode = bit_pos;
   1220}
   1221
   1222static int otx2_set_link_ksettings(struct net_device *netdev,
   1223				   const struct ethtool_link_ksettings *cmd)
   1224{
   1225	struct otx2_nic *pf = netdev_priv(netdev);
   1226	struct ethtool_link_ksettings cur_ks;
   1227	struct cgx_set_link_mode_req *req;
   1228	struct mbox *mbox = &pf->mbox;
   1229	int err = 0;
   1230
   1231	memset(&cur_ks, 0, sizeof(struct ethtool_link_ksettings));
   1232
   1233	if (!ethtool_validate_speed(cmd->base.speed) ||
   1234	    !ethtool_validate_duplex(cmd->base.duplex))
   1235		return -EINVAL;
   1236
   1237	if (cmd->base.autoneg != AUTONEG_ENABLE &&
   1238	    cmd->base.autoneg != AUTONEG_DISABLE)
   1239		return -EINVAL;
   1240
   1241	otx2_get_link_ksettings(netdev, &cur_ks);
   1242
   1243	/* Check requested modes against supported modes by hardware */
   1244	if (!linkmode_subset(cmd->link_modes.advertising,
   1245			     cur_ks.link_modes.supported))
   1246		return -EINVAL;
   1247
   1248	mutex_lock(&mbox->lock);
   1249	req = otx2_mbox_alloc_msg_cgx_set_link_mode(&pf->mbox);
   1250	if (!req) {
   1251		err = -ENOMEM;
   1252		goto end;
   1253	}
   1254
   1255	req->args.speed = cmd->base.speed;
   1256	/* firmware expects 1 for half duplex and 0 for full duplex
   1257	 * hence inverting
   1258	 */
   1259	req->args.duplex = cmd->base.duplex ^ 0x1;
   1260	req->args.an = cmd->base.autoneg;
   1261	otx2_get_advertised_mode(cmd, &req->args.mode);
   1262
   1263	err = otx2_sync_mbox_msg(&pf->mbox);
   1264end:
   1265	mutex_unlock(&mbox->lock);
   1266	return err;
   1267}
   1268
/* ethtool callbacks for the PF netdev */
static const struct ethtool_ops otx2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.supported_ring_params  = ETHTOOL_RING_USE_RX_BUF_LEN |
				  ETHTOOL_RING_USE_CQE_SIZE,
	.get_link		= otx2_get_link,
	.get_drvinfo		= otx2_get_drvinfo,
	.get_strings		= otx2_get_strings,
	.get_ethtool_stats	= otx2_get_ethtool_stats,
	.get_sset_count		= otx2_get_sset_count,
	.set_channels		= otx2_set_channels,
	.get_channels		= otx2_get_channels,
	.get_ringparam		= otx2_get_ringparam,
	.set_ringparam		= otx2_set_ringparam,
	.get_coalesce		= otx2_get_coalesce,
	.set_coalesce		= otx2_set_coalesce,
	.get_rxnfc		= otx2_get_rxnfc,
	.set_rxnfc              = otx2_set_rxnfc,
	.get_rxfh_key_size	= otx2_get_rxfh_key_size,
	.get_rxfh_indir_size	= otx2_get_rxfh_indir_size,
	.get_rxfh		= otx2_get_rxfh,
	.set_rxfh		= otx2_set_rxfh,
	.get_rxfh_context	= otx2_get_rxfh_context,
	.set_rxfh_context	= otx2_set_rxfh_context,
	.get_msglevel		= otx2_get_msglevel,
	.set_msglevel		= otx2_set_msglevel,
	.get_pauseparam		= otx2_get_pauseparam,
	.set_pauseparam		= otx2_set_pauseparam,
	.get_ts_info		= otx2_get_ts_info,
	.get_fecparam		= otx2_get_fecparam,
	.set_fecparam		= otx2_set_fecparam,
	.get_link_ksettings     = otx2_get_link_ksettings,
	.set_link_ksettings     = otx2_set_link_ksettings,
};
   1304
/* Attach the PF ethtool callbacks to @netdev */
void otx2_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &otx2_ethtool_ops;
}
   1309
   1310/* VF's ethtool APIs */
   1311static void otx2vf_get_drvinfo(struct net_device *netdev,
   1312			       struct ethtool_drvinfo *info)
   1313{
   1314	struct otx2_nic *vf = netdev_priv(netdev);
   1315
   1316	strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
   1317	strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
   1318}
   1319
   1320static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
   1321{
   1322	struct otx2_nic *vf = netdev_priv(netdev);
   1323	int stats;
   1324
   1325	if (sset != ETH_SS_STATS)
   1326		return;
   1327
   1328	for (stats = 0; stats < otx2_n_dev_stats; stats++) {
   1329		memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
   1330		data += ETH_GSTRING_LEN;
   1331	}
   1332
   1333	for (stats = 0; stats < otx2_n_drv_stats; stats++) {
   1334		memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
   1335		data += ETH_GSTRING_LEN;
   1336	}
   1337
   1338	otx2_get_qset_strings(vf, &data, 0);
   1339
   1340	strcpy(data, "reset_count");
   1341	data += ETH_GSTRING_LEN;
   1342}
   1343
   1344static void otx2vf_get_ethtool_stats(struct net_device *netdev,
   1345				     struct ethtool_stats *stats, u64 *data)
   1346{
   1347	struct otx2_nic *vf = netdev_priv(netdev);
   1348	int stat;
   1349
   1350	otx2_get_dev_stats(vf);
   1351	for (stat = 0; stat < otx2_n_dev_stats; stat++)
   1352		*(data++) = ((u64 *)&vf->hw.dev_stats)
   1353				[otx2_dev_stats[stat].index];
   1354
   1355	for (stat = 0; stat < otx2_n_drv_stats; stat++)
   1356		*(data++) = atomic_read(&((atomic_t *)&vf->hw.drv_stats)
   1357						[otx2_drv_stats[stat].index]);
   1358
   1359	otx2_get_qset_stats(vf, stats, &data);
   1360	*(data++) = vf->reset_count;
   1361}
   1362
   1363static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
   1364{
   1365	struct otx2_nic *vf = netdev_priv(netdev);
   1366	int qstats_count;
   1367
   1368	if (sset != ETH_SS_STATS)
   1369		return -EINVAL;
   1370
   1371	qstats_count = otx2_n_queue_stats *
   1372		       (vf->hw.rx_queues + vf->hw.tx_queues);
   1373
   1374	return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
   1375}
   1376
   1377static int otx2vf_get_link_ksettings(struct net_device *netdev,
   1378				     struct ethtool_link_ksettings *cmd)
   1379{
   1380	struct otx2_nic *pfvf = netdev_priv(netdev);
   1381
   1382	if (is_otx2_lbkvf(pfvf->pdev)) {
   1383		cmd->base.duplex = DUPLEX_FULL;
   1384		cmd->base.speed = SPEED_100000;
   1385	} else {
   1386		return otx2_get_link_ksettings(netdev, cmd);
   1387	}
   1388	return 0;
   1389}
   1390
/* ethtool callbacks for VF netdevs; VF-specific handlers for
 * drvinfo/strings/stats/link, shared PF helpers for the rest.
 */
static const struct ethtool_ops otx2vf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.supported_ring_params  = ETHTOOL_RING_USE_RX_BUF_LEN |
				  ETHTOOL_RING_USE_CQE_SIZE,
	.get_link		= otx2_get_link,
	.get_drvinfo		= otx2vf_get_drvinfo,
	.get_strings		= otx2vf_get_strings,
	.get_ethtool_stats	= otx2vf_get_ethtool_stats,
	.get_sset_count		= otx2vf_get_sset_count,
	.set_channels		= otx2_set_channels,
	.get_channels		= otx2_get_channels,
	.get_rxnfc		= otx2_get_rxnfc,
	.set_rxnfc              = otx2_set_rxnfc,
	.get_rxfh_key_size	= otx2_get_rxfh_key_size,
	.get_rxfh_indir_size	= otx2_get_rxfh_indir_size,
	.get_rxfh		= otx2_get_rxfh,
	.set_rxfh		= otx2_set_rxfh,
	.get_rxfh_context	= otx2_get_rxfh_context,
	.set_rxfh_context	= otx2_set_rxfh_context,
	.get_ringparam		= otx2_get_ringparam,
	.set_ringparam		= otx2_set_ringparam,
	.get_coalesce		= otx2_get_coalesce,
	.set_coalesce		= otx2_set_coalesce,
	.get_msglevel		= otx2_get_msglevel,
	.set_msglevel		= otx2_set_msglevel,
	.get_pauseparam		= otx2_get_pauseparam,
	.set_pauseparam		= otx2_set_pauseparam,
	.get_link_ksettings     = otx2vf_get_link_ksettings,
	.get_ts_info		= otx2_get_ts_info,
};
   1423
/* Attach the VF ethtool callbacks to @netdev */
void otx2vf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &otx2vf_ethtool_ops;
}
EXPORT_SYMBOL(otx2vf_set_ethtool_ops);