cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

en_ethtool.c (62790B)


      1/*
      2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
      3 *
      4 * This software is available to you under a choice of one of two
      5 * licenses.  You may choose to be licensed under the terms of the GNU
      6 * General Public License (GPL) Version 2, available from the file
      7 * COPYING in the main directory of this source tree, or the
      8 * OpenIB.org BSD license below:
      9 *
     10 *     Redistribution and use in source and binary forms, with or
     11 *     without modification, are permitted provided that the following
     12 *     conditions are met:
     13 *
     14 *      - Redistributions of source code must retain the above
     15 *        copyright notice, this list of conditions and the following
     16 *        disclaimer.
     17 *
     18 *      - Redistributions in binary form must reproduce the above
     19 *        copyright notice, this list of conditions and the following
     20 *        disclaimer in the documentation and/or other materials
     21 *        provided with the distribution.
     22 *
     23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
     24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
     26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
     27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
     28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
     29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     30 * SOFTWARE.
     31 *
     32 */
     33
     34#include <linux/kernel.h>
     35#include <linux/ethtool.h>
     36#include <linux/netdevice.h>
     37#include <linux/mlx4/driver.h>
     38#include <linux/mlx4/device.h>
     39#include <linux/in.h>
     40#include <net/ip.h>
     41#include <linux/bitmap.h>
     42#include <linux/mii.h>
     43
     44#include "mlx4_en.h"
     45#include "en_port.h"
     46
     47#define EN_ETHTOOL_QP_ATTACH (1ull << 63)
     48#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
     49#define EN_ETHTOOL_WORD_MASK  cpu_to_be32(0xffffffff)
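        /*
         * EN_ETHTOOL_QP_ATTACH marks bit 63 of an ethtool flow rule's
         * ring_cookie to request attaching the flow directly to a QP number
         * instead of an RX ring (see mlx4_en_flow_replace below).  The
         * SHORT/WORD masks are the "match all bits" masks applied to L4 ports
         * and IPv4 addresses when building steering rule specs.
         */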
     50
     51int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
     52{
     53	int i, t;
     54	int err = 0;
     55
     56	for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
     57		for (i = 0; i < priv->tx_ring_num[t]; i++) {
     58			priv->tx_cq[t][i]->moder_cnt = priv->tx_frames;
     59			priv->tx_cq[t][i]->moder_time = priv->tx_usecs;
     60			if (priv->port_up) {
     61				err = mlx4_en_set_cq_moder(priv,
     62							   priv->tx_cq[t][i]);
     63				if (err)
     64					return err;
     65			}
     66		}
     67	}
     68
     69	if (priv->adaptive_rx_coal)
     70		return 0;
     71
     72	for (i = 0; i < priv->rx_ring_num; i++) {
     73		priv->rx_cq[i]->moder_cnt = priv->rx_frames;
     74		priv->rx_cq[i]->moder_time = priv->rx_usecs;
     75		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
     76		if (priv->port_up) {
     77			err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
     78			if (err)
     79				return err;
     80		}
     81	}
     82
     83	return err;
     84}
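        /*
         * mlx4_en_moderation_update() pushes the coalescing parameters stored
         * in priv (tx_frames/tx_usecs, rx_frames/rx_usecs) to every TX and RX
         * completion queue.  RX CQs are skipped when adaptive RX coalescing is
         * enabled, leaving them to the driver's auto-moderation logic.  The
         * set_coalesce and set_ringparam handlers below use it to apply changes.
         */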
     85
     86static void
     87mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
     88{
     89	struct mlx4_en_priv *priv = netdev_priv(dev);
     90	struct mlx4_en_dev *mdev = priv->mdev;
     91
     92	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
     93	strlcpy(drvinfo->version, DRV_VERSION,
     94		sizeof(drvinfo->version));
     95	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
     96		"%d.%d.%d",
     97		(u16) (mdev->dev->caps.fw_ver >> 32),
     98		(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
     99		(u16) (mdev->dev->caps.fw_ver & 0xffff));
    100	strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
    101		sizeof(drvinfo->bus_info));
    102}
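        /*
         * caps.fw_ver packs the firmware version into one 64-bit word; the
         * snprintf() above splits it into three 16-bit fields: major (>> 32),
         * minor (>> 16) and subminor.  This is the information typically shown
         * by "ethtool -i <iface>".
         */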
    103
    104static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
    105	"blueflame",
    106	"phv-bit"
    107};
    108
    109static const char main_strings[][ETH_GSTRING_LEN] = {
    110	/* main statistics */
    111	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
    112	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
    113	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
    114	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
    115	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
    116	"tx_heartbeat_errors", "tx_window_errors",
    117
    118	/* port statistics */
    119	"tso_packets",
    120	"xmit_more",
    121	"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_pages",
    122	"rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
    123
    124	/* pf statistics */
    125	"pf_rx_packets",
    126	"pf_rx_bytes",
    127	"pf_tx_packets",
    128	"pf_tx_bytes",
    129
    130	/* priority flow control statistics rx */
    131	"rx_pause_prio_0", "rx_pause_duration_prio_0",
    132	"rx_pause_transition_prio_0",
    133	"rx_pause_prio_1", "rx_pause_duration_prio_1",
    134	"rx_pause_transition_prio_1",
    135	"rx_pause_prio_2", "rx_pause_duration_prio_2",
    136	"rx_pause_transition_prio_2",
    137	"rx_pause_prio_3", "rx_pause_duration_prio_3",
    138	"rx_pause_transition_prio_3",
    139	"rx_pause_prio_4", "rx_pause_duration_prio_4",
    140	"rx_pause_transition_prio_4",
    141	"rx_pause_prio_5", "rx_pause_duration_prio_5",
    142	"rx_pause_transition_prio_5",
    143	"rx_pause_prio_6", "rx_pause_duration_prio_6",
    144	"rx_pause_transition_prio_6",
    145	"rx_pause_prio_7", "rx_pause_duration_prio_7",
    146	"rx_pause_transition_prio_7",
    147
    148	/* flow control statistics rx */
    149	"rx_pause", "rx_pause_duration", "rx_pause_transition",
    150
    151	/* priority flow control statistics tx */
    152	"tx_pause_prio_0", "tx_pause_duration_prio_0",
    153	"tx_pause_transition_prio_0",
    154	"tx_pause_prio_1", "tx_pause_duration_prio_1",
    155	"tx_pause_transition_prio_1",
    156	"tx_pause_prio_2", "tx_pause_duration_prio_2",
    157	"tx_pause_transition_prio_2",
    158	"tx_pause_prio_3", "tx_pause_duration_prio_3",
    159	"tx_pause_transition_prio_3",
    160	"tx_pause_prio_4", "tx_pause_duration_prio_4",
    161	"tx_pause_transition_prio_4",
    162	"tx_pause_prio_5", "tx_pause_duration_prio_5",
    163	"tx_pause_transition_prio_5",
    164	"tx_pause_prio_6", "tx_pause_duration_prio_6",
    165	"tx_pause_transition_prio_6",
    166	"tx_pause_prio_7", "tx_pause_duration_prio_7",
    167	"tx_pause_transition_prio_7",
    168
    169	/* flow control statistics tx */
    170	"tx_pause", "tx_pause_duration", "tx_pause_transition",
    171
    172	/* packet statistics */
    173	"rx_multicast_packets",
    174	"rx_broadcast_packets",
    175	"rx_jabbers",
    176	"rx_in_range_length_error",
    177	"rx_out_range_length_error",
    178	"tx_multicast_packets",
    179	"tx_broadcast_packets",
    180	"rx_prio_0_packets", "rx_prio_0_bytes",
    181	"rx_prio_1_packets", "rx_prio_1_bytes",
    182	"rx_prio_2_packets", "rx_prio_2_bytes",
    183	"rx_prio_3_packets", "rx_prio_3_bytes",
    184	"rx_prio_4_packets", "rx_prio_4_bytes",
    185	"rx_prio_5_packets", "rx_prio_5_bytes",
    186	"rx_prio_6_packets", "rx_prio_6_bytes",
    187	"rx_prio_7_packets", "rx_prio_7_bytes",
    188	"rx_novlan_packets", "rx_novlan_bytes",
    189	"tx_prio_0_packets", "tx_prio_0_bytes",
    190	"tx_prio_1_packets", "tx_prio_1_bytes",
    191	"tx_prio_2_packets", "tx_prio_2_bytes",
    192	"tx_prio_3_packets", "tx_prio_3_bytes",
    193	"tx_prio_4_packets", "tx_prio_4_bytes",
    194	"tx_prio_5_packets", "tx_prio_5_bytes",
    195	"tx_prio_6_packets", "tx_prio_6_bytes",
    196	"tx_prio_7_packets", "tx_prio_7_bytes",
    197	"tx_novlan_packets", "tx_novlan_bytes",
    198
    199	/* xdp statistics */
    200	"rx_xdp_drop",
    201	"rx_xdp_redirect",
    202	"rx_xdp_redirect_fail",
    203	"rx_xdp_tx",
    204	"rx_xdp_tx_full",
    205
    206	/* phy statistics */
    207	"rx_packets_phy", "rx_bytes_phy",
    208	"tx_packets_phy", "tx_bytes_phy",
    209};
    210
     211static const char mlx4_en_test_names[][ETH_GSTRING_LEN] = {
    212	"Interrupt Test",
    213	"Link Test",
    214	"Speed Test",
    215	"Register Test",
    216	"Loopback Test",
    217};
    218
    219static u32 mlx4_en_get_msglevel(struct net_device *dev)
    220{
    221	return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
    222}
    223
    224static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
    225{
    226	((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
    227}
    228
    229static void mlx4_en_get_wol(struct net_device *netdev,
    230			    struct ethtool_wolinfo *wol)
    231{
    232	struct mlx4_en_priv *priv = netdev_priv(netdev);
    233	struct mlx4_caps *caps = &priv->mdev->dev->caps;
    234	int err = 0;
    235	u64 config = 0;
    236	u64 mask;
    237
    238	if ((priv->port < 1) || (priv->port > 2)) {
    239		en_err(priv, "Failed to get WoL information\n");
    240		return;
    241	}
    242
    243	mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
    244		MLX4_DEV_CAP_FLAG_WOL_PORT2;
    245
    246	if (!(caps->flags & mask)) {
    247		wol->supported = 0;
    248		wol->wolopts = 0;
    249		return;
    250	}
    251
    252	if (caps->wol_port[priv->port])
    253		wol->supported = WAKE_MAGIC;
    254	else
    255		wol->supported = 0;
    256
    257	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
    258	if (err) {
    259		en_err(priv, "Failed to get WoL information\n");
    260		return;
    261	}
    262
    263	if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC))
    264		wol->wolopts = WAKE_MAGIC;
    265	else
    266		wol->wolopts = 0;
    267}
    268
    269static int mlx4_en_set_wol(struct net_device *netdev,
    270			    struct ethtool_wolinfo *wol)
    271{
    272	struct mlx4_en_priv *priv = netdev_priv(netdev);
    273	u64 config = 0;
    274	int err = 0;
    275	u64 mask;
    276
    277	if ((priv->port < 1) || (priv->port > 2))
    278		return -EOPNOTSUPP;
    279
    280	mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
    281		MLX4_DEV_CAP_FLAG_WOL_PORT2;
    282
    283	if (!(priv->mdev->dev->caps.flags & mask))
    284		return -EOPNOTSUPP;
    285
    286	if (wol->supported & ~WAKE_MAGIC)
    287		return -EINVAL;
    288
    289	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
    290	if (err) {
    291		en_err(priv, "Failed to get WoL info, unable to modify\n");
    292		return err;
    293	}
    294
    295	if (wol->wolopts & WAKE_MAGIC) {
    296		config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
    297				MLX4_EN_WOL_MAGIC;
    298	} else {
    299		config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
    300		config |= MLX4_EN_WOL_DO_MODIFY;
    301	}
    302
    303	err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
    304	if (err)
    305		en_err(priv, "Failed to set WoL information\n");
    306
    307	return err;
    308}
    309
    310struct bitmap_iterator {
    311	unsigned long *stats_bitmap;
    312	unsigned int count;
    313	unsigned int iterator;
     314	bool advance_array; /* if set, skip entries whose bit is clear in stats_bitmap */
    315};
    316
    317static inline void bitmap_iterator_init(struct bitmap_iterator *h,
    318					unsigned long *stats_bitmap,
    319					int count)
    320{
    321	h->iterator = 0;
    322	h->advance_array = !bitmap_empty(stats_bitmap, count);
    323	h->count = h->advance_array ? bitmap_weight(stats_bitmap, count)
    324		: count;
    325	h->stats_bitmap = stats_bitmap;
    326}
    327
    328static inline int bitmap_iterator_test(struct bitmap_iterator *h)
    329{
    330	return !h->advance_array ? 1 : test_bit(h->iterator, h->stats_bitmap);
    331}
    332
    333static inline int bitmap_iterator_inc(struct bitmap_iterator *h)
    334{
    335	return h->iterator++;
    336}
    337
    338static inline unsigned int
    339bitmap_iterator_count(struct bitmap_iterator *h)
    340{
    341	return h->count;
    342}
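        /*
         * The bitmap_iterator helpers keep the stats strings and the stats
         * values aligned when some counters are masked out via
         * priv->stats_bitmap: bitmap_iterator_test() reports whether the
         * current position should be emitted, and when the bitmap is empty
         * every position is treated as present (advance_array == false).
         */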
    343
    344static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
    345{
    346	struct mlx4_en_priv *priv = netdev_priv(dev);
    347	struct bitmap_iterator it;
    348
    349	bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
    350
    351	switch (sset) {
    352	case ETH_SS_STATS:
    353		return bitmap_iterator_count(&it) +
    354			(priv->tx_ring_num[TX] * 2) +
    355			(priv->rx_ring_num * (3 + NUM_XDP_STATS));
    356	case ETH_SS_TEST:
    357		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
    358					& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
    359	case ETH_SS_PRIV_FLAGS:
    360		return ARRAY_SIZE(mlx4_en_priv_flags);
    361	default:
    362		return -EOPNOTSUPP;
    363	}
    364}
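        /*
         * For ETH_SS_STATS the count is: the bitmap-selected global counters,
         * plus two per TX ring (packets/bytes), plus (3 + NUM_XDP_STATS) per
         * RX ring (packets/bytes/dropped and the per-ring XDP counters).  For
         * ETH_SS_TEST the last two self-tests are dropped when the device has
         * no unicast-loopback capability, matching mlx4_en_get_strings().
         */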
    365
    366static void mlx4_en_get_ethtool_stats(struct net_device *dev,
    367		struct ethtool_stats *stats, uint64_t *data)
    368{
    369	struct mlx4_en_priv *priv = netdev_priv(dev);
    370	int index = 0;
    371	int i;
    372	struct bitmap_iterator it;
    373
    374	bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
    375
    376	spin_lock_bh(&priv->stats_lock);
    377
    378	mlx4_en_fold_software_stats(dev);
    379
    380	for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
    381		if (bitmap_iterator_test(&it))
    382			data[index++] = ((unsigned long *)&dev->stats)[i];
    383
    384	for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
    385		if (bitmap_iterator_test(&it))
    386			data[index++] = ((unsigned long *)&priv->port_stats)[i];
    387
    388	for (i = 0; i < NUM_PF_STATS; i++, bitmap_iterator_inc(&it))
    389		if (bitmap_iterator_test(&it))
    390			data[index++] =
    391				((unsigned long *)&priv->pf_stats)[i];
    392
    393	for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX;
    394	     i++, bitmap_iterator_inc(&it))
    395		if (bitmap_iterator_test(&it))
    396			data[index++] =
    397				((u64 *)&priv->rx_priority_flowstats)[i];
    398
    399	for (i = 0; i < NUM_FLOW_STATS_RX; i++, bitmap_iterator_inc(&it))
    400		if (bitmap_iterator_test(&it))
    401			data[index++] = ((u64 *)&priv->rx_flowstats)[i];
    402
    403	for (i = 0; i < NUM_FLOW_PRIORITY_STATS_TX;
    404	     i++, bitmap_iterator_inc(&it))
    405		if (bitmap_iterator_test(&it))
    406			data[index++] =
    407				((u64 *)&priv->tx_priority_flowstats)[i];
    408
    409	for (i = 0; i < NUM_FLOW_STATS_TX; i++, bitmap_iterator_inc(&it))
    410		if (bitmap_iterator_test(&it))
    411			data[index++] = ((u64 *)&priv->tx_flowstats)[i];
    412
    413	for (i = 0; i < NUM_PKT_STATS; i++, bitmap_iterator_inc(&it))
    414		if (bitmap_iterator_test(&it))
    415			data[index++] = ((unsigned long *)&priv->pkstats)[i];
    416
    417	for (i = 0; i < NUM_XDP_STATS; i++, bitmap_iterator_inc(&it))
    418		if (bitmap_iterator_test(&it))
    419			data[index++] = ((unsigned long *)&priv->xdp_stats)[i];
    420
    421	for (i = 0; i < NUM_PHY_STATS; i++, bitmap_iterator_inc(&it))
    422		if (bitmap_iterator_test(&it))
    423			data[index++] = ((unsigned long *)&priv->phy_stats)[i];
    424
    425	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
    426		data[index++] = priv->tx_ring[TX][i]->packets;
    427		data[index++] = priv->tx_ring[TX][i]->bytes;
    428	}
    429	for (i = 0; i < priv->rx_ring_num; i++) {
    430		data[index++] = priv->rx_ring[i]->packets;
    431		data[index++] = priv->rx_ring[i]->bytes;
    432		data[index++] = priv->rx_ring[i]->dropped;
    433		data[index++] = priv->rx_ring[i]->xdp_drop;
    434		data[index++] = priv->rx_ring[i]->xdp_redirect;
    435		data[index++] = priv->rx_ring[i]->xdp_redirect_fail;
    436		data[index++] = priv->rx_ring[i]->xdp_tx;
    437		data[index++] = priv->rx_ring[i]->xdp_tx_full;
    438	}
    439	spin_unlock_bh(&priv->stats_lock);
    440
    441}
    442
    443static void mlx4_en_self_test(struct net_device *dev,
    444			      struct ethtool_test *etest, u64 *buf)
    445{
    446	mlx4_en_ex_selftest(dev, &etest->flags, buf);
    447}
    448
    449static void mlx4_en_get_strings(struct net_device *dev,
    450				uint32_t stringset, uint8_t *data)
    451{
    452	struct mlx4_en_priv *priv = netdev_priv(dev);
    453	int index = 0;
    454	int i, strings = 0;
    455	struct bitmap_iterator it;
    456
    457	bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
    458
    459	switch (stringset) {
    460	case ETH_SS_TEST:
    461		for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
    462			strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
    463		if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
    464			for (; i < MLX4_EN_NUM_SELF_TEST; i++)
    465				strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
    466		break;
    467
    468	case ETH_SS_STATS:
    469		/* Add main counters */
    470		for (i = 0; i < NUM_MAIN_STATS; i++, strings++,
    471		     bitmap_iterator_inc(&it))
    472			if (bitmap_iterator_test(&it))
    473				strcpy(data + (index++) * ETH_GSTRING_LEN,
    474				       main_strings[strings]);
    475
    476		for (i = 0; i < NUM_PORT_STATS; i++, strings++,
    477		     bitmap_iterator_inc(&it))
    478			if (bitmap_iterator_test(&it))
    479				strcpy(data + (index++) * ETH_GSTRING_LEN,
    480				       main_strings[strings]);
    481
    482		for (i = 0; i < NUM_PF_STATS; i++, strings++,
    483		     bitmap_iterator_inc(&it))
    484			if (bitmap_iterator_test(&it))
    485				strcpy(data + (index++) * ETH_GSTRING_LEN,
    486				       main_strings[strings]);
    487
    488		for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
    489		     bitmap_iterator_inc(&it))
    490			if (bitmap_iterator_test(&it))
    491				strcpy(data + (index++) * ETH_GSTRING_LEN,
    492				       main_strings[strings]);
    493
    494		for (i = 0; i < NUM_PKT_STATS; i++, strings++,
    495		     bitmap_iterator_inc(&it))
    496			if (bitmap_iterator_test(&it))
    497				strcpy(data + (index++) * ETH_GSTRING_LEN,
    498				       main_strings[strings]);
    499
    500		for (i = 0; i < NUM_XDP_STATS; i++, strings++,
    501		     bitmap_iterator_inc(&it))
    502			if (bitmap_iterator_test(&it))
    503				strcpy(data + (index++) * ETH_GSTRING_LEN,
    504				       main_strings[strings]);
    505
    506		for (i = 0; i < NUM_PHY_STATS; i++, strings++,
    507		     bitmap_iterator_inc(&it))
    508			if (bitmap_iterator_test(&it))
    509				strcpy(data + (index++) * ETH_GSTRING_LEN,
    510				       main_strings[strings]);
    511
    512		for (i = 0; i < priv->tx_ring_num[TX]; i++) {
    513			sprintf(data + (index++) * ETH_GSTRING_LEN,
    514				"tx%d_packets", i);
    515			sprintf(data + (index++) * ETH_GSTRING_LEN,
    516				"tx%d_bytes", i);
    517		}
    518		for (i = 0; i < priv->rx_ring_num; i++) {
    519			sprintf(data + (index++) * ETH_GSTRING_LEN,
    520				"rx%d_packets", i);
    521			sprintf(data + (index++) * ETH_GSTRING_LEN,
    522				"rx%d_bytes", i);
    523			sprintf(data + (index++) * ETH_GSTRING_LEN,
    524				"rx%d_dropped", i);
    525			sprintf(data + (index++) * ETH_GSTRING_LEN,
    526				"rx%d_xdp_drop", i);
    527			sprintf(data + (index++) * ETH_GSTRING_LEN,
    528				"rx%d_xdp_redirect", i);
    529			sprintf(data + (index++) * ETH_GSTRING_LEN,
    530				"rx%d_xdp_redirect_fail", i);
    531			sprintf(data + (index++) * ETH_GSTRING_LEN,
    532				"rx%d_xdp_tx", i);
    533			sprintf(data + (index++) * ETH_GSTRING_LEN,
    534				"rx%d_xdp_tx_full", i);
    535		}
    536		break;
    537	case ETH_SS_PRIV_FLAGS:
    538		for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++)
    539			strcpy(data + i * ETH_GSTRING_LEN,
    540			       mlx4_en_priv_flags[i]);
    541		break;
    542
    543	}
    544}
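        /*
         * The order of names emitted here must match the order of values in
         * mlx4_en_get_ethtool_stats() exactly: main_strings[] is walked with
         * the same bitmap iterator, and the per-ring "txN_*"/"rxN_*" names are
         * generated to line up with the per-ring values appended at the end.
         */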
    545
    546static u32 mlx4_en_autoneg_get(struct net_device *dev)
    547{
    548	struct mlx4_en_priv *priv = netdev_priv(dev);
    549	struct mlx4_en_dev *mdev = priv->mdev;
    550	u32 autoneg = AUTONEG_DISABLE;
    551
    552	if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) &&
    553	    (priv->port_state.flags & MLX4_EN_PORT_ANE))
    554		autoneg = AUTONEG_ENABLE;
    555
    556	return autoneg;
    557}
    558
    559static void ptys2ethtool_update_supported_port(unsigned long *mask,
    560					       struct mlx4_ptys_reg *ptys_reg)
    561{
    562	u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);
    563
    564	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
    565			 | MLX4_PROT_MASK(MLX4_1000BASE_T)
    566			 | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
    567		__set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask);
    568	} else if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
    569			 | MLX4_PROT_MASK(MLX4_10GBASE_SR)
    570			 | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
    571			 | MLX4_PROT_MASK(MLX4_40GBASE_CR4)
    572			 | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
    573			 | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
    574		__set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
    575	} else if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
    576			 | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
    577			 | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
    578			 | MLX4_PROT_MASK(MLX4_10GBASE_KR)
    579			 | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
    580			 | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
    581		__set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mask);
    582	}
    583}
    584
    585static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
    586{
    587	u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper);
    588
    589	if (!eth_proto) /* link down */
    590		eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);
    591
    592	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
    593			 | MLX4_PROT_MASK(MLX4_1000BASE_T)
    594			 | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
    595			return PORT_TP;
    596	}
    597
    598	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR)
    599			 | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
    600			 | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
    601			 | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
    602			return PORT_FIBRE;
    603	}
    604
    605	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
    606			 | MLX4_PROT_MASK(MLX4_56GBASE_CR4)
    607			 | MLX4_PROT_MASK(MLX4_40GBASE_CR4))) {
    608			return PORT_DA;
    609	}
    610
    611	if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
    612			 | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
    613			 | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
    614			 | MLX4_PROT_MASK(MLX4_10GBASE_KR)
    615			 | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
    616			 | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
    617			return PORT_NONE;
    618	}
    619	return PORT_OTHER;
    620}
    621
    622#define MLX4_LINK_MODES_SZ \
    623	(sizeof_field(struct mlx4_ptys_reg, eth_proto_cap) * 8)
    624
    625enum ethtool_report {
    626	SUPPORTED = 0,
    627	ADVERTISED = 1,
    628};
    629
    630struct ptys2ethtool_config {
    631	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
    632	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
    633	u32 speed;
    634};
    635
    636static unsigned long *ptys2ethtool_link_mode(struct ptys2ethtool_config *cfg,
    637					     enum ethtool_report report)
    638{
    639	switch (report) {
    640	case SUPPORTED:
    641		return cfg->supported;
    642	case ADVERTISED:
    643		return cfg->advertised;
    644	}
    645	return NULL;
    646}
    647
    648#define MLX4_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...)		\
    649	({								\
    650		struct ptys2ethtool_config *cfg;			\
    651		static const unsigned int modes[] = { __VA_ARGS__ };	\
    652		unsigned int i;						\
    653		cfg = &ptys2ethtool_map[reg_];				\
    654		cfg->speed = speed_;					\
    655		linkmode_zero(cfg->supported);				\
    656		linkmode_zero(cfg->advertised);				\
    657		for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) {		\
    658			__set_bit(modes[i], cfg->supported);		\
    659			__set_bit(modes[i], cfg->advertised);		\
    660		}							\
    661	})
    662
    663/* Translates mlx4 link mode to equivalent ethtool Link modes/speed */
    664static struct ptys2ethtool_config ptys2ethtool_map[MLX4_LINK_MODES_SZ];
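        /*
         * Each MLX4_BUILD_PTYS2ETHTOOL_CONFIG() invocation below fills one
         * ptys2ethtool_map[] slot; for example
         *   MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_CR4, SPEED_40000,
         *                                  ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT);
         * records SPEED_40000 and sets that link-mode bit in both the
         * "supported" and "advertised" masks of the MLX4_40GBASE_CR4 entry.
         */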
    665
    666void __init mlx4_en_init_ptys2ethtool_map(void)
    667{
    668	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_100BASE_TX, SPEED_100,
    669				       ETHTOOL_LINK_MODE_100baseT_Full_BIT);
    670	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000,
    671				       ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
    672	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000,
    673				       ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
    674	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000,
    675				       ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
    676	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000,
    677				       ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
    678	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CX4, SPEED_10000,
    679				       ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
    680	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KX4, SPEED_10000,
    681				       ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
    682	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000,
    683				       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
    684	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000,
    685				       ETHTOOL_LINK_MODE_10000baseCR_Full_BIT);
    686	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000,
    687				       ETHTOOL_LINK_MODE_10000baseSR_Full_BIT);
    688	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000,
    689				       ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
    690				       ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
    691	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_CR4, SPEED_40000,
    692				       ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT);
    693	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_KR4, SPEED_40000,
    694				       ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT);
    695	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_SR4, SPEED_40000,
    696				       ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT);
    697	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_KR4, SPEED_56000,
    698				       ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT);
    699	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_CR4, SPEED_56000,
    700				       ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT);
    701	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_SR4, SPEED_56000,
    702				       ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT);
     703}
    704
    705static void ptys2ethtool_update_link_modes(unsigned long *link_modes,
    706					   u32 eth_proto,
    707					   enum ethtool_report report)
    708{
    709	int i;
    710	for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
    711		if (eth_proto & MLX4_PROT_MASK(i))
    712			linkmode_or(link_modes, link_modes,
    713				    ptys2ethtool_link_mode(&ptys2ethtool_map[i], report));
    714	}
    715}
    716
    717static u32 ethtool2ptys_link_modes(const unsigned long *link_modes,
    718				   enum ethtool_report report)
    719{
    720	int i;
    721	u32 ptys_modes = 0;
    722
    723	for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
    724		ulong *map_mode = ptys2ethtool_link_mode(&ptys2ethtool_map[i],
    725							 report);
    726		if (linkmode_intersects(map_mode, link_modes))
    727			ptys_modes |= 1 << i;
    728	}
    729	return ptys_modes;
    730}
    731
    732/* Convert actual speed (SPEED_XXX) to ptys link modes */
    733static u32 speed2ptys_link_modes(u32 speed)
    734{
    735	int i;
    736	u32 ptys_modes = 0;
    737
    738	for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
    739		if (ptys2ethtool_map[i].speed == speed)
    740			ptys_modes |= 1 << i;
    741	}
    742	return ptys_modes;
    743}
    744
    745static int
    746ethtool_get_ptys_link_ksettings(struct net_device *dev,
    747				struct ethtool_link_ksettings *link_ksettings)
    748{
    749	struct mlx4_en_priv *priv = netdev_priv(dev);
    750	struct mlx4_ptys_reg ptys_reg;
    751	u32 eth_proto;
    752	int ret;
    753
    754	memset(&ptys_reg, 0, sizeof(ptys_reg));
    755	ptys_reg.local_port = priv->port;
    756	ptys_reg.proto_mask = MLX4_PTYS_EN;
    757	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
    758				   MLX4_ACCESS_REG_QUERY, &ptys_reg);
    759	if (ret) {
    760		en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)",
    761			ret);
    762		return ret;
    763	}
    764	en_dbg(DRV, priv, "ptys_reg.proto_mask       %x\n",
    765	       ptys_reg.proto_mask);
    766	en_dbg(DRV, priv, "ptys_reg.eth_proto_cap    %x\n",
    767	       be32_to_cpu(ptys_reg.eth_proto_cap));
    768	en_dbg(DRV, priv, "ptys_reg.eth_proto_admin  %x\n",
    769	       be32_to_cpu(ptys_reg.eth_proto_admin));
    770	en_dbg(DRV, priv, "ptys_reg.eth_proto_oper   %x\n",
    771	       be32_to_cpu(ptys_reg.eth_proto_oper));
    772	en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n",
    773	       be32_to_cpu(ptys_reg.eth_proto_lp_adv));
    774
    775	/* reset supported/advertising masks */
    776	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
    777	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
    778
    779	ptys2ethtool_update_supported_port(link_ksettings->link_modes.supported,
    780					   &ptys_reg);
    781
    782	eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap);
    783	ptys2ethtool_update_link_modes(link_ksettings->link_modes.supported,
    784				       eth_proto, SUPPORTED);
    785
    786	eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin);
    787	ptys2ethtool_update_link_modes(link_ksettings->link_modes.advertising,
    788				       eth_proto, ADVERTISED);
    789
    790	ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
    791					     Pause);
    792	ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
    793					     Asym_Pause);
    794
    795	if (priv->prof->tx_pause)
    796		ethtool_link_ksettings_add_link_mode(link_ksettings,
    797						     advertising, Pause);
    798	if (priv->prof->tx_pause ^ priv->prof->rx_pause)
    799		ethtool_link_ksettings_add_link_mode(link_ksettings,
    800						     advertising, Asym_Pause);
    801
    802	link_ksettings->base.port = ptys_get_active_port(&ptys_reg);
    803
    804	if (mlx4_en_autoneg_get(dev)) {
    805		ethtool_link_ksettings_add_link_mode(link_ksettings,
    806						     supported, Autoneg);
    807		ethtool_link_ksettings_add_link_mode(link_ksettings,
    808						     advertising, Autoneg);
    809	}
    810
    811	link_ksettings->base.autoneg
    812		= (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
    813		AUTONEG_ENABLE : AUTONEG_DISABLE;
    814
    815	eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv);
    816
    817	ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
    818	ptys2ethtool_update_link_modes(
    819		link_ksettings->link_modes.lp_advertising,
    820		eth_proto, ADVERTISED);
    821	if (priv->port_state.flags & MLX4_EN_PORT_ANC)
    822		ethtool_link_ksettings_add_link_mode(link_ksettings,
    823						     lp_advertising, Autoneg);
    824
    825	link_ksettings->base.phy_address = 0;
    826	link_ksettings->base.mdio_support = 0;
    827	link_ksettings->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
    828	link_ksettings->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
    829
    830	return ret;
    831}
    832
    833static void
    834ethtool_get_default_link_ksettings(
    835	struct net_device *dev, struct ethtool_link_ksettings *link_ksettings)
    836{
    837	struct mlx4_en_priv *priv = netdev_priv(dev);
    838	int trans_type;
    839
    840	link_ksettings->base.autoneg = AUTONEG_DISABLE;
    841
    842	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
    843	ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
    844					     10000baseT_Full);
    845
    846	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
    847	ethtool_link_ksettings_add_link_mode(link_ksettings, advertising,
    848					     10000baseT_Full);
    849
    850	trans_type = priv->port_state.transceiver;
    851	if (trans_type > 0 && trans_type <= 0xC) {
    852		link_ksettings->base.port = PORT_FIBRE;
    853		ethtool_link_ksettings_add_link_mode(link_ksettings,
    854						     supported, FIBRE);
    855		ethtool_link_ksettings_add_link_mode(link_ksettings,
    856						     advertising, FIBRE);
    857	} else if (trans_type == 0x80 || trans_type == 0) {
    858		link_ksettings->base.port = PORT_TP;
    859		ethtool_link_ksettings_add_link_mode(link_ksettings,
    860						     supported, TP);
    861		ethtool_link_ksettings_add_link_mode(link_ksettings,
    862						     advertising, TP);
    863	} else  {
    864		link_ksettings->base.port = -1;
    865	}
    866}
    867
    868static int
    869mlx4_en_get_link_ksettings(struct net_device *dev,
    870			   struct ethtool_link_ksettings *link_ksettings)
    871{
    872	struct mlx4_en_priv *priv = netdev_priv(dev);
    873	int ret = -EINVAL;
    874
    875	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
    876		return -ENOMEM;
    877
    878	en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n",
    879	       priv->port_state.flags & MLX4_EN_PORT_ANC,
    880	       priv->port_state.flags & MLX4_EN_PORT_ANE);
    881
    882	if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
    883		ret = ethtool_get_ptys_link_ksettings(dev, link_ksettings);
     884	if (ret) /* ETH PROT CTRL is not supported or PTYS CMD failed */
    885		ethtool_get_default_link_ksettings(dev, link_ksettings);
    886
    887	if (netif_carrier_ok(dev)) {
    888		link_ksettings->base.speed = priv->port_state.link_speed;
    889		link_ksettings->base.duplex = DUPLEX_FULL;
    890	} else {
    891		link_ksettings->base.speed = SPEED_UNKNOWN;
    892		link_ksettings->base.duplex = DUPLEX_UNKNOWN;
    893	}
    894	return 0;
    895}
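        /*
         * mlx4_en_get_link_ksettings() (what a plain "ethtool <iface>"
         * reports) first refreshes the port state, then prefers the PTYS
         * register based report when the device exposes
         * MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL and falls back to a static 10GbE
         * default otherwise; speed/duplex are only filled in while the
         * carrier is up.
         */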
    896
     897/* Calculate PTYS admin according to the ethtool speed (SPEED_XXX) */
    898static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed,
    899				   __be32 proto_cap)
    900{
    901	__be32 proto_admin = 0;
    902
    903	if (!speed) { /* Speed = 0 ==> Reset Link modes */
    904		proto_admin = proto_cap;
    905		en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n",
    906			be32_to_cpu(proto_cap));
    907	} else {
    908		u32 ptys_link_modes = speed2ptys_link_modes(speed);
    909
    910		proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap;
    911		en_info(priv, "Setting Speed to %d\n", speed);
    912	}
    913	return proto_admin;
    914}
    915
    916static int
    917mlx4_en_set_link_ksettings(struct net_device *dev,
    918			   const struct ethtool_link_ksettings *link_ksettings)
    919{
    920	struct mlx4_en_priv *priv = netdev_priv(dev);
    921	struct mlx4_ptys_reg ptys_reg;
    922	__be32 proto_admin;
    923	u8 cur_autoneg;
    924	int ret;
    925
    926	u32 ptys_adv = ethtool2ptys_link_modes(
    927		link_ksettings->link_modes.advertising, ADVERTISED);
    928	const int speed = link_ksettings->base.speed;
    929
    930	en_dbg(DRV, priv,
    931	       "Set Speed=%d adv={%*pbl} autoneg=%d duplex=%d\n",
    932	       speed, __ETHTOOL_LINK_MODE_MASK_NBITS,
    933	       link_ksettings->link_modes.advertising,
    934	       link_ksettings->base.autoneg,
    935	       link_ksettings->base.duplex);
    936
    937	if (!(priv->mdev->dev->caps.flags2 &
    938	      MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) ||
    939	    (link_ksettings->base.duplex == DUPLEX_HALF))
    940		return -EINVAL;
    941
    942	memset(&ptys_reg, 0, sizeof(ptys_reg));
    943	ptys_reg.local_port = priv->port;
    944	ptys_reg.proto_mask = MLX4_PTYS_EN;
    945	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
    946				   MLX4_ACCESS_REG_QUERY, &ptys_reg);
    947	if (ret) {
    948		en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n",
    949			ret);
    950		return 0;
    951	}
    952
    953	cur_autoneg = ptys_reg.flags & MLX4_PTYS_AN_DISABLE_ADMIN ?
    954				AUTONEG_DISABLE : AUTONEG_ENABLE;
    955
    956	if (link_ksettings->base.autoneg == AUTONEG_DISABLE) {
    957		proto_admin = speed_set_ptys_admin(priv, speed,
    958						   ptys_reg.eth_proto_cap);
    959		if ((be32_to_cpu(proto_admin) &
    960		     (MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII) |
    961		      MLX4_PROT_MASK(MLX4_1000BASE_KX))) &&
    962		    (ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP))
    963			ptys_reg.flags |= MLX4_PTYS_AN_DISABLE_ADMIN;
    964	} else {
    965		proto_admin = cpu_to_be32(ptys_adv);
    966		ptys_reg.flags &= ~MLX4_PTYS_AN_DISABLE_ADMIN;
    967	}
    968
    969	proto_admin &= ptys_reg.eth_proto_cap;
    970	if (!proto_admin) {
    971		en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
    972		return -EINVAL; /* nothing to change due to bad input */
    973	}
    974
    975	if ((proto_admin == ptys_reg.eth_proto_admin) &&
    976	    ((ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP) &&
    977	     (link_ksettings->base.autoneg == cur_autoneg)))
    978		return 0; /* Nothing to change */
    979
    980	en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
    981	       be32_to_cpu(proto_admin));
    982
    983	ptys_reg.eth_proto_admin = proto_admin;
    984	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE,
    985				   &ptys_reg);
    986	if (ret) {
    987		en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)",
    988			be32_to_cpu(ptys_reg.eth_proto_admin), ret);
    989		return ret;
    990	}
    991
    992	mutex_lock(&priv->mdev->state_lock);
    993	if (priv->port_up) {
    994		en_warn(priv, "Port link mode changed, restarting port...\n");
    995		mlx4_en_stop_port(dev, 1);
    996		if (mlx4_en_start_port(dev))
    997			en_err(priv, "Failed restarting port %d\n", priv->port);
    998	}
    999	mutex_unlock(&priv->mdev->state_lock);
   1000	return 0;
   1001}
   1002
   1003static int mlx4_en_get_coalesce(struct net_device *dev,
   1004				struct ethtool_coalesce *coal,
   1005				struct kernel_ethtool_coalesce *kernel_coal,
   1006				struct netlink_ext_ack *extack)
   1007{
   1008	struct mlx4_en_priv *priv = netdev_priv(dev);
   1009
   1010	coal->tx_coalesce_usecs = priv->tx_usecs;
   1011	coal->tx_max_coalesced_frames = priv->tx_frames;
   1012	coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
   1013
   1014	coal->rx_coalesce_usecs = priv->rx_usecs;
   1015	coal->rx_max_coalesced_frames = priv->rx_frames;
   1016
   1017	coal->pkt_rate_low = priv->pkt_rate_low;
   1018	coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
   1019	coal->pkt_rate_high = priv->pkt_rate_high;
   1020	coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
   1021	coal->rate_sample_interval = priv->sample_interval;
   1022	coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
   1023
   1024	return 0;
   1025}
   1026
   1027static int mlx4_en_set_coalesce(struct net_device *dev,
   1028				struct ethtool_coalesce *coal,
   1029				struct kernel_ethtool_coalesce *kernel_coal,
   1030				struct netlink_ext_ack *extack)
   1031{
   1032	struct mlx4_en_priv *priv = netdev_priv(dev);
   1033
   1034	if (!coal->tx_max_coalesced_frames_irq)
   1035		return -EINVAL;
   1036
   1037	if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
   1038	    coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
   1039	    coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
   1040	    coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
   1041		netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
   1042			    __func__, MLX4_EN_MAX_COAL_TIME);
   1043		return -ERANGE;
   1044	}
   1045
   1046	if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
   1047	    coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
   1048		netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
   1049			    __func__, MLX4_EN_MAX_COAL_PKTS);
   1050		return -ERANGE;
   1051	}
   1052
   1053	priv->rx_frames = (coal->rx_max_coalesced_frames ==
   1054			   MLX4_EN_AUTO_CONF) ?
   1055				MLX4_EN_RX_COAL_TARGET :
   1056				coal->rx_max_coalesced_frames;
   1057	priv->rx_usecs = (coal->rx_coalesce_usecs ==
   1058			  MLX4_EN_AUTO_CONF) ?
   1059				MLX4_EN_RX_COAL_TIME :
   1060				coal->rx_coalesce_usecs;
   1061
   1062	/* Setting TX coalescing parameters */
   1063	if (coal->tx_coalesce_usecs != priv->tx_usecs ||
   1064	    coal->tx_max_coalesced_frames != priv->tx_frames) {
   1065		priv->tx_usecs = coal->tx_coalesce_usecs;
   1066		priv->tx_frames = coal->tx_max_coalesced_frames;
   1067	}
   1068
   1069	/* Set adaptive coalescing params */
   1070	priv->pkt_rate_low = coal->pkt_rate_low;
   1071	priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
   1072	priv->pkt_rate_high = coal->pkt_rate_high;
   1073	priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
   1074	priv->sample_interval = coal->rate_sample_interval;
   1075	priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
   1076	priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;
   1077
   1078	return mlx4_en_moderation_update(priv);
   1079}
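        /*
         * MLX4_EN_AUTO_CONF acts as a sentinel: passing it for rx-frames or
         * rx-usecs restores the driver defaults (MLX4_EN_RX_COAL_TARGET /
         * MLX4_EN_RX_COAL_TIME).  Other values are range-checked against
         * MLX4_EN_MAX_COAL_TIME / MLX4_EN_MAX_COAL_PKTS and then applied via
         * mlx4_en_moderation_update().  Illustrative usage:
         *   ethtool -C eth0 adaptive-rx on rx-usecs 16 rx-frames 44
         */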
   1080
   1081static int mlx4_en_set_pauseparam(struct net_device *dev,
   1082				struct ethtool_pauseparam *pause)
   1083{
   1084	struct mlx4_en_priv *priv = netdev_priv(dev);
   1085	struct mlx4_en_dev *mdev = priv->mdev;
   1086	u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
   1087	int err;
   1088
   1089	if (pause->autoneg)
   1090		return -EINVAL;
   1091
   1092	tx_pause = !!(pause->tx_pause);
   1093	rx_pause = !!(pause->rx_pause);
   1094	rx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->rx_ppp;
   1095	tx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->tx_ppp;
   1096
   1097	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
   1098				    priv->rx_skb_size + ETH_FCS_LEN,
   1099				    tx_pause, tx_ppp, rx_pause, rx_ppp);
   1100	if (err) {
   1101		en_err(priv, "Failed setting pause params, err = %d\n", err);
   1102		return err;
   1103	}
   1104
   1105	mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
   1106					rx_ppp, rx_pause, tx_ppp, tx_pause);
   1107
   1108	priv->prof->tx_pause = tx_pause;
   1109	priv->prof->rx_pause = rx_pause;
   1110	priv->prof->tx_ppp = tx_ppp;
   1111	priv->prof->rx_ppp = rx_ppp;
   1112
   1113	return err;
   1114}
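        /*
         * Global pause and per-priority (PFC) pause are mutually exclusive
         * here: enabling tx or rx pause forces tx_ppp/rx_ppp to 0 before the
         * settings are written with mlx4_SET_PORT_general(), and the stats
         * bitmap is updated so only the relevant pause counters are exported.
         */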
   1115
   1116static void mlx4_en_get_pause_stats(struct net_device *dev,
   1117				    struct ethtool_pause_stats *stats)
   1118{
   1119	struct mlx4_en_priv *priv = netdev_priv(dev);
   1120	struct bitmap_iterator it;
   1121
   1122	bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
   1123
   1124	spin_lock_bh(&priv->stats_lock);
   1125	if (test_bit(FLOW_PRIORITY_STATS_IDX_TX_FRAMES,
   1126		     priv->stats_bitmap.bitmap))
   1127		stats->tx_pause_frames = priv->tx_flowstats.tx_pause;
   1128	if (test_bit(FLOW_PRIORITY_STATS_IDX_RX_FRAMES,
   1129		     priv->stats_bitmap.bitmap))
   1130		stats->rx_pause_frames = priv->rx_flowstats.rx_pause;
   1131	spin_unlock_bh(&priv->stats_lock);
   1132}
   1133
   1134static void mlx4_en_get_pauseparam(struct net_device *dev,
   1135				 struct ethtool_pauseparam *pause)
   1136{
   1137	struct mlx4_en_priv *priv = netdev_priv(dev);
   1138
   1139	pause->tx_pause = priv->prof->tx_pause;
   1140	pause->rx_pause = priv->prof->rx_pause;
   1141}
   1142
   1143static int mlx4_en_set_ringparam(struct net_device *dev,
   1144				 struct ethtool_ringparam *param,
   1145				 struct kernel_ethtool_ringparam *kernel_param,
   1146				 struct netlink_ext_ack *extack)
   1147{
   1148	struct mlx4_en_priv *priv = netdev_priv(dev);
   1149	struct mlx4_en_dev *mdev = priv->mdev;
   1150	struct mlx4_en_port_profile new_prof;
   1151	struct mlx4_en_priv *tmp;
   1152	u32 rx_size, tx_size;
   1153	int port_up = 0;
   1154	int err = 0;
   1155
   1156	if (param->rx_jumbo_pending || param->rx_mini_pending)
   1157		return -EINVAL;
   1158
   1159	if (param->rx_pending < MLX4_EN_MIN_RX_SIZE) {
   1160		en_warn(priv, "%s: rx_pending (%d) < min (%d)\n",
   1161			__func__, param->rx_pending,
   1162			MLX4_EN_MIN_RX_SIZE);
   1163		return -EINVAL;
   1164	}
   1165	if (param->tx_pending < MLX4_EN_MIN_TX_SIZE) {
   1166		en_warn(priv, "%s: tx_pending (%d) < min (%lu)\n",
   1167			__func__, param->tx_pending,
   1168			MLX4_EN_MIN_TX_SIZE);
   1169		return -EINVAL;
   1170	}
   1171
   1172	rx_size = roundup_pow_of_two(param->rx_pending);
   1173	tx_size = roundup_pow_of_two(param->tx_pending);
   1174
   1175	if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
   1176					priv->rx_ring[0]->size) &&
   1177	    tx_size == priv->tx_ring[TX][0]->size)
   1178		return 0;
   1179
   1180	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
   1181	if (!tmp)
   1182		return -ENOMEM;
   1183
   1184	mutex_lock(&mdev->state_lock);
   1185	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
   1186	new_prof.tx_ring_size = tx_size;
   1187	new_prof.rx_ring_size = rx_size;
   1188	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
   1189	if (err)
   1190		goto out;
   1191
   1192	if (priv->port_up) {
   1193		port_up = 1;
   1194		mlx4_en_stop_port(dev, 1);
   1195	}
   1196
   1197	mlx4_en_safe_replace_resources(priv, tmp);
   1198
   1199	if (port_up) {
   1200		err = mlx4_en_start_port(dev);
   1201		if (err)
   1202			en_err(priv, "Failed starting port\n");
   1203	}
   1204
   1205	err = mlx4_en_moderation_update(priv);
   1206out:
   1207	kfree(tmp);
   1208	mutex_unlock(&mdev->state_lock);
   1209	return err;
   1210}
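        /*
         * Requested ring sizes are rounded up to the next power of two and,
         * if they actually change, the new rings are allocated first
         * (mlx4_en_try_alloc_resources), the port is stopped, resources are
         * swapped in, and the port is restarted; a request matching the
         * current sizes returns early without touching the port.
         */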
   1211
   1212static void mlx4_en_get_ringparam(struct net_device *dev,
   1213				  struct ethtool_ringparam *param,
   1214				  struct kernel_ethtool_ringparam *kernel_param,
   1215				  struct netlink_ext_ack *extack)
   1216{
   1217	struct mlx4_en_priv *priv = netdev_priv(dev);
   1218
   1219	memset(param, 0, sizeof(*param));
   1220	param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
   1221	param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
   1222	param->rx_pending = priv->port_up ?
   1223		priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
   1224	param->tx_pending = priv->tx_ring[TX][0]->size;
   1225}
   1226
   1227static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
   1228{
   1229	struct mlx4_en_priv *priv = netdev_priv(dev);
   1230
   1231	return rounddown_pow_of_two(priv->rx_ring_num);
   1232}
   1233
   1234static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev)
   1235{
   1236	return MLX4_EN_RSS_KEY_SIZE;
   1237}
   1238
   1239static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
   1240{
   1241	struct mlx4_en_priv *priv = netdev_priv(dev);
   1242
   1243	/* check if requested function is supported by the device */
   1244	if (hfunc == ETH_RSS_HASH_TOP) {
   1245		if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
   1246			return -EINVAL;
   1247		if (!(dev->features & NETIF_F_RXHASH))
   1248			en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
   1249		return 0;
   1250	} else if (hfunc == ETH_RSS_HASH_XOR) {
   1251		if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
   1252			return -EINVAL;
   1253		if (dev->features & NETIF_F_RXHASH)
   1254			en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
   1255		return 0;
   1256	}
   1257
   1258	return -EINVAL;
   1259}
   1260
   1261static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
   1262			    u8 *hfunc)
   1263{
   1264	struct mlx4_en_priv *priv = netdev_priv(dev);
   1265	u32 n = mlx4_en_get_rxfh_indir_size(dev);
   1266	u32 i, rss_rings;
   1267
   1268	rss_rings = priv->prof->rss_rings ?: n;
   1269	rss_rings = rounddown_pow_of_two(rss_rings);
   1270
   1271	for (i = 0; i < n; i++) {
   1272		if (!ring_index)
   1273			break;
   1274		ring_index[i] = i % rss_rings;
   1275	}
   1276	if (key)
   1277		memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
   1278	if (hfunc)
   1279		*hfunc = priv->rss_hash_fn;
   1280	return 0;
   1281}
   1282
   1283static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
   1284			    const u8 *key, const u8 hfunc)
   1285{
   1286	struct mlx4_en_priv *priv = netdev_priv(dev);
   1287	u32 n = mlx4_en_get_rxfh_indir_size(dev);
   1288	struct mlx4_en_dev *mdev = priv->mdev;
   1289	int port_up = 0;
   1290	int err = 0;
   1291	int i;
   1292	int rss_rings = 0;
   1293
   1294	/* Calculate RSS table size and make sure flows are spread evenly
   1295	 * between rings
   1296	 */
   1297	for (i = 0; i < n; i++) {
   1298		if (!ring_index)
   1299			break;
   1300		if (i > 0 && !ring_index[i] && !rss_rings)
   1301			rss_rings = i;
   1302
   1303		if (ring_index[i] != (i % (rss_rings ?: n)))
   1304			return -EINVAL;
   1305	}
   1306
   1307	if (!rss_rings)
   1308		rss_rings = n;
   1309
    1310	/* RSS table size must be a power of 2 */
   1311	if (!is_power_of_2(rss_rings))
   1312		return -EINVAL;
   1313
   1314	if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
   1315		err = mlx4_en_check_rxfh_func(dev, hfunc);
   1316		if (err)
   1317			return err;
   1318	}
   1319
   1320	mutex_lock(&mdev->state_lock);
   1321	if (priv->port_up) {
   1322		port_up = 1;
   1323		mlx4_en_stop_port(dev, 1);
   1324	}
   1325
   1326	if (ring_index)
   1327		priv->prof->rss_rings = rss_rings;
   1328	if (key)
   1329		memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
    1330	if (hfunc != ETH_RSS_HASH_NO_CHANGE)
   1331		priv->rss_hash_fn = hfunc;
   1332
   1333	if (port_up) {
   1334		err = mlx4_en_start_port(dev);
   1335		if (err)
   1336			en_err(priv, "Failed starting port\n");
   1337	}
   1338
   1339	mutex_unlock(&mdev->state_lock);
   1340	return err;
   1341}
   1342
   1343#define all_zeros_or_all_ones(field)		\
   1344	((field) == 0 || (field) == (__force typeof(field))-1)
   1345
   1346static int mlx4_en_validate_flow(struct net_device *dev,
   1347				 struct ethtool_rxnfc *cmd)
   1348{
   1349	struct ethtool_usrip4_spec *l3_mask;
   1350	struct ethtool_tcpip4_spec *l4_mask;
   1351	struct ethhdr *eth_mask;
   1352
   1353	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
   1354		return -EINVAL;
   1355
   1356	if (cmd->fs.flow_type & FLOW_MAC_EXT) {
   1357		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
   1358		if (!is_broadcast_ether_addr(cmd->fs.m_ext.h_dest))
   1359			return -EINVAL;
   1360	}
   1361
   1362	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
   1363	case TCP_V4_FLOW:
   1364	case UDP_V4_FLOW:
   1365		if (cmd->fs.m_u.tcp_ip4_spec.tos)
   1366			return -EINVAL;
   1367		l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
   1368		/* don't allow mask which isn't all 0 or 1 */
   1369		if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
   1370		    !all_zeros_or_all_ones(l4_mask->ip4dst) ||
   1371		    !all_zeros_or_all_ones(l4_mask->psrc) ||
   1372		    !all_zeros_or_all_ones(l4_mask->pdst))
   1373			return -EINVAL;
   1374		break;
   1375	case IP_USER_FLOW:
   1376		l3_mask = &cmd->fs.m_u.usr_ip4_spec;
   1377		if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
   1378		    cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
   1379		    (!l3_mask->ip4src && !l3_mask->ip4dst) ||
   1380		    !all_zeros_or_all_ones(l3_mask->ip4src) ||
   1381		    !all_zeros_or_all_ones(l3_mask->ip4dst))
   1382			return -EINVAL;
   1383		break;
   1384	case ETHER_FLOW:
   1385		eth_mask = &cmd->fs.m_u.ether_spec;
   1386		/* source mac mask must not be set */
   1387		if (!is_zero_ether_addr(eth_mask->h_source))
   1388			return -EINVAL;
   1389
   1390		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
   1391		if (!is_broadcast_ether_addr(eth_mask->h_dest))
   1392			return -EINVAL;
   1393
   1394		if (!all_zeros_or_all_ones(eth_mask->h_proto))
   1395			return -EINVAL;
   1396		break;
   1397	default:
   1398		return -EINVAL;
   1399	}
   1400
   1401	if ((cmd->fs.flow_type & FLOW_EXT)) {
   1402		if (cmd->fs.m_ext.vlan_etype ||
   1403		    !((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
   1404		      0 ||
   1405		      (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
   1406		      cpu_to_be16(VLAN_VID_MASK)))
   1407			return -EINVAL;
   1408
   1409		if (cmd->fs.m_ext.vlan_tci) {
   1410			if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID)
   1411				return -EINVAL;
   1412
   1413		}
   1414	}
   1415
   1416	return 0;
   1417}
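        /*
         * mlx4_en_validate_flow() only accepts exact-match (all ones) or
         * wildcard (all zeros) masks for the IPv4 address and L4 port fields,
         * which is what all_zeros_or_all_ones() checks; likewise the
         * destination MAC mask must be all-ones and the VLAN ID mask either 0
         * or the full VLAN_VID_MASK.
         */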
   1418
   1419static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
   1420					struct list_head *rule_list_h,
   1421					struct mlx4_spec_list *spec_l2,
   1422					unsigned char *mac)
   1423{
   1424	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
   1425
   1426	spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
   1427	memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
   1428	memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);
   1429
   1430	if ((cmd->fs.flow_type & FLOW_EXT) &&
   1431	    (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
   1432		spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
   1433		spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK);
   1434	}
   1435
   1436	list_add_tail(&spec_l2->list, rule_list_h);
   1437
   1438	return 0;
   1439}
   1440
   1441static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
   1442						struct ethtool_rxnfc *cmd,
   1443						struct list_head *rule_list_h,
   1444						struct mlx4_spec_list *spec_l2,
   1445						__be32 ipv4_dst)
   1446{
   1447#ifdef CONFIG_INET
   1448	unsigned char mac[ETH_ALEN];
   1449
   1450	if (!ipv4_is_multicast(ipv4_dst)) {
   1451		if (cmd->fs.flow_type & FLOW_MAC_EXT)
   1452			memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
   1453		else
   1454			memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
   1455	} else {
   1456		ip_eth_mc_map(ipv4_dst, mac);
   1457	}
   1458
   1459	return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
   1460#else
   1461	return -EINVAL;
   1462#endif
   1463}
   1464
   1465static int add_ip_rule(struct mlx4_en_priv *priv,
   1466		       struct ethtool_rxnfc *cmd,
   1467		       struct list_head *list_h)
   1468{
   1469	int err;
   1470	struct mlx4_spec_list *spec_l2 = NULL;
   1471	struct mlx4_spec_list *spec_l3 = NULL;
   1472	struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;
   1473
   1474	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
   1475	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
   1476	if (!spec_l2 || !spec_l3) {
   1477		err = -ENOMEM;
   1478		goto free_spec;
   1479	}
   1480
   1481	err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
   1482						   cmd->fs.h_u.
   1483						   usr_ip4_spec.ip4dst);
   1484	if (err)
   1485		goto free_spec;
   1486	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
   1487	spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
   1488	if (l3_mask->ip4src)
   1489		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
   1490	spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
   1491	if (l3_mask->ip4dst)
   1492		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
   1493	list_add_tail(&spec_l3->list, list_h);
   1494
   1495	return 0;
   1496
   1497free_spec:
   1498	kfree(spec_l2);
   1499	kfree(spec_l3);
   1500	return err;
   1501}
   1502
   1503static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
   1504			     struct ethtool_rxnfc *cmd,
   1505			     struct list_head *list_h, int proto)
   1506{
   1507	int err;
   1508	struct mlx4_spec_list *spec_l2 = NULL;
   1509	struct mlx4_spec_list *spec_l3 = NULL;
   1510	struct mlx4_spec_list *spec_l4 = NULL;
   1511	struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
   1512
   1513	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
   1514	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
   1515	spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
   1516	if (!spec_l2 || !spec_l3 || !spec_l4) {
   1517		err = -ENOMEM;
   1518		goto free_spec;
   1519	}
   1520
   1521	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
   1522
   1523	if (proto == TCP_V4_FLOW) {
   1524		err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
   1525							   spec_l2,
   1526							   cmd->fs.h_u.
   1527							   tcp_ip4_spec.ip4dst);
   1528		if (err)
   1529			goto free_spec;
   1530		spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
   1531		spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
   1532		spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
   1533		spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
   1534		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
   1535	} else {
   1536		err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
   1537							   spec_l2,
   1538							   cmd->fs.h_u.
   1539							   udp_ip4_spec.ip4dst);
   1540		if (err)
   1541			goto free_spec;
   1542		spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
   1543		spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
   1544		spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
   1545		spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
   1546		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
   1547	}
   1548
   1549	if (l4_mask->ip4src)
   1550		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
   1551	if (l4_mask->ip4dst)
   1552		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
   1553
   1554	if (l4_mask->psrc)
   1555		spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK;
   1556	if (l4_mask->pdst)
   1557		spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK;
   1558
   1559	list_add_tail(&spec_l3->list, list_h);
   1560	list_add_tail(&spec_l4->list, list_h);
   1561
   1562	return 0;
   1563
   1564free_spec:
   1565	kfree(spec_l2);
   1566	kfree(spec_l3);
   1567	kfree(spec_l4);
   1568	return err;
   1569}
   1570
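/* Validate the ethtool flow spec and expand it into the list of
 * mlx4_spec_list entries (L2/L3/L4) consumed by mlx4_flow_attach().
 * Supported flow types: ETHER_FLOW, IP_USER_FLOW, TCP_V4_FLOW and
 * UDP_V4_FLOW.
 */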
   1571static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
   1572					     struct ethtool_rxnfc *cmd,
   1573					     struct list_head *rule_list_h)
   1574{
   1575	int err;
   1576	struct ethhdr *eth_spec;
   1577	struct mlx4_spec_list *spec_l2;
   1578	struct mlx4_en_priv *priv = netdev_priv(dev);
   1579
   1580	err = mlx4_en_validate_flow(dev, cmd);
   1581	if (err)
   1582		return err;
   1583
   1584	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
   1585	case ETHER_FLOW:
   1586		spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
   1587		if (!spec_l2)
   1588			return -ENOMEM;
   1589
   1590		eth_spec = &cmd->fs.h_u.ether_spec;
   1591		mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2,
   1592					     &eth_spec->h_dest[0]);
   1593		spec_l2->eth.ether_type = eth_spec->h_proto;
   1594		if (eth_spec->h_proto)
   1595			spec_l2->eth.ether_type_enable = 1;
   1596		break;
   1597	case IP_USER_FLOW:
   1598		err = add_ip_rule(priv, cmd, rule_list_h);
   1599		break;
   1600	case TCP_V4_FLOW:
   1601		err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
   1602		break;
   1603	case UDP_V4_FLOW:
   1604		err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
   1605		break;
   1606	}
   1607
   1608	return err;
   1609}
   1610
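/* Insert or replace the classification rule at cmd->fs.location: the ring
 * cookie is resolved to a destination QP (drop QP, direct QP attach, or
 * RSS ring QP), any rule already installed at that location is detached,
 * and the new rule is attached with its registration id saved so it can
 * be detached later.
 */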
   1611static int mlx4_en_flow_replace(struct net_device *dev,
   1612				struct ethtool_rxnfc *cmd)
   1613{
   1614	int err;
   1615	struct mlx4_en_priv *priv = netdev_priv(dev);
   1616	struct ethtool_flow_id *loc_rule;
   1617	struct mlx4_spec_list *spec, *tmp_spec;
   1618	u32 qpn;
   1619	u64 reg_id;
   1620
   1621	struct mlx4_net_trans_rule rule = {
   1622		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
   1623		.exclusive = 0,
   1624		.allow_loopback = 1,
   1625		.promisc_mode = MLX4_FS_REGULAR,
   1626	};
   1627
   1628	rule.port = priv->port;
   1629	rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
   1630	INIT_LIST_HEAD(&rule.list);
   1631
    1632	/* Resolve destination QP: drop QP, direct QP attach, or RSS ring */
   1633	if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
   1634		qpn = priv->drop_qp.qpn;
   1635	else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
   1636		qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
   1637	} else {
   1638		if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
   1639			en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
   1640				cmd->fs.ring_cookie);
   1641			return -EINVAL;
   1642		}
   1643		qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
   1644		if (!qpn) {
   1645			en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
   1646				cmd->fs.ring_cookie);
   1647			return -EINVAL;
   1648		}
   1649	}
   1650	rule.qpn = qpn;
   1651	err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
   1652	if (err)
   1653		goto out_free_list;
   1654
   1655	loc_rule = &priv->ethtool_rules[cmd->fs.location];
   1656	if (loc_rule->id) {
   1657		err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
   1658		if (err) {
    1659			en_err(priv, "Failed to detach network rule at location %d. registration id = 0x%llx\n",
   1660			       cmd->fs.location, loc_rule->id);
   1661			goto out_free_list;
   1662		}
   1663		loc_rule->id = 0;
   1664		memset(&loc_rule->flow_spec, 0,
   1665		       sizeof(struct ethtool_rx_flow_spec));
   1666		list_del(&loc_rule->list);
   1667	}
   1668	err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
   1669	if (err) {
    1670		en_err(priv, "Failed to attach network rule at location %d\n",
   1671		       cmd->fs.location);
   1672		goto out_free_list;
   1673	}
   1674	loc_rule->id = reg_id;
   1675	memcpy(&loc_rule->flow_spec, &cmd->fs,
   1676	       sizeof(struct ethtool_rx_flow_spec));
   1677	list_add_tail(&loc_rule->list, &priv->ethtool_list);
   1678
   1679out_free_list:
   1680	list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
   1681		list_del(&spec->list);
   1682		kfree(spec);
   1683	}
   1684	return err;
   1685}
   1686
   1687static int mlx4_en_flow_detach(struct net_device *dev,
   1688			       struct ethtool_rxnfc *cmd)
   1689{
   1690	int err = 0;
   1691	struct ethtool_flow_id *rule;
   1692	struct mlx4_en_priv *priv = netdev_priv(dev);
   1693
   1694	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
   1695		return -EINVAL;
   1696
   1697	rule = &priv->ethtool_rules[cmd->fs.location];
   1698	if (!rule->id) {
    1699		err = -ENOENT;
   1700		goto out;
   1701	}
   1702
   1703	err = mlx4_flow_detach(priv->mdev->dev, rule->id);
   1704	if (err) {
    1705		en_err(priv, "Failed to detach network rule at location %d. registration id = 0x%llx\n",
   1706		       cmd->fs.location, rule->id);
   1707		goto out;
   1708	}
   1709	rule->id = 0;
   1710	memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
   1711	list_del(&rule->list);
   1712out:
   1713	return err;
    1715}
   1716
   1717static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
   1718			    int loc)
   1719{
   1720	int err = 0;
   1721	struct ethtool_flow_id *rule;
   1722	struct mlx4_en_priv *priv = netdev_priv(dev);
   1723
   1724	if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
   1725		return -EINVAL;
   1726
   1727	rule = &priv->ethtool_rules[loc];
   1728	if (rule->id)
   1729		memcpy(&cmd->fs, &rule->flow_spec,
   1730		       sizeof(struct ethtool_rx_flow_spec));
   1731	else
   1732		err = -ENOENT;
   1733
   1734	return err;
   1735}
   1736
   1737static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
   1738{
    1739	int i, res = 0;
    1740
    1741	for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
    1742		if (priv->ethtool_rules[i].id)
    1743			res++;
    1744	}
    1745
    1746	return res;
    1747}
   1748
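/* ethtool -n/-u handler. Classification rule queries require device
 * managed flow steering and the port to be up; ETHTOOL_GRXCLSRLALL walks
 * every rule slot and reports the locations of the active ones.
 */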
   1749static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
   1750			     u32 *rule_locs)
   1751{
   1752	struct mlx4_en_priv *priv = netdev_priv(dev);
   1753	struct mlx4_en_dev *mdev = priv->mdev;
   1754	int err = 0;
   1755	int i = 0, priority = 0;
   1756
   1757	if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
   1758	     cmd->cmd == ETHTOOL_GRXCLSRULE ||
   1759	     cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
   1760	    (mdev->dev->caps.steering_mode !=
   1761	     MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up))
   1762		return -EINVAL;
   1763
   1764	switch (cmd->cmd) {
   1765	case ETHTOOL_GRXRINGS:
   1766		cmd->data = priv->rx_ring_num;
   1767		break;
   1768	case ETHTOOL_GRXCLSRLCNT:
   1769		cmd->rule_cnt = mlx4_en_get_num_flows(priv);
   1770		break;
   1771	case ETHTOOL_GRXCLSRULE:
   1772		err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
   1773		break;
   1774	case ETHTOOL_GRXCLSRLALL:
   1775		cmd->data = MAX_NUM_OF_FS_RULES;
   1776		while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
   1777			err = mlx4_en_get_flow(dev, cmd, i);
   1778			if (!err)
   1779				rule_locs[priority++] = i;
   1780			i++;
   1781		}
   1782		err = 0;
   1783		break;
   1784	default:
   1785		err = -EOPNOTSUPP;
   1786		break;
   1787	}
   1788
   1789	return err;
   1790}
   1791
   1792static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
   1793{
   1794	int err = 0;
   1795	struct mlx4_en_priv *priv = netdev_priv(dev);
   1796	struct mlx4_en_dev *mdev = priv->mdev;
   1797
   1798	if (mdev->dev->caps.steering_mode !=
   1799	    MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)
   1800		return -EINVAL;
   1801
   1802	switch (cmd->cmd) {
   1803	case ETHTOOL_SRXCLSRLINS:
   1804		err = mlx4_en_flow_replace(dev, cmd);
   1805		break;
   1806	case ETHTOOL_SRXCLSRLDEL:
   1807		err = mlx4_en_flow_detach(dev, cmd);
   1808		break;
   1809	default:
    1810		en_warn(priv, "Unsupported ethtool command (%d)\n", cmd->cmd);
   1811		return -EINVAL;
   1812	}
   1813
   1814	return err;
   1815}
   1816
   1817static int mlx4_en_get_max_num_rx_rings(struct net_device *dev)
   1818{
   1819	return min_t(int, num_online_cpus(), MAX_RX_RINGS);
   1820}
   1821
   1822static void mlx4_en_get_channels(struct net_device *dev,
   1823				 struct ethtool_channels *channel)
   1824{
   1825	struct mlx4_en_priv *priv = netdev_priv(dev);
   1826
   1827	channel->max_rx = mlx4_en_get_max_num_rx_rings(dev);
   1828	channel->max_tx = priv->mdev->profile.max_num_tx_rings_p_up;
   1829
   1830	channel->rx_count = priv->rx_ring_num;
   1831	channel->tx_count = priv->tx_ring_num[TX] /
   1832			    priv->prof->num_up;
   1833}
   1834
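/* ethtool -L handler. The new ring set is allocated into a temporary priv
 * before the port is stopped, so a failed allocation leaves the current
 * configuration untouched; on success the resources are swapped in, the
 * port is restarted and interrupt moderation is reapplied.
 */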
   1835static int mlx4_en_set_channels(struct net_device *dev,
   1836				struct ethtool_channels *channel)
   1837{
   1838	struct mlx4_en_priv *priv = netdev_priv(dev);
   1839	struct mlx4_en_dev *mdev = priv->mdev;
   1840	struct mlx4_en_port_profile new_prof;
   1841	struct mlx4_en_priv *tmp;
   1842	int total_tx_count;
   1843	int port_up = 0;
   1844	int xdp_count;
   1845	int err = 0;
   1846	u8 up;
   1847
   1848	if (!channel->tx_count || !channel->rx_count)
   1849		return -EINVAL;
   1850
   1851	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
   1852	if (!tmp)
   1853		return -ENOMEM;
   1854
   1855	mutex_lock(&mdev->state_lock);
   1856	xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
   1857	total_tx_count = channel->tx_count * priv->prof->num_up + xdp_count;
   1858	if (total_tx_count > MAX_TX_RINGS) {
   1859		err = -EINVAL;
   1860		en_err(priv,
   1861		       "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
   1862		       total_tx_count, MAX_TX_RINGS);
   1863		goto out;
   1864	}
   1865
   1866	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
   1867	new_prof.num_tx_rings_p_up = channel->tx_count;
   1868	new_prof.tx_ring_num[TX] = channel->tx_count * priv->prof->num_up;
   1869	new_prof.tx_ring_num[TX_XDP] = xdp_count;
   1870	new_prof.rx_ring_num = channel->rx_count;
   1871
   1872	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
   1873	if (err)
   1874		goto out;
   1875
   1876	if (priv->port_up) {
   1877		port_up = 1;
   1878		mlx4_en_stop_port(dev, 1);
   1879	}
   1880
   1881	mlx4_en_safe_replace_resources(priv, tmp);
   1882
   1883	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
   1884
   1885	up = (priv->prof->num_up == MLX4_EN_NUM_UP_LOW) ?
   1886				    0 : priv->prof->num_up;
   1887	mlx4_en_setup_tc(dev, up);
   1888
   1889	en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]);
   1890	en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
   1891
   1892	if (port_up) {
   1893		err = mlx4_en_start_port(dev);
   1894		if (err)
   1895			en_err(priv, "Failed starting port\n");
   1896	}
   1897
   1898	err = mlx4_en_moderation_update(priv);
   1899out:
   1900	mutex_unlock(&mdev->state_lock);
   1901	kfree(tmp);
   1902	return err;
   1903}
   1904
   1905static int mlx4_en_get_ts_info(struct net_device *dev,
   1906			       struct ethtool_ts_info *info)
   1907{
   1908	struct mlx4_en_priv *priv = netdev_priv(dev);
   1909	struct mlx4_en_dev *mdev = priv->mdev;
   1910	int ret;
   1911
   1912	ret = ethtool_op_get_ts_info(dev, info);
   1913	if (ret)
   1914		return ret;
   1915
   1916	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
   1917		info->so_timestamping |=
   1918			SOF_TIMESTAMPING_TX_HARDWARE |
   1919			SOF_TIMESTAMPING_RX_HARDWARE |
   1920			SOF_TIMESTAMPING_RAW_HARDWARE;
   1921
   1922		info->tx_types =
   1923			(1 << HWTSTAMP_TX_OFF) |
   1924			(1 << HWTSTAMP_TX_ON);
   1925
   1926		info->rx_filters =
   1927			(1 << HWTSTAMP_FILTER_NONE) |
   1928			(1 << HWTSTAMP_FILTER_ALL);
   1929
   1930		if (mdev->ptp_clock)
   1931			info->phc_index = ptp_clock_index(mdev->ptp_clock);
   1932	}
   1933
   1934	return ret;
   1935}
   1936
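/* Toggle the BlueFlame and PHV private flags. BlueFlame may only be
 * enabled when every TX ring has a BlueFlame register allocated; the PHV
 * setting is programmed into the device via set_phv_bit().
 */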
   1937static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
   1938{
   1939	struct mlx4_en_priv *priv = netdev_priv(dev);
   1940	struct mlx4_en_dev *mdev = priv->mdev;
   1941	bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
   1942	bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
   1943	bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV);
   1944	bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV);
   1945	int i;
   1946	int ret = 0;
   1947
   1948	if (bf_enabled_new != bf_enabled_old) {
   1949		int t;
   1950
   1951		if (bf_enabled_new) {
   1952			bool bf_supported = true;
   1953
   1954			for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
   1955				for (i = 0; i < priv->tx_ring_num[t]; i++)
   1956					bf_supported &=
   1957						priv->tx_ring[t][i]->bf_alloced;
   1958
   1959			if (!bf_supported) {
   1960				en_err(priv, "BlueFlame is not supported\n");
   1961				return -EINVAL;
   1962			}
   1963
   1964			priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
   1965		} else {
   1966			priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
   1967		}
   1968
   1969		for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
   1970			for (i = 0; i < priv->tx_ring_num[t]; i++)
   1971				priv->tx_ring[t][i]->bf_enabled =
   1972					bf_enabled_new;
   1973
   1974		en_info(priv, "BlueFlame %s\n",
    1975			bf_enabled_new ? "Enabled" : "Disabled");
   1976	}
   1977
   1978	if (phv_enabled_new != phv_enabled_old) {
   1979		ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
   1980		if (ret)
   1981			return ret;
   1982		else if (phv_enabled_new)
   1983			priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
   1984		else
   1985			priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV;
   1986		en_info(priv, "PHV bit %s\n",
    1987			phv_enabled_new ? "Enabled" : "Disabled");
   1988	}
   1989	return 0;
   1990}
   1991
   1992static u32 mlx4_en_get_priv_flags(struct net_device *dev)
   1993{
   1994	struct mlx4_en_priv *priv = netdev_priv(dev);
   1995
   1996	return priv->pflags;
   1997}
   1998
   1999static int mlx4_en_get_tunable(struct net_device *dev,
   2000			       const struct ethtool_tunable *tuna,
   2001			       void *data)
   2002{
   2003	const struct mlx4_en_priv *priv = netdev_priv(dev);
   2004	int ret = 0;
   2005
   2006	switch (tuna->id) {
   2007	case ETHTOOL_TX_COPYBREAK:
   2008		*(u32 *)data = priv->prof->inline_thold;
   2009		break;
   2010	default:
   2011		ret = -EINVAL;
   2012		break;
   2013	}
   2014
   2015	return ret;
   2016}
   2017
   2018static int mlx4_en_set_tunable(struct net_device *dev,
   2019			       const struct ethtool_tunable *tuna,
   2020			       const void *data)
   2021{
   2022	struct mlx4_en_priv *priv = netdev_priv(dev);
   2023	int val, ret = 0;
   2024
   2025	switch (tuna->id) {
   2026	case ETHTOOL_TX_COPYBREAK:
   2027		val = *(u32 *)data;
   2028		if (val < MIN_PKT_LEN || val > MAX_INLINE)
   2029			ret = -EINVAL;
   2030		else
   2031			priv->prof->inline_thold = val;
   2032		break;
   2033	default:
   2034		ret = -EINVAL;
   2035		break;
   2036	}
   2037
   2038	return ret;
   2039}
   2040
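/* Identify the plugged cable/module: the first two EEPROM bytes (module
 * identifier and revision) select which SFF specification and EEPROM
 * length to report to ethtool.
 */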
   2041static int mlx4_en_get_module_info(struct net_device *dev,
   2042				   struct ethtool_modinfo *modinfo)
   2043{
   2044	struct mlx4_en_priv *priv = netdev_priv(dev);
   2045	struct mlx4_en_dev *mdev = priv->mdev;
   2046	int ret;
   2047	u8 data[4];
   2048
   2049	/* Read first 2 bytes to get Module & REV ID */
   2050	ret = mlx4_get_module_info(mdev->dev, priv->port,
   2051				   0/*offset*/, 2/*size*/, data);
   2052	if (ret < 2)
   2053		return -EIO;
   2054
   2055	switch (data[0] /* identifier */) {
   2056	case MLX4_MODULE_ID_QSFP:
   2057		modinfo->type = ETH_MODULE_SFF_8436;
   2058		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
   2059		break;
   2060	case MLX4_MODULE_ID_QSFP_PLUS:
   2061		if (data[1] >= 0x3) { /* revision id */
   2062			modinfo->type = ETH_MODULE_SFF_8636;
   2063			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
   2064		} else {
   2065			modinfo->type = ETH_MODULE_SFF_8436;
   2066			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
   2067		}
   2068		break;
   2069	case MLX4_MODULE_ID_QSFP28:
   2070		modinfo->type = ETH_MODULE_SFF_8636;
   2071		modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
   2072		break;
   2073	case MLX4_MODULE_ID_SFP:
   2074		modinfo->type = ETH_MODULE_SFF_8472;
   2075		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
   2076		break;
   2077	default:
   2078		return -EINVAL;
   2079	}
   2080
   2081	return 0;
   2082}
   2083
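/* Dump the module EEPROM for ethtool -m. mlx4_get_module_info() may
 * return fewer bytes than requested, so the read loops until the
 * requested length is consumed; a return of 0 means the device reported
 * no more readable data.
 */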
   2084static int mlx4_en_get_module_eeprom(struct net_device *dev,
   2085				     struct ethtool_eeprom *ee,
   2086				     u8 *data)
   2087{
   2088	struct mlx4_en_priv *priv = netdev_priv(dev);
   2089	struct mlx4_en_dev *mdev = priv->mdev;
   2090	int offset = ee->offset;
   2091	int i = 0, ret;
   2092
   2093	if (ee->len == 0)
   2094		return -EINVAL;
   2095
   2096	memset(data, 0, ee->len);
   2097
   2098	while (i < ee->len) {
   2099		en_dbg(DRV, priv,
   2100		       "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
   2101		       i, offset, ee->len - i);
   2102
   2103		ret = mlx4_get_module_info(mdev->dev, priv->port,
   2104					   offset, ee->len - i, data + i);
   2105
   2106		if (!ret) /* Done reading */
   2107			return 0;
   2108
   2109		if (ret < 0) {
   2110			en_err(priv,
   2111			       "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
   2112			       i, offset, ee->len - i, ret);
   2113			return ret;
   2114		}
   2115
   2116		i += ret;
   2117		offset += ret;
   2118	}
   2119	return 0;
   2120}
   2121
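/* ethtool -p handler: drive the port beacon LED for physical port
 * identification. Requires the PORT_BEACON capability; ACTIVE programs
 * the maximum beacon duration and INACTIVE turns it off.
 */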
   2122static int mlx4_en_set_phys_id(struct net_device *dev,
   2123			       enum ethtool_phys_id_state state)
   2124{
   2125	int err;
   2126	u16 beacon_duration;
   2127	struct mlx4_en_priv *priv = netdev_priv(dev);
   2128	struct mlx4_en_dev *mdev = priv->mdev;
   2129
   2130	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_BEACON))
   2131		return -EOPNOTSUPP;
   2132
   2133	switch (state) {
   2134	case ETHTOOL_ID_ACTIVE:
   2135		beacon_duration = PORT_BEACON_MAX_LIMIT;
   2136		break;
   2137	case ETHTOOL_ID_INACTIVE:
   2138		beacon_duration = 0;
   2139		break;
   2140	default:
   2141		return -EOPNOTSUPP;
   2142	}
   2143
   2144	err = mlx4_SET_PORT_BEACON(mdev->dev, priv->port, beacon_duration);
   2145	return err;
   2146}
   2147
   2148const struct ethtool_ops mlx4_en_ethtool_ops = {
   2149	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
   2150				     ETHTOOL_COALESCE_MAX_FRAMES |
   2151				     ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
   2152				     ETHTOOL_COALESCE_PKT_RATE_RX_USECS,
   2153	.get_drvinfo = mlx4_en_get_drvinfo,
   2154	.get_link_ksettings = mlx4_en_get_link_ksettings,
   2155	.set_link_ksettings = mlx4_en_set_link_ksettings,
   2156	.get_link = ethtool_op_get_link,
   2157	.get_strings = mlx4_en_get_strings,
   2158	.get_sset_count = mlx4_en_get_sset_count,
   2159	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
   2160	.self_test = mlx4_en_self_test,
   2161	.set_phys_id = mlx4_en_set_phys_id,
   2162	.get_wol = mlx4_en_get_wol,
   2163	.set_wol = mlx4_en_set_wol,
   2164	.get_msglevel = mlx4_en_get_msglevel,
   2165	.set_msglevel = mlx4_en_set_msglevel,
   2166	.get_coalesce = mlx4_en_get_coalesce,
   2167	.set_coalesce = mlx4_en_set_coalesce,
   2168	.get_pause_stats = mlx4_en_get_pause_stats,
   2169	.get_pauseparam = mlx4_en_get_pauseparam,
   2170	.set_pauseparam = mlx4_en_set_pauseparam,
   2171	.get_ringparam = mlx4_en_get_ringparam,
   2172	.set_ringparam = mlx4_en_set_ringparam,
   2173	.get_rxnfc = mlx4_en_get_rxnfc,
   2174	.set_rxnfc = mlx4_en_set_rxnfc,
   2175	.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
   2176	.get_rxfh_key_size = mlx4_en_get_rxfh_key_size,
   2177	.get_rxfh = mlx4_en_get_rxfh,
   2178	.set_rxfh = mlx4_en_set_rxfh,
   2179	.get_channels = mlx4_en_get_channels,
   2180	.set_channels = mlx4_en_set_channels,
   2181	.get_ts_info = mlx4_en_get_ts_info,
   2182	.set_priv_flags = mlx4_en_set_priv_flags,
   2183	.get_priv_flags = mlx4_en_get_priv_flags,
    2184	.get_tunable = mlx4_en_get_tunable,
    2185	.set_tunable = mlx4_en_set_tunable,
    2186	.get_module_info = mlx4_en_get_module_info,
    2187	.get_module_eeprom = mlx4_en_get_module_eeprom,
   2188};