cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

gianfar_ethtool.c (39484B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/net/ethernet/freescale/gianfar_ethtool.c
 *
 *  Gianfar Ethernet Driver
 *  Ethtool support for Gianfar Enet
 *  Based on e1000 ethtool support
 *
 *  Author: Andy Fleming
 *  Maintainer: Kumar Gala
 *  Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 *  Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <asm/types.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/sort.h>
#include <linux/if_vlan.h>
#include <linux/of_platform.h>
#include <linux/fsl/ptp_qoriq.h>

#include "gianfar.h"

#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff

static const char stat_gstrings[][ETH_GSTRING_LEN] = {
	/* extra stats */
	"rx-allocation-errors",
	"rx-large-frame-errors",
	"rx-short-frame-errors",
	"rx-non-octet-errors",
	"rx-crc-errors",
	"rx-overrun-errors",
	"rx-busy-errors",
	"rx-babbling-errors",
	"rx-truncated-frames",
	"ethernet-bus-error",
	"tx-babbling-errors",
	"tx-underrun-errors",
	"tx-timeout-errors",
	/* rmon stats */
	"tx-rx-64-frames",
	"tx-rx-65-127-frames",
	"tx-rx-128-255-frames",
	"tx-rx-256-511-frames",
	"tx-rx-512-1023-frames",
	"tx-rx-1024-1518-frames",
	"tx-rx-1519-1522-good-vlan",
	"rx-bytes",
	"rx-packets",
	"rx-fcs-errors",
	"receive-multicast-packet",
	"receive-broadcast-packet",
	"rx-control-frame-packets",
	"rx-pause-frame-packets",
	"rx-unknown-op-code",
	"rx-alignment-error",
	"rx-frame-length-error",
	"rx-code-error",
	"rx-carrier-sense-error",
	"rx-undersize-packets",
	"rx-oversize-packets",
	"rx-fragmented-frames",
	"rx-jabber-frames",
	"rx-dropped-frames",
	"tx-byte-counter",
	"tx-packets",
	"tx-multicast-packets",
	"tx-broadcast-packets",
	"tx-pause-control-frames",
	"tx-deferral-packets",
	"tx-excessive-deferral-packets",
	"tx-single-collision-packets",
	"tx-multiple-collision-packets",
	"tx-late-collision-packets",
	"tx-excessive-collision-packets",
	"tx-total-collision",
	"reserved",
	"tx-dropped-frames",
	"tx-jabber-frames",
	"tx-fcs-errors",
	"tx-control-frames",
	"tx-oversize-frames",
	"tx-undersize-frames",
	"tx-fragmented-frames",
};

/* Fill in a buffer with the strings which correspond to the
 * stats */
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
		memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
	else
		memcpy(buf, stat_gstrings,
		       GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
}

/* Fill in an array of 64-bit statistics from various sources.
 * This array will be appended to the end of the ethtool_stats
 * structure, and returned to user space
 */
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
			    u64 *buf)
{
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	atomic64_t *extra = (atomic64_t *)&priv->extra_stats;

	for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
		buf[i] = atomic64_read(&extra[i]);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;

		for (; i < GFAR_STATS_LEN; i++, rmon++)
			buf[i] = (u64) gfar_read(rmon);
	}
}

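/* Report the number of stats strings, which depends on RMON support */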
static int gfar_sset_count(struct net_device *dev, int sset)
{
	struct gfar_private *priv = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
			return GFAR_STATS_LEN;
		else
			return GFAR_EXTRA_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* Fills in the drvinfo structure with some basic info */
static void gfar_gdrvinfo(struct net_device *dev,
			  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
}

/* Return the length of the register structure */
static int gfar_reglen(struct net_device *dev)
{
	return sizeof (struct gfar);
}

/* Return a dump of the GFAR register space */
static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *regbuf)
{
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
	u32 *buf = (u32 *) regbuf;

	for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
		buf[i] = gfar_read(&theregs[i]);
}

/* Convert microseconds to ethernet clock ticks, which changes
 * depending on what speed the controller is running at */
static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
				     unsigned int usecs)
{
	struct net_device *ndev = priv->ndev;
	struct phy_device *phydev = ndev->phydev;
	unsigned int count;

	/* The timer is different, depending on the interface speed */
	switch (phydev->speed) {
	case SPEED_1000:
		count = GFAR_GBIT_TIME;
		break;
	case SPEED_100:
		count = GFAR_100_TIME;
		break;
	case SPEED_10:
	default:
		count = GFAR_10_TIME;
		break;
	}

	/* Make sure we return a number greater than 0
	 * if usecs > 0 */
	return DIV_ROUND_UP(usecs * 1000, count);
}

/* Convert ethernet clock ticks to microseconds */
static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
				     unsigned int ticks)
{
	struct net_device *ndev = priv->ndev;
	struct phy_device *phydev = ndev->phydev;
	unsigned int count;

	/* The timer is different, depending on the interface speed */
	switch (phydev->speed) {
	case SPEED_1000:
		count = GFAR_GBIT_TIME;
		break;
	case SPEED_100:
		count = GFAR_100_TIME;
		break;
	case SPEED_10:
	default:
		count = GFAR_10_TIME;
		break;
	}

	/* Make sure we return a number greater than 0
	 * if ticks is > 0 */
	return (ticks * count) / 1000;
}

/* Get the coalescing parameters, and put them in the cvals
 * structure.  */
static int gfar_gcoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals,
			  struct kernel_ethtool_coalesce *kernel_coal,
			  struct netlink_ext_ack *extack)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar_priv_tx_q *tx_queue = NULL;
	unsigned long rxtime;
	unsigned long rxcount;
	unsigned long txtime;
	unsigned long txcount;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	rx_queue = priv->rx_queue[0];
	tx_queue = priv->tx_queue[0];

	rxtime  = get_ictt_value(rx_queue->rxic);
	rxcount = get_icft_value(rx_queue->rxic);
	txtime  = get_ictt_value(tx_queue->txic);
	txcount = get_icft_value(tx_queue->txic);
	cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
	cvals->rx_max_coalesced_frames = rxcount;

	cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
	cvals->tx_max_coalesced_frames = txcount;

	return 0;
}

/* Change the coalescing values.
 * Both cvals->*_usecs and cvals->*_frames have to be > 0
 * in order for coalescing to be active
 */
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals,
			  struct kernel_ethtool_coalesce *kernel_coal,
			  struct netlink_ext_ack *extack)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i, err = 0;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	/* Check the bounds of the values */
	if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		netdev_info(dev, "Coalescing is limited to %d microseconds\n",
			    GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		netdev_info(dev, "Coalescing is limited to %d frames\n",
			    GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	/* Check the bounds of the values */
	if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		netdev_info(dev, "Coalescing is limited to %d microseconds\n",
			    GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		netdev_info(dev, "Coalescing is limited to %d frames\n",
			    GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	/* Set up rx coalescing */
	if ((cvals->rx_coalesce_usecs == 0) ||
	    (cvals->rx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 1;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rxic = mk_ic_value(
			cvals->rx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
	}

	/* Set up tx coalescing */
	if ((cvals->tx_coalesce_usecs == 0) ||
	    (cvals->tx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 1;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->txic = mk_ic_value(
			cvals->tx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
	}

	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		err = startup_gfar(dev);
	} else {
		gfar_mac_reset(priv);
	}

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}

/* Fills in rvals with the current ring parameters.  Currently,
 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
 * jumbo are ignored by the driver */
static void gfar_gringparam(struct net_device *dev,
			    struct ethtool_ringparam *rvals,
			    struct kernel_ethtool_ringparam *kernel_rvals,
			    struct netlink_ext_ack *extack)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	tx_queue = priv->tx_queue[0];
	rx_queue = priv->rx_queue[0];

	rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;

	/* Values changeable by the user.  The valid values are
	 * in the range 1 to the "*_max_pending" counterpart above.
	 */
	rvals->rx_pending = rx_queue->rx_ring_size;
	rvals->rx_mini_pending = rx_queue->rx_ring_size;
	rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
	rvals->tx_pending = tx_queue->tx_ring_size;
}

/* Change the current ring parameters, stopping the controller if
 * necessary so that we don't mess things up while we're in motion.
 */
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals,
			   struct kernel_ethtool_ringparam *kernel_rvals,
			   struct netlink_ext_ack *extack)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0, i;

	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->rx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->tx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	/* Change the sizes */
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;

	/* Rebuild the rings with the new size */
	if (dev->flags & IFF_UP)
		err = startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}

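/* Report the current pause frame settings (autoneg, rx and tx) */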
static void gfar_gpauseparam(struct net_device *dev,
			     struct ethtool_pauseparam *epause)
{
	struct gfar_private *priv = netdev_priv(dev);

	epause->autoneg = !!priv->pause_aneg_en;
	epause->rx_pause = !!priv->rx_pause_en;
	epause->tx_pause = !!priv->tx_pause_en;
}

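/* Apply new pause settings via the PHY; if autoneg is off, program the
 * MAC flow control bits directly
 */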
static int gfar_spauseparam(struct net_device *dev,
			    struct ethtool_pauseparam *epause)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (!phydev)
		return -ENODEV;

	if (!phy_validate_pause(phydev, epause))
		return -EINVAL;

	priv->rx_pause_en = priv->tx_pause_en = 0;
	phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
	if (epause->rx_pause) {
		priv->rx_pause_en = 1;

		if (epause->tx_pause) {
			priv->tx_pause_en = 1;
		}
	} else if (epause->tx_pause) {
		priv->tx_pause_en = 1;
	}

	if (epause->autoneg)
		priv->pause_aneg_en = 1;
	else
		priv->pause_aneg_en = 0;

	if (!epause->autoneg) {
		u32 tempval = gfar_read(&regs->maccfg1);

		tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);

		priv->tx_actual_en = 0;
		if (priv->tx_pause_en) {
			priv->tx_actual_en = 1;
			tempval |= MACCFG1_TX_FLOW;
		}

		if (priv->rx_pause_en)
			tempval |= MACCFG1_RX_FLOW;
		gfar_write(&regs->maccfg1, tempval);
	}

	return 0;
}

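/* Apply changed VLAN/RX-checksum offload features, restarting the
 * controller if it is up
 */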
int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0;

	if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			 NETIF_F_RXCSUM)))
		return 0;

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	dev->features = features;

	if (dev->flags & IFF_UP) {
		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);
		err = startup_gfar(dev);
	} else {
		gfar_mac_reset(priv);
	}

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}

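/* msg_enable is the netif_msg_* verbosity bitmap used by the driver */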
static uint32_t gfar_get_msglevel(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
{
	struct gfar_private *priv = netdev_priv(dev);

	priv->msg_enable = data;
}

#ifdef CONFIG_PM
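/* Report the supported and currently active Wake-on-LAN modes */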
static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);

	wol->supported = 0;
	wol->wolopts = 0;

	if (priv->wol_supported & GFAR_WOL_MAGIC)
		wol->supported |= WAKE_MAGIC;

	if (priv->wol_supported & GFAR_WOL_FILER_UCAST)
		wol->supported |= WAKE_UCAST;

	if (priv->wol_opts & GFAR_WOL_MAGIC)
		wol->wolopts |= WAKE_MAGIC;

	if (priv->wol_opts & GFAR_WOL_FILER_UCAST)
		wol->wolopts |= WAKE_UCAST;
}

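/* Enable the requested Wake-on-LAN modes, if supported */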
static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);
	u16 wol_opts = 0;
	int err;

	if (!priv->wol_supported && wol->wolopts)
		return -EINVAL;

	if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST))
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		wol_opts |= GFAR_WOL_MAGIC;
	} else {
		if (wol->wolopts & WAKE_UCAST)
			wol_opts |= GFAR_WOL_FILER_UCAST;
	}

	wol_opts &= priv->wol_supported;
	priv->wol_opts = 0;

	err = device_set_wakeup_enable(priv->dev, wol_opts);
	if (err)
		return err;

	priv->wol_opts = wol_opts;

	return 0;
}
#endif

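/* Append a hash-table filer rule for each header field selected in
 * ethflow, working downwards from cur_filer_idx
 */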
static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
{
	u32 fcr = 0x0, fpr = FPR_FILER_MASK;

	if (ethflow & RXH_L2DA) {
		fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
		      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;

		fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
		      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_VLAN) {
		fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_IP_SRC) {
		fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & (RXH_IP_DST)) {
		fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L3_PROTO) {
		fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L4_B_0_1) {
		fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L4_B_2_3) {
		fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}
}

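/* Insert the hash rules for the given flow class behind its parse rule,
 * popping and re-writing the rules that follow it.
 * Returns 1 on success, 0 on failure.
 */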
static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
				       u64 class)
{
	unsigned int cmp_rqfpr;
	unsigned int *local_rqfpr;
	unsigned int *local_rqfcr;
	int i = 0x0, k = 0x0;
	int j = MAX_FILER_IDX, l = 0x0;
	int ret = 1;

	local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	if (!local_rqfpr || !local_rqfcr) {
		ret = 0;
		goto err;
	}

	switch (class) {
	case TCP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP;
		break;
	case UDP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP;
		break;
	case TCP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP;
		break;
	case UDP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP;
		break;
	default:
		netdev_err(priv->ndev,
			   "Right now this class is not supported\n");
		ret = 0;
		goto err;
	}

	for (i = 0; i < MAX_FILER_IDX + 1; i++) {
		local_rqfpr[j] = priv->ftp_rqfpr[i];
		local_rqfcr[j] = priv->ftp_rqfcr[i];
		j--;
		if ((priv->ftp_rqfcr[i] ==
		     (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
		    (priv->ftp_rqfpr[i] == cmp_rqfpr))
			break;
	}

	if (i == MAX_FILER_IDX + 1) {
		netdev_err(priv->ndev,
			   "No parse rule found, can't create hash rules\n");
		ret = 0;
		goto err;
	}

	/* A match marks the start of a cluster rule. If one was already
	 * programmed, we need to overwrite these rules.
	 */
	for (l = i+1; l < MAX_FILER_IDX; l++) {
		if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
			priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
					     RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
			priv->ftp_rqfpr[l] = FPR_FILER_MASK;
			gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
					 priv->ftp_rqfpr[l]);
			break;
		}

		if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
			(priv->ftp_rqfcr[l] & RQFCR_AND))
			continue;
		else {
			local_rqfpr[j] = priv->ftp_rqfpr[l];
			local_rqfcr[j] = priv->ftp_rqfcr[l];
			j--;
		}
	}

	priv->cur_filer_idx = l - 1;

	/* hash rules */
	ethflow_to_filer_rules(priv, ethflow);

	/* Write back the popped out rules again */
	for (k = j+1; k < MAX_FILER_IDX; k++) {
		priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
		priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
		gfar_write_filer(priv, priv->cur_filer_idx,
				 local_rqfcr[k], local_rqfpr[k]);
		if (!priv->cur_filer_idx)
			break;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

err:
	kfree(local_rqfcr);
	kfree(local_rqfpr);
	return ret;
}

static int gfar_set_hash_opts(struct gfar_private *priv,
			      struct ethtool_rxnfc *cmd)
{
	/* write the filer rules here */
	if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
		return -EINVAL;

	return 0;
}

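/* Verify that Receive Queue Filtering is enabled for the current mode */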
static int gfar_check_filer_hardware(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 i;

	/* Check if we are in FIFO mode */
	i = gfar_read(&regs->ecntrl);
	i &= ECNTRL_FIFM;
	if (i == ECNTRL_FIFM) {
		netdev_notice(priv->ndev, "Interface in FIFO mode\n");
		i = gfar_read(&regs->rctrl);
		i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
		if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
			netdev_info(priv->ndev,
				    "Receive Queue Filtering enabled\n");
		} else {
			netdev_warn(priv->ndev,
				    "Receive Queue Filtering disabled\n");
			return -EOPNOTSUPP;
		}
	}
	/* Or in standard mode */
	else {
		i = gfar_read(&regs->rctrl);
		i &= RCTRL_PRSDEP_MASK;
		if (i == RCTRL_PRSDEP_MASK) {
			netdev_info(priv->ndev,
				    "Receive Queue Filtering enabled\n");
		} else {
			netdev_warn(priv->ndev,
				    "Receive Queue Filtering disabled\n");
			return -EOPNOTSUPP;
		}
	}

	/* Set the arbitrary-bytes filer property to extract the first
	 * 4 Layer 4 bytes
	 */
	gfar_write(&regs->rbifx, 0xC0C1C2C3);
	return 0;
}

/* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
	tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	tab->fe[tab->index].prop = mask;
	tab->index++;
}

/* Sets parse bits (e.g. IP or TCP) */
static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
{
	gfar_set_mask(mask, tab);
	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
				   RQFCR_AND;
	tab->fe[tab->index].prop = value;
	tab->index++;
}

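/* Write a value/mask pair for a property of the given type (flag) */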
static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
				       struct filer_table *tab)
{
	gfar_set_mask(mask, tab);
	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
	tab->fe[tab->index].prop = value;
	tab->index++;
}

/* Set a tuple of value and mask of type flag
 * Example:
 * IP-Src = 10.0.0.0/255.0.0.0
 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
 *
 * Ethtool gives us value=0 and mask=~0 for a don't-care tuple,
 * and a mask of 0 when the whole field is don't-care.
 *
 * The don't-care check and the mask adjustment for mask=0 are done for VLAN
 * and MAC fields at an upper level (due to missing information on this level).
 * Those entries can be discarded if they are value=0 and mask=0.
 *
 * Furthermore, all masks are one-padded for better hardware efficiency.
 */
static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
			       struct filer_table *tab)
{
	switch (flag) {
		/* 3bit */
	case RQFCR_PID_PRI:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_PRI_MASK;
		break;
		/* 8bit */
	case RQFCR_PID_L4P:
	case RQFCR_PID_TOS:
		if (!~(mask | RQFCR_PID_L4P_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_L4P_MASK;
		break;
		/* 12bit */
	case RQFCR_PID_VID:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_VID_MASK;
		break;
		/* 16bit */
	case RQFCR_PID_DPT:
	case RQFCR_PID_SPT:
	case RQFCR_PID_ETY:
		if (!~(mask | RQFCR_PID_PORT_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_PORT_MASK;
		break;
		/* 24bit */
	case RQFCR_PID_DAH:
	case RQFCR_PID_DAL:
	case RQFCR_PID_SAH:
	case RQFCR_PID_SAL:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_MAC_MASK;
		break;
		/* for all real 32bit masks */
	default:
		if (!~mask)
			return;
		if (!mask)
			mask = ~0;
		break;
	}
	gfar_set_general_attribute(value, mask, flag, tab);
}

/* Translates value and mask for UDP, TCP or SCTP */
static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
			      struct ethtool_tcpip4_spec *mask,
			      struct filer_table *tab)
{
	gfar_set_attribute(be32_to_cpu(value->ip4src),
			   be32_to_cpu(mask->ip4src),
			   RQFCR_PID_SIA, tab);
	gfar_set_attribute(be32_to_cpu(value->ip4dst),
			   be32_to_cpu(mask->ip4dst),
			   RQFCR_PID_DIA, tab);
	gfar_set_attribute(be16_to_cpu(value->pdst),
			   be16_to_cpu(mask->pdst),
			   RQFCR_PID_DPT, tab);
	gfar_set_attribute(be16_to_cpu(value->psrc),
			   be16_to_cpu(mask->psrc),
			   RQFCR_PID_SPT, tab);
	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
}

/* Translates value and mask for RAW-IP4 */
static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
			     struct ethtool_usrip4_spec *mask,
			     struct filer_table *tab)
{
	gfar_set_attribute(be32_to_cpu(value->ip4src),
			   be32_to_cpu(mask->ip4src),
			   RQFCR_PID_SIA, tab);
	gfar_set_attribute(be32_to_cpu(value->ip4dst),
			   be32_to_cpu(mask->ip4dst),
			   RQFCR_PID_DIA, tab);
	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
	gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
	gfar_set_attribute(be32_to_cpu(value->l4_4_bytes),
			   be32_to_cpu(mask->l4_4_bytes),
			   RQFCR_PID_ARB, tab);

}

/* Translates value and mask for ETHER spec */
static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
			   struct filer_table *tab)
{
	u32 upper_temp_mask = 0;
	u32 lower_temp_mask = 0;

	/* Source address */
	if (!is_broadcast_ether_addr(mask->h_source)) {
		if (is_zero_ether_addr(mask->h_source)) {
			upper_temp_mask = 0xFFFFFFFF;
			lower_temp_mask = 0xFFFFFFFF;
		} else {
			upper_temp_mask = mask->h_source[0] << 16 |
					  mask->h_source[1] << 8  |
					  mask->h_source[2];
			lower_temp_mask = mask->h_source[3] << 16 |
					  mask->h_source[4] << 8  |
					  mask->h_source[5];
		}
		/* Upper 24bit */
		gfar_set_attribute(value->h_source[0] << 16 |
				   value->h_source[1] << 8  |
				   value->h_source[2],
				   upper_temp_mask, RQFCR_PID_SAH, tab);
		/* And the same for the lower part */
		gfar_set_attribute(value->h_source[3] << 16 |
				   value->h_source[4] << 8  |
				   value->h_source[5],
				   lower_temp_mask, RQFCR_PID_SAL, tab);
	}
	/* Destination address */
	if (!is_broadcast_ether_addr(mask->h_dest)) {
		/* Special case: destination is the limited broadcast address */
		if ((is_broadcast_ether_addr(value->h_dest) &&
		    is_zero_ether_addr(mask->h_dest))) {
			gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
		} else {
			if (is_zero_ether_addr(mask->h_dest)) {
				upper_temp_mask = 0xFFFFFFFF;
				lower_temp_mask = 0xFFFFFFFF;
			} else {
				upper_temp_mask = mask->h_dest[0] << 16 |
						  mask->h_dest[1] << 8  |
						  mask->h_dest[2];
				lower_temp_mask = mask->h_dest[3] << 16 |
						  mask->h_dest[4] << 8  |
						  mask->h_dest[5];
			}

			/* Upper 24bit */
			gfar_set_attribute(value->h_dest[0] << 16 |
					   value->h_dest[1] << 8  |
					   value->h_dest[2],
					   upper_temp_mask, RQFCR_PID_DAH, tab);
			/* And the same for the lower part */
			gfar_set_attribute(value->h_dest[3] << 16 |
					   value->h_dest[4] << 8  |
					   value->h_dest[5],
					   lower_temp_mask, RQFCR_PID_DAL, tab);
		}
	}

	gfar_set_attribute(be16_to_cpu(value->h_proto),
			   be16_to_cpu(mask->h_proto),
			   RQFCR_PID_ETY, tab);
}

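/* Helpers to extract the VID, CFI and priority fields (and their masks)
 * from a rule's VLAN TCI
 */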
static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK;
}

static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK;
}

static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK;
}

static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK;
}

static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule)
{
	return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >>
		VLAN_PRIO_SHIFT;
}

static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule)
{
	return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >>
		VLAN_PRIO_SHIFT;
}

/* Convert a rule to binary filter format of gianfar */
static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
				 struct filer_table *tab)
{
	u32 vlan = 0, vlan_mask = 0;
	u32 id = 0, id_mask = 0;
	u32 cfi = 0, cfi_mask = 0;
	u32 prio = 0, prio_mask = 0;
	u32 old_index = tab->index;

	/* Check if vlan is wanted */
	if ((rule->flow_type & FLOW_EXT) &&
	    (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) {
		if (!rule->m_ext.vlan_tci)
			rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF);

		vlan = RQFPR_VLN;
		vlan_mask = RQFPR_VLN;

		/* Separate the fields */
		id = vlan_tci_vid(rule);
		id_mask = vlan_tci_vidm(rule);
		cfi = vlan_tci_cfi(rule);
		cfi_mask = vlan_tci_cfim(rule);
		prio = vlan_tci_prio(rule);
		prio_mask = vlan_tci_priom(rule);

		if (cfi_mask) {
			if (cfi)
				vlan |= RQFPR_CFI;
			vlan_mask |= RQFPR_CFI;
		}
	}

	switch (rule->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
				    RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
				  &rule->m_u.tcp_ip4_spec, tab);
		break;
	case UDP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
				    RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
				  &rule->m_u.udp_ip4_spec, tab);
		break;
	case SCTP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
		gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
				  (struct ethtool_tcpip4_spec *)&rule->m_u,
				  tab);
		break;
	case IP_USER_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
				 (struct ethtool_usrip4_spec *) &rule->m_u,
				 tab);
		break;
	case ETHER_FLOW:
		if (vlan)
			gfar_set_parse_bits(vlan, vlan_mask, tab);
		gfar_set_ether((struct ethhdr *) &rule->h_u,
			       (struct ethhdr *) &rule->m_u, tab);
		break;
	default:
		return -1;
	}

	/* Set the vlan attributes in the end */
	if (vlan) {
		gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
		gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
	}

	/* If there has been nothing written till now, it must be a default */
	if (tab->index == old_index) {
		gfar_set_mask(0xFFFFFFFF, tab);
		tab->fe[tab->index].ctrl = 0x20;
		tab->fe[tab->index].prop = 0x0;
		tab->index++;
	}

	/* Remove last AND */
	tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);

	/* Specify which queue to use or to drop */
	if (rule->ring_cookie == RX_CLS_FLOW_DISC)
		tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
	else
		tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);

	/* Only big enough entries can be clustered */
	if (tab->index > (old_index + 2)) {
		tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
		tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
	}

	/* In rare cases the cache can be full while there is
	 * free space in hw
	 */
	if (tab->index > MAX_FILER_CACHE_IDX - 1)
		return -EBUSY;

	return 0;
}

/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
				  struct filer_table *tab)
{
	u32 i = 0;
	if (tab->index > MAX_FILER_IDX - 1)
		return -EBUSY;

	/* Fill regular entries */
	for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
		gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
	/* Fill the rest with fall-throughs */
	for (; i < MAX_FILER_IDX; i++)
		gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
	/* Last entry must be default accept
	 * because that's what people expect
	 */
	gfar_write_filer(priv, i, 0x20, 0x0);

	return 0;
}

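/* Warn about rule features the hardware cannot match on; never fails */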
static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
				 struct gfar_private *priv)
{

	if (flow->flow_type & FLOW_EXT) {
		if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
			netdev_warn(priv->ndev,
				    "User-specific data not supported!\n");
		if (~flow->m_ext.vlan_etype)
			netdev_warn(priv->ndev,
				    "VLAN-etype not supported!\n");
	}
	if (flow->flow_type == IP_USER_FLOW)
		if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
			netdev_warn(priv->ndev,
				    "IP-Version differing from IPv4 not supported!\n");

	return 0;
}

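/* Rebuild the complete filer table from the software rule list and
 * write it to hardware
 */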
static int gfar_process_filer_changes(struct gfar_private *priv)
{
	struct ethtool_flow_spec_container *j;
	struct filer_table *tab;
	s32 ret = 0;

	/* So index is set to zero, too! */
	tab = kzalloc(sizeof(*tab), GFP_KERNEL);
	if (tab == NULL)
		return -ENOMEM;

	/* Now convert the existing filer data from flow_spec into
	 * filer tables binary format
	 */
	list_for_each_entry(j, &priv->rx_list.list, list) {
		ret = gfar_convert_to_filer(&j->fs, tab);
		if (ret == -EBUSY) {
			netdev_err(priv->ndev,
				   "Rule not added: No free space!\n");
			goto end;
		}
		if (ret == -1) {
			netdev_err(priv->ndev,
				   "Rule not added: Unsupported Flow-type!\n");
			goto end;
		}
	}

	/* Write everything to hardware */
	ret = gfar_write_filer_table(priv, tab);
	if (ret == -EBUSY) {
		netdev_err(priv->ndev, "Rule not added: No free space!\n");
		goto end;
	}

end:
	kfree(tab);
	return ret;
}

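/* Invert all rule masks, converting between the ethtool convention and
 * the one used internally
 */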
static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	u32 i = 0;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xFF;

	flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF);
	flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}

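/* Insert a classification rule into the location-sorted rule list and
 * sync the hardware filer table
 */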
static int gfar_add_cls(struct gfar_private *priv,
			struct ethtool_rx_flow_spec *flow)
{
	struct ethtool_flow_spec_container *temp, *comp;
	int ret = 0;

	temp = kmalloc(sizeof(*temp), GFP_KERNEL);
	if (temp == NULL)
		return -ENOMEM;
	memcpy(&temp->fs, flow, sizeof(temp->fs));

	gfar_invert_masks(&temp->fs);
	ret = gfar_check_capability(&temp->fs, priv);
	if (ret)
		goto clean_mem;
	/* Link in the new element at the right @location */
	if (list_empty(&priv->rx_list.list)) {
		ret = gfar_check_filer_hardware(priv);
		if (ret != 0)
			goto clean_mem;
		list_add(&temp->list, &priv->rx_list.list);
		goto process;
	} else {
		list_for_each_entry(comp, &priv->rx_list.list, list) {
			if (comp->fs.location > flow->location) {
				list_add_tail(&temp->list, &comp->list);
				goto process;
			}
			if (comp->fs.location == flow->location) {
				netdev_err(priv->ndev,
					   "Rule not added: ID %d not free!\n",
					   flow->location);
				ret = -EBUSY;
				goto clean_mem;
			}
		}
		list_add_tail(&temp->list, &priv->rx_list.list);
	}

process:
	priv->rx_list.count++;
	ret = gfar_process_filer_changes(priv);
	if (ret)
		goto clean_list;
	return ret;

clean_list:
	priv->rx_list.count--;
	list_del(&temp->list);
clean_mem:
	kfree(temp);
	return ret;
}

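/* Delete the classification rule at location loc and sync the hardware */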
static int gfar_del_cls(struct gfar_private *priv, u32 loc)
{
	struct ethtool_flow_spec_container *comp;
	u32 ret = -EINVAL;

	if (list_empty(&priv->rx_list.list))
		return ret;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == loc) {
			list_del(&comp->list);
			kfree(comp);
			priv->rx_list.count--;
			gfar_process_filer_changes(priv);
			ret = 0;
			break;
		}
	}

	return ret;
}

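/* Return the rule stored at cmd->fs.location, with masks converted back
 * to the ethtool convention
 */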
static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
{
	struct ethtool_flow_spec_container *comp;
	u32 ret = -EINVAL;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == cmd->fs.location) {
			memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
			gfar_invert_masks(&cmd->fs);
			ret = 0;
			break;
		}
	}

	return ret;
}

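/* List the locations of all installed classification rules */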
static int gfar_get_cls_all(struct gfar_private *priv,
			    struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct ethtool_flow_spec_container *comp;
	u32 i = 0;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (i == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[i] = comp->fs.location;
		i++;
	}

	cmd->data = MAX_FILER_IDX;
	cmd->rule_cnt = i;

	return 0;
}

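/* ethtool set_rxnfc entry point: hash options and rule insert/delete */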
static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	if (test_bit(GFAR_RESETTING, &priv->state))
		return -EBUSY;

	mutex_lock(&priv->rx_queue_access);

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = gfar_set_hash_opts(priv, cmd);
		break;
	case ETHTOOL_SRXCLSRLINS:
		if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
		     cmd->fs.ring_cookie >= priv->num_rx_queues) ||
		    cmd->fs.location >= MAX_FILER_IDX) {
			ret = -EINVAL;
			break;
		}
		ret = gfar_add_cls(priv, &cmd->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = gfar_del_cls(priv, cmd->fs.location);
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&priv->rx_queue_access);

	return ret;
}

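/* ethtool get_rxnfc entry point: ring count, rule count and rule lookup */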
static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			u32 *rule_locs)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->num_rx_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = priv->rx_list.count;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = gfar_get_cls(priv, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = gfar_get_cls_all(priv, cmd, rule_locs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

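/* Report timestamping capabilities, looking up the eTSEC PTP clock via
 * the device tree when the hardware timer is present
 */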
static int gfar_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct platform_device *ptp_dev;
	struct device_node *ptp_node;
	struct ptp_qoriq *ptp = NULL;

	info->phc_index = -1;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE;
		return 0;
	}

	ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
	if (ptp_node) {
		ptp_dev = of_find_device_by_node(ptp_node);
		of_node_put(ptp_node);
		if (ptp_dev)
			ptp = platform_get_drvdata(ptp_dev);
	}

	if (ptp)
		info->phc_index = ptp->phc_index;

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}

const struct ethtool_ops gfar_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo = gfar_gdrvinfo,
	.get_regs_len = gfar_reglen,
	.get_regs = gfar_get_regs,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gfar_gcoalesce,
	.set_coalesce = gfar_scoalesce,
	.get_ringparam = gfar_gringparam,
	.set_ringparam = gfar_sringparam,
	.get_pauseparam = gfar_gpauseparam,
	.set_pauseparam = gfar_spauseparam,
	.get_strings = gfar_gstrings,
	.get_sset_count = gfar_sset_count,
	.get_ethtool_stats = gfar_fill_stats,
	.get_msglevel = gfar_get_msglevel,
	.set_msglevel = gfar_set_msglevel,
#ifdef CONFIG_PM
	.get_wol = gfar_get_wol,
	.set_wol = gfar_set_wol,
#endif
	.set_rxnfc = gfar_set_nfc,
	.get_rxnfc = gfar_get_nfc,
	.get_ts_info = gfar_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};