cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bcmsysport.c (77734B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Broadcom BCM7xxx System Port Ethernet MAC driver
      4 *
      5 * Copyright (C) 2014 Broadcom Corporation
      6 */
      7
      8#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
      9
     10#include <linux/init.h>
     11#include <linux/interrupt.h>
     12#include <linux/module.h>
     13#include <linux/kernel.h>
     14#include <linux/netdevice.h>
     15#include <linux/dsa/brcm.h>
     16#include <linux/etherdevice.h>
     17#include <linux/platform_device.h>
     18#include <linux/of.h>
     19#include <linux/of_net.h>
     20#include <linux/of_mdio.h>
     21#include <linux/phy.h>
     22#include <linux/phy_fixed.h>
     23#include <net/dsa.h>
     24#include <linux/clk.h>
     25#include <net/ip.h>
     26#include <net/ipv6.h>
     27
     28#include "bcmsysport.h"
     29
     30/* I/O accessors register helpers */
     31#define BCM_SYSPORT_IO_MACRO(name, offset) \
     32static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
     33{									\
     34	u32 reg = readl_relaxed(priv->base + offset + off);		\
     35	return reg;							\
     36}									\
     37static inline void name##_writel(struct bcm_sysport_priv *priv,		\
     38				  u32 val, u32 off)			\
     39{									\
     40	writel_relaxed(val, priv->base + offset + off);			\
     41}									\
     42
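        /* Each invocation below expands into a pair of accessors, e.g.
         * BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET) defines
         * umac_readl(priv, off) and umac_writel(priv, val, off), both
         * operating relative to that block's offset within priv->base.
         */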
     43BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
     44BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
     45BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
     46BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
     47BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
     48BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
     49BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
     50BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
     51BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
     52BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
     53
     54/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
      55 * same layout, except it has been moved up by 4 bytes, *sigh*
     56 */
     57static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
     58{
     59	if (priv->is_lite && off >= RDMA_STATUS)
     60		off += 4;
     61	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
     62}
     63
     64static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
     65{
     66	if (priv->is_lite && off >= RDMA_STATUS)
     67		off += 4;
     68	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
     69}
     70
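        /* On SYSTEMPORT Lite, TDMA_CONTROL bits at or above ACB_ALGO sit one
         * position higher than on the full SYSTEMPORT; this helper hides that
         * difference for callers.
         */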
     71static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
     72{
     73	if (!priv->is_lite) {
     74		return BIT(bit);
     75	} else {
     76		if (bit >= ACB_ALGO)
     77			return BIT(bit + 1);
     78		else
     79			return BIT(bit);
     80	}
     81}
     82
      83/* L2-interrupt masking/unmasking helpers; they automatically save the applied
      84 * mask in a software copy to avoid CPU_MASK_STATUS reads in hot paths.
      85 */
     86#define BCM_SYSPORT_INTR_L2(which)	\
     87static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
     88						u32 mask)		\
     89{									\
     90	priv->irq##which##_mask &= ~(mask);				\
     91	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
     92}									\
     93static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
     94						u32 mask)		\
     95{									\
     96	intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
     97	priv->irq##which##_mask |= (mask);				\
     98}									\
     99
    100BCM_SYSPORT_INTR_L2(0)
    101BCM_SYSPORT_INTR_L2(1)
    102
     103/* Register accesses to GISB/RBUS registers are expensive (a few hundred
     104 * nanoseconds), so keep the 64-bit check explicit here to save
     105 * one register write per packet on 32-bit platforms.
    106 */
    107static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
    108				     void __iomem *d,
    109				     dma_addr_t addr)
    110{
    111#ifdef CONFIG_PHYS_ADDR_T_64BIT
    112	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
    113		     d + DESC_ADDR_HI_STATUS_LEN);
    114#endif
    115	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
    116}
    117
    118/* Ethtool operations */
    119static void bcm_sysport_set_rx_csum(struct net_device *dev,
    120				    netdev_features_t wanted)
    121{
    122	struct bcm_sysport_priv *priv = netdev_priv(dev);
    123	u32 reg;
    124
    125	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
    126	reg = rxchk_readl(priv, RXCHK_CONTROL);
    127	/* Clear L2 header checks, which would prevent BPDUs
    128	 * from being received.
    129	 */
    130	reg &= ~RXCHK_L2_HDR_DIS;
    131	if (priv->rx_chk_en)
    132		reg |= RXCHK_EN;
    133	else
    134		reg &= ~RXCHK_EN;
    135
     136	/* If UniMAC forwards the CRC, we need to skip over it so that a
     137	 * valid CHK bit gets set in the per-packet status word
     138	 */
    139	if (priv->rx_chk_en && priv->crc_fwd)
    140		reg |= RXCHK_SKIP_FCS;
    141	else
    142		reg &= ~RXCHK_SKIP_FCS;
    143
    144	/* If Broadcom tags are enabled (e.g: using a switch), make
     145	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
    146	 * tag after the Ethernet MAC Source Address.
    147	 */
    148	if (netdev_uses_dsa(dev))
    149		reg |= RXCHK_BRCM_TAG_EN;
    150	else
    151		reg &= ~RXCHK_BRCM_TAG_EN;
    152
    153	rxchk_writel(priv, reg, RXCHK_CONTROL);
    154}
    155
    156static void bcm_sysport_set_tx_csum(struct net_device *dev,
    157				    netdev_features_t wanted)
    158{
    159	struct bcm_sysport_priv *priv = netdev_priv(dev);
    160	u32 reg;
    161
    162	/* Hardware transmit checksum requires us to enable the Transmit status
    163	 * block prepended to the packet contents
    164	 */
    165	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
    166				    NETIF_F_HW_VLAN_CTAG_TX));
    167	reg = tdma_readl(priv, TDMA_CONTROL);
    168	if (priv->tsb_en)
    169		reg |= tdma_control_bit(priv, TSB_EN);
    170	else
    171		reg &= ~tdma_control_bit(priv, TSB_EN);
    172	/* Indicating that software inserts Broadcom tags is needed for the TX
     173	 * checksum to be computed correctly when using VLAN HW acceleration;
     174	 * otherwise it has no effect, so it can always be turned on.
    175	 */
    176	if (netdev_uses_dsa(dev))
    177		reg |= tdma_control_bit(priv, SW_BRCM_TAG);
    178	else
    179		reg &= ~tdma_control_bit(priv, SW_BRCM_TAG);
    180	tdma_writel(priv, reg, TDMA_CONTROL);
    181
    182	/* Default TPID is ETH_P_8021AD, change to ETH_P_8021Q */
    183	if (wanted & NETIF_F_HW_VLAN_CTAG_TX)
    184		tdma_writel(priv, ETH_P_8021Q, TDMA_TPID);
    185}
    186
    187static int bcm_sysport_set_features(struct net_device *dev,
    188				    netdev_features_t features)
    189{
    190	struct bcm_sysport_priv *priv = netdev_priv(dev);
    191	int ret;
    192
    193	ret = clk_prepare_enable(priv->clk);
    194	if (ret)
    195		return ret;
    196
    197	/* Read CRC forward */
    198	if (!priv->is_lite)
    199		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
    200	else
    201		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
    202				  GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
    203
    204	bcm_sysport_set_rx_csum(dev, features);
    205	bcm_sysport_set_tx_csum(dev, features);
    206
    207	clk_disable_unprepare(priv->clk);
    208
    209	return 0;
    210}
    211
    212/* Hardware counters must be kept in sync because the order/offset
    213 * is important here (order in structure declaration = order in hardware)
    214 */
    215static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
    216	/* general stats */
    217	STAT_NETDEV64(rx_packets),
    218	STAT_NETDEV64(tx_packets),
    219	STAT_NETDEV64(rx_bytes),
    220	STAT_NETDEV64(tx_bytes),
    221	STAT_NETDEV(rx_errors),
    222	STAT_NETDEV(tx_errors),
    223	STAT_NETDEV(rx_dropped),
    224	STAT_NETDEV(tx_dropped),
    225	STAT_NETDEV(multicast),
    226	/* UniMAC RSV counters */
    227	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
    228	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
    229	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
    230	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
    231	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
    232	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
    233	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
    234	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
    235	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
    236	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
    237	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
    238	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
    239	STAT_MIB_RX("rx_multicast", mib.rx.mca),
    240	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
    241	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
    242	STAT_MIB_RX("rx_control", mib.rx.cf),
    243	STAT_MIB_RX("rx_pause", mib.rx.pf),
    244	STAT_MIB_RX("rx_unknown", mib.rx.uo),
    245	STAT_MIB_RX("rx_align", mib.rx.aln),
    246	STAT_MIB_RX("rx_outrange", mib.rx.flr),
    247	STAT_MIB_RX("rx_code", mib.rx.cde),
    248	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
    249	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
    250	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
    251	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
    252	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
    253	STAT_MIB_RX("rx_unicast", mib.rx.uc),
    254	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
    255	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
    256	/* UniMAC TSV counters */
    257	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
    258	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
    259	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
    260	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
    261	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
    262	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
    263	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
    264	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
    265	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
    266	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
    267	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
    268	STAT_MIB_TX("tx_multicast", mib.tx.mca),
    269	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
    270	STAT_MIB_TX("tx_pause", mib.tx.pf),
    271	STAT_MIB_TX("tx_control", mib.tx.cf),
    272	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
    273	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
    274	STAT_MIB_TX("tx_defer", mib.tx.drf),
    275	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
    276	STAT_MIB_TX("tx_single_col", mib.tx.scl),
    277	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
    278	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
    279	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
    280	STAT_MIB_TX("tx_frags", mib.tx.frg),
    281	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
    282	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
    283	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
    284	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
    285	STAT_MIB_TX("tx_unicast", mib.tx.uc),
    286	/* UniMAC RUNT counters */
    287	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
    288	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
    289	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
    290	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
    291	/* RXCHK misc statistics */
    292	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
    293	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
    294		   RXCHK_OTHER_DISC_CNTR),
    295	/* RBUF misc statistics */
    296	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
    297	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
    298	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
    299	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
    300	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
    301	STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
    302	STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
    303	/* Per TX-queue statistics are dynamically appended */
    304};
    305
    306#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)
    307
    308static void bcm_sysport_get_drvinfo(struct net_device *dev,
    309				    struct ethtool_drvinfo *info)
    310{
    311	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
    312	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
    313}
    314
    315static u32 bcm_sysport_get_msglvl(struct net_device *dev)
    316{
    317	struct bcm_sysport_priv *priv = netdev_priv(dev);
    318
    319	return priv->msg_enable;
    320}
    321
    322static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
    323{
    324	struct bcm_sysport_priv *priv = netdev_priv(dev);
    325
    326	priv->msg_enable = enable;
    327}
    328
    329static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
    330{
    331	switch (type) {
    332	case BCM_SYSPORT_STAT_NETDEV:
    333	case BCM_SYSPORT_STAT_NETDEV64:
    334	case BCM_SYSPORT_STAT_RXCHK:
    335	case BCM_SYSPORT_STAT_RBUF:
    336	case BCM_SYSPORT_STAT_SOFT:
    337		return true;
    338	default:
    339		return false;
    340	}
    341}
    342
    343static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
    344{
    345	struct bcm_sysport_priv *priv = netdev_priv(dev);
    346	const struct bcm_sysport_stats *s;
    347	unsigned int i, j;
    348
    349	switch (string_set) {
    350	case ETH_SS_STATS:
    351		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
    352			s = &bcm_sysport_gstrings_stats[i];
    353			if (priv->is_lite &&
    354			    !bcm_sysport_lite_stat_valid(s->type))
    355				continue;
    356			j++;
    357		}
    358		/* Include per-queue statistics */
    359		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
    360	default:
    361		return -EOPNOTSUPP;
    362	}
    363}
    364
    365static void bcm_sysport_get_strings(struct net_device *dev,
    366				    u32 stringset, u8 *data)
    367{
    368	struct bcm_sysport_priv *priv = netdev_priv(dev);
    369	const struct bcm_sysport_stats *s;
    370	char buf[128];
    371	int i, j;
    372
    373	switch (stringset) {
    374	case ETH_SS_STATS:
    375		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
    376			s = &bcm_sysport_gstrings_stats[i];
    377			if (priv->is_lite &&
    378			    !bcm_sysport_lite_stat_valid(s->type))
    379				continue;
    380
    381			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
    382			       ETH_GSTRING_LEN);
    383			j++;
    384		}
    385
    386		for (i = 0; i < dev->num_tx_queues; i++) {
    387			snprintf(buf, sizeof(buf), "txq%d_packets", i);
    388			memcpy(data + j * ETH_GSTRING_LEN, buf,
    389			       ETH_GSTRING_LEN);
    390			j++;
    391
    392			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
    393			memcpy(data + j * ETH_GSTRING_LEN, buf,
    394			       ETH_GSTRING_LEN);
    395			j++;
    396		}
    397		break;
    398	default:
    399		break;
    400	}
    401}
    402
    403static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
    404{
    405	int i, j = 0;
    406
    407	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
    408		const struct bcm_sysport_stats *s;
    409		u8 offset = 0;
    410		u32 val = 0;
    411		char *p;
    412
    413		s = &bcm_sysport_gstrings_stats[i];
    414		switch (s->type) {
    415		case BCM_SYSPORT_STAT_NETDEV:
    416		case BCM_SYSPORT_STAT_NETDEV64:
    417		case BCM_SYSPORT_STAT_SOFT:
    418			continue;
    419		case BCM_SYSPORT_STAT_MIB_RX:
    420		case BCM_SYSPORT_STAT_MIB_TX:
    421		case BCM_SYSPORT_STAT_RUNT:
    422			if (priv->is_lite)
    423				continue;
    424
    425			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
    426				offset = UMAC_MIB_STAT_OFFSET;
    427			val = umac_readl(priv, UMAC_MIB_START + j + offset);
    428			break;
    429		case BCM_SYSPORT_STAT_RXCHK:
    430			val = rxchk_readl(priv, s->reg_offset);
    431			if (val == ~0)
    432				rxchk_writel(priv, 0, s->reg_offset);
    433			break;
    434		case BCM_SYSPORT_STAT_RBUF:
    435			val = rbuf_readl(priv, s->reg_offset);
    436			if (val == ~0)
    437				rbuf_writel(priv, 0, s->reg_offset);
    438			break;
    439		}
    440
    441		j += s->stat_sizeof;
    442		p = (char *)priv + s->stat_offset;
    443		*(u32 *)p = val;
    444	}
    445
    446	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
    447}
    448
    449static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
    450					u64 *tx_bytes, u64 *tx_packets)
    451{
    452	struct bcm_sysport_tx_ring *ring;
    453	u64 bytes = 0, packets = 0;
    454	unsigned int start;
    455	unsigned int q;
    456
    457	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
    458		ring = &priv->tx_rings[q];
    459		do {
    460			start = u64_stats_fetch_begin_irq(&priv->syncp);
    461			bytes = ring->bytes;
    462			packets = ring->packets;
    463		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
    464
    465		*tx_bytes += bytes;
    466		*tx_packets += packets;
    467	}
    468}
    469
    470static void bcm_sysport_get_stats(struct net_device *dev,
    471				  struct ethtool_stats *stats, u64 *data)
    472{
    473	struct bcm_sysport_priv *priv = netdev_priv(dev);
    474	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
    475	struct u64_stats_sync *syncp = &priv->syncp;
    476	struct bcm_sysport_tx_ring *ring;
    477	u64 tx_bytes = 0, tx_packets = 0;
    478	unsigned int start;
    479	int i, j;
    480
    481	if (netif_running(dev)) {
    482		bcm_sysport_update_mib_counters(priv);
    483		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
    484		stats64->tx_bytes = tx_bytes;
    485		stats64->tx_packets = tx_packets;
    486	}
    487
    488	for (i =  0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
    489		const struct bcm_sysport_stats *s;
    490		char *p;
    491
    492		s = &bcm_sysport_gstrings_stats[i];
    493		if (s->type == BCM_SYSPORT_STAT_NETDEV)
    494			p = (char *)&dev->stats;
    495		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
    496			p = (char *)stats64;
    497		else
    498			p = (char *)priv;
    499
    500		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
    501			continue;
    502		p += s->stat_offset;
    503
    504		if (s->stat_sizeof == sizeof(u64) &&
    505		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
    506			do {
    507				start = u64_stats_fetch_begin_irq(syncp);
    508				data[i] = *(u64 *)p;
    509			} while (u64_stats_fetch_retry_irq(syncp, start));
    510		} else
    511			data[i] = *(u32 *)p;
    512		j++;
    513	}
    514
     515	/* For SYSTEMPORT Lite the statistics have holes, so j does not simply
     516	 * equal BCM_SYSPORT_STATS_LEN at the end of the loop; recompute it as
     517	 * the total number of reported statistics minus the number of per TX
     518	 * queue statistics, so the per-queue counters are appended right after
     519	 */
    520	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
    521	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
    522
    523	for (i = 0; i < dev->num_tx_queues; i++) {
    524		ring = &priv->tx_rings[i];
    525		data[j] = ring->packets;
    526		j++;
    527		data[j] = ring->bytes;
    528		j++;
    529	}
    530}
    531
    532static void bcm_sysport_get_wol(struct net_device *dev,
    533				struct ethtool_wolinfo *wol)
    534{
    535	struct bcm_sysport_priv *priv = netdev_priv(dev);
    536
    537	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
    538	wol->wolopts = priv->wolopts;
    539
    540	if (!(priv->wolopts & WAKE_MAGICSECURE))
    541		return;
    542
    543	memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
    544}
    545
    546static int bcm_sysport_set_wol(struct net_device *dev,
    547			       struct ethtool_wolinfo *wol)
    548{
    549	struct bcm_sysport_priv *priv = netdev_priv(dev);
    550	struct device *kdev = &priv->pdev->dev;
    551	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
    552
    553	if (!device_can_wakeup(kdev))
    554		return -ENOTSUPP;
    555
    556	if (wol->wolopts & ~supported)
    557		return -EINVAL;
    558
    559	if (wol->wolopts & WAKE_MAGICSECURE)
    560		memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
    561
    562	/* Flag the device and relevant IRQ as wakeup capable */
    563	if (wol->wolopts) {
    564		device_set_wakeup_enable(kdev, 1);
    565		if (priv->wol_irq_disabled)
    566			enable_irq_wake(priv->wol_irq);
    567		priv->wol_irq_disabled = 0;
    568	} else {
    569		device_set_wakeup_enable(kdev, 0);
    570		/* Avoid unbalanced disable_irq_wake calls */
    571		if (!priv->wol_irq_disabled)
    572			disable_irq_wake(priv->wol_irq);
    573		priv->wol_irq_disabled = 1;
    574	}
    575
    576	priv->wolopts = wol->wolopts;
    577
    578	return 0;
    579}
    580
    581static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
    582					u32 usecs, u32 pkts)
    583{
    584	u32 reg;
    585
    586	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
    587	reg &= ~(RDMA_INTR_THRESH_MASK |
    588		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
    589	reg |= pkts;
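        	/* Convert microseconds into 8.192 us hardware timeout ticks */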
    590	reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
    591	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
    592}
    593
    594static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
    595					struct ethtool_coalesce *ec)
    596{
    597	struct bcm_sysport_priv *priv = ring->priv;
    598	u32 reg;
    599
    600	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
    601	reg &= ~(RING_INTR_THRESH_MASK |
    602		 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
    603	reg |= ec->tx_max_coalesced_frames;
    604	reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
    605			    RING_TIMEOUT_SHIFT;
    606	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
    607}
    608
    609static int bcm_sysport_get_coalesce(struct net_device *dev,
    610				    struct ethtool_coalesce *ec,
    611				    struct kernel_ethtool_coalesce *kernel_coal,
    612				    struct netlink_ext_ack *extack)
    613{
    614	struct bcm_sysport_priv *priv = netdev_priv(dev);
    615	u32 reg;
    616
    617	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));
    618
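        	/* Convert hardware timeout ticks (8.192 us each) back to microseconds */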
    619	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
    620	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;
    621
    622	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
    623
    624	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
    625	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
    626	ec->use_adaptive_rx_coalesce = priv->dim.use_dim;
    627
    628	return 0;
    629}
    630
    631static int bcm_sysport_set_coalesce(struct net_device *dev,
    632				    struct ethtool_coalesce *ec,
    633				    struct kernel_ethtool_coalesce *kernel_coal,
    634				    struct netlink_ext_ack *extack)
    635{
    636	struct bcm_sysport_priv *priv = netdev_priv(dev);
    637	struct dim_cq_moder moder;
    638	u32 usecs, pkts;
    639	unsigned int i;
    640
     641	/* The base system clock is 125 MHz; the DMA timeout is this reference
     642	 * clock divided by 1024, which yields roughly 8.192 us, and our maximum
     643	 * value has to fit in RING_TIMEOUT_MASK (16 bits).
     644	 */
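        	/* Worked example: one tick is 1024 / 125 MHz = 8.192 us, so a register
        	 * value of N covers roughly N * 8 us, which is the approximation used
        	 * in the bounds checks below.
        	 */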
    645	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
    646	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
    647	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
    648	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
    649		return -EINVAL;
    650
    651	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
    652	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
    653		return -EINVAL;
    654
    655	for (i = 0; i < dev->num_tx_queues; i++)
    656		bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);
    657
    658	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
    659	priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
    660	usecs = priv->rx_coalesce_usecs;
    661	pkts = priv->rx_max_coalesced_frames;
    662
    663	if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
    664		moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
    665		usecs = moder.usec;
    666		pkts = moder.pkts;
    667	}
    668
    669	priv->dim.use_dim = ec->use_adaptive_rx_coalesce;
    670
    671	/* Apply desired coalescing parameters */
    672	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
    673
    674	return 0;
    675}
    676
    677static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
    678{
    679	dev_consume_skb_any(cb->skb);
    680	cb->skb = NULL;
    681	dma_unmap_addr_set(cb, dma_addr, 0);
    682}
    683
    684static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
    685					     struct bcm_sysport_cb *cb)
    686{
    687	struct device *kdev = &priv->pdev->dev;
    688	struct net_device *ndev = priv->netdev;
    689	struct sk_buff *skb, *rx_skb;
    690	dma_addr_t mapping;
    691
    692	/* Allocate a new SKB for a new packet */
    693	skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
    694				 GFP_ATOMIC | __GFP_NOWARN);
    695	if (!skb) {
    696		priv->mib.alloc_rx_buff_failed++;
    697		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
    698		return NULL;
    699	}
    700
    701	mapping = dma_map_single(kdev, skb->data,
    702				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
    703	if (dma_mapping_error(kdev, mapping)) {
    704		priv->mib.rx_dma_failed++;
    705		dev_kfree_skb_any(skb);
    706		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
    707		return NULL;
    708	}
    709
    710	/* Grab the current SKB on the ring */
    711	rx_skb = cb->skb;
    712	if (likely(rx_skb))
    713		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
    714				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
    715
    716	/* Put the new SKB on the ring */
    717	cb->skb = skb;
    718	dma_unmap_addr_set(cb, dma_addr, mapping);
    719	dma_desc_set_addr(priv, cb->bd_addr, mapping);
    720
    721	netif_dbg(priv, rx_status, ndev, "RX refill\n");
    722
    723	/* Return the current SKB to the caller */
    724	return rx_skb;
    725}
    726
    727static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
    728{
    729	struct bcm_sysport_cb *cb;
    730	struct sk_buff *skb;
    731	unsigned int i;
    732
    733	for (i = 0; i < priv->num_rx_bds; i++) {
    734		cb = &priv->rx_cbs[i];
    735		skb = bcm_sysport_rx_refill(priv, cb);
    736		dev_kfree_skb(skb);
    737		if (!cb->skb)
    738			return -ENOMEM;
    739	}
    740
    741	return 0;
    742}
    743
    744/* Poll the hardware for up to budget packets to process */
    745static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
    746					unsigned int budget)
    747{
    748	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
    749	struct net_device *ndev = priv->netdev;
    750	unsigned int processed = 0, to_process;
    751	unsigned int processed_bytes = 0;
    752	struct bcm_sysport_cb *cb;
    753	struct sk_buff *skb;
    754	unsigned int p_index;
    755	u16 len, status;
    756	struct bcm_rsb *rsb;
    757
    758	/* Clear status before servicing to reduce spurious interrupts */
    759	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);
    760
     761	/* Determine how much we should process since the last call; SYSTEMPORT
     762	 * Lite groups the producer and consumer indexes into the same 32-bit
     763	 * register, which we access using RDMA_CONS_INDEX
     764	 */
    765	if (!priv->is_lite)
    766		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
    767	else
    768		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
    769	p_index &= RDMA_PROD_INDEX_MASK;
    770
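        	/* Masked subtraction handles wrap-around of the hardware index */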
    771	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;
    772
    773	netif_dbg(priv, rx_status, ndev,
    774		  "p_index=%d rx_c_index=%d to_process=%d\n",
    775		  p_index, priv->rx_c_index, to_process);
    776
    777	while ((processed < to_process) && (processed < budget)) {
    778		cb = &priv->rx_cbs[priv->rx_read_ptr];
    779		skb = bcm_sysport_rx_refill(priv, cb);
    780
    781
     782		/* We do not have a backing SKB, so we do not have a corresponding
    783		 * DMA mapping for this incoming packet since
    784		 * bcm_sysport_rx_refill always either has both skb and mapping
    785		 * or none.
    786		 */
    787		if (unlikely(!skb)) {
    788			netif_err(priv, rx_err, ndev, "out of memory!\n");
    789			ndev->stats.rx_dropped++;
    790			ndev->stats.rx_errors++;
    791			goto next;
    792		}
    793
    794		/* Extract the Receive Status Block prepended */
    795		rsb = (struct bcm_rsb *)skb->data;
    796		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
    797		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
    798			  DESC_STATUS_MASK;
    799
    800		netif_dbg(priv, rx_status, ndev,
    801			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
    802			  p_index, priv->rx_c_index, priv->rx_read_ptr,
    803			  len, status);
    804
    805		if (unlikely(len > RX_BUF_LENGTH)) {
    806			netif_err(priv, rx_status, ndev, "oversized packet\n");
    807			ndev->stats.rx_length_errors++;
    808			ndev->stats.rx_errors++;
    809			dev_kfree_skb_any(skb);
    810			goto next;
    811		}
    812
    813		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
    814			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
    815			ndev->stats.rx_dropped++;
    816			ndev->stats.rx_errors++;
    817			dev_kfree_skb_any(skb);
    818			goto next;
    819		}
    820
    821		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
    822			netif_err(priv, rx_err, ndev, "error packet\n");
    823			if (status & RX_STATUS_OVFLOW)
    824				ndev->stats.rx_over_errors++;
    825			ndev->stats.rx_dropped++;
    826			ndev->stats.rx_errors++;
    827			dev_kfree_skb_any(skb);
    828			goto next;
    829		}
    830
    831		skb_put(skb, len);
    832
    833		/* Hardware validated our checksum */
    834		if (likely(status & DESC_L4_CSUM))
    835			skb->ip_summed = CHECKSUM_UNNECESSARY;
    836
     837		/* Hardware prepends 2 bytes before the Ethernet header, plus we
     838		 * have the Receive Status Block; strip all of this from the
     839		 * SKB.
     840		 */
    841		skb_pull(skb, sizeof(*rsb) + 2);
    842		len -= (sizeof(*rsb) + 2);
    843		processed_bytes += len;
    844
    845		/* UniMAC may forward CRC */
    846		if (priv->crc_fwd) {
    847			skb_trim(skb, len - ETH_FCS_LEN);
    848			len -= ETH_FCS_LEN;
    849		}
    850
    851		skb->protocol = eth_type_trans(skb, ndev);
    852		ndev->stats.rx_packets++;
    853		ndev->stats.rx_bytes += len;
    854		u64_stats_update_begin(&priv->syncp);
    855		stats64->rx_packets++;
    856		stats64->rx_bytes += len;
    857		u64_stats_update_end(&priv->syncp);
    858
    859		napi_gro_receive(&priv->napi, skb);
    860next:
    861		processed++;
    862		priv->rx_read_ptr++;
    863
    864		if (priv->rx_read_ptr == priv->num_rx_bds)
    865			priv->rx_read_ptr = 0;
    866	}
    867
    868	priv->dim.packets = processed;
    869	priv->dim.bytes = processed_bytes;
    870
    871	return processed;
    872}
    873
    874static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
    875				       struct bcm_sysport_cb *cb,
    876				       unsigned int *bytes_compl,
    877				       unsigned int *pkts_compl)
    878{
    879	struct bcm_sysport_priv *priv = ring->priv;
    880	struct device *kdev = &priv->pdev->dev;
    881
    882	if (cb->skb) {
    883		*bytes_compl += cb->skb->len;
    884		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
    885				 dma_unmap_len(cb, dma_len),
    886				 DMA_TO_DEVICE);
    887		(*pkts_compl)++;
    888		bcm_sysport_free_cb(cb);
    889	/* SKB fragment */
    890	} else if (dma_unmap_addr(cb, dma_addr)) {
    891		*bytes_compl += dma_unmap_len(cb, dma_len);
    892		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
    893			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
    894		dma_unmap_addr_set(cb, dma_addr, 0);
    895	}
    896}
    897
    898/* Reclaim queued SKBs for transmission completion, lockless version */
    899static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
    900					     struct bcm_sysport_tx_ring *ring)
    901{
    902	unsigned int pkts_compl = 0, bytes_compl = 0;
    903	struct net_device *ndev = priv->netdev;
    904	unsigned int txbds_processed = 0;
    905	struct bcm_sysport_cb *cb;
    906	unsigned int txbds_ready;
    907	unsigned int c_index;
    908	u32 hw_ind;
    909
    910	/* Clear status before servicing to reduce spurious interrupts */
    911	if (!ring->priv->is_lite)
    912		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
    913	else
    914		intrl2_0_writel(ring->priv, BIT(ring->index +
    915				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);
    916
    917	/* Compute how many descriptors have been processed since last call */
    918	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
    919	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
    920	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
    921
    922	netif_dbg(priv, tx_done, ndev,
    923		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
    924		  ring->index, ring->c_index, c_index, txbds_ready);
    925
    926	while (txbds_processed < txbds_ready) {
    927		cb = &ring->cbs[ring->clean_index];
    928		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
    929
    930		ring->desc_count++;
    931		txbds_processed++;
    932
    933		if (likely(ring->clean_index < ring->size - 1))
    934			ring->clean_index++;
    935		else
    936			ring->clean_index = 0;
    937	}
    938
    939	u64_stats_update_begin(&priv->syncp);
    940	ring->packets += pkts_compl;
    941	ring->bytes += bytes_compl;
    942	u64_stats_update_end(&priv->syncp);
    943
    944	ring->c_index = c_index;
    945
    946	netif_dbg(priv, tx_done, ndev,
    947		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
    948		  ring->index, ring->c_index, pkts_compl, bytes_compl);
    949
    950	return pkts_compl;
    951}
    952
    953/* Locked version of the per-ring TX reclaim routine */
    954static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
    955					   struct bcm_sysport_tx_ring *ring)
    956{
    957	struct netdev_queue *txq;
    958	unsigned int released;
    959	unsigned long flags;
    960
    961	txq = netdev_get_tx_queue(priv->netdev, ring->index);
    962
    963	spin_lock_irqsave(&ring->lock, flags);
    964	released = __bcm_sysport_tx_reclaim(priv, ring);
    965	if (released)
    966		netif_tx_wake_queue(txq);
    967
    968	spin_unlock_irqrestore(&ring->lock, flags);
    969
    970	return released;
    971}
    972
    973/* Locked version of the per-ring TX reclaim, but does not wake the queue */
    974static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
    975				 struct bcm_sysport_tx_ring *ring)
    976{
    977	unsigned long flags;
    978
    979	spin_lock_irqsave(&ring->lock, flags);
    980	__bcm_sysport_tx_reclaim(priv, ring);
    981	spin_unlock_irqrestore(&ring->lock, flags);
    982}
    983
    984static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
    985{
    986	struct bcm_sysport_tx_ring *ring =
    987		container_of(napi, struct bcm_sysport_tx_ring, napi);
    988	unsigned int work_done = 0;
    989
    990	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
    991
    992	if (work_done == 0) {
    993		napi_complete(napi);
    994		/* re-enable TX interrupt */
    995		if (!ring->priv->is_lite)
    996			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
    997		else
    998			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
    999					    INTRL2_0_TDMA_MBDONE_SHIFT));
   1000
   1001		return 0;
   1002	}
   1003
   1004	return budget;
   1005}
   1006
   1007static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
   1008{
   1009	unsigned int q;
   1010
   1011	for (q = 0; q < priv->netdev->num_tx_queues; q++)
   1012		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
   1013}
   1014
   1015static int bcm_sysport_poll(struct napi_struct *napi, int budget)
   1016{
   1017	struct bcm_sysport_priv *priv =
   1018		container_of(napi, struct bcm_sysport_priv, napi);
   1019	struct dim_sample dim_sample = {};
   1020	unsigned int work_done = 0;
   1021
   1022	work_done = bcm_sysport_desc_rx(priv, budget);
   1023
   1024	priv->rx_c_index += work_done;
   1025	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
   1026
    1027	/* SYSTEMPORT Lite groups the producer/consumer indexes; the producer
    1028	 * is maintained by HW, but writes to it will be ignored while RDMA is
    1029	 * active (the consumer index occupies the upper 16 bits, hence the shift)
    1030	 */
   1031	if (!priv->is_lite)
   1032		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
   1033	else
   1034		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);
   1035
   1036	if (work_done < budget) {
   1037		napi_complete_done(napi, work_done);
   1038		/* re-enable RX interrupts */
   1039		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
   1040	}
   1041
   1042	if (priv->dim.use_dim) {
   1043		dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
   1044				  priv->dim.bytes, &dim_sample);
   1045		net_dim(&priv->dim.dim, dim_sample);
   1046	}
   1047
   1048	return work_done;
   1049}
   1050
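        /* Enable/disable Magic Packet Detection in the UniMAC along with the
         * corresponding RBUF_ACPI_EN wake-up matching bit in the RBUF block.
         */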
   1051static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
   1052{
   1053	u32 reg, bit;
   1054
   1055	reg = umac_readl(priv, UMAC_MPD_CTRL);
   1056	if (enable)
   1057		reg |= MPD_EN;
   1058	else
   1059		reg &= ~MPD_EN;
   1060	umac_writel(priv, reg, UMAC_MPD_CTRL);
   1061
   1062	if (priv->is_lite)
   1063		bit = RBUF_ACPI_EN_LITE;
   1064	else
   1065		bit = RBUF_ACPI_EN;
   1066
   1067	reg = rbuf_readl(priv, RBUF_CONTROL);
   1068	if (enable)
   1069		reg |= bit;
   1070	else
   1071		reg &= ~bit;
   1072	rbuf_writel(priv, reg, RBUF_CONTROL);
   1073}
   1074
   1075static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
   1076{
   1077	unsigned int index;
   1078	u32 reg;
   1079
   1080	/* Disable RXCHK, active filters and Broadcom tag matching */
   1081	reg = rxchk_readl(priv, RXCHK_CONTROL);
   1082	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
   1083		 RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
   1084	rxchk_writel(priv, reg, RXCHK_CONTROL);
   1085
   1086	/* Make sure we restore correct CID index in case HW lost
   1087	 * its context during deep idle state
   1088	 */
   1089	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
   1090		rxchk_writel(priv, priv->filters_loc[index] <<
   1091			     RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
   1092		rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
   1093	}
   1094
   1095	/* Clear the MagicPacket detection logic */
   1096	mpd_enable_set(priv, false);
   1097
   1098	reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
   1099	if (reg & INTRL2_0_MPD)
   1100		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
   1101
   1102	if (reg & INTRL2_0_BRCM_MATCH_TAG) {
   1103		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
   1104				  RXCHK_BRCM_TAG_MATCH_MASK;
   1105		netdev_info(priv->netdev,
   1106			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
   1107	}
   1108
   1109	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
   1110}
   1111
   1112static void bcm_sysport_dim_work(struct work_struct *work)
   1113{
   1114	struct dim *dim = container_of(work, struct dim, work);
   1115	struct bcm_sysport_net_dim *ndim =
   1116			container_of(dim, struct bcm_sysport_net_dim, dim);
   1117	struct bcm_sysport_priv *priv =
   1118			container_of(ndim, struct bcm_sysport_priv, dim);
   1119	struct dim_cq_moder cur_profile = net_dim_get_rx_moderation(dim->mode,
   1120								    dim->profile_ix);
   1121
   1122	bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
   1123	dim->state = DIM_START_MEASURE;
   1124}
   1125
   1126/* RX and misc interrupt routine */
   1127static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
   1128{
   1129	struct net_device *dev = dev_id;
   1130	struct bcm_sysport_priv *priv = netdev_priv(dev);
   1131	struct bcm_sysport_tx_ring *txr;
   1132	unsigned int ring, ring_bit;
   1133
   1134	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
   1135			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
   1136	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
   1137
   1138	if (unlikely(priv->irq0_stat == 0)) {
   1139		netdev_warn(priv->netdev, "spurious RX interrupt\n");
   1140		return IRQ_NONE;
   1141	}
   1142
   1143	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
   1144		priv->dim.event_ctr++;
   1145		if (likely(napi_schedule_prep(&priv->napi))) {
   1146			/* disable RX interrupts */
   1147			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
   1148			__napi_schedule_irqoff(&priv->napi);
   1149		}
   1150	}
   1151
   1152	/* TX ring is full, perform a full reclaim since we do not know
   1153	 * which one would trigger this interrupt
   1154	 */
   1155	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
   1156		bcm_sysport_tx_reclaim_all(priv);
   1157
   1158	if (!priv->is_lite)
   1159		goto out;
   1160
   1161	for (ring = 0; ring < dev->num_tx_queues; ring++) {
   1162		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
   1163		if (!(priv->irq0_stat & ring_bit))
   1164			continue;
   1165
   1166		txr = &priv->tx_rings[ring];
   1167
   1168		if (likely(napi_schedule_prep(&txr->napi))) {
   1169			intrl2_0_mask_set(priv, ring_bit);
   1170			__napi_schedule(&txr->napi);
   1171		}
   1172	}
   1173out:
   1174	return IRQ_HANDLED;
   1175}
   1176
   1177/* TX interrupt service routine */
   1178static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
   1179{
   1180	struct net_device *dev = dev_id;
   1181	struct bcm_sysport_priv *priv = netdev_priv(dev);
   1182	struct bcm_sysport_tx_ring *txr;
   1183	unsigned int ring;
   1184
   1185	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
   1186				~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
   1187	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
   1188
   1189	if (unlikely(priv->irq1_stat == 0)) {
   1190		netdev_warn(priv->netdev, "spurious TX interrupt\n");
   1191		return IRQ_NONE;
   1192	}
   1193
   1194	for (ring = 0; ring < dev->num_tx_queues; ring++) {
   1195		if (!(priv->irq1_stat & BIT(ring)))
   1196			continue;
   1197
   1198		txr = &priv->tx_rings[ring];
   1199
   1200		if (likely(napi_schedule_prep(&txr->napi))) {
   1201			intrl2_1_mask_set(priv, BIT(ring));
   1202			__napi_schedule_irqoff(&txr->napi);
   1203		}
   1204	}
   1205
   1206	return IRQ_HANDLED;
   1207}
   1208
   1209static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
   1210{
   1211	struct bcm_sysport_priv *priv = dev_id;
   1212
   1213	pm_wakeup_event(&priv->pdev->dev, 0);
   1214
   1215	return IRQ_HANDLED;
   1216}
   1217
   1218#ifdef CONFIG_NET_POLL_CONTROLLER
   1219static void bcm_sysport_poll_controller(struct net_device *dev)
   1220{
   1221	struct bcm_sysport_priv *priv = netdev_priv(dev);
   1222
   1223	disable_irq(priv->irq0);
   1224	bcm_sysport_rx_isr(priv->irq0, priv);
   1225	enable_irq(priv->irq0);
   1226
   1227	if (!priv->is_lite) {
   1228		disable_irq(priv->irq1);
   1229		bcm_sysport_tx_isr(priv->irq1, priv);
   1230		enable_irq(priv->irq1);
   1231	}
   1232}
   1233#endif
   1234
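        /* Prepend the per-packet Transmit Status Block (TSB) carrying the VLAN
         * tag and L4 checksum offload parameters consumed by the hardware.
         */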
   1235static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
   1236					      struct net_device *dev)
   1237{
   1238	struct bcm_sysport_priv *priv = netdev_priv(dev);
   1239	struct sk_buff *nskb;
   1240	struct bcm_tsb *tsb;
   1241	u32 csum_info;
   1242	u8 ip_proto;
   1243	u16 csum_start;
   1244	__be16 ip_ver;
   1245
   1246	/* Re-allocate SKB if needed */
   1247	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
   1248		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
   1249		if (!nskb) {
   1250			dev_kfree_skb_any(skb);
   1251			priv->mib.tx_realloc_tsb_failed++;
   1252			dev->stats.tx_errors++;
   1253			dev->stats.tx_dropped++;
   1254			return NULL;
   1255		}
   1256		dev_consume_skb_any(skb);
   1257		skb = nskb;
   1258		priv->mib.tx_realloc_tsb++;
   1259	}
   1260
   1261	tsb = skb_push(skb, sizeof(*tsb));
   1262	/* Zero-out TSB by default */
   1263	memset(tsb, 0, sizeof(*tsb));
   1264
   1265	if (skb_vlan_tag_present(skb)) {
   1266		tsb->pcp_dei_vid = skb_vlan_tag_get_prio(skb) & PCP_DEI_MASK;
   1267		tsb->pcp_dei_vid |= (u32)skb_vlan_tag_get_id(skb) << VID_SHIFT;
   1268	}
   1269
   1270	if (skb->ip_summed == CHECKSUM_PARTIAL) {
   1271		ip_ver = skb->protocol;
   1272		switch (ip_ver) {
   1273		case htons(ETH_P_IP):
   1274			ip_proto = ip_hdr(skb)->protocol;
   1275			break;
   1276		case htons(ETH_P_IPV6):
   1277			ip_proto = ipv6_hdr(skb)->nexthdr;
   1278			break;
   1279		default:
   1280			return skb;
   1281		}
   1282
   1283		/* Get the checksum offset and the L4 (transport) offset */
   1284		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
   1285		/* Account for the HW inserted VLAN tag */
   1286		if (skb_vlan_tag_present(skb))
   1287			csum_start += VLAN_HLEN;
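        		/* csum_info: the low bits hold where the checksum value is
        		 * written, the high bits (via L4_PTR_SHIFT) hold where the
        		 * L4 header starts.
        		 */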
   1288		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
   1289		csum_info |= (csum_start << L4_PTR_SHIFT);
   1290
   1291		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
   1292			csum_info |= L4_LENGTH_VALID;
   1293			if (ip_proto == IPPROTO_UDP &&
   1294			    ip_ver == htons(ETH_P_IP))
   1295				csum_info |= L4_UDP;
   1296		} else {
   1297			csum_info = 0;
   1298		}
   1299
   1300		tsb->l4_ptr_dest_map = csum_info;
   1301	}
   1302
   1303	return skb;
   1304}
   1305
   1306static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
   1307				    struct net_device *dev)
   1308{
   1309	struct bcm_sysport_priv *priv = netdev_priv(dev);
   1310	struct device *kdev = &priv->pdev->dev;
   1311	struct bcm_sysport_tx_ring *ring;
   1312	unsigned long flags, desc_flags;
   1313	struct bcm_sysport_cb *cb;
   1314	struct netdev_queue *txq;
   1315	u32 len_status, addr_lo;
   1316	unsigned int skb_len;
   1317	dma_addr_t mapping;
   1318	u16 queue;
   1319	int ret;
   1320
   1321	queue = skb_get_queue_mapping(skb);
   1322	txq = netdev_get_tx_queue(dev, queue);
   1323	ring = &priv->tx_rings[queue];
   1324
   1325	/* lock against tx reclaim in BH context and TX ring full interrupt */
   1326	spin_lock_irqsave(&ring->lock, flags);
   1327	if (unlikely(ring->desc_count == 0)) {
   1328		netif_tx_stop_queue(txq);
   1329		netdev_err(dev, "queue %d awake and ring full!\n", queue);
   1330		ret = NETDEV_TX_BUSY;
   1331		goto out;
   1332	}
   1333
   1334	/* Insert TSB and checksum infos */
   1335	if (priv->tsb_en) {
   1336		skb = bcm_sysport_insert_tsb(skb, dev);
   1337		if (!skb) {
   1338			ret = NETDEV_TX_OK;
   1339			goto out;
   1340		}
   1341	}
   1342
   1343	skb_len = skb->len;
   1344
   1345	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
   1346	if (dma_mapping_error(kdev, mapping)) {
   1347		priv->mib.tx_dma_failed++;
   1348		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
   1349			  skb->data, skb_len);
   1350		ret = NETDEV_TX_OK;
   1351		goto out;
   1352	}
   1353
   1354	/* Remember the SKB for future freeing */
   1355	cb = &ring->cbs[ring->curr_desc];
   1356	cb->skb = skb;
   1357	dma_unmap_addr_set(cb, dma_addr, mapping);
   1358	dma_unmap_len_set(cb, dma_len, skb_len);
   1359
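        	/* Build the descriptor: the upper address bits, length and status
        	 * flags are packed into len_status, while the lower address goes
        	 * into the LO write port below.
        	 */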
   1360	addr_lo = lower_32_bits(mapping);
   1361	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
   1362	len_status |= (skb_len << DESC_LEN_SHIFT);
   1363	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
   1364		       DESC_STATUS_SHIFT;
   1365	if (skb->ip_summed == CHECKSUM_PARTIAL)
   1366		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
   1367	if (skb_vlan_tag_present(skb))
   1368		len_status |= (TX_STATUS_VLAN_VID_TSB << DESC_STATUS_SHIFT);
   1369
   1370	ring->curr_desc++;
   1371	if (ring->curr_desc == ring->size)
   1372		ring->curr_desc = 0;
   1373	ring->desc_count--;
   1374
   1375	/* Ports are latched, so write upper address first */
   1376	spin_lock_irqsave(&priv->desc_lock, desc_flags);
   1377	tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
   1378	tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
   1379	spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
   1380
   1381	/* Check ring space and update SW control flow */
   1382	if (ring->desc_count == 0)
   1383		netif_tx_stop_queue(txq);
   1384
   1385	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
   1386		  ring->index, ring->desc_count, ring->curr_desc);
   1387
   1388	ret = NETDEV_TX_OK;
   1389out:
   1390	spin_unlock_irqrestore(&ring->lock, flags);
   1391	return ret;
   1392}
   1393
   1394static void bcm_sysport_tx_timeout(struct net_device *dev, unsigned int txqueue)
   1395{
   1396	netdev_warn(dev, "transmit timeout!\n");
   1397
   1398	netif_trans_update(dev);
   1399	dev->stats.tx_errors++;
   1400
   1401	netif_tx_wake_all_queues(dev);
   1402}
   1403
   1404/* phylib adjust link callback */
   1405static void bcm_sysport_adj_link(struct net_device *dev)
   1406{
   1407	struct bcm_sysport_priv *priv = netdev_priv(dev);
   1408	struct phy_device *phydev = dev->phydev;
   1409	unsigned int changed = 0;
   1410	u32 cmd_bits = 0, reg;
   1411
   1412	if (priv->old_link != phydev->link) {
   1413		changed = 1;
   1414		priv->old_link = phydev->link;
   1415	}
   1416
   1417	if (priv->old_duplex != phydev->duplex) {
   1418		changed = 1;
   1419		priv->old_duplex = phydev->duplex;
   1420	}
   1421
   1422	if (priv->is_lite)
   1423		goto out;
   1424
   1425	switch (phydev->speed) {
   1426	case SPEED_2500:
   1427		cmd_bits = CMD_SPEED_2500;
   1428		break;
   1429	case SPEED_1000:
   1430		cmd_bits = CMD_SPEED_1000;
   1431		break;
   1432	case SPEED_100:
   1433		cmd_bits = CMD_SPEED_100;
   1434		break;
   1435	case SPEED_10:
   1436		cmd_bits = CMD_SPEED_10;
   1437		break;
   1438	default:
   1439		break;
   1440	}
   1441	cmd_bits <<= CMD_SPEED_SHIFT;
   1442
   1443	if (phydev->duplex == DUPLEX_HALF)
   1444		cmd_bits |= CMD_HD_EN;
   1445
   1446	if (priv->old_pause != phydev->pause) {
   1447		changed = 1;
   1448		priv->old_pause = phydev->pause;
   1449	}
   1450
   1451	if (!phydev->pause)
   1452		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
   1453
   1454	if (!changed)
   1455		return;
   1456
   1457	if (phydev->link) {
   1458		reg = umac_readl(priv, UMAC_CMD);
   1459		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
   1460			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
   1461			CMD_TX_PAUSE_IGNORE);
   1462		reg |= cmd_bits;
   1463		umac_writel(priv, reg, UMAC_CMD);
   1464	}
   1465out:
   1466	if (changed)
   1467		phy_print_status(phydev);
   1468}
   1469
   1470static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
   1471				 void (*cb)(struct work_struct *work))
   1472{
   1473	struct bcm_sysport_net_dim *dim = &priv->dim;
   1474
   1475	INIT_WORK(&dim->dim.work, cb);
   1476	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
   1477	dim->event_ctr = 0;
   1478	dim->packets = 0;
   1479	dim->bytes = 0;
   1480}
   1481
   1482static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
   1483{
   1484	struct bcm_sysport_net_dim *dim = &priv->dim;
   1485	struct dim_cq_moder moder;
   1486	u32 usecs, pkts;
   1487
   1488	usecs = priv->rx_coalesce_usecs;
   1489	pkts = priv->rx_max_coalesced_frames;
   1490
   1491	/* If DIM was enabled, re-apply default parameters */
   1492	if (dim->use_dim) {
   1493		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
   1494		usecs = moder.usec;
   1495		pkts = moder.pkts;
   1496	}
   1497
   1498	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
   1499}
   1500
   1501static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
   1502				    unsigned int index)
   1503{
   1504	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
   1505	size_t size;
   1506	u32 reg;
   1507
    1508	/* Simple descriptor partitioning for now */
   1509	size = 256;
   1510
   1511	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
   1512	if (!ring->cbs) {
   1513		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
   1514		return -ENOMEM;
   1515	}
   1516
   1517	/* Initialize SW view of the ring */
   1518	spin_lock_init(&ring->lock);
   1519	ring->priv = priv;
   1520	netif_napi_add_tx(priv->netdev, &ring->napi, bcm_sysport_tx_poll);
   1521	ring->index = index;
   1522	ring->size = size;
   1523	ring->clean_index = 0;
   1524	ring->alloc_size = ring->size;
   1525	ring->desc_count = ring->size;
   1526	ring->curr_desc = 0;
   1527
   1528	/* Initialize HW ring */
   1529	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
   1530	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
   1531	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
   1532	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
   1533
   1534	/* Configure QID and port mapping */
   1535	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
   1536	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
   1537	if (ring->inspect) {
   1538		reg |= ring->switch_queue & RING_QID_MASK;
   1539		reg |= ring->switch_port << RING_PORT_ID_SHIFT;
   1540	} else {
   1541		reg |= RING_IGNORE_STATUS;
   1542	}
   1543	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
   1544	reg = 0;
   1545	/* Adjust the packet size calculations if SYSTEMPORT is responsible
   1546	 * for HW insertion of VLAN tags
   1547	 */
   1548	if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
   1549		reg = VLAN_HLEN << RING_PKT_SIZE_ADJ_SHIFT;
   1550	tdma_writel(priv, reg, TDMA_DESC_RING_PCP_DEI_VID(index));
   1551
   1552	/* Enable ACB algorithm 2 */
   1553	reg = tdma_readl(priv, TDMA_CONTROL);
   1554	reg |= tdma_control_bit(priv, ACB_ALGO);
   1555	tdma_writel(priv, reg, TDMA_CONTROL);
   1556
   1557	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
   1558	 * with the original definition of ACB_ALGO
   1559	 */
   1560	reg = tdma_readl(priv, TDMA_CONTROL);
   1561	if (priv->is_lite)
   1562		reg &= ~BIT(TSB_SWAP1);
   1563	/* Set a correct TSB format based on host endian */
   1564	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
   1565		reg |= tdma_control_bit(priv, TSB_SWAP0);
   1566	else
   1567		reg &= ~tdma_control_bit(priv, TSB_SWAP0);
   1568	tdma_writel(priv, reg, TDMA_CONTROL);
   1569
   1570	/* Program the number of descriptors as MAX_THRESHOLD and half of
   1571	 * its size for the hysteresis trigger
   1572	 */
   1573	tdma_writel(priv, ring->size |
   1574			1 << RING_HYST_THRESH_SHIFT,
   1575			TDMA_DESC_RING_MAX_HYST(index));
   1576
   1577	/* Enable the ring queue in the arbiter */
   1578	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
   1579	reg |= (1 << index);
   1580	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
   1581
   1582	napi_enable(&ring->napi);
   1583
   1584	netif_dbg(priv, hw, priv->netdev,
   1585		  "TDMA cfg, size=%d, switch q=%d,port=%d\n",
   1586		  ring->size, ring->switch_queue,
   1587		  ring->switch_port);
   1588
   1589	return 0;
   1590}
   1591
   1592static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
   1593				     unsigned int index)
   1594{
   1595	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
   1596	u32 reg;
   1597
   1598	/* Caller should stop the TDMA engine */
   1599	reg = tdma_readl(priv, TDMA_STATUS);
   1600	if (!(reg & TDMA_DISABLED))
   1601		netdev_warn(priv->netdev, "TDMA not stopped!\n");
   1602
   1603	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
   1604	 * fail, so by checking this pointer we know whether the TX ring was
   1605	 * fully initialized or not.
   1606	 */
   1607	if (!ring->cbs)
   1608		return;
   1609
   1610	napi_disable(&ring->napi);
   1611	netif_napi_del(&ring->napi);
   1612
   1613	bcm_sysport_tx_clean(priv, ring);
   1614
   1615	kfree(ring->cbs);
   1616	ring->cbs = NULL;
   1617	ring->size = 0;
   1618	ring->alloc_size = 0;
   1619
   1620	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
   1621}
   1622
   1623/* RDMA helper */
   1624static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
   1625				  unsigned int enable)
   1626{
   1627	unsigned int timeout = 1000;
   1628	u32 reg;
   1629
   1630	reg = rdma_readl(priv, RDMA_CONTROL);
   1631	if (enable)
   1632		reg |= RDMA_EN;
   1633	else
   1634		reg &= ~RDMA_EN;
   1635	rdma_writel(priv, reg, RDMA_CONTROL);
   1636
    1637	/* Poll for RDMA to reach the requested state */
   1638	do {
   1639		reg = rdma_readl(priv, RDMA_STATUS);
   1640		if (!!(reg & RDMA_DISABLED) == !enable)
   1641			return 0;
   1642		usleep_range(1000, 2000);
   1643	} while (timeout-- > 0);
   1644
   1645	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
   1646
   1647	return -ETIMEDOUT;
   1648}
   1649
   1650/* TDMA helper */
   1651static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
   1652				  unsigned int enable)
   1653{
   1654	unsigned int timeout = 1000;
   1655	u32 reg;
   1656
   1657	reg = tdma_readl(priv, TDMA_CONTROL);
   1658	if (enable)
   1659		reg |= tdma_control_bit(priv, TDMA_EN);
   1660	else
   1661		reg &= ~tdma_control_bit(priv, TDMA_EN);
   1662	tdma_writel(priv, reg, TDMA_CONTROL);
   1663
    1664	/* Poll for TDMA to reach the requested state */
   1665	do {
   1666		reg = tdma_readl(priv, TDMA_STATUS);
   1667		if (!!(reg & TDMA_DISABLED) == !enable)
   1668			return 0;
   1669
   1670		usleep_range(1000, 2000);
   1671	} while (timeout-- > 0);
   1672
   1673	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
   1674
   1675	return -ETIMEDOUT;
   1676}
   1677
   1678static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
   1679{
   1680	struct bcm_sysport_cb *cb;
   1681	u32 reg;
   1682	int ret;
   1683	int i;
   1684
   1685	/* Initialize SW view of the RX ring */
   1686	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
   1687	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
   1688	priv->rx_c_index = 0;
   1689	priv->rx_read_ptr = 0;
   1690	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
   1691				GFP_KERNEL);
   1692	if (!priv->rx_cbs) {
   1693		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
   1694		return -ENOMEM;
   1695	}
   1696
   1697	for (i = 0; i < priv->num_rx_bds; i++) {
   1698		cb = priv->rx_cbs + i;
   1699		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
   1700	}
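        	/* The RX descriptors live in on-chip memory right behind
        	 * SYS_PORT_RDMA_OFFSET (rx_bds points into register space), so
        	 * each control block simply caches a pointer to its
        	 * DESC_SIZE-byte descriptor rather than a DMA address.
        	 */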
   1701
   1702	ret = bcm_sysport_alloc_rx_bufs(priv);
   1703	if (ret) {
   1704		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
   1705		return ret;
   1706	}
   1707
   1708	/* Initialize HW, ensure RDMA is disabled */
   1709	reg = rdma_readl(priv, RDMA_STATUS);
   1710	if (!(reg & RDMA_DISABLED))
   1711		rdma_enable_set(priv, 0);
   1712
   1713	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
   1714	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
   1715	rdma_writel(priv, 0, RDMA_PROD_INDEX);
   1716	rdma_writel(priv, 0, RDMA_CONS_INDEX);
   1717	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
   1718			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
   1719	/* Operate the queue in ring mode */
   1720	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
   1721	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
   1722	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
   1723	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);
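        	/* In ring mode the descriptor area spans words 0 through
        	 * num_rx_desc_words - 1, hence a start address of 0 and an end
        	 * address of the last descriptor word.
        	 */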
   1724
   1725	netif_dbg(priv, hw, priv->netdev,
   1726		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
   1727		  priv->num_rx_bds, priv->rx_bds);
   1728
   1729	return 0;
   1730}
   1731
   1732static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
   1733{
   1734	struct bcm_sysport_cb *cb;
   1735	unsigned int i;
   1736	u32 reg;
   1737
   1738	/* Caller should ensure RDMA is disabled */
   1739	reg = rdma_readl(priv, RDMA_STATUS);
   1740	if (!(reg & RDMA_DISABLED))
   1741		netdev_warn(priv->netdev, "RDMA not stopped!\n");
   1742
   1743	for (i = 0; i < priv->num_rx_bds; i++) {
   1744		cb = &priv->rx_cbs[i];
   1745		if (dma_unmap_addr(cb, dma_addr))
   1746			dma_unmap_single(&priv->pdev->dev,
   1747					 dma_unmap_addr(cb, dma_addr),
   1748					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
   1749		bcm_sysport_free_cb(cb);
   1750	}
   1751
   1752	kfree(priv->rx_cbs);
   1753	priv->rx_cbs = NULL;
   1754
   1755	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
   1756}
   1757
   1758static void bcm_sysport_set_rx_mode(struct net_device *dev)
   1759{
   1760	struct bcm_sysport_priv *priv = netdev_priv(dev);
   1761	u32 reg;
   1762
   1763	if (priv->is_lite)
   1764		return;
   1765
   1766	reg = umac_readl(priv, UMAC_CMD);
   1767	if (dev->flags & IFF_PROMISC)
   1768		reg |= CMD_PROMISC;
   1769	else
   1770		reg &= ~CMD_PROMISC;
   1771	umac_writel(priv, reg, UMAC_CMD);
   1772
   1773	/* No support for ALLMULTI */
   1774	if (dev->flags & IFF_ALLMULTI)
   1775		return;
   1776}
   1777
   1778static inline void umac_enable_set(struct bcm_sysport_priv *priv,
   1779				   u32 mask, unsigned int enable)
   1780{
   1781	u32 reg;
   1782
   1783	if (!priv->is_lite) {
   1784		reg = umac_readl(priv, UMAC_CMD);
   1785		if (enable)
   1786			reg |= mask;
   1787		else
   1788			reg &= ~mask;
   1789		umac_writel(priv, reg, UMAC_CMD);
   1790	} else {
   1791		reg = gib_readl(priv, GIB_CONTROL);
   1792		if (enable)
   1793			reg |= mask;
   1794		else
   1795			reg &= ~mask;
   1796		gib_writel(priv, reg, GIB_CONTROL);
   1797	}
   1798
   1799	/* UniMAC stops on a packet boundary, wait for a full-sized packet
   1800	 * to be processed (1 msec).
   1801	 */
   1802	if (enable == 0)
   1803		usleep_range(1000, 2000);
   1804}
   1805
   1806static inline void umac_reset(struct bcm_sysport_priv *priv)
   1807{
   1808	u32 reg;
   1809
   1810	if (priv->is_lite)
   1811		return;
   1812
   1813	reg = umac_readl(priv, UMAC_CMD);
   1814	reg |= CMD_SW_RESET;
   1815	umac_writel(priv, reg, UMAC_CMD);
   1816	udelay(10);
   1817	reg = umac_readl(priv, UMAC_CMD);
   1818	reg &= ~CMD_SW_RESET;
   1819	umac_writel(priv, reg, UMAC_CMD);
   1820}
   1821
   1822static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
   1823			     const unsigned char *addr)
   1824{
   1825	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
   1826		    addr[3];
   1827	u32 mac1 = (addr[4] << 8) | addr[5];
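        	/* The 6-byte MAC address is packed big-endian style: bytes 0-3
        	 * into MAC0 and bytes 4-5 into the low 16 bits of MAC1.
        	 */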
   1828
   1829	if (!priv->is_lite) {
   1830		umac_writel(priv, mac0, UMAC_MAC0);
   1831		umac_writel(priv, mac1, UMAC_MAC1);
   1832	} else {
   1833		gib_writel(priv, mac0, GIB_MAC0);
   1834		gib_writel(priv, mac1, GIB_MAC1);
   1835	}
   1836}
   1837
   1838static void topctrl_flush(struct bcm_sysport_priv *priv)
   1839{
   1840	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
   1841	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
   1842	mdelay(1);
   1843	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
   1844	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
   1845}
   1846
   1847static int bcm_sysport_change_mac(struct net_device *dev, void *p)
   1848{
   1849	struct bcm_sysport_priv *priv = netdev_priv(dev);
   1850	struct sockaddr *addr = p;
   1851
   1852	if (!is_valid_ether_addr(addr->sa_data))
   1853		return -EINVAL;
   1854
   1855	eth_hw_addr_set(dev, addr->sa_data);
   1856
   1857	/* interface is disabled, changes to MAC will be reflected on next
   1858	 * open call
   1859	 */
   1860	if (!netif_running(dev))
   1861		return 0;
   1862
   1863	umac_set_hw_addr(priv, dev->dev_addr);
   1864
   1865	return 0;
   1866}
   1867
   1868static void bcm_sysport_get_stats64(struct net_device *dev,
   1869				    struct rtnl_link_stats64 *stats)
   1870{
   1871	struct bcm_sysport_priv *priv = netdev_priv(dev);
   1872	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
   1873	unsigned int start;
   1874
   1875	netdev_stats_to_stats64(stats, &dev->stats);
   1876
   1877	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
   1878				    &stats->tx_packets);
   1879
   1880	do {
   1881		start = u64_stats_fetch_begin_irq(&priv->syncp);
   1882		stats->rx_packets = stats64->rx_packets;
   1883		stats->rx_bytes = stats64->rx_bytes;
   1884	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
   1885}
   1886
   1887static void bcm_sysport_netif_start(struct net_device *dev)
   1888{
   1889	struct bcm_sysport_priv *priv = netdev_priv(dev);
   1890
   1891	/* Enable NAPI */
   1892	bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
   1893	bcm_sysport_init_rx_coalesce(priv);
   1894	napi_enable(&priv->napi);
   1895
   1896	/* Enable RX interrupt and TX ring full interrupt */
   1897	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
   1898
   1899	phy_start(dev->phydev);
   1900
   1901	/* Enable TX interrupts for the TXQs */
   1902	if (!priv->is_lite)
   1903		intrl2_1_mask_clear(priv, 0xffffffff);
   1904	else
   1905		intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
   1906}
   1907
   1908static void rbuf_init(struct bcm_sysport_priv *priv)
   1909{
   1910	u32 reg;
   1911
   1912	reg = rbuf_readl(priv, RBUF_CONTROL);
   1913	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
   1914	/* Set a correct RSB format on SYSTEMPORT Lite */
   1915	if (priv->is_lite)
   1916		reg &= ~RBUF_RSB_SWAP1;
   1917
   1918	/* Set a correct RSB format based on host endian */
   1919	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
   1920		reg |= RBUF_RSB_SWAP0;
   1921	else
   1922		reg &= ~RBUF_RSB_SWAP0;
   1923	rbuf_writel(priv, reg, RBUF_CONTROL);
   1924}
   1925
   1926static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
   1927{
   1928	intrl2_0_mask_set(priv, 0xffffffff);
   1929	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
   1930	if (!priv->is_lite) {
   1931		intrl2_1_mask_set(priv, 0xffffffff);
   1932		intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
   1933	}
   1934}
   1935
   1936static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
   1937{
   1938	u32 reg;
   1939
   1940	reg = gib_readl(priv, GIB_CONTROL);
   1941	/* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
   1942	if (netdev_uses_dsa(priv->netdev)) {
   1943		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
   1944		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
   1945	}
   1946	reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
   1947	reg |= 12 << GIB_IPG_LEN_SHIFT;
   1948	gib_writel(priv, reg, GIB_CONTROL);
   1949}
   1950
   1951static int bcm_sysport_open(struct net_device *dev)
   1952{
   1953	struct bcm_sysport_priv *priv = netdev_priv(dev);
   1954	struct phy_device *phydev;
   1955	unsigned int i;
   1956	int ret;
   1957
   1958	clk_prepare_enable(priv->clk);
   1959
   1960	/* Reset UniMAC */
   1961	umac_reset(priv);
   1962
   1963	/* Flush TX and RX FIFOs at TOPCTRL level */
   1964	topctrl_flush(priv);
   1965
   1966	/* Disable the UniMAC RX/TX */
   1967	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
   1968
    1969	/* Enable RBUF 4-byte alignment and the Receive Status Block */
   1970	rbuf_init(priv);
   1971
   1972	/* Set maximum frame length */
   1973	if (!priv->is_lite)
   1974		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
   1975	else
   1976		gib_set_pad_extension(priv);
   1977
   1978	/* Apply features again in case we changed them while interface was
   1979	 * down
   1980	 */
   1981	bcm_sysport_set_features(dev, dev->features);
   1982
   1983	/* Set MAC address */
   1984	umac_set_hw_addr(priv, dev->dev_addr);
   1985
   1986	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
   1987				0, priv->phy_interface);
   1988	if (!phydev) {
   1989		netdev_err(dev, "could not attach to PHY\n");
   1990		ret = -ENODEV;
   1991		goto out_clk_disable;
   1992	}
   1993
    1994	/* Reset housekeeping link status */
   1995	priv->old_duplex = -1;
   1996	priv->old_link = -1;
   1997	priv->old_pause = -1;
   1998
   1999	/* mask all interrupts and request them */
   2000	bcm_sysport_mask_all_intrs(priv);
   2001
   2002	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
   2003	if (ret) {
   2004		netdev_err(dev, "failed to request RX interrupt\n");
   2005		goto out_phy_disconnect;
   2006	}
   2007
   2008	if (!priv->is_lite) {
   2009		ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
   2010				  dev->name, dev);
   2011		if (ret) {
   2012			netdev_err(dev, "failed to request TX interrupt\n");
   2013			goto out_free_irq0;
   2014		}
   2015	}
   2016
   2017	/* Initialize both hardware and software ring */
   2018	spin_lock_init(&priv->desc_lock);
   2019	for (i = 0; i < dev->num_tx_queues; i++) {
   2020		ret = bcm_sysport_init_tx_ring(priv, i);
   2021		if (ret) {
   2022			netdev_err(dev, "failed to initialize TX ring %d\n",
   2023				   i);
   2024			goto out_free_tx_ring;
   2025		}
   2026	}
   2027
   2028	/* Initialize linked-list */
   2029	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
   2030
   2031	/* Initialize RX ring */
   2032	ret = bcm_sysport_init_rx_ring(priv);
   2033	if (ret) {
   2034		netdev_err(dev, "failed to initialize RX ring\n");
   2035		goto out_free_rx_ring;
   2036	}
   2037
   2038	/* Turn on RDMA */
   2039	ret = rdma_enable_set(priv, 1);
   2040	if (ret)
   2041		goto out_free_rx_ring;
   2042
   2043	/* Turn on TDMA */
   2044	ret = tdma_enable_set(priv, 1);
   2045	if (ret)
   2046		goto out_clear_rx_int;
   2047
   2048	/* Turn on UniMAC TX/RX */
   2049	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);
   2050
   2051	bcm_sysport_netif_start(dev);
   2052
   2053	netif_tx_start_all_queues(dev);
   2054
   2055	return 0;
   2056
   2057out_clear_rx_int:
   2058	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
   2059out_free_rx_ring:
   2060	bcm_sysport_fini_rx_ring(priv);
   2061out_free_tx_ring:
   2062	for (i = 0; i < dev->num_tx_queues; i++)
   2063		bcm_sysport_fini_tx_ring(priv, i);
   2064	if (!priv->is_lite)
   2065		free_irq(priv->irq1, dev);
   2066out_free_irq0:
   2067	free_irq(priv->irq0, dev);
   2068out_phy_disconnect:
   2069	phy_disconnect(phydev);
   2070out_clk_disable:
   2071	clk_disable_unprepare(priv->clk);
   2072	return ret;
   2073}
   2074
   2075static void bcm_sysport_netif_stop(struct net_device *dev)
   2076{
   2077	struct bcm_sysport_priv *priv = netdev_priv(dev);
   2078
   2079	/* stop all software from updating hardware */
   2080	netif_tx_disable(dev);
   2081	napi_disable(&priv->napi);
   2082	cancel_work_sync(&priv->dim.dim.work);
   2083	phy_stop(dev->phydev);
   2084
   2085	/* mask all interrupts */
   2086	bcm_sysport_mask_all_intrs(priv);
   2087}
   2088
   2089static int bcm_sysport_stop(struct net_device *dev)
   2090{
   2091	struct bcm_sysport_priv *priv = netdev_priv(dev);
   2092	unsigned int i;
   2093	int ret;
   2094
   2095	bcm_sysport_netif_stop(dev);
   2096
   2097	/* Disable UniMAC RX */
   2098	umac_enable_set(priv, CMD_RX_EN, 0);
   2099
   2100	ret = tdma_enable_set(priv, 0);
   2101	if (ret) {
    2102		netdev_err(dev, "timeout disabling TDMA\n");
   2103		return ret;
   2104	}
   2105
   2106	/* Wait for a maximum packet size to be drained */
   2107	usleep_range(2000, 3000);
   2108
   2109	ret = rdma_enable_set(priv, 0);
   2110	if (ret) {
    2111		netdev_err(dev, "timeout disabling RDMA\n");
   2112		return ret;
   2113	}
   2114
   2115	/* Disable UniMAC TX */
   2116	umac_enable_set(priv, CMD_TX_EN, 0);
   2117
   2118	/* Free RX/TX rings SW structures */
   2119	for (i = 0; i < dev->num_tx_queues; i++)
   2120		bcm_sysport_fini_tx_ring(priv, i);
   2121	bcm_sysport_fini_rx_ring(priv);
   2122
   2123	free_irq(priv->irq0, dev);
   2124	if (!priv->is_lite)
   2125		free_irq(priv->irq1, dev);
   2126
   2127	/* Disconnect from PHY */
   2128	phy_disconnect(dev->phydev);
   2129
   2130	clk_disable_unprepare(priv->clk);
   2131
   2132	return 0;
   2133}
   2134
   2135static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
   2136				 u64 location)
   2137{
   2138	unsigned int index;
   2139	u32 reg;
   2140
   2141	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
   2142		reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
   2143		reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
   2144		reg &= RXCHK_BRCM_TAG_CID_MASK;
   2145		if (reg == location)
   2146			return index;
   2147	}
   2148
   2149	return -EINVAL;
   2150}
   2151
   2152static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
   2153				struct ethtool_rxnfc *nfc)
   2154{
   2155	int index;
   2156
   2157	/* This is not a rule that we know about */
   2158	index = bcm_sysport_rule_find(priv, nfc->fs.location);
   2159	if (index < 0)
   2160		return -EOPNOTSUPP;
   2161
   2162	nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;
   2163
   2164	return 0;
   2165}
   2166
   2167static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
   2168				struct ethtool_rxnfc *nfc)
   2169{
   2170	unsigned int index;
   2171	u32 reg;
   2172
   2173	/* We cannot match locations greater than what the classification ID
   2174	 * permits (256 entries)
   2175	 */
   2176	if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
   2177		return -E2BIG;
   2178
   2179	/* We cannot support flows that are not destined for a wake-up */
   2180	if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
   2181		return -EOPNOTSUPP;
   2182
   2183	index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
   2184	if (index >= RXCHK_BRCM_TAG_MAX)
   2185		/* All filters are already in use, we cannot match more rules */
   2186		return -ENOSPC;
   2187
    2188	/* Location is the classification ID, and index is the position of
    2189	 * the filter slot (out of 8 possible) being programmed
    2190	 */
   2191	reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
   2192	reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
   2193	reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
   2194	rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
   2195	rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
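        	/* RXCHK_BRCM_TAG(index) now carries the wanted classification ID
        	 * in its CID field; RXCHK_BRCM_TAG_MASK(index) presumably selects
        	 * which bits of the received Broadcom tag must match, with
        	 * 0xff00ffff leaving one byte of the tag as don't care.
        	 */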
   2196
   2197	priv->filters_loc[index] = nfc->fs.location;
   2198	set_bit(index, priv->filters);
   2199
   2200	return 0;
   2201}
   2202
   2203static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
   2204				u64 location)
   2205{
   2206	int index;
   2207
   2208	/* This is not a rule that we know about */
   2209	index = bcm_sysport_rule_find(priv, location);
   2210	if (index < 0)
   2211		return -EOPNOTSUPP;
   2212
    2213	/* No need to disable this filter if it was enabled; this will be
    2214	 * taken care of at suspend time by bcm_sysport_suspend_to_wol()
    2215	 */
   2216	clear_bit(index, priv->filters);
   2217	priv->filters_loc[index] = 0;
   2218
   2219	return 0;
   2220}
   2221
   2222static int bcm_sysport_get_rxnfc(struct net_device *dev,
   2223				 struct ethtool_rxnfc *nfc, u32 *rule_locs)
   2224{
   2225	struct bcm_sysport_priv *priv = netdev_priv(dev);
   2226	int ret = -EOPNOTSUPP;
   2227
   2228	switch (nfc->cmd) {
   2229	case ETHTOOL_GRXCLSRULE:
   2230		ret = bcm_sysport_rule_get(priv, nfc);
   2231		break;
   2232	default:
   2233		break;
   2234	}
   2235
   2236	return ret;
   2237}
   2238
   2239static int bcm_sysport_set_rxnfc(struct net_device *dev,
   2240				 struct ethtool_rxnfc *nfc)
   2241{
   2242	struct bcm_sysport_priv *priv = netdev_priv(dev);
   2243	int ret = -EOPNOTSUPP;
   2244
   2245	switch (nfc->cmd) {
   2246	case ETHTOOL_SRXCLSRLINS:
   2247		ret = bcm_sysport_rule_set(priv, nfc);
   2248		break;
   2249	case ETHTOOL_SRXCLSRLDEL:
   2250		ret = bcm_sysport_rule_del(priv, nfc->fs.location);
   2251		break;
   2252	default:
   2253		break;
   2254	}
   2255
   2256	return ret;
   2257}
   2258
   2259static const struct ethtool_ops bcm_sysport_ethtool_ops = {
   2260	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
   2261				     ETHTOOL_COALESCE_MAX_FRAMES |
   2262				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
   2263	.get_drvinfo		= bcm_sysport_get_drvinfo,
   2264	.get_msglevel		= bcm_sysport_get_msglvl,
   2265	.set_msglevel		= bcm_sysport_set_msglvl,
   2266	.get_link		= ethtool_op_get_link,
   2267	.get_strings		= bcm_sysport_get_strings,
   2268	.get_ethtool_stats	= bcm_sysport_get_stats,
   2269	.get_sset_count		= bcm_sysport_get_sset_count,
   2270	.get_wol		= bcm_sysport_get_wol,
   2271	.set_wol		= bcm_sysport_set_wol,
   2272	.get_coalesce		= bcm_sysport_get_coalesce,
   2273	.set_coalesce		= bcm_sysport_set_coalesce,
   2274	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
   2275	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
   2276	.get_rxnfc		= bcm_sysport_get_rxnfc,
   2277	.set_rxnfc		= bcm_sysport_set_rxnfc,
   2278};
   2279
   2280static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
   2281				    struct net_device *sb_dev)
   2282{
   2283	struct bcm_sysport_priv *priv = netdev_priv(dev);
   2284	u16 queue = skb_get_queue_mapping(skb);
   2285	struct bcm_sysport_tx_ring *tx_ring;
   2286	unsigned int q, port;
   2287
   2288	if (!netdev_uses_dsa(dev))
   2289		return netdev_pick_tx(dev, skb, NULL);
   2290
   2291	/* DSA tagging layer will have configured the correct queue */
   2292	q = BRCM_TAG_GET_QUEUE(queue);
   2293	port = BRCM_TAG_GET_PORT(queue);
   2294	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
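        	/* Worked example: with 4 TX queues per switch port, a frame
        	 * tagged for switch port 1, queue 2 looks up
        	 * ring_map[2 + 1 * 4] = ring_map[6], i.e. the SYSTEMPORT ring
        	 * that bcm_sysport_map_queues() assigned to that (port, queue)
        	 * pair.
        	 */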
   2295
   2296	if (unlikely(!tx_ring))
   2297		return netdev_pick_tx(dev, skb, NULL);
   2298
   2299	return tx_ring->index;
   2300}
   2301
   2302static const struct net_device_ops bcm_sysport_netdev_ops = {
   2303	.ndo_start_xmit		= bcm_sysport_xmit,
   2304	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
   2305	.ndo_open		= bcm_sysport_open,
   2306	.ndo_stop		= bcm_sysport_stop,
   2307	.ndo_set_features	= bcm_sysport_set_features,
   2308	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
   2309	.ndo_set_mac_address	= bcm_sysport_change_mac,
   2310#ifdef CONFIG_NET_POLL_CONTROLLER
   2311	.ndo_poll_controller	= bcm_sysport_poll_controller,
   2312#endif
   2313	.ndo_get_stats64	= bcm_sysport_get_stats64,
   2314	.ndo_select_queue	= bcm_sysport_select_queue,
   2315};
   2316
   2317static int bcm_sysport_map_queues(struct net_device *dev,
   2318				  struct net_device *slave_dev)
   2319{
   2320	struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
   2321	struct bcm_sysport_priv *priv = netdev_priv(dev);
   2322	struct bcm_sysport_tx_ring *ring;
   2323	unsigned int num_tx_queues;
   2324	unsigned int q, qp, port;
   2325
    2326	/* We cannot set up queue inspection for switches that are not
    2327	 * directly attached
    2328	 */
   2329	if (dp->ds->index)
   2330		return 0;
   2331
   2332	port = dp->index;
   2333
    2334	/* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
    2335	 * 1:1 mapping and can only do a 2:1 mapping; halving the number of
    2336	 * queues of the per-port (slave_dev) network device achieves that.
    2337	 * This needs to happen now, before any slave network device is used,
    2338	 * so that it accurately reflects the number of real TX queues.
    2339	 */
   2340	if (priv->is_lite)
   2341		netif_set_real_num_tx_queues(slave_dev,
   2342					     slave_dev->num_tx_queues / 2);
   2343
   2344	num_tx_queues = slave_dev->real_num_tx_queues;
   2345
   2346	if (priv->per_port_num_tx_queues &&
   2347	    priv->per_port_num_tx_queues != num_tx_queues)
   2348		netdev_warn(slave_dev, "asymmetric number of per-port queues\n");
   2349
   2350	priv->per_port_num_tx_queues = num_tx_queues;
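        	/* ring_map is a flat table indexed by qp + port * num_tx_queues:
        	 * the loop below hands the not-yet-inspected SYSTEMPORT rings to
        	 * switch queues 0..num_tx_queues - 1 of this port, and
        	 * bcm_sysport_select_queue() performs the same index computation
        	 * on the transmit path.
        	 */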
   2351
   2352	for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
   2353	     q++) {
   2354		ring = &priv->tx_rings[q];
   2355
   2356		if (ring->inspect)
   2357			continue;
   2358
    2359		/* Just remember the mapping; the actual programming is done
    2360		 * during bcm_sysport_init_tx_ring()
    2361		 */
   2362		ring->switch_queue = qp;
   2363		ring->switch_port = port;
   2364		ring->inspect = true;
   2365		priv->ring_map[qp + port * num_tx_queues] = ring;
   2366		qp++;
   2367	}
   2368
   2369	return 0;
   2370}
   2371
   2372static int bcm_sysport_unmap_queues(struct net_device *dev,
   2373				    struct net_device *slave_dev)
   2374{
   2375	struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
   2376	struct bcm_sysport_priv *priv = netdev_priv(dev);
   2377	struct bcm_sysport_tx_ring *ring;
   2378	unsigned int num_tx_queues;
   2379	unsigned int q, qp, port;
   2380
   2381	port = dp->index;
   2382
   2383	num_tx_queues = slave_dev->real_num_tx_queues;
   2384
   2385	for (q = 0; q < dev->num_tx_queues; q++) {
   2386		ring = &priv->tx_rings[q];
   2387
   2388		if (ring->switch_port != port)
   2389			continue;
   2390
   2391		if (!ring->inspect)
   2392			continue;
   2393
   2394		ring->inspect = false;
   2395		qp = ring->switch_queue;
   2396		priv->ring_map[qp + port * num_tx_queues] = NULL;
   2397	}
   2398
   2399	return 0;
   2400}
   2401
   2402static int bcm_sysport_netdevice_event(struct notifier_block *nb,
   2403				       unsigned long event, void *ptr)
   2404{
   2405	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
   2406	struct netdev_notifier_changeupper_info *info = ptr;
   2407	struct bcm_sysport_priv *priv;
   2408	int ret = 0;
   2409
   2410	priv = container_of(nb, struct bcm_sysport_priv, netdev_notifier);
   2411	if (priv->netdev != dev)
   2412		return NOTIFY_DONE;
   2413
   2414	switch (event) {
   2415	case NETDEV_CHANGEUPPER:
   2416		if (dev->netdev_ops != &bcm_sysport_netdev_ops)
   2417			return NOTIFY_DONE;
   2418
   2419		if (!dsa_slave_dev_check(info->upper_dev))
   2420			return NOTIFY_DONE;
   2421
   2422		if (info->linking)
   2423			ret = bcm_sysport_map_queues(dev, info->upper_dev);
   2424		else
   2425			ret = bcm_sysport_unmap_queues(dev, info->upper_dev);
   2426		break;
   2427	}
   2428
   2429	return notifier_from_errno(ret);
   2430}
   2431
   2432#define REV_FMT	"v%2x.%02x"
   2433
   2434static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
   2435	[SYSTEMPORT] = {
   2436		.is_lite = false,
   2437		.num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
   2438	},
   2439	[SYSTEMPORT_LITE] = {
   2440		.is_lite = true,
   2441		.num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
   2442	},
   2443};
   2444
   2445static const struct of_device_id bcm_sysport_of_match[] = {
   2446	{ .compatible = "brcm,systemportlite-v1.00",
   2447	  .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
   2448	{ .compatible = "brcm,systemport-v1.00",
   2449	  .data = &bcm_sysport_params[SYSTEMPORT] },
   2450	{ .compatible = "brcm,systemport",
   2451	  .data = &bcm_sysport_params[SYSTEMPORT] },
   2452	{ /* sentinel */ }
   2453};
   2454MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
   2455
   2456static int bcm_sysport_probe(struct platform_device *pdev)
   2457{
   2458	const struct bcm_sysport_hw_params *params;
   2459	const struct of_device_id *of_id = NULL;
   2460	struct bcm_sysport_priv *priv;
   2461	struct device_node *dn;
   2462	struct net_device *dev;
   2463	u32 txq, rxq;
   2464	int ret;
   2465
   2466	dn = pdev->dev.of_node;
   2467	of_id = of_match_node(bcm_sysport_of_match, dn);
   2468	if (!of_id || !of_id->data)
   2469		return -EINVAL;
   2470
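        	/* Prefer a 40-bit DMA mask so buffers above 4 GB can be used,
        	 * and fall back to 32-bit addressing only if the platform cannot
        	 * provide it.
        	 */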
   2471	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
   2472	if (ret)
   2473		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
   2474	if (ret) {
   2475		dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret);
   2476		return ret;
   2477	}
   2478
    2479	/* We need to know the type of adapter we have early on */
   2480	params = of_id->data;
   2481
   2482	/* Read the Transmit/Receive Queue properties */
   2483	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
   2484		txq = TDMA_NUM_RINGS;
   2485	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
   2486		rxq = 1;
   2487
   2488	/* Sanity check the number of transmit queues */
   2489	if (!txq || txq > TDMA_NUM_RINGS)
   2490		return -EINVAL;
   2491
   2492	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
   2493	if (!dev)
   2494		return -ENOMEM;
   2495
   2496	/* Initialize private members */
   2497	priv = netdev_priv(dev);
   2498
   2499	priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
   2500	if (IS_ERR(priv->clk)) {
   2501		ret = PTR_ERR(priv->clk);
   2502		goto err_free_netdev;
   2503	}
   2504
   2505	/* Allocate number of TX rings */
   2506	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
   2507				      sizeof(struct bcm_sysport_tx_ring),
   2508				      GFP_KERNEL);
   2509	if (!priv->tx_rings) {
   2510		ret = -ENOMEM;
   2511		goto err_free_netdev;
   2512	}
   2513
   2514	priv->is_lite = params->is_lite;
   2515	priv->num_rx_desc_words = params->num_rx_desc_words;
   2516
   2517	priv->irq0 = platform_get_irq(pdev, 0);
   2518	if (!priv->is_lite) {
   2519		priv->irq1 = platform_get_irq(pdev, 1);
   2520		priv->wol_irq = platform_get_irq(pdev, 2);
   2521	} else {
   2522		priv->wol_irq = platform_get_irq(pdev, 1);
   2523	}
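        	/* Full SYSTEMPORT exposes separate RX (irq0), TX (irq1) and
        	 * wake-on-LAN interrupt lines; SYSTEMPORT Lite signals TX
        	 * completion through INTRL2_0 as well (see
        	 * bcm_sysport_netif_start()), so only the RX and WoL lines exist
        	 * there.
        	 */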
   2524	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
   2525		ret = -EINVAL;
   2526		goto err_free_netdev;
   2527	}
   2528
   2529	priv->base = devm_platform_ioremap_resource(pdev, 0);
   2530	if (IS_ERR(priv->base)) {
   2531		ret = PTR_ERR(priv->base);
   2532		goto err_free_netdev;
   2533	}
   2534
   2535	priv->netdev = dev;
   2536	priv->pdev = pdev;
   2537
   2538	ret = of_get_phy_mode(dn, &priv->phy_interface);
   2539	/* Default to GMII interface mode */
   2540	if (ret)
   2541		priv->phy_interface = PHY_INTERFACE_MODE_GMII;
   2542
   2543	/* In the case of a fixed PHY, the DT node associated
    2544	 * with the PHY is the Ethernet MAC DT node.
   2545	 */
   2546	if (of_phy_is_fixed_link(dn)) {
   2547		ret = of_phy_register_fixed_link(dn);
   2548		if (ret) {
   2549			dev_err(&pdev->dev, "failed to register fixed PHY\n");
   2550			goto err_free_netdev;
   2551		}
   2552
   2553		priv->phy_dn = dn;
   2554	}
   2555
   2556	/* Initialize netdevice members */
   2557	ret = of_get_ethdev_address(dn, dev);
   2558	if (ret) {
   2559		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
   2560		eth_hw_addr_random(dev);
   2561	}
   2562
   2563	SET_NETDEV_DEV(dev, &pdev->dev);
   2564	dev_set_drvdata(&pdev->dev, dev);
   2565	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
   2566	dev->netdev_ops = &bcm_sysport_netdev_ops;
   2567	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
   2568
   2569	dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
   2570			 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
   2571			 NETIF_F_HW_VLAN_CTAG_TX;
   2572	dev->hw_features |= dev->features;
   2573	dev->vlan_features |= dev->features;
   2574	dev->max_mtu = UMAC_MAX_MTU_SIZE;
   2575
   2576	/* Request the WOL interrupt and advertise suspend if available */
   2577	priv->wol_irq_disabled = 1;
   2578	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
   2579			       bcm_sysport_wol_isr, 0, dev->name, priv);
   2580	if (!ret)
   2581		device_set_wakeup_capable(&pdev->dev, 1);
   2582
   2583	priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
   2584	if (IS_ERR(priv->wol_clk)) {
   2585		ret = PTR_ERR(priv->wol_clk);
   2586		goto err_deregister_fixed_link;
   2587	}
   2588
   2589	/* Set the needed headroom once and for all */
   2590	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
   2591	dev->needed_headroom += sizeof(struct bcm_tsb);
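        	/* Every transmitted frame gets the 8-byte TSB prepended (hence
        	 * the BUILD_BUG_ON above); reserving the headroom here spares the
        	 * hot TX path from having to expand the skb headroom later.
        	 */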
   2592
   2593	/* libphy will adjust the link state accordingly */
   2594	netif_carrier_off(dev);
   2595
   2596	priv->rx_max_coalesced_frames = 1;
   2597	u64_stats_init(&priv->syncp);
   2598
   2599	priv->netdev_notifier.notifier_call = bcm_sysport_netdevice_event;
   2600
   2601	ret = register_netdevice_notifier(&priv->netdev_notifier);
   2602	if (ret) {
   2603		dev_err(&pdev->dev, "failed to register DSA notifier\n");
   2604		goto err_deregister_fixed_link;
   2605	}
   2606
   2607	ret = register_netdev(dev);
   2608	if (ret) {
   2609		dev_err(&pdev->dev, "failed to register net_device\n");
   2610		goto err_deregister_notifier;
   2611	}
   2612
   2613	clk_prepare_enable(priv->clk);
   2614
   2615	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
   2616	dev_info(&pdev->dev,
   2617		 "Broadcom SYSTEMPORT%s " REV_FMT
   2618		 " (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
   2619		 priv->is_lite ? " Lite" : "",
   2620		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
   2621		 priv->irq0, priv->irq1, txq, rxq);
   2622
   2623	clk_disable_unprepare(priv->clk);
   2624
   2625	return 0;
   2626
   2627err_deregister_notifier:
   2628	unregister_netdevice_notifier(&priv->netdev_notifier);
   2629err_deregister_fixed_link:
   2630	if (of_phy_is_fixed_link(dn))
   2631		of_phy_deregister_fixed_link(dn);
   2632err_free_netdev:
   2633	free_netdev(dev);
   2634	return ret;
   2635}
   2636
   2637static int bcm_sysport_remove(struct platform_device *pdev)
   2638{
   2639	struct net_device *dev = dev_get_drvdata(&pdev->dev);
   2640	struct bcm_sysport_priv *priv = netdev_priv(dev);
   2641	struct device_node *dn = pdev->dev.of_node;
   2642
   2643	/* Not much to do, ndo_close has been called
   2644	 * and we use managed allocations
   2645	 */
   2646	unregister_netdevice_notifier(&priv->netdev_notifier);
   2647	unregister_netdev(dev);
   2648	if (of_phy_is_fixed_link(dn))
   2649		of_phy_deregister_fixed_link(dn);
   2650	free_netdev(dev);
   2651	dev_set_drvdata(&pdev->dev, NULL);
   2652
   2653	return 0;
   2654}
   2655
   2656static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
   2657{
   2658	struct net_device *ndev = priv->netdev;
   2659	unsigned int timeout = 1000;
   2660	unsigned int index, i = 0;
   2661	u32 reg;
   2662
   2663	reg = umac_readl(priv, UMAC_MPD_CTRL);
   2664	if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
   2665		reg |= MPD_EN;
   2666	reg &= ~PSW_EN;
   2667	if (priv->wolopts & WAKE_MAGICSECURE) {
   2668		/* Program the SecureOn password */
   2669		umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
   2670			    UMAC_PSW_MS);
   2671		umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
   2672			    UMAC_PSW_LS);
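        		/* The 6-byte SecureOn password is split big-endian:
        		 * bytes 0-1 go into UMAC_PSW_MS and bytes 2-5 into
        		 * UMAC_PSW_LS.
        		 */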
   2673		reg |= PSW_EN;
   2674	}
   2675	umac_writel(priv, reg, UMAC_MPD_CTRL);
   2676
   2677	if (priv->wolopts & WAKE_FILTER) {
   2678		/* Turn on ACPI matching to steal packets from RBUF */
   2679		reg = rbuf_readl(priv, RBUF_CONTROL);
   2680		if (priv->is_lite)
   2681			reg |= RBUF_ACPI_EN_LITE;
   2682		else
   2683			reg |= RBUF_ACPI_EN;
   2684		rbuf_writel(priv, reg, RBUF_CONTROL);
   2685
   2686		/* Enable RXCHK, active filters and Broadcom tag matching */
   2687		reg = rxchk_readl(priv, RXCHK_CONTROL);
   2688		reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
   2689			 RXCHK_BRCM_TAG_MATCH_SHIFT);
   2690		for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
   2691			reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i);
   2692			i++;
   2693		}
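        		/* Note that the match-enable bits are assigned by
        		 * iteration count (i), i.e. the first N bit positions are
        		 * set for N programmed filters, not the positions of the
        		 * filter indices themselves.
        		 */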
   2694		reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN;
   2695		rxchk_writel(priv, reg, RXCHK_CONTROL);
   2696	}
   2697
    2698	/* Make sure RBUF entered WoL mode as a result */
   2699	do {
   2700		reg = rbuf_readl(priv, RBUF_STATUS);
   2701		if (reg & RBUF_WOL_MODE)
   2702			break;
   2703
   2704		udelay(10);
   2705	} while (timeout-- > 0);
   2706
    2707	/* On timeout, do not leave the UniMAC RBUF matching only MPD packets */
   2708	if (!timeout) {
   2709		mpd_enable_set(priv, false);
   2710		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
   2711		return -ETIMEDOUT;
   2712	}
   2713
   2714	/* UniMAC receive needs to be turned on */
   2715	umac_enable_set(priv, CMD_RX_EN, 1);
   2716
   2717	netif_dbg(priv, wol, ndev, "entered WOL mode\n");
   2718
   2719	return 0;
   2720}
   2721
   2722static int __maybe_unused bcm_sysport_suspend(struct device *d)
   2723{
   2724	struct net_device *dev = dev_get_drvdata(d);
   2725	struct bcm_sysport_priv *priv = netdev_priv(dev);
   2726	unsigned int i;
   2727	int ret = 0;
   2728	u32 reg;
   2729
   2730	if (!netif_running(dev))
   2731		return 0;
   2732
   2733	netif_device_detach(dev);
   2734
   2735	bcm_sysport_netif_stop(dev);
   2736
   2737	phy_suspend(dev->phydev);
   2738
   2739	/* Disable UniMAC RX */
   2740	umac_enable_set(priv, CMD_RX_EN, 0);
   2741
   2742	ret = rdma_enable_set(priv, 0);
   2743	if (ret) {
   2744		netdev_err(dev, "RDMA timeout!\n");
   2745		return ret;
   2746	}
   2747
   2748	/* Disable RXCHK if enabled */
   2749	if (priv->rx_chk_en) {
   2750		reg = rxchk_readl(priv, RXCHK_CONTROL);
   2751		reg &= ~RXCHK_EN;
   2752		rxchk_writel(priv, reg, RXCHK_CONTROL);
   2753	}
   2754
   2755	/* Flush RX pipe */
   2756	if (!priv->wolopts)
   2757		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
   2758
   2759	ret = tdma_enable_set(priv, 0);
   2760	if (ret) {
   2761		netdev_err(dev, "TDMA timeout!\n");
   2762		return ret;
   2763	}
   2764
   2765	/* Wait for a packet boundary */
   2766	usleep_range(2000, 3000);
   2767
   2768	umac_enable_set(priv, CMD_TX_EN, 0);
   2769
   2770	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
   2771
   2772	/* Free RX/TX rings SW structures */
   2773	for (i = 0; i < dev->num_tx_queues; i++)
   2774		bcm_sysport_fini_tx_ring(priv, i);
   2775	bcm_sysport_fini_rx_ring(priv);
   2776
   2777	/* Get prepared for Wake-on-LAN */
   2778	if (device_may_wakeup(d) && priv->wolopts) {
   2779		clk_prepare_enable(priv->wol_clk);
   2780		ret = bcm_sysport_suspend_to_wol(priv);
   2781	}
   2782
   2783	clk_disable_unprepare(priv->clk);
   2784
   2785	return ret;
   2786}
   2787
   2788static int __maybe_unused bcm_sysport_resume(struct device *d)
   2789{
   2790	struct net_device *dev = dev_get_drvdata(d);
   2791	struct bcm_sysport_priv *priv = netdev_priv(dev);
   2792	unsigned int i;
   2793	int ret;
   2794
   2795	if (!netif_running(dev))
   2796		return 0;
   2797
   2798	clk_prepare_enable(priv->clk);
   2799	if (priv->wolopts)
   2800		clk_disable_unprepare(priv->wol_clk);
   2801
   2802	umac_reset(priv);
   2803
   2804	/* Disable the UniMAC RX/TX */
   2805	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
   2806
   2807	/* We may have been suspended and never received a WOL event that
    2808	 * would turn off MPD detection; take care of that now
   2809	 */
   2810	bcm_sysport_resume_from_wol(priv);
   2811
   2812	/* Initialize both hardware and software ring */
   2813	for (i = 0; i < dev->num_tx_queues; i++) {
   2814		ret = bcm_sysport_init_tx_ring(priv, i);
   2815		if (ret) {
   2816			netdev_err(dev, "failed to initialize TX ring %d\n",
   2817				   i);
   2818			goto out_free_tx_rings;
   2819		}
   2820	}
   2821
   2822	/* Initialize linked-list */
   2823	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
   2824
   2825	/* Initialize RX ring */
   2826	ret = bcm_sysport_init_rx_ring(priv);
   2827	if (ret) {
   2828		netdev_err(dev, "failed to initialize RX ring\n");
   2829		goto out_free_rx_ring;
   2830	}
   2831
   2832	/* RX pipe enable */
   2833	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
   2834
   2835	ret = rdma_enable_set(priv, 1);
   2836	if (ret) {
   2837		netdev_err(dev, "failed to enable RDMA\n");
   2838		goto out_free_rx_ring;
   2839	}
   2840
   2841	/* Restore enabled features */
   2842	bcm_sysport_set_features(dev, dev->features);
   2843
   2844	rbuf_init(priv);
   2845
   2846	/* Set maximum frame length */
   2847	if (!priv->is_lite)
   2848		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
   2849	else
   2850		gib_set_pad_extension(priv);
   2851
   2852	/* Set MAC address */
   2853	umac_set_hw_addr(priv, dev->dev_addr);
   2854
   2855	umac_enable_set(priv, CMD_RX_EN, 1);
   2856
   2857	/* TX pipe enable */
   2858	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
   2859
   2860	umac_enable_set(priv, CMD_TX_EN, 1);
   2861
   2862	ret = tdma_enable_set(priv, 1);
   2863	if (ret) {
   2864		netdev_err(dev, "TDMA timeout!\n");
   2865		goto out_free_rx_ring;
   2866	}
   2867
   2868	phy_resume(dev->phydev);
   2869
   2870	bcm_sysport_netif_start(dev);
   2871
   2872	netif_device_attach(dev);
   2873
   2874	return 0;
   2875
   2876out_free_rx_ring:
   2877	bcm_sysport_fini_rx_ring(priv);
   2878out_free_tx_rings:
   2879	for (i = 0; i < dev->num_tx_queues; i++)
   2880		bcm_sysport_fini_tx_ring(priv, i);
   2881	clk_disable_unprepare(priv->clk);
   2882	return ret;
   2883}
   2884
   2885static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
   2886		bcm_sysport_suspend, bcm_sysport_resume);
   2887
   2888static struct platform_driver bcm_sysport_driver = {
   2889	.probe	= bcm_sysport_probe,
   2890	.remove	= bcm_sysport_remove,
   2891	.driver =  {
   2892		.name = "brcm-systemport",
   2893		.of_match_table = bcm_sysport_of_match,
   2894		.pm = &bcm_sysport_pm_ops,
   2895	},
   2896};
   2897module_platform_driver(bcm_sysport_driver);
   2898
   2899MODULE_AUTHOR("Broadcom Corporation");
   2900MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
   2901MODULE_ALIAS("platform:brcm-systemport");
   2902MODULE_LICENSE("GPL");