cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bcmgenet.c (117371B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Broadcom GENET (Gigabit Ethernet) controller driver
      4 *
      5 * Copyright (c) 2014-2020 Broadcom
      6 */
      7
      8#define pr_fmt(fmt)				"bcmgenet: " fmt
      9
     10#include <linux/acpi.h>
     11#include <linux/kernel.h>
     12#include <linux/module.h>
     13#include <linux/sched.h>
     14#include <linux/types.h>
     15#include <linux/fcntl.h>
     16#include <linux/interrupt.h>
     17#include <linux/string.h>
     18#include <linux/if_ether.h>
     19#include <linux/init.h>
     20#include <linux/errno.h>
     21#include <linux/delay.h>
     22#include <linux/platform_device.h>
     23#include <linux/dma-mapping.h>
     24#include <linux/pm.h>
     25#include <linux/clk.h>
     26#include <net/arp.h>
     27
     28#include <linux/mii.h>
     29#include <linux/ethtool.h>
     30#include <linux/netdevice.h>
     31#include <linux/inetdevice.h>
     32#include <linux/etherdevice.h>
     33#include <linux/skbuff.h>
     34#include <linux/in.h>
     35#include <linux/ip.h>
     36#include <linux/ipv6.h>
     37#include <linux/phy.h>
     38#include <linux/platform_data/bcmgenet.h>
     39
     40#include <asm/unaligned.h>
     41
     42#include "bcmgenet.h"
     43
     44/* Maximum number of hardware queues, downsized if needed */
     45#define GENET_MAX_MQ_CNT	4
     46
     47/* Default highest priority queue for multi queue support */
     48#define GENET_Q0_PRIORITY	0
     49
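       /* The default ring (queue 16) uses whatever descriptors are left over
        * once the hardware priority queues have claimed theirs.
        */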
     50#define GENET_Q16_RX_BD_CNT	\
     51	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
     52#define GENET_Q16_TX_BD_CNT	\
     53	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
     54
     55#define RX_BUF_LENGTH		2048
     56#define SKB_ALIGNMENT		32
     57
     58/* Tx/Rx DMA register offset, skip 256 descriptors */
     59#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
     60#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))
     61
     62#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
     63				TOTAL_DESC * DMA_DESC_SIZE)
     64
     65#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
     66				TOTAL_DESC * DMA_DESC_SIZE)
     67
     68/* Forward declarations */
     69static void bcmgenet_set_rx_mode(struct net_device *dev);
     70
     71static inline void bcmgenet_writel(u32 value, void __iomem *offset)
     72{
     73	/* MIPS chips strapped for BE will automagically configure the
     74	 * peripheral registers for CPU-native byte order.
     75	 */
     76	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
     77		__raw_writel(value, offset);
     78	else
     79		writel_relaxed(value, offset);
     80}
     81
     82static inline u32 bcmgenet_readl(void __iomem *offset)
     83{
     84	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
     85		return __raw_readl(offset);
     86	else
     87		return readl_relaxed(offset);
     88}
     89
     90static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
     91					     void __iomem *d, u32 value)
     92{
     93	bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
     94}
     95
     96static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
     97				    void __iomem *d,
     98				    dma_addr_t addr)
     99{
    100	bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);
    101
    102	/* Register writes to the GISB bus can take a couple hundred nanoseconds
    103	 * and are done for each packet; save these expensive writes unless
    104	 * the platform is explicitly configured for 64-bit/LPAE.
    105	 */
    106#ifdef CONFIG_PHYS_ADDR_T_64BIT
    107	if (priv->hw_params->flags & GENET_HAS_40BITS)
    108		bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
    109#endif
    110}
    111
    112/* Combined address + length/status setter */
    113static inline void dmadesc_set(struct bcmgenet_priv *priv,
    114			       void __iomem *d, dma_addr_t addr, u32 val)
    115{
    116	dmadesc_set_addr(priv, d, addr);
    117	dmadesc_set_length_status(priv, d, val);
    118}
    119
    120static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
    121					  void __iomem *d)
    122{
    123	dma_addr_t addr;
    124
    125	addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO);
    126
    127	/* Register reads from the GISB bus can take a couple hundred nanoseconds
    128	 * and are done for each packet; skip this expensive read unless
    129	 * the platform is explicitly configured for 64-bit/LPAE.
    130	 */
    131#ifdef CONFIG_PHYS_ADDR_T_64BIT
    132	if (priv->hw_params->flags & GENET_HAS_40BITS)
    133		addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32;
    134#endif
    135	return addr;
    136}
    137
    138#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"
    139
    140#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
    141				NETIF_MSG_LINK)
    142
    143static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
    144{
    145	if (GENET_IS_V1(priv))
    146		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
    147	else
    148		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
    149}
    150
    151static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
    152{
    153	if (GENET_IS_V1(priv))
    154		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
    155	else
    156		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
    157}
    158
    159/* These helpers deal with the register map change between GENET 1.1
    160 * and GENET2. Only those currently being used by the driver are
    161 * defined.
    162 */
    163static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
    164{
    165	if (GENET_IS_V1(priv))
    166		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
    167	else
    168		return bcmgenet_readl(priv->base +
    169				      priv->hw_params->tbuf_offset + TBUF_CTRL);
    170}
    171
    172static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
    173{
    174	if (GENET_IS_V1(priv))
    175		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
    176	else
    177		bcmgenet_writel(val, priv->base +
    178				priv->hw_params->tbuf_offset + TBUF_CTRL);
    179}
    180
    181static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
    182{
    183	if (GENET_IS_V1(priv))
    184		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
    185	else
    186		return bcmgenet_readl(priv->base +
    187				      priv->hw_params->tbuf_offset + TBUF_BP_MC);
    188}
    189
    190static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
    191{
    192	if (GENET_IS_V1(priv))
    193		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
    194	else
    195		bcmgenet_writel(val, priv->base +
    196				priv->hw_params->tbuf_offset + TBUF_BP_MC);
    197}
    198
    199/* RX/TX DMA register accessors */
    200enum dma_reg {
    201	DMA_RING_CFG = 0,
    202	DMA_CTRL,
    203	DMA_STATUS,
    204	DMA_SCB_BURST_SIZE,
    205	DMA_ARB_CTRL,
    206	DMA_PRIORITY_0,
    207	DMA_PRIORITY_1,
    208	DMA_PRIORITY_2,
    209	DMA_INDEX2RING_0,
    210	DMA_INDEX2RING_1,
    211	DMA_INDEX2RING_2,
    212	DMA_INDEX2RING_3,
    213	DMA_INDEX2RING_4,
    214	DMA_INDEX2RING_5,
    215	DMA_INDEX2RING_6,
    216	DMA_INDEX2RING_7,
    217	DMA_RING0_TIMEOUT,
    218	DMA_RING1_TIMEOUT,
    219	DMA_RING2_TIMEOUT,
    220	DMA_RING3_TIMEOUT,
    221	DMA_RING4_TIMEOUT,
    222	DMA_RING5_TIMEOUT,
    223	DMA_RING6_TIMEOUT,
    224	DMA_RING7_TIMEOUT,
    225	DMA_RING8_TIMEOUT,
    226	DMA_RING9_TIMEOUT,
    227	DMA_RING10_TIMEOUT,
    228	DMA_RING11_TIMEOUT,
    229	DMA_RING12_TIMEOUT,
    230	DMA_RING13_TIMEOUT,
    231	DMA_RING14_TIMEOUT,
    232	DMA_RING15_TIMEOUT,
    233	DMA_RING16_TIMEOUT,
    234};
    235
    236static const u8 bcmgenet_dma_regs_v3plus[] = {
    237	[DMA_RING_CFG]		= 0x00,
    238	[DMA_CTRL]		= 0x04,
    239	[DMA_STATUS]		= 0x08,
    240	[DMA_SCB_BURST_SIZE]	= 0x0C,
    241	[DMA_ARB_CTRL]		= 0x2C,
    242	[DMA_PRIORITY_0]	= 0x30,
    243	[DMA_PRIORITY_1]	= 0x34,
    244	[DMA_PRIORITY_2]	= 0x38,
    245	[DMA_RING0_TIMEOUT]	= 0x2C,
    246	[DMA_RING1_TIMEOUT]	= 0x30,
    247	[DMA_RING2_TIMEOUT]	= 0x34,
    248	[DMA_RING3_TIMEOUT]	= 0x38,
    249	[DMA_RING4_TIMEOUT]	= 0x3c,
    250	[DMA_RING5_TIMEOUT]	= 0x40,
    251	[DMA_RING6_TIMEOUT]	= 0x44,
    252	[DMA_RING7_TIMEOUT]	= 0x48,
    253	[DMA_RING8_TIMEOUT]	= 0x4c,
    254	[DMA_RING9_TIMEOUT]	= 0x50,
    255	[DMA_RING10_TIMEOUT]	= 0x54,
    256	[DMA_RING11_TIMEOUT]	= 0x58,
    257	[DMA_RING12_TIMEOUT]	= 0x5c,
    258	[DMA_RING13_TIMEOUT]	= 0x60,
    259	[DMA_RING14_TIMEOUT]	= 0x64,
    260	[DMA_RING15_TIMEOUT]	= 0x68,
    261	[DMA_RING16_TIMEOUT]	= 0x6C,
    262	[DMA_INDEX2RING_0]	= 0x70,
    263	[DMA_INDEX2RING_1]	= 0x74,
    264	[DMA_INDEX2RING_2]	= 0x78,
    265	[DMA_INDEX2RING_3]	= 0x7C,
    266	[DMA_INDEX2RING_4]	= 0x80,
    267	[DMA_INDEX2RING_5]	= 0x84,
    268	[DMA_INDEX2RING_6]	= 0x88,
    269	[DMA_INDEX2RING_7]	= 0x8C,
    270};
    271
    272static const u8 bcmgenet_dma_regs_v2[] = {
    273	[DMA_RING_CFG]		= 0x00,
    274	[DMA_CTRL]		= 0x04,
    275	[DMA_STATUS]		= 0x08,
    276	[DMA_SCB_BURST_SIZE]	= 0x0C,
    277	[DMA_ARB_CTRL]		= 0x30,
    278	[DMA_PRIORITY_0]	= 0x34,
    279	[DMA_PRIORITY_1]	= 0x38,
    280	[DMA_PRIORITY_2]	= 0x3C,
    281	[DMA_RING0_TIMEOUT]	= 0x2C,
    282	[DMA_RING1_TIMEOUT]	= 0x30,
    283	[DMA_RING2_TIMEOUT]	= 0x34,
    284	[DMA_RING3_TIMEOUT]	= 0x38,
    285	[DMA_RING4_TIMEOUT]	= 0x3c,
    286	[DMA_RING5_TIMEOUT]	= 0x40,
    287	[DMA_RING6_TIMEOUT]	= 0x44,
    288	[DMA_RING7_TIMEOUT]	= 0x48,
    289	[DMA_RING8_TIMEOUT]	= 0x4c,
    290	[DMA_RING9_TIMEOUT]	= 0x50,
    291	[DMA_RING10_TIMEOUT]	= 0x54,
    292	[DMA_RING11_TIMEOUT]	= 0x58,
    293	[DMA_RING12_TIMEOUT]	= 0x5c,
    294	[DMA_RING13_TIMEOUT]	= 0x60,
    295	[DMA_RING14_TIMEOUT]	= 0x64,
    296	[DMA_RING15_TIMEOUT]	= 0x68,
    297	[DMA_RING16_TIMEOUT]	= 0x6C,
    298};
    299
    300static const u8 bcmgenet_dma_regs_v1[] = {
    301	[DMA_CTRL]		= 0x00,
    302	[DMA_STATUS]		= 0x04,
    303	[DMA_SCB_BURST_SIZE]	= 0x0C,
    304	[DMA_ARB_CTRL]		= 0x30,
    305	[DMA_PRIORITY_0]	= 0x34,
    306	[DMA_PRIORITY_1]	= 0x38,
    307	[DMA_PRIORITY_2]	= 0x3C,
    308	[DMA_RING0_TIMEOUT]	= 0x2C,
    309	[DMA_RING1_TIMEOUT]	= 0x30,
    310	[DMA_RING2_TIMEOUT]	= 0x34,
    311	[DMA_RING3_TIMEOUT]	= 0x38,
    312	[DMA_RING4_TIMEOUT]	= 0x3c,
    313	[DMA_RING5_TIMEOUT]	= 0x40,
    314	[DMA_RING6_TIMEOUT]	= 0x44,
    315	[DMA_RING7_TIMEOUT]	= 0x48,
    316	[DMA_RING8_TIMEOUT]	= 0x4c,
    317	[DMA_RING9_TIMEOUT]	= 0x50,
    318	[DMA_RING10_TIMEOUT]	= 0x54,
    319	[DMA_RING11_TIMEOUT]	= 0x58,
    320	[DMA_RING12_TIMEOUT]	= 0x5c,
    321	[DMA_RING13_TIMEOUT]	= 0x60,
    322	[DMA_RING14_TIMEOUT]	= 0x64,
    323	[DMA_RING15_TIMEOUT]	= 0x68,
    324	[DMA_RING16_TIMEOUT]	= 0x6C,
    325};
    326
    327/* Set at runtime once bcmgenet version is known */
    328static const u8 *bcmgenet_dma_regs;
    329
    330static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
    331{
    332	return netdev_priv(dev_get_drvdata(dev));
    333}
    334
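       /* The DMA register space is laid out as descriptors, then the per-ring
        * registers, then the global control registers, so the accessors below
        * add DMA_RINGS_SIZE on top of GENET_TDMA/RDMA_REG_OFF.
        */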
    335static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
    336				      enum dma_reg r)
    337{
    338	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
    339			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
    340}
    341
    342static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
    343					u32 val, enum dma_reg r)
    344{
    345	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
    346			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
    347}
    348
    349static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
    350				      enum dma_reg r)
    351{
    352	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
    353			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
    354}
    355
    356static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
    357					u32 val, enum dma_reg r)
    358{
    359	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
    360			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
    361}
    362
    363/* RDMA/TDMA ring registers and accessors
    364 * we merge the common fields and just prefix with T/D the registers
    365 * that have a different meaning depending on the direction
    366 */
    367enum dma_ring_reg {
    368	TDMA_READ_PTR = 0,
    369	RDMA_WRITE_PTR = TDMA_READ_PTR,
    370	TDMA_READ_PTR_HI,
    371	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
    372	TDMA_CONS_INDEX,
    373	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
    374	TDMA_PROD_INDEX,
    375	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
    376	DMA_RING_BUF_SIZE,
    377	DMA_START_ADDR,
    378	DMA_START_ADDR_HI,
    379	DMA_END_ADDR,
    380	DMA_END_ADDR_HI,
    381	DMA_MBUF_DONE_THRESH,
    382	TDMA_FLOW_PERIOD,
    383	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
    384	TDMA_WRITE_PTR,
    385	RDMA_READ_PTR = TDMA_WRITE_PTR,
    386	TDMA_WRITE_PTR_HI,
    387	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
    388};
    389
    390/* GENET v4 supports 40-bit pointer addressing;
    391 * for obvious reasons the LO and HI word parts
    392 * are contiguous, but this offsets the other
    393 * registers.
    394 */
    395static const u8 genet_dma_ring_regs_v4[] = {
    396	[TDMA_READ_PTR]			= 0x00,
    397	[TDMA_READ_PTR_HI]		= 0x04,
    398	[TDMA_CONS_INDEX]		= 0x08,
    399	[TDMA_PROD_INDEX]		= 0x0C,
    400	[DMA_RING_BUF_SIZE]		= 0x10,
    401	[DMA_START_ADDR]		= 0x14,
    402	[DMA_START_ADDR_HI]		= 0x18,
    403	[DMA_END_ADDR]			= 0x1C,
    404	[DMA_END_ADDR_HI]		= 0x20,
    405	[DMA_MBUF_DONE_THRESH]		= 0x24,
    406	[TDMA_FLOW_PERIOD]		= 0x28,
    407	[TDMA_WRITE_PTR]		= 0x2C,
    408	[TDMA_WRITE_PTR_HI]		= 0x30,
    409};
    410
    411static const u8 genet_dma_ring_regs_v123[] = {
    412	[TDMA_READ_PTR]			= 0x00,
    413	[TDMA_CONS_INDEX]		= 0x04,
    414	[TDMA_PROD_INDEX]		= 0x08,
    415	[DMA_RING_BUF_SIZE]		= 0x0C,
    416	[DMA_START_ADDR]		= 0x10,
    417	[DMA_END_ADDR]			= 0x14,
    418	[DMA_MBUF_DONE_THRESH]		= 0x18,
    419	[TDMA_FLOW_PERIOD]		= 0x1C,
    420	[TDMA_WRITE_PTR]		= 0x20,
    421};
    422
    423/* Set at runtime once GENET version is known */
    424static const u8 *genet_dma_ring_regs;
    425
    426static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
    427					   unsigned int ring,
    428					   enum dma_ring_reg r)
    429{
    430	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
    431			      (DMA_RING_SIZE * ring) +
    432			      genet_dma_ring_regs[r]);
    433}
    434
    435static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
    436					     unsigned int ring, u32 val,
    437					     enum dma_ring_reg r)
    438{
    439	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
    440			(DMA_RING_SIZE * ring) +
    441			genet_dma_ring_regs[r]);
    442}
    443
    444static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
    445					   unsigned int ring,
    446					   enum dma_ring_reg r)
    447{
    448	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
    449			      (DMA_RING_SIZE * ring) +
    450			      genet_dma_ring_regs[r]);
    451}
    452
    453static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
    454					     unsigned int ring, u32 val,
    455					     enum dma_ring_reg r)
    456{
    457	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
    458			(DMA_RING_SIZE * ring) +
    459			genet_dma_ring_regs[r]);
    460}
    461
    462static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
    463{
    464	u32 offset;
    465	u32 reg;
    466
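       	/* Enable bits for filters 0-31 live in the second enable word
       	 * (HFB_FLT_ENABLE_V3PLUS + 4) and filters 32 and up in the first,
       	 * hence the (f_index < 32) * sizeof(u32) displacement below.
       	 */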
    467	offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
    468	reg = bcmgenet_hfb_reg_readl(priv, offset);
    469	reg |= (1 << (f_index % 32));
    470	bcmgenet_hfb_reg_writel(priv, reg, offset);
    471	reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
    472	reg |= RBUF_HFB_EN;
    473	bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
    474}
    475
    476static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
    477{
    478	u32 offset, reg, reg1;
    479
    480	offset = HFB_FLT_ENABLE_V3PLUS;
    481	reg = bcmgenet_hfb_reg_readl(priv, offset);
    482	reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
    483	if (f_index < 32) {
    484		reg1 &= ~(1 << (f_index % 32));
    485		bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
    486	} else {
    487		reg &= ~(1 << (f_index % 32));
    488		bcmgenet_hfb_reg_writel(priv, reg, offset);
    489	}
    490	if (!reg && !reg1) {
    491		reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
    492		reg &= ~RBUF_HFB_EN;
    493		bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
    494	}
    495}
    496
    497static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
    498						     u32 f_index, u32 rx_queue)
    499{
    500	u32 offset;
    501	u32 reg;
    502
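       	/* Each DMA_INDEX2RING register packs eight 4-bit ring assignments,
       	 * so filter f_index selects register f_index / 8, nibble f_index % 8.
       	 */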
    503	offset = f_index / 8;
    504	reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
    505	reg &= ~(0xF << (4 * (f_index % 8)));
    506	reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
    507	bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
    508}
    509
    510static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
    511					   u32 f_index, u32 f_length)
    512{
    513	u32 offset;
    514	u32 reg;
    515
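       	/* Filter lengths are packed four per register, one byte each, with
       	 * the registers laid out from the highest-numbered filter down,
       	 * hence the (hfb_filter_cnt - 1 - f_index) indexing.
       	 */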
    516	offset = HFB_FLT_LEN_V3PLUS +
    517		 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
    518		 sizeof(u32);
    519	reg = bcmgenet_hfb_reg_readl(priv, offset);
    520	reg &= ~(0xFF << (8 * (f_index % 4)));
    521	reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
    522	bcmgenet_hfb_reg_writel(priv, reg, offset);
    523}
    524
    525static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
    526{
    527	while (size) {
    528		switch (*(unsigned char *)mask++) {
    529		case 0x00:
    530		case 0x0f:
    531		case 0xf0:
    532		case 0xff:
    533			size--;
    534			continue;
    535		default:
    536			return -EINVAL;
    537		}
    538	}
    539
    540	return 0;
    541}
    542
    543#define VALIDATE_MASK(x) \
    544	bcmgenet_hfb_validate_mask(&(x), sizeof(x))
    545
    546static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index,
    547				    u32 offset, void *val, void *mask,
    548				    size_t size)
    549{
    550	u32 index, tmp;
    551
    552	index = f_index * priv->hw_params->hfb_filter_size + offset / 2;
    553	tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32));
    554
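       	/* Each 32-bit HFB word covers two bytes of filter data: even offsets
       	 * land in bits 15:8 with their nibble-enable bits in 19:18, odd
       	 * offsets in bits 7:0 with nibble-enable bits in 17:16.
       	 */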
    555	while (size--) {
    556		if (offset++ & 1) {
    557			tmp &= ~0x300FF;
    558			tmp |= (*(unsigned char *)val++);
    559			switch ((*(unsigned char *)mask++)) {
    560			case 0xFF:
    561				tmp |= 0x30000;
    562				break;
    563			case 0xF0:
    564				tmp |= 0x20000;
    565				break;
    566			case 0x0F:
    567				tmp |= 0x10000;
    568				break;
    569			}
    570			bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32));
    571			if (size)
    572				tmp = bcmgenet_hfb_readl(priv,
    573							 index * sizeof(u32));
    574		} else {
    575			tmp &= ~0xCFF00;
    576			tmp |= (*(unsigned char *)val++) << 8;
    577			switch ((*(unsigned char *)mask++)) {
    578			case 0xFF:
    579				tmp |= 0xC0000;
    580				break;
    581			case 0xF0:
    582				tmp |= 0x80000;
    583				break;
    584			case 0x0F:
    585				tmp |= 0x40000;
    586				break;
    587			}
    588			if (!size)
    589				bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32));
    590		}
    591	}
    592
    593	return 0;
    594}
    595
    596static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
    597					     struct bcmgenet_rxnfc_rule *rule)
    598{
    599	struct ethtool_rx_flow_spec *fs = &rule->fs;
    600	u32 offset = 0, f_length = 0, f;
    601	u8 val_8, mask_8;
    602	__be16 val_16;
    603	u16 mask_16;
    604	size_t size;
    605
    606	f = fs->location;
    607	if (fs->flow_type & FLOW_MAC_EXT) {
    608		bcmgenet_hfb_insert_data(priv, f, 0,
    609					 &fs->h_ext.h_dest, &fs->m_ext.h_dest,
    610					 sizeof(fs->h_ext.h_dest));
    611	}
    612
    613	if (fs->flow_type & FLOW_EXT) {
    614		if (fs->m_ext.vlan_etype ||
    615		    fs->m_ext.vlan_tci) {
    616			bcmgenet_hfb_insert_data(priv, f, 12,
    617						 &fs->h_ext.vlan_etype,
    618						 &fs->m_ext.vlan_etype,
    619						 sizeof(fs->h_ext.vlan_etype));
    620			bcmgenet_hfb_insert_data(priv, f, 14,
    621						 &fs->h_ext.vlan_tci,
    622						 &fs->m_ext.vlan_tci,
    623						 sizeof(fs->h_ext.vlan_tci));
    624			offset += VLAN_HLEN;
    625			f_length += DIV_ROUND_UP(VLAN_HLEN, 2);
    626		}
    627	}
    628
    629	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
    630	case ETHER_FLOW:
    631		f_length += DIV_ROUND_UP(ETH_HLEN, 2);
    632		bcmgenet_hfb_insert_data(priv, f, 0,
    633					 &fs->h_u.ether_spec.h_dest,
    634					 &fs->m_u.ether_spec.h_dest,
    635					 sizeof(fs->h_u.ether_spec.h_dest));
    636		bcmgenet_hfb_insert_data(priv, f, ETH_ALEN,
    637					 &fs->h_u.ether_spec.h_source,
    638					 &fs->m_u.ether_spec.h_source,
    639					 sizeof(fs->h_u.ether_spec.h_source));
    640		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
    641					 &fs->h_u.ether_spec.h_proto,
    642					 &fs->m_u.ether_spec.h_proto,
    643					 sizeof(fs->h_u.ether_spec.h_proto));
    644		break;
    645	case IP_USER_FLOW:
    646		f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2);
    647		/* Specify IP Ether Type */
    648		val_16 = htons(ETH_P_IP);
    649		mask_16 = 0xFFFF;
    650		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
    651					 &val_16, &mask_16, sizeof(val_16));
    652		bcmgenet_hfb_insert_data(priv, f, 15 + offset,
    653					 &fs->h_u.usr_ip4_spec.tos,
    654					 &fs->m_u.usr_ip4_spec.tos,
    655					 sizeof(fs->h_u.usr_ip4_spec.tos));
    656		bcmgenet_hfb_insert_data(priv, f, 23 + offset,
    657					 &fs->h_u.usr_ip4_spec.proto,
    658					 &fs->m_u.usr_ip4_spec.proto,
    659					 sizeof(fs->h_u.usr_ip4_spec.proto));
    660		bcmgenet_hfb_insert_data(priv, f, 26 + offset,
    661					 &fs->h_u.usr_ip4_spec.ip4src,
    662					 &fs->m_u.usr_ip4_spec.ip4src,
    663					 sizeof(fs->h_u.usr_ip4_spec.ip4src));
    664		bcmgenet_hfb_insert_data(priv, f, 30 + offset,
    665					 &fs->h_u.usr_ip4_spec.ip4dst,
    666					 &fs->m_u.usr_ip4_spec.ip4dst,
    667					 sizeof(fs->h_u.usr_ip4_spec.ip4dst));
    668		if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
    669			break;
    670
    671		/* Only supports 20 byte IPv4 header */
    672		val_8 = 0x45;
    673		mask_8 = 0xFF;
    674		bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset,
    675					 &val_8, &mask_8,
    676					 sizeof(val_8));
    677		size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
    678		bcmgenet_hfb_insert_data(priv, f,
    679					 ETH_HLEN + 20 + offset,
    680					 &fs->h_u.usr_ip4_spec.l4_4_bytes,
    681					 &fs->m_u.usr_ip4_spec.l4_4_bytes,
    682					 size);
    683		f_length += DIV_ROUND_UP(size, 2);
    684		break;
    685	}
    686
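       	/* f_length above counts 16-bit words; the filter length is programmed
       	 * as twice that, i.e. as a byte count.
       	 */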
    687	bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
    688	if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
    689		/* Ring 0 flows can be handled by the default Descriptor Ring
    690		 * We'll map them to ring 0, but don't enable the filter
    691		 */
    692		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
    693		rule->state = BCMGENET_RXNFC_STATE_DISABLED;
    694	} else {
    695		/* Other Rx rings are direct mapped here */
    696		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
    697							 fs->ring_cookie);
    698		bcmgenet_hfb_enable_filter(priv, f);
    699		rule->state = BCMGENET_RXNFC_STATE_ENABLED;
    700	}
    701}
    702
    703/* bcmgenet_hfb_clear
    704 *
    705 * Clear Hardware Filter Block and disable all filtering.
    706 */
    707static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
    708{
    709	u32 base, i;
    710
    711	base = f_index * priv->hw_params->hfb_filter_size;
    712	for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
    713		bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
    714}
    715
    716static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
    717{
    718	u32 i;
    719
    720	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
    721		return;
    722
    723	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
    724	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
    725	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
    726
    727	for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
    728		bcmgenet_rdma_writel(priv, 0x0, i);
    729
    730	for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
    731		bcmgenet_hfb_reg_writel(priv, 0x0,
    732					HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
    733
    734	for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
    735		bcmgenet_hfb_clear_filter(priv, i);
    736}
    737
    738static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
    739{
    740	int i;
    741
    742	INIT_LIST_HEAD(&priv->rxnfc_list);
    743	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
    744		return;
    745
    746	for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
    747		INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
    748		priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
    749	}
    750
    751	bcmgenet_hfb_clear(priv);
    752}
    753
    754static int bcmgenet_begin(struct net_device *dev)
    755{
    756	struct bcmgenet_priv *priv = netdev_priv(dev);
    757
    758	/* Turn on the clock */
    759	return clk_prepare_enable(priv->clk);
    760}
    761
    762static void bcmgenet_complete(struct net_device *dev)
    763{
    764	struct bcmgenet_priv *priv = netdev_priv(dev);
    765
    766	/* Turn off the clock */
    767	clk_disable_unprepare(priv->clk);
    768}
    769
    770static int bcmgenet_get_link_ksettings(struct net_device *dev,
    771				       struct ethtool_link_ksettings *cmd)
    772{
    773	if (!netif_running(dev))
    774		return -EINVAL;
    775
    776	if (!dev->phydev)
    777		return -ENODEV;
    778
    779	phy_ethtool_ksettings_get(dev->phydev, cmd);
    780
    781	return 0;
    782}
    783
    784static int bcmgenet_set_link_ksettings(struct net_device *dev,
    785				       const struct ethtool_link_ksettings *cmd)
    786{
    787	if (!netif_running(dev))
    788		return -EINVAL;
    789
    790	if (!dev->phydev)
    791		return -ENODEV;
    792
    793	return phy_ethtool_ksettings_set(dev->phydev, cmd);
    794}
    795
    796static int bcmgenet_set_features(struct net_device *dev,
    797				 netdev_features_t features)
    798{
    799	struct bcmgenet_priv *priv = netdev_priv(dev);
    800	u32 reg;
    801	int ret;
    802
    803	ret = clk_prepare_enable(priv->clk);
    804	if (ret)
    805		return ret;
    806
    807	/* Make sure we reflect the value of CRC_CMD_FWD */
    808	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
    809	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
    810
    811	clk_disable_unprepare(priv->clk);
    812
    813	return ret;
    814}
    815
    816static u32 bcmgenet_get_msglevel(struct net_device *dev)
    817{
    818	struct bcmgenet_priv *priv = netdev_priv(dev);
    819
    820	return priv->msg_enable;
    821}
    822
    823static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
    824{
    825	struct bcmgenet_priv *priv = netdev_priv(dev);
    826
    827	priv->msg_enable = level;
    828}
    829
    830static int bcmgenet_get_coalesce(struct net_device *dev,
    831				 struct ethtool_coalesce *ec,
    832				 struct kernel_ethtool_coalesce *kernel_coal,
    833				 struct netlink_ext_ack *extack)
    834{
    835	struct bcmgenet_priv *priv = netdev_priv(dev);
    836	struct bcmgenet_rx_ring *ring;
    837	unsigned int i;
    838
    839	ec->tx_max_coalesced_frames =
    840		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
    841					 DMA_MBUF_DONE_THRESH);
    842	ec->rx_max_coalesced_frames =
    843		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
    844					 DMA_MBUF_DONE_THRESH);
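       	/* The ring timeout register counts 8.192 us ticks; convert to usecs */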
    845	ec->rx_coalesce_usecs =
    846		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
    847
    848	for (i = 0; i < priv->hw_params->rx_queues; i++) {
    849		ring = &priv->rx_rings[i];
    850		ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
    851	}
    852	ring = &priv->rx_rings[DESC_INDEX];
    853	ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
    854
    855	return 0;
    856}
    857
    858static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring,
    859				     u32 usecs, u32 pkts)
    860{
    861	struct bcmgenet_priv *priv = ring->priv;
    862	unsigned int i = ring->index;
    863	u32 reg;
    864
    865	bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH);
    866
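       	/* Convert usecs to 8.192 us timeout ticks, rounding up */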
    867	reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
    868	reg &= ~DMA_TIMEOUT_MASK;
    869	reg |= DIV_ROUND_UP(usecs * 1000, 8192);
    870	bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
    871}
    872
    873static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
    874					  struct ethtool_coalesce *ec)
    875{
    876	struct dim_cq_moder moder;
    877	u32 usecs, pkts;
    878
    879	ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
    880	ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
    881	usecs = ring->rx_coalesce_usecs;
    882	pkts = ring->rx_max_coalesced_frames;
    883
    884	if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
    885		moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
    886		usecs = moder.usec;
    887		pkts = moder.pkts;
    888	}
    889
    890	ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
    891	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
    892}
    893
    894static int bcmgenet_set_coalesce(struct net_device *dev,
    895				 struct ethtool_coalesce *ec,
    896				 struct kernel_ethtool_coalesce *kernel_coal,
    897				 struct netlink_ext_ack *extack)
    898{
    899	struct bcmgenet_priv *priv = netdev_priv(dev);
    900	unsigned int i;
    901
    902	/* The base system clock is 125MHz; the DMA timeout is this reference
    903	 * clock divided by 1024, which yields roughly 8.192us. Our maximum
    904	 * value has to fit in DMA_TIMEOUT_MASK (16 bits).
    905	 */
    906	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
    907	    ec->tx_max_coalesced_frames == 0 ||
    908	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
    909	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
    910		return -EINVAL;
    911
    912	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
    913		return -EINVAL;
    914
    915	/* GENET TDMA hardware does not support a configurable timeout, but will
    916	 * always generate an interrupt either after MBDONE packets have been
    917	 * transmitted, or when the ring is empty.
    918	 */
    919
    920	/* Program all TX queues with the same values, as there is no
    921	 * ethtool knob to do coalescing on a per-queue basis
    922	 */
    923	for (i = 0; i < priv->hw_params->tx_queues; i++)
    924		bcmgenet_tdma_ring_writel(priv, i,
    925					  ec->tx_max_coalesced_frames,
    926					  DMA_MBUF_DONE_THRESH);
    927	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
    928				  ec->tx_max_coalesced_frames,
    929				  DMA_MBUF_DONE_THRESH);
    930
    931	for (i = 0; i < priv->hw_params->rx_queues; i++)
    932		bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
    933	bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);
    934
    935	return 0;
    936}
    937
    938static void bcmgenet_get_pauseparam(struct net_device *dev,
    939				    struct ethtool_pauseparam *epause)
    940{
    941	struct bcmgenet_priv *priv;
    942	u32 umac_cmd;
    943
    944	priv = netdev_priv(dev);
    945
    946	epause->autoneg = priv->autoneg_pause;
    947
    948	if (netif_carrier_ok(dev)) {
    949		/* report active state when link is up */
    950		umac_cmd = bcmgenet_umac_readl(priv, UMAC_CMD);
    951		epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE);
    952		epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE);
    953	} else {
    954		/* otherwise report stored settings */
    955		epause->tx_pause = priv->tx_pause;
    956		epause->rx_pause = priv->rx_pause;
    957	}
    958}
    959
    960static int bcmgenet_set_pauseparam(struct net_device *dev,
    961				   struct ethtool_pauseparam *epause)
    962{
    963	struct bcmgenet_priv *priv = netdev_priv(dev);
    964
    965	if (!dev->phydev)
    966		return -ENODEV;
    967
    968	if (!phy_validate_pause(dev->phydev, epause))
    969		return -EINVAL;
    970
    971	priv->autoneg_pause = !!epause->autoneg;
    972	priv->tx_pause = !!epause->tx_pause;
    973	priv->rx_pause = !!epause->rx_pause;
    974
    975	bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
    976
    977	return 0;
    978}
    979
    980/* standard ethtool support functions. */
    981enum bcmgenet_stat_type {
    982	BCMGENET_STAT_NETDEV = -1,
    983	BCMGENET_STAT_MIB_RX,
    984	BCMGENET_STAT_MIB_TX,
    985	BCMGENET_STAT_RUNT,
    986	BCMGENET_STAT_MISC,
    987	BCMGENET_STAT_SOFT,
    988};
    989
    990struct bcmgenet_stats {
    991	char stat_string[ETH_GSTRING_LEN];
    992	int stat_sizeof;
    993	int stat_offset;
    994	enum bcmgenet_stat_type type;
    995	/* reg offset from UMAC base for misc counters */
    996	u16 reg_offset;
    997};
    998
    999#define STAT_NETDEV(m) { \
   1000	.stat_string = __stringify(m), \
   1001	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
   1002	.stat_offset = offsetof(struct net_device_stats, m), \
   1003	.type = BCMGENET_STAT_NETDEV, \
   1004}
   1005
   1006#define STAT_GENET_MIB(str, m, _type) { \
   1007	.stat_string = str, \
   1008	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
   1009	.stat_offset = offsetof(struct bcmgenet_priv, m), \
   1010	.type = _type, \
   1011}
   1012
   1013#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
   1014#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
   1015#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
   1016#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
   1017
   1018#define STAT_GENET_MISC(str, m, offset) { \
   1019	.stat_string = str, \
   1020	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
   1021	.stat_offset = offsetof(struct bcmgenet_priv, m), \
   1022	.type = BCMGENET_STAT_MISC, \
   1023	.reg_offset = offset, \
   1024}
   1025
   1026#define STAT_GENET_Q(num) \
   1027	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
   1028			tx_rings[num].packets), \
   1029	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
   1030			tx_rings[num].bytes), \
   1031	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
   1032			rx_rings[num].bytes),	 \
   1033	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
   1034			rx_rings[num].packets), \
   1035	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
   1036			rx_rings[num].errors), \
   1037	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
   1038			rx_rings[num].dropped)
   1039
   1040/* There is a 0xC gap between the end of RX and beginning of TX stats and then
   1041 * between the end of TX stats and the beginning of the RX RUNT stats
   1042 */
   1043#define BCMGENET_STAT_OFFSET	0xc
   1044
   1045/* Hardware counters must be kept in sync because the order/offset
   1046 * is important here (order in structure declaration = order in hardware)
   1047 */
   1048static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
   1049	/* general stats */
   1050	STAT_NETDEV(rx_packets),
   1051	STAT_NETDEV(tx_packets),
   1052	STAT_NETDEV(rx_bytes),
   1053	STAT_NETDEV(tx_bytes),
   1054	STAT_NETDEV(rx_errors),
   1055	STAT_NETDEV(tx_errors),
   1056	STAT_NETDEV(rx_dropped),
   1057	STAT_NETDEV(tx_dropped),
   1058	STAT_NETDEV(multicast),
   1059	/* UniMAC RSV counters */
   1060	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
   1061	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
   1062	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
   1063	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
   1064	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
   1065	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
   1066	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
   1067	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
   1068	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
   1069	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
   1070	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
   1071	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
   1072	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
   1073	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
   1074	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
   1075	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
   1076	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
   1077	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
   1078	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
   1079	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
   1080	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
   1081	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
   1082	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
   1083	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
   1084	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
   1085	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
   1086	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
   1087	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
   1088	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
   1089	/* UniMAC TSV counters */
   1090	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
   1091	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
   1092	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
   1093	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
   1094	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
   1095	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
   1096	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
   1097	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
   1098	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
   1099	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
   1100	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
   1101	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
   1102	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
   1103	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
   1104	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
   1105	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
   1106	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
   1107	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
   1108	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
   1109	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
   1110	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
   1111	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
   1112	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
   1113	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
   1114	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
   1115	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
   1116	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
   1117	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
   1118	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
   1119	/* UniMAC RUNT counters */
   1120	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
   1121	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
   1122	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
   1123	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
   1124	/* Misc UniMAC counters */
   1125	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
   1126			UMAC_RBUF_OVFL_CNT_V1),
   1127	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
   1128			UMAC_RBUF_ERR_CNT_V1),
   1129	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
   1130	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
   1131	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
   1132	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
   1133	STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb),
   1134	STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed",
   1135			    mib.tx_realloc_tsb_failed),
   1136	/* Per TX/RX queue counters */
   1137	STAT_GENET_Q(0),
   1138	STAT_GENET_Q(1),
   1139	STAT_GENET_Q(2),
   1140	STAT_GENET_Q(3),
   1141	STAT_GENET_Q(16),
   1142};
   1143
   1144#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
   1145
   1146static void bcmgenet_get_drvinfo(struct net_device *dev,
   1147				 struct ethtool_drvinfo *info)
   1148{
   1149	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
   1150}
   1151
   1152static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
   1153{
   1154	switch (string_set) {
   1155	case ETH_SS_STATS:
   1156		return BCMGENET_STATS_LEN;
   1157	default:
   1158		return -EOPNOTSUPP;
   1159	}
   1160}
   1161
   1162static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
   1163				 u8 *data)
   1164{
   1165	int i;
   1166
   1167	switch (stringset) {
   1168	case ETH_SS_STATS:
   1169		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
   1170			memcpy(data + i * ETH_GSTRING_LEN,
   1171			       bcmgenet_gstrings_stats[i].stat_string,
   1172			       ETH_GSTRING_LEN);
   1173		}
   1174		break;
   1175	}
   1176}
   1177
   1178static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
   1179{
   1180	u16 new_offset;
   1181	u32 val;
   1182
   1183	switch (offset) {
   1184	case UMAC_RBUF_OVFL_CNT_V1:
   1185		if (GENET_IS_V2(priv))
   1186			new_offset = RBUF_OVFL_CNT_V2;
   1187		else
   1188			new_offset = RBUF_OVFL_CNT_V3PLUS;
   1189
   1190		val = bcmgenet_rbuf_readl(priv,	new_offset);
   1191		/* clear if overflowed */
   1192		if (val == ~0)
   1193			bcmgenet_rbuf_writel(priv, 0, new_offset);
   1194		break;
   1195	case UMAC_RBUF_ERR_CNT_V1:
   1196		if (GENET_IS_V2(priv))
   1197			new_offset = RBUF_ERR_CNT_V2;
   1198		else
   1199			new_offset = RBUF_ERR_CNT_V3PLUS;
   1200
   1201		val = bcmgenet_rbuf_readl(priv,	new_offset);
   1202		/* clear if overflowed */
   1203		if (val == ~0)
   1204			bcmgenet_rbuf_writel(priv, 0, new_offset);
   1205		break;
   1206	default:
   1207		val = bcmgenet_umac_readl(priv, offset);
   1208		/* clear if overflowed */
   1209		if (val == ~0)
   1210			bcmgenet_umac_writel(priv, 0, offset);
   1211		break;
   1212	}
   1213
   1214	return val;
   1215}
   1216
   1217static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
   1218{
   1219	int i, j = 0;
   1220
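       	/* Walk the stats table in declaration order; j tracks the running
       	 * offset into the hardware MIB block, and the RUNT/TX cases fall
       	 * through so the 0xC register gaps are added before the read.
       	 */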
   1221	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
   1222		const struct bcmgenet_stats *s;
   1223		u8 offset = 0;
   1224		u32 val = 0;
   1225		char *p;
   1226
   1227		s = &bcmgenet_gstrings_stats[i];
   1228		switch (s->type) {
   1229		case BCMGENET_STAT_NETDEV:
   1230		case BCMGENET_STAT_SOFT:
   1231			continue;
   1232		case BCMGENET_STAT_RUNT:
   1233			offset += BCMGENET_STAT_OFFSET;
   1234			fallthrough;
   1235		case BCMGENET_STAT_MIB_TX:
   1236			offset += BCMGENET_STAT_OFFSET;
   1237			fallthrough;
   1238		case BCMGENET_STAT_MIB_RX:
   1239			val = bcmgenet_umac_readl(priv,
   1240						  UMAC_MIB_START + j + offset);
   1241			offset = 0;	/* Reset Offset */
   1242			break;
   1243		case BCMGENET_STAT_MISC:
   1244			if (GENET_IS_V1(priv)) {
   1245				val = bcmgenet_umac_readl(priv, s->reg_offset);
   1246				/* clear if overflowed */
   1247				if (val == ~0)
   1248					bcmgenet_umac_writel(priv, 0,
   1249							     s->reg_offset);
   1250			} else {
   1251				val = bcmgenet_update_stat_misc(priv,
   1252								s->reg_offset);
   1253			}
   1254			break;
   1255		}
   1256
   1257		j += s->stat_sizeof;
   1258		p = (char *)priv + s->stat_offset;
   1259		*(u32 *)p = val;
   1260	}
   1261}
   1262
   1263static void bcmgenet_get_ethtool_stats(struct net_device *dev,
   1264				       struct ethtool_stats *stats,
   1265				       u64 *data)
   1266{
   1267	struct bcmgenet_priv *priv = netdev_priv(dev);
   1268	int i;
   1269
   1270	if (netif_running(dev))
   1271		bcmgenet_update_mib_counters(priv);
   1272
   1273	dev->netdev_ops->ndo_get_stats(dev);
   1274
   1275	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
   1276		const struct bcmgenet_stats *s;
   1277		char *p;
   1278
   1279		s = &bcmgenet_gstrings_stats[i];
   1280		if (s->type == BCMGENET_STAT_NETDEV)
   1281			p = (char *)&dev->stats;
   1282		else
   1283			p = (char *)priv;
   1284		p += s->stat_offset;
   1285		if (sizeof(unsigned long) != sizeof(u32) &&
   1286		    s->stat_sizeof == sizeof(unsigned long))
   1287			data[i] = *(unsigned long *)p;
   1288		else
   1289			data[i] = *(u32 *)p;
   1290	}
   1291}
   1292
   1293static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
   1294{
   1295	struct bcmgenet_priv *priv = netdev_priv(dev);
   1296	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
   1297	u32 reg;
   1298
   1299	if (enable && !priv->clk_eee_enabled) {
   1300		clk_prepare_enable(priv->clk_eee);
   1301		priv->clk_eee_enabled = true;
   1302	}
   1303
   1304	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
   1305	if (enable)
   1306		reg |= EEE_EN;
   1307	else
   1308		reg &= ~EEE_EN;
   1309	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);
   1310
   1311	/* Enable EEE and switch to a 27MHz clock automatically */
   1312	reg = bcmgenet_readl(priv->base + off);
   1313	if (enable)
   1314		reg |= TBUF_EEE_EN | TBUF_PM_EN;
   1315	else
   1316		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
   1317	bcmgenet_writel(reg, priv->base + off);
   1318
   1319	/* Do the same thing for RBUF */
   1320	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
   1321	if (enable)
   1322		reg |= RBUF_EEE_EN | RBUF_PM_EN;
   1323	else
   1324		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
   1325	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);
   1326
   1327	if (!enable && priv->clk_eee_enabled) {
   1328		clk_disable_unprepare(priv->clk_eee);
   1329		priv->clk_eee_enabled = false;
   1330	}
   1331
   1332	priv->eee.eee_enabled = enable;
   1333	priv->eee.eee_active = enable;
   1334}
   1335
   1336static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
   1337{
   1338	struct bcmgenet_priv *priv = netdev_priv(dev);
   1339	struct ethtool_eee *p = &priv->eee;
   1340
   1341	if (GENET_IS_V1(priv))
   1342		return -EOPNOTSUPP;
   1343
   1344	if (!dev->phydev)
   1345		return -ENODEV;
   1346
   1347	e->eee_enabled = p->eee_enabled;
   1348	e->eee_active = p->eee_active;
   1349	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
   1350
   1351	return phy_ethtool_get_eee(dev->phydev, e);
   1352}
   1353
   1354static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
   1355{
   1356	struct bcmgenet_priv *priv = netdev_priv(dev);
   1357	struct ethtool_eee *p = &priv->eee;
   1358	int ret = 0;
   1359
   1360	if (GENET_IS_V1(priv))
   1361		return -EOPNOTSUPP;
   1362
   1363	if (!dev->phydev)
   1364		return -ENODEV;
   1365
   1366	p->eee_enabled = e->eee_enabled;
   1367
   1368	if (!p->eee_enabled) {
   1369		bcmgenet_eee_enable_set(dev, false);
   1370	} else {
   1371		ret = phy_init_eee(dev->phydev, false);
   1372		if (ret) {
   1373			netif_err(priv, hw, dev, "EEE initialization failed\n");
   1374			return ret;
   1375		}
   1376
   1377		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
   1378		bcmgenet_eee_enable_set(dev, true);
   1379	}
   1380
   1381	return phy_ethtool_set_eee(dev->phydev, e);
   1382}
   1383
   1384static int bcmgenet_validate_flow(struct net_device *dev,
   1385				  struct ethtool_rxnfc *cmd)
   1386{
   1387	struct ethtool_usrip4_spec *l4_mask;
   1388	struct ethhdr *eth_mask;
   1389
   1390	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) {
   1391		netdev_err(dev, "rxnfc: Invalid location (%d)\n",
   1392			   cmd->fs.location);
   1393		return -EINVAL;
   1394	}
   1395
   1396	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
   1397	case IP_USER_FLOW:
   1398		l4_mask = &cmd->fs.m_u.usr_ip4_spec;
   1399		/* don't allow mask which isn't valid */
   1400		if (VALIDATE_MASK(l4_mask->ip4src) ||
   1401		    VALIDATE_MASK(l4_mask->ip4dst) ||
   1402		    VALIDATE_MASK(l4_mask->l4_4_bytes) ||
   1403		    VALIDATE_MASK(l4_mask->proto) ||
   1404		    VALIDATE_MASK(l4_mask->ip_ver) ||
   1405		    VALIDATE_MASK(l4_mask->tos)) {
   1406			netdev_err(dev, "rxnfc: Unsupported mask\n");
   1407			return -EINVAL;
   1408		}
   1409		break;
   1410	case ETHER_FLOW:
   1411		eth_mask = &cmd->fs.m_u.ether_spec;
   1412		/* don't allow mask which isn't valid */
   1413		if (VALIDATE_MASK(eth_mask->h_dest) ||
   1414		    VALIDATE_MASK(eth_mask->h_source) ||
   1415		    VALIDATE_MASK(eth_mask->h_proto)) {
   1416			netdev_err(dev, "rxnfc: Unsupported mask\n");
   1417			return -EINVAL;
   1418		}
   1419		break;
   1420	default:
   1421		netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n",
   1422			   cmd->fs.flow_type);
   1423		return -EINVAL;
   1424	}
   1425
   1426	if ((cmd->fs.flow_type & FLOW_EXT)) {
   1427		/* don't allow mask which isn't valid */
   1428		if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) ||
   1429		    VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) {
   1430			netdev_err(dev, "rxnfc: Unsupported mask\n");
   1431			return -EINVAL;
   1432		}
   1433		if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) {
   1434			netdev_err(dev, "rxnfc: user-def not supported\n");
   1435			return -EINVAL;
   1436		}
   1437	}
   1438
   1439	if ((cmd->fs.flow_type & FLOW_MAC_EXT)) {
   1440		/* don't allow mask which isn't valid */
   1441		if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) {
   1442			netdev_err(dev, "rxnfc: Unsupported mask\n");
   1443			return -EINVAL;
   1444		}
   1445	}
   1446
   1447	return 0;
   1448}
   1449
   1450static int bcmgenet_insert_flow(struct net_device *dev,
   1451				struct ethtool_rxnfc *cmd)
   1452{
   1453	struct bcmgenet_priv *priv = netdev_priv(dev);
   1454	struct bcmgenet_rxnfc_rule *loc_rule;
   1455	int err;
   1456
   1457	if (priv->hw_params->hfb_filter_size < 128) {
   1458		netdev_err(dev, "rxnfc: Not supported by this device\n");
   1459		return -EINVAL;
   1460	}
   1461
   1462	if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
   1463	    cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
   1464		netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
   1465			   cmd->fs.ring_cookie);
   1466		return -EINVAL;
   1467	}
   1468
   1469	err = bcmgenet_validate_flow(dev, cmd);
   1470	if (err)
   1471		return err;
   1472
   1473	loc_rule = &priv->rxnfc_rules[cmd->fs.location];
   1474	if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
   1475		bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
   1476	if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
   1477		list_del(&loc_rule->list);
   1478		bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
   1479	}
   1480	loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
   1481	memcpy(&loc_rule->fs, &cmd->fs,
   1482	       sizeof(struct ethtool_rx_flow_spec));
   1483
   1484	bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);
   1485
   1486	list_add_tail(&loc_rule->list, &priv->rxnfc_list);
   1487
   1488	return 0;
   1489}
   1490
   1491static int bcmgenet_delete_flow(struct net_device *dev,
   1492				struct ethtool_rxnfc *cmd)
   1493{
   1494	struct bcmgenet_priv *priv = netdev_priv(dev);
   1495	struct bcmgenet_rxnfc_rule *rule;
   1496	int err = 0;
   1497
   1498	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
   1499		return -EINVAL;
   1500
   1501	rule = &priv->rxnfc_rules[cmd->fs.location];
   1502	if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
   1503		err = -ENOENT;
   1504		goto out;
   1505	}
   1506
   1507	if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
   1508		bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
   1509	if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
   1510		list_del(&rule->list);
   1511		bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
   1512	}
   1513	rule->state = BCMGENET_RXNFC_STATE_UNUSED;
   1514	memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
   1515
   1516out:
   1517	return err;
   1518}
   1519
   1520static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
   1521{
   1522	struct bcmgenet_priv *priv = netdev_priv(dev);
   1523	int err = 0;
   1524
   1525	switch (cmd->cmd) {
   1526	case ETHTOOL_SRXCLSRLINS:
   1527		err = bcmgenet_insert_flow(dev, cmd);
   1528		break;
   1529	case ETHTOOL_SRXCLSRLDEL:
   1530		err = bcmgenet_delete_flow(dev, cmd);
   1531		break;
   1532	default:
   1533		netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n",
   1534			    cmd->cmd);
   1535		return -EINVAL;
   1536	}
   1537
   1538	return err;
   1539}
   1540
   1541static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
   1542			     int loc)
   1543{
   1544	struct bcmgenet_priv *priv = netdev_priv(dev);
   1545	struct bcmgenet_rxnfc_rule *rule;
   1546	int err = 0;
   1547
   1548	if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
   1549		return -EINVAL;
   1550
   1551	rule = &priv->rxnfc_rules[loc];
   1552	if (rule->state == BCMGENET_RXNFC_STATE_UNUSED)
   1553		err = -ENOENT;
   1554	else
   1555		memcpy(&cmd->fs, &rule->fs,
   1556		       sizeof(struct ethtool_rx_flow_spec));
   1557
   1558	return err;
   1559}
   1560
   1561static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv)
   1562{
   1563	struct list_head *pos;
   1564	int res = 0;
   1565
   1566	list_for_each(pos, &priv->rxnfc_list)
   1567		res++;
   1568
   1569	return res;
   1570}
   1571
   1572static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
   1573			      u32 *rule_locs)
   1574{
   1575	struct bcmgenet_priv *priv = netdev_priv(dev);
   1576	struct bcmgenet_rxnfc_rule *rule;
   1577	int err = 0;
   1578	int i = 0;
   1579
   1580	switch (cmd->cmd) {
   1581	case ETHTOOL_GRXRINGS:
   1582		cmd->data = priv->hw_params->rx_queues ?: 1;
   1583		break;
   1584	case ETHTOOL_GRXCLSRLCNT:
   1585		cmd->rule_cnt = bcmgenet_get_num_flows(priv);
   1586		cmd->data = MAX_NUM_OF_FS_RULES;
   1587		break;
   1588	case ETHTOOL_GRXCLSRULE:
   1589		err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
   1590		break;
   1591	case ETHTOOL_GRXCLSRLALL:
   1592		list_for_each_entry(rule, &priv->rxnfc_list, list)
   1593			if (i < cmd->rule_cnt)
   1594				rule_locs[i++] = rule->fs.location;
   1595		cmd->rule_cnt = i;
   1596		cmd->data = MAX_NUM_OF_FS_RULES;
   1597		break;
   1598	default:
   1599		err = -EOPNOTSUPP;
   1600		break;
   1601	}
   1602
   1603	return err;
   1604}
   1605
   1606/* standard ethtool support functions. */
   1607static const struct ethtool_ops bcmgenet_ethtool_ops = {
   1608	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
   1609				     ETHTOOL_COALESCE_MAX_FRAMES |
   1610				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
   1611	.begin			= bcmgenet_begin,
   1612	.complete		= bcmgenet_complete,
   1613	.get_strings		= bcmgenet_get_strings,
   1614	.get_sset_count		= bcmgenet_get_sset_count,
   1615	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
   1616	.get_drvinfo		= bcmgenet_get_drvinfo,
   1617	.get_link		= ethtool_op_get_link,
   1618	.get_msglevel		= bcmgenet_get_msglevel,
   1619	.set_msglevel		= bcmgenet_set_msglevel,
   1620	.get_wol		= bcmgenet_get_wol,
   1621	.set_wol		= bcmgenet_set_wol,
   1622	.get_eee		= bcmgenet_get_eee,
   1623	.set_eee		= bcmgenet_set_eee,
   1624	.nway_reset		= phy_ethtool_nway_reset,
   1625	.get_coalesce		= bcmgenet_get_coalesce,
   1626	.set_coalesce		= bcmgenet_set_coalesce,
   1627	.get_link_ksettings	= bcmgenet_get_link_ksettings,
   1628	.set_link_ksettings	= bcmgenet_set_link_ksettings,
   1629	.get_ts_info		= ethtool_op_get_ts_info,
   1630	.get_rxnfc		= bcmgenet_get_rxnfc,
   1631	.set_rxnfc		= bcmgenet_set_rxnfc,
   1632	.get_pauseparam		= bcmgenet_get_pauseparam,
   1633	.set_pauseparam		= bcmgenet_set_pauseparam,
   1634};
   1635
   1636/* Power down the unimac, based on mode. */
   1637static int bcmgenet_power_down(struct bcmgenet_priv *priv,
   1638				enum bcmgenet_power_mode mode)
   1639{
   1640	int ret = 0;
   1641	u32 reg;
   1642
   1643	switch (mode) {
   1644	case GENET_POWER_CABLE_SENSE:
   1645		phy_detach(priv->dev->phydev);
   1646		break;
   1647
   1648	case GENET_POWER_WOL_MAGIC:
   1649		ret = bcmgenet_wol_power_down_cfg(priv, mode);
   1650		break;
   1651
   1652	case GENET_POWER_PASSIVE:
   1653		/* Power down LED */
   1654		if (priv->hw_params->flags & GENET_HAS_EXT) {
   1655			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
   1656			if (GENET_IS_V5(priv) && !priv->ephy_16nm)
   1657				reg |= EXT_PWR_DOWN_PHY_EN |
   1658				       EXT_PWR_DOWN_PHY_RD |
   1659				       EXT_PWR_DOWN_PHY_SD |
   1660				       EXT_PWR_DOWN_PHY_RX |
   1661				       EXT_PWR_DOWN_PHY_TX |
   1662				       EXT_IDDQ_GLBL_PWR;
   1663			else
   1664				reg |= EXT_PWR_DOWN_PHY;
   1665
   1666			reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
   1667			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
   1668
   1669			bcmgenet_phy_power_set(priv->dev, false);
   1670		}
   1671		break;
   1672	default:
   1673		break;
   1674	}
   1675
   1676	return ret;
   1677}
   1678
   1679static void bcmgenet_power_up(struct bcmgenet_priv *priv,
   1680			      enum bcmgenet_power_mode mode)
   1681{
   1682	u32 reg;
   1683
   1684	if (!(priv->hw_params->flags & GENET_HAS_EXT))
   1685		return;
   1686
   1687	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
   1688
   1689	switch (mode) {
   1690	case GENET_POWER_PASSIVE:
   1691		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
   1692			 EXT_ENERGY_DET_MASK);
   1693		if (GENET_IS_V5(priv) && !priv->ephy_16nm) {
   1694			reg &= ~(EXT_PWR_DOWN_PHY_EN |
   1695				 EXT_PWR_DOWN_PHY_RD |
   1696				 EXT_PWR_DOWN_PHY_SD |
   1697				 EXT_PWR_DOWN_PHY_RX |
   1698				 EXT_PWR_DOWN_PHY_TX |
   1699				 EXT_IDDQ_GLBL_PWR);
   1700			reg |=   EXT_PHY_RESET;
   1701			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
   1702			mdelay(1);
   1703
   1704			reg &=  ~EXT_PHY_RESET;
   1705		} else {
   1706			reg &= ~EXT_PWR_DOWN_PHY;
   1707			reg |= EXT_PWR_DN_EN_LD;
   1708		}
   1709		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
   1710		bcmgenet_phy_power_set(priv->dev, true);
   1711		break;
   1712
   1713	case GENET_POWER_CABLE_SENSE:
   1714		/* enable APD */
   1715		if (!GENET_IS_V5(priv)) {
   1716			reg |= EXT_PWR_DN_EN_LD;
   1717			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
   1718		}
   1719		break;
   1720	case GENET_POWER_WOL_MAGIC:
   1721		bcmgenet_wol_power_up_cfg(priv, mode);
   1722		return;
   1723	default:
   1724		break;
   1725	}
   1726}
   1727
   1728static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
   1729					 struct bcmgenet_tx_ring *ring)
   1730{
   1731	struct enet_cb *tx_cb_ptr;
   1732
   1733	tx_cb_ptr = ring->cbs;
   1734	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
   1735
   1736	/* Advancing local write pointer */
   1737	if (ring->write_ptr == ring->end_ptr)
   1738		ring->write_ptr = ring->cb_ptr;
   1739	else
   1740		ring->write_ptr++;
   1741
   1742	return tx_cb_ptr;
   1743}
   1744
   1745static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
   1746					 struct bcmgenet_tx_ring *ring)
   1747{
   1748	struct enet_cb *tx_cb_ptr;
   1749
   1750	tx_cb_ptr = ring->cbs;
   1751	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
   1752
   1753	/* Rewinding local write pointer */
   1754	if (ring->write_ptr == ring->cb_ptr)
   1755		ring->write_ptr = ring->end_ptr;
   1756	else
   1757		ring->write_ptr--;
   1758
   1759	return tx_cb_ptr;
   1760}
   1761
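/* Ring 16 (the default ring) is serviced through INTRL2_0 via the aggregate
 * RXDMA_DONE/TXDMA_DONE bits, while each priority ring has its own bit in
 * INTRL2_1.  The helpers below mask or unmask the interrupt source that
 * belongs to a given ring.
 */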
   1762static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
   1763{
   1764	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
   1765				 INTRL2_CPU_MASK_SET);
   1766}
   1767
   1768static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
   1769{
   1770	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
   1771				 INTRL2_CPU_MASK_CLEAR);
   1772}
   1773
   1774static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
   1775{
   1776	bcmgenet_intrl2_1_writel(ring->priv,
   1777				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
   1778				 INTRL2_CPU_MASK_SET);
   1779}
   1780
   1781static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
   1782{
   1783	bcmgenet_intrl2_1_writel(ring->priv,
   1784				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
   1785				 INTRL2_CPU_MASK_CLEAR);
   1786}
   1787
   1788static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
   1789{
   1790	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
   1791				 INTRL2_CPU_MASK_SET);
   1792}
   1793
   1794static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
   1795{
   1796	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
   1797				 INTRL2_CPU_MASK_CLEAR);
   1798}
   1799
   1800static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
   1801{
   1802	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
   1803				 INTRL2_CPU_MASK_CLEAR);
   1804}
   1805
   1806static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
   1807{
   1808	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
   1809				 INTRL2_CPU_MASK_SET);
   1810}
   1811
   1812/* Simple helper to free a transmit control block's resources
   1813 * Returns an skb when the last transmit control block associated with the
   1814 * skb is freed.  The skb should be freed by the caller if necessary.
   1815 */
   1816static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
   1817					   struct enet_cb *cb)
   1818{
   1819	struct sk_buff *skb;
   1820
   1821	skb = cb->skb;
   1822
   1823	if (skb) {
   1824		cb->skb = NULL;
   1825		if (cb == GENET_CB(skb)->first_cb)
   1826			dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
   1827					 dma_unmap_len(cb, dma_len),
   1828					 DMA_TO_DEVICE);
   1829		else
   1830			dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
   1831				       dma_unmap_len(cb, dma_len),
   1832				       DMA_TO_DEVICE);
   1833		dma_unmap_addr_set(cb, dma_addr, 0);
   1834
   1835		if (cb == GENET_CB(skb)->last_cb)
   1836			return skb;
   1837
   1838	} else if (dma_unmap_addr(cb, dma_addr)) {
   1839		dma_unmap_page(dev,
   1840			       dma_unmap_addr(cb, dma_addr),
   1841			       dma_unmap_len(cb, dma_len),
   1842			       DMA_TO_DEVICE);
   1843		dma_unmap_addr_set(cb, dma_addr, 0);
   1844	}
   1845
   1846	return NULL;
   1847}
   1848
   1849/* Simple helper to free a receive control block's resources */
   1850static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
   1851					   struct enet_cb *cb)
   1852{
   1853	struct sk_buff *skb;
   1854
   1855	skb = cb->skb;
   1856	cb->skb = NULL;
   1857
   1858	if (dma_unmap_addr(cb, dma_addr)) {
   1859		dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
   1860				 dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
   1861		dma_unmap_addr_set(cb, dma_addr, 0);
   1862	}
   1863
   1864	return skb;
   1865}
   1866
   1867/* Unlocked version of the reclaim routine */
   1868static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
   1869					  struct bcmgenet_tx_ring *ring)
   1870{
   1871	struct bcmgenet_priv *priv = netdev_priv(dev);
   1872	unsigned int txbds_processed = 0;
   1873	unsigned int bytes_compl = 0;
   1874	unsigned int pkts_compl = 0;
   1875	unsigned int txbds_ready;
   1876	unsigned int c_index;
   1877	struct sk_buff *skb;
   1878
   1879	/* Clear status before servicing to reduce spurious interrupts */
   1880	if (ring->index == DESC_INDEX)
   1881		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
   1882					 INTRL2_CPU_CLEAR);
   1883	else
   1884		bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
   1885					 INTRL2_CPU_CLEAR);
   1886
    1887	/* Compute how many buffers the hardware has transmitted since the last reclaim */
   1888	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
   1889		& DMA_C_INDEX_MASK;
   1890	txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
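	/* Both indices are free-running counters truncated to DMA_C_INDEX_MASK,
	 * so the masked subtraction also handles wraparound: e.g. with a 0xffff
	 * mask, old c_index 0xfffe and new c_index 0x0003 give
	 * (0x0003 - 0xfffe) & 0xffff = 5 completed descriptors.
	 */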
   1891
   1892	netif_dbg(priv, tx_done, dev,
   1893		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
   1894		  __func__, ring->index, ring->c_index, c_index, txbds_ready);
   1895
   1896	/* Reclaim transmitted buffers */
   1897	while (txbds_processed < txbds_ready) {
   1898		skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
   1899					  &priv->tx_cbs[ring->clean_ptr]);
   1900		if (skb) {
   1901			pkts_compl++;
   1902			bytes_compl += GENET_CB(skb)->bytes_sent;
   1903			dev_consume_skb_any(skb);
   1904		}
   1905
   1906		txbds_processed++;
   1907		if (likely(ring->clean_ptr < ring->end_ptr))
   1908			ring->clean_ptr++;
   1909		else
   1910			ring->clean_ptr = ring->cb_ptr;
   1911	}
   1912
   1913	ring->free_bds += txbds_processed;
   1914	ring->c_index = c_index;
   1915
   1916	ring->packets += pkts_compl;
   1917	ring->bytes += bytes_compl;
   1918
   1919	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
   1920				  pkts_compl, bytes_compl);
   1921
   1922	return txbds_processed;
   1923}
   1924
   1925static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
   1926				struct bcmgenet_tx_ring *ring)
   1927{
   1928	unsigned int released;
   1929
   1930	spin_lock_bh(&ring->lock);
   1931	released = __bcmgenet_tx_reclaim(dev, ring);
   1932	spin_unlock_bh(&ring->lock);
   1933
   1934	return released;
   1935}
   1936
   1937static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
   1938{
   1939	struct bcmgenet_tx_ring *ring =
   1940		container_of(napi, struct bcmgenet_tx_ring, napi);
   1941	unsigned int work_done = 0;
   1942	struct netdev_queue *txq;
   1943
   1944	spin_lock(&ring->lock);
   1945	work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
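	/* Wake the queue once there is room for another worst-case frame
	 * (linear head plus MAX_SKB_FRAGS fragments); this mirrors the stop
	 * condition in bcmgenet_xmit().
	 */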
   1946	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
   1947		txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
   1948		netif_tx_wake_queue(txq);
   1949	}
   1950	spin_unlock(&ring->lock);
   1951
   1952	if (work_done == 0) {
   1953		napi_complete(napi);
   1954		ring->int_enable(ring);
   1955
   1956		return 0;
   1957	}
   1958
   1959	return budget;
   1960}
   1961
   1962static void bcmgenet_tx_reclaim_all(struct net_device *dev)
   1963{
   1964	struct bcmgenet_priv *priv = netdev_priv(dev);
   1965	int i;
   1966
   1967	if (netif_is_multiqueue(dev)) {
   1968		for (i = 0; i < priv->hw_params->tx_queues; i++)
   1969			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
   1970	}
   1971
   1972	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
   1973}
   1974
   1975/* Reallocate the SKB to put enough headroom in front of it and insert
    1976	 * the transmit checksum offsets into the Transmit Status Block (TSB)
   1977 */
   1978static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
   1979					struct sk_buff *skb)
   1980{
   1981	struct bcmgenet_priv *priv = netdev_priv(dev);
   1982	struct status_64 *status = NULL;
   1983	struct sk_buff *new_skb;
   1984	u16 offset;
   1985	u8 ip_proto;
   1986	__be16 ip_ver;
   1987	u32 tx_csum_info;
   1988
   1989	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
    1990		/* If the 64-byte status block is enabled, we must make sure the skb
    1991		 * has enough headroom for us to insert it.
   1992		 */
   1993		new_skb = skb_realloc_headroom(skb, sizeof(*status));
   1994		if (!new_skb) {
   1995			dev_kfree_skb_any(skb);
   1996			priv->mib.tx_realloc_tsb_failed++;
   1997			dev->stats.tx_dropped++;
   1998			return NULL;
   1999		}
   2000		dev_consume_skb_any(skb);
   2001		skb = new_skb;
   2002		priv->mib.tx_realloc_tsb++;
   2003	}
   2004
   2005	skb_push(skb, sizeof(*status));
   2006	status = (struct status_64 *)skb->data;
   2007
    2008	if (skb->ip_summed == CHECKSUM_PARTIAL) {
   2009		ip_ver = skb->protocol;
   2010		switch (ip_ver) {
   2011		case htons(ETH_P_IP):
   2012			ip_proto = ip_hdr(skb)->protocol;
   2013			break;
   2014		case htons(ETH_P_IPV6):
   2015			ip_proto = ipv6_hdr(skb)->nexthdr;
   2016			break;
   2017		default:
   2018			/* don't use UDP flag */
   2019			ip_proto = 0;
   2020			break;
   2021		}
   2022
   2023		offset = skb_checksum_start_offset(skb) - sizeof(*status);
   2024		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
   2025				(offset + skb->csum_offset) |
   2026				STATUS_TX_CSUM_LV;
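		/* Illustrative example (untagged IPv4/TCP frame, no IP options):
		 * the checksum starts at the TCP header, 14 + 20 = 34 bytes into
		 * the frame, so offset = 34 and the computed checksum is written
		 * back at byte 34 + 16 = 50.
		 */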
   2027
   2028		/* Set the special UDP flag for UDP */
   2029		if (ip_proto == IPPROTO_UDP)
   2030			tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
   2031
   2032		status->tx_csum_info = tx_csum_info;
   2033	}
   2034
   2035	return skb;
   2036}
   2037
   2038static void bcmgenet_hide_tsb(struct sk_buff *skb)
   2039{
   2040	__skb_pull(skb, sizeof(struct status_64));
   2041}
   2042
   2043static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
   2044{
   2045	struct bcmgenet_priv *priv = netdev_priv(dev);
   2046	struct device *kdev = &priv->pdev->dev;
   2047	struct bcmgenet_tx_ring *ring = NULL;
   2048	struct enet_cb *tx_cb_ptr;
   2049	struct netdev_queue *txq;
   2050	int nr_frags, index;
   2051	dma_addr_t mapping;
   2052	unsigned int size;
   2053	skb_frag_t *frag;
   2054	u32 len_stat;
   2055	int ret;
   2056	int i;
   2057
   2058	index = skb_get_queue_mapping(skb);
   2059	/* Mapping strategy:
    2060	 * queue_mapping = 0, unclassified, packet transmitted through ring 16
    2061	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
   2062	 * queue_mapping = 2, goes to ring 1.
   2063	 * queue_mapping = 3, goes to ring 2.
   2064	 * queue_mapping = 4, goes to ring 3.
   2065	 */
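	/* i.e. queue_mapping 0 selects the default ring (DESC_INDEX, ring 16)
	 * and mappings 1..4 select priority rings 0..3.
	 */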
   2066	if (index == 0)
   2067		index = DESC_INDEX;
   2068	else
   2069		index -= 1;
   2070
   2071	ring = &priv->tx_rings[index];
   2072	txq = netdev_get_tx_queue(dev, ring->queue);
   2073
   2074	nr_frags = skb_shinfo(skb)->nr_frags;
   2075
   2076	spin_lock(&ring->lock);
   2077	if (ring->free_bds <= (nr_frags + 1)) {
   2078		if (!netif_tx_queue_stopped(txq)) {
   2079			netif_tx_stop_queue(txq);
   2080			netdev_err(dev,
   2081				   "%s: tx ring %d full when queue %d awake\n",
   2082				   __func__, index, ring->queue);
   2083		}
   2084		ret = NETDEV_TX_BUSY;
   2085		goto out;
   2086	}
   2087
   2088	/* Retain how many bytes will be sent on the wire, without TSB inserted
   2089	 * by transmit checksum offload
   2090	 */
   2091	GENET_CB(skb)->bytes_sent = skb->len;
   2092
   2093	/* add the Transmit Status Block */
   2094	skb = bcmgenet_add_tsb(dev, skb);
   2095	if (!skb) {
   2096		ret = NETDEV_TX_OK;
   2097		goto out;
   2098	}
   2099
   2100	for (i = 0; i <= nr_frags; i++) {
   2101		tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
   2102
   2103		BUG_ON(!tx_cb_ptr);
   2104
   2105		if (!i) {
   2106			/* Transmit single SKB or head of fragment list */
   2107			GENET_CB(skb)->first_cb = tx_cb_ptr;
   2108			size = skb_headlen(skb);
   2109			mapping = dma_map_single(kdev, skb->data, size,
   2110						 DMA_TO_DEVICE);
   2111		} else {
   2112			/* xmit fragment */
   2113			frag = &skb_shinfo(skb)->frags[i - 1];
   2114			size = skb_frag_size(frag);
   2115			mapping = skb_frag_dma_map(kdev, frag, 0, size,
   2116						   DMA_TO_DEVICE);
   2117		}
   2118
   2119		ret = dma_mapping_error(kdev, mapping);
   2120		if (ret) {
   2121			priv->mib.tx_dma_failed++;
   2122			netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
   2123			ret = NETDEV_TX_OK;
   2124			goto out_unmap_frags;
   2125		}
   2126		dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
   2127		dma_unmap_len_set(tx_cb_ptr, dma_len, size);
   2128
   2129		tx_cb_ptr->skb = skb;
   2130
   2131		len_stat = (size << DMA_BUFLENGTH_SHIFT) |
   2132			   (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
   2133
   2134		/* Note: if we ever change from DMA_TX_APPEND_CRC below we
   2135		 * will need to restore software padding of "runt" packets
   2136		 */
   2137		if (!i) {
   2138			len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
   2139			if (skb->ip_summed == CHECKSUM_PARTIAL)
   2140				len_stat |= DMA_TX_DO_CSUM;
   2141		}
   2142		if (i == nr_frags)
   2143			len_stat |= DMA_EOP;
   2144
   2145		dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
   2146	}
   2147
   2148	GENET_CB(skb)->last_cb = tx_cb_ptr;
   2149
   2150	bcmgenet_hide_tsb(skb);
   2151	skb_tx_timestamp(skb);
   2152
   2153	/* Decrement total BD count and advance our write pointer */
   2154	ring->free_bds -= nr_frags + 1;
   2155	ring->prod_index += nr_frags + 1;
   2156	ring->prod_index &= DMA_P_INDEX_MASK;
   2157
   2158	netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);
   2159
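	/* Stop the queue if another worst-case frame (linear head plus
	 * MAX_SKB_FRAGS fragments) might not fit; bcmgenet_tx_poll() wakes it
	 * again once enough descriptors have been reclaimed.
	 */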
   2160	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
   2161		netif_tx_stop_queue(txq);
   2162
   2163	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
   2164		/* Packets are ready, update producer index */
   2165		bcmgenet_tdma_ring_writel(priv, ring->index,
   2166					  ring->prod_index, TDMA_PROD_INDEX);
   2167out:
   2168	spin_unlock(&ring->lock);
   2169
   2170	return ret;
   2171
   2172out_unmap_frags:
   2173	/* Back up for failed control block mapping */
   2174	bcmgenet_put_txcb(priv, ring);
   2175
   2176	/* Unmap successfully mapped control blocks */
   2177	while (i-- > 0) {
   2178		tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
   2179		bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
   2180	}
   2181
   2182	dev_kfree_skb(skb);
   2183	goto out;
   2184}
   2185
   2186static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
   2187					  struct enet_cb *cb)
   2188{
   2189	struct device *kdev = &priv->pdev->dev;
   2190	struct sk_buff *skb;
   2191	struct sk_buff *rx_skb;
   2192	dma_addr_t mapping;
   2193
   2194	/* Allocate a new Rx skb */
   2195	skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,
   2196				 GFP_ATOMIC | __GFP_NOWARN);
   2197	if (!skb) {
   2198		priv->mib.alloc_rx_buff_failed++;
   2199		netif_err(priv, rx_err, priv->dev,
   2200			  "%s: Rx skb allocation failed\n", __func__);
   2201		return NULL;
   2202	}
   2203
   2204	/* DMA-map the new Rx skb */
   2205	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
   2206				 DMA_FROM_DEVICE);
   2207	if (dma_mapping_error(kdev, mapping)) {
   2208		priv->mib.rx_dma_failed++;
   2209		dev_kfree_skb_any(skb);
   2210		netif_err(priv, rx_err, priv->dev,
   2211			  "%s: Rx skb DMA mapping failed\n", __func__);
   2212		return NULL;
   2213	}
   2214
   2215	/* Grab the current Rx skb from the ring and DMA-unmap it */
   2216	rx_skb = bcmgenet_free_rx_cb(kdev, cb);
   2217
   2218	/* Put the new Rx skb on the ring */
   2219	cb->skb = skb;
   2220	dma_unmap_addr_set(cb, dma_addr, mapping);
   2221	dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
   2222	dmadesc_set_addr(priv, cb->bd_addr, mapping);
   2223
   2224	/* Return the current Rx skb to caller */
   2225	return rx_skb;
   2226}
   2227
    2228	/* bcmgenet_desc_rx - descriptor-based Rx processing.
    2229	 * This can be called from the bottom half or from the NAPI polling method.
   2230 */
   2231static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
   2232				     unsigned int budget)
   2233{
   2234	struct bcmgenet_priv *priv = ring->priv;
   2235	struct net_device *dev = priv->dev;
   2236	struct enet_cb *cb;
   2237	struct sk_buff *skb;
   2238	u32 dma_length_status;
   2239	unsigned long dma_flag;
   2240	int len;
   2241	unsigned int rxpktprocessed = 0, rxpkttoprocess;
   2242	unsigned int bytes_processed = 0;
   2243	unsigned int p_index, mask;
   2244	unsigned int discards;
   2245
   2246	/* Clear status before servicing to reduce spurious interrupts */
   2247	if (ring->index == DESC_INDEX) {
   2248		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
   2249					 INTRL2_CPU_CLEAR);
   2250	} else {
   2251		mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
   2252		bcmgenet_intrl2_1_writel(priv,
   2253					 mask,
   2254					 INTRL2_CPU_CLEAR);
   2255	}
   2256
   2257	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
   2258
   2259	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
   2260		   DMA_P_INDEX_DISCARD_CNT_MASK;
   2261	if (discards > ring->old_discards) {
   2262		discards = discards - ring->old_discards;
   2263		ring->errors += discards;
   2264		ring->old_discards += discards;
   2265
   2266		/* Clear HW register when we reach 75% of maximum 0xFFFF */
   2267		if (ring->old_discards >= 0xC000) {
   2268			ring->old_discards = 0;
   2269			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
   2270						  RDMA_PROD_INDEX);
   2271		}
   2272	}
   2273
   2274	p_index &= DMA_P_INDEX_MASK;
   2275	rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
   2276
   2277	netif_dbg(priv, rx_status, dev,
   2278		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
   2279
   2280	while ((rxpktprocessed < rxpkttoprocess) &&
   2281	       (rxpktprocessed < budget)) {
   2282		struct status_64 *status;
   2283		__be16 rx_csum;
   2284
   2285		cb = &priv->rx_cbs[ring->read_ptr];
   2286		skb = bcmgenet_rx_refill(priv, cb);
   2287
   2288		if (unlikely(!skb)) {
   2289			ring->dropped++;
   2290			goto next;
   2291		}
   2292
   2293		status = (struct status_64 *)skb->data;
   2294		dma_length_status = status->length_status;
   2295		if (dev->features & NETIF_F_RXCSUM) {
   2296			rx_csum = (__force __be16)(status->rx_csum & 0xffff);
   2297			if (rx_csum) {
   2298				skb->csum = (__force __wsum)ntohs(rx_csum);
   2299				skb->ip_summed = CHECKSUM_COMPLETE;
   2300			}
   2301		}
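		/* CHECKSUM_COMPLETE indicates that skb->csum already carries the
		 * checksum the hardware computed over the received packet, so the
		 * stack can verify L4 checksums without walking the payload again.
		 */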
   2302
   2303		/* DMA flags and length are still valid no matter how
   2304		 * we got the Receive Status Vector (64B RSB or register)
   2305		 */
   2306		dma_flag = dma_length_status & 0xffff;
   2307		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
   2308
   2309		netif_dbg(priv, rx_status, dev,
   2310			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
   2311			  __func__, p_index, ring->c_index,
   2312			  ring->read_ptr, dma_length_status);
   2313
   2314		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
   2315			netif_err(priv, rx_status, dev,
   2316				  "dropping fragmented packet!\n");
   2317			ring->errors++;
   2318			dev_kfree_skb_any(skb);
   2319			goto next;
   2320		}
   2321
   2322		/* report errors */
   2323		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
   2324						DMA_RX_OV |
   2325						DMA_RX_NO |
   2326						DMA_RX_LG |
   2327						DMA_RX_RXER))) {
   2328			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
   2329				  (unsigned int)dma_flag);
   2330			if (dma_flag & DMA_RX_CRC_ERROR)
   2331				dev->stats.rx_crc_errors++;
   2332			if (dma_flag & DMA_RX_OV)
   2333				dev->stats.rx_over_errors++;
   2334			if (dma_flag & DMA_RX_NO)
   2335				dev->stats.rx_frame_errors++;
   2336			if (dma_flag & DMA_RX_LG)
   2337				dev->stats.rx_length_errors++;
   2338			dev->stats.rx_errors++;
   2339			dev_kfree_skb_any(skb);
   2340			goto next;
   2341		} /* error packet */
   2342
   2343		skb_put(skb, len);
   2344
    2345		/* remove the 64-byte RSB and the 2 bytes the hardware adds for IP alignment (66 total) */
   2346		skb_pull(skb, 66);
   2347		len -= 66;
   2348
   2349		if (priv->crc_fwd_en) {
   2350			skb_trim(skb, len - ETH_FCS_LEN);
   2351			len -= ETH_FCS_LEN;
   2352		}
   2353
   2354		bytes_processed += len;
   2355
    2356		/* Finish setting up the received SKB and send it to the kernel */
   2357		skb->protocol = eth_type_trans(skb, priv->dev);
   2358		ring->packets++;
   2359		ring->bytes += len;
   2360		if (dma_flag & DMA_RX_MULT)
   2361			dev->stats.multicast++;
   2362
   2363		/* Notify kernel */
   2364		napi_gro_receive(&ring->napi, skb);
   2365		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
   2366
   2367next:
   2368		rxpktprocessed++;
   2369		if (likely(ring->read_ptr < ring->end_ptr))
   2370			ring->read_ptr++;
   2371		else
   2372			ring->read_ptr = ring->cb_ptr;
   2373
   2374		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
   2375		bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
   2376	}
   2377
   2378	ring->dim.bytes = bytes_processed;
   2379	ring->dim.packets = rxpktprocessed;
   2380
   2381	return rxpktprocessed;
   2382}
   2383
   2384/* Rx NAPI polling method */
   2385static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
   2386{
   2387	struct bcmgenet_rx_ring *ring = container_of(napi,
   2388			struct bcmgenet_rx_ring, napi);
   2389	struct dim_sample dim_sample = {};
   2390	unsigned int work_done;
   2391
   2392	work_done = bcmgenet_desc_rx(ring, budget);
   2393
   2394	if (work_done < budget) {
   2395		napi_complete_done(napi, work_done);
   2396		ring->int_enable(ring);
   2397	}
   2398
   2399	if (ring->dim.use_dim) {
   2400		dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
   2401				  ring->dim.bytes, &dim_sample);
   2402		net_dim(&ring->dim.dim, dim_sample);
   2403	}
   2404
   2405	return work_done;
   2406}
   2407
   2408static void bcmgenet_dim_work(struct work_struct *work)
   2409{
   2410	struct dim *dim = container_of(work, struct dim, work);
   2411	struct bcmgenet_net_dim *ndim =
   2412			container_of(dim, struct bcmgenet_net_dim, dim);
   2413	struct bcmgenet_rx_ring *ring =
   2414			container_of(ndim, struct bcmgenet_rx_ring, dim);
   2415	struct dim_cq_moder cur_profile =
   2416			net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
   2417
   2418	bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
   2419	dim->state = DIM_START_MEASURE;
   2420}
   2421
   2422/* Assign skb to RX DMA descriptor. */
   2423static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
   2424				     struct bcmgenet_rx_ring *ring)
   2425{
   2426	struct enet_cb *cb;
   2427	struct sk_buff *skb;
   2428	int i;
   2429
   2430	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
   2431
    2432	/* assign a freshly allocated Rx skb to each buffer in the ring */
   2433	for (i = 0; i < ring->size; i++) {
   2434		cb = ring->cbs + i;
   2435		skb = bcmgenet_rx_refill(priv, cb);
   2436		if (skb)
   2437			dev_consume_skb_any(skb);
   2438		if (!cb->skb)
   2439			return -ENOMEM;
   2440	}
   2441
   2442	return 0;
   2443}
   2444
   2445static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
   2446{
   2447	struct sk_buff *skb;
   2448	struct enet_cb *cb;
   2449	int i;
   2450
   2451	for (i = 0; i < priv->num_rx_bds; i++) {
   2452		cb = &priv->rx_cbs[i];
   2453
   2454		skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
   2455		if (skb)
   2456			dev_consume_skb_any(skb);
   2457	}
   2458}
   2459
   2460static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
   2461{
   2462	u32 reg;
   2463
   2464	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
   2465	if (reg & CMD_SW_RESET)
   2466		return;
   2467	if (enable)
   2468		reg |= mask;
   2469	else
   2470		reg &= ~mask;
   2471	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
   2472
    2473	/* UniMAC stops on a packet boundary; wait for a full-size packet
   2474	 * to be processed
   2475	 */
   2476	if (enable == 0)
   2477		usleep_range(1000, 2000);
   2478}
   2479
   2480static void reset_umac(struct bcmgenet_priv *priv)
   2481{
   2482	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
   2483	bcmgenet_rbuf_ctrl_set(priv, 0);
   2484	udelay(10);
   2485
   2486	/* issue soft reset and disable MAC while updating its registers */
   2487	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
   2488	udelay(2);
   2489}
   2490
   2491static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
   2492{
    2493	/* Mask all interrupts. */
   2494	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
   2495	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
   2496	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
   2497	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
   2498}
   2499
   2500static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
   2501{
   2502	u32 int0_enable = 0;
   2503
    2504	/* Monitor cable plug/unplug events for the internal PHY, external PHY
   2505	 * and MoCA PHY
   2506	 */
   2507	if (priv->internal_phy) {
   2508		int0_enable |= UMAC_IRQ_LINK_EVENT;
   2509		if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
   2510			int0_enable |= UMAC_IRQ_PHY_DET_R;
   2511	} else if (priv->ext_phy) {
   2512		int0_enable |= UMAC_IRQ_LINK_EVENT;
   2513	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
   2514		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
   2515			int0_enable |= UMAC_IRQ_LINK_EVENT;
   2516	}
   2517	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
   2518}
   2519
   2520static void init_umac(struct bcmgenet_priv *priv)
   2521{
   2522	struct device *kdev = &priv->pdev->dev;
   2523	u32 reg;
   2524	u32 int0_enable = 0;
   2525
   2526	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
   2527
   2528	reset_umac(priv);
   2529
   2530	/* clear tx/rx counter */
   2531	bcmgenet_umac_writel(priv,
   2532			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
   2533			     UMAC_MIB_CTRL);
   2534	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
   2535
   2536	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
   2537
   2538	/* init tx registers, enable TSB */
   2539	reg = bcmgenet_tbuf_ctrl_get(priv);
   2540	reg |= TBUF_64B_EN;
   2541	bcmgenet_tbuf_ctrl_set(priv, reg);
   2542
   2543	/* init rx registers, enable ip header optimization and RSB */
   2544	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
   2545	reg |= RBUF_ALIGN_2B | RBUF_64B_EN;
   2546	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
   2547
   2548	/* enable rx checksumming */
   2549	reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
   2550	reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
   2551	/* If UniMAC forwards CRC, we need to skip over it to get
   2552	 * a valid CHK bit to be set in the per-packet status word
   2553	 */
   2554	if (priv->crc_fwd_en)
   2555		reg |= RBUF_SKIP_FCS;
   2556	else
   2557		reg &= ~RBUF_SKIP_FCS;
   2558	bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL);
   2559
   2560	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
   2561		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
   2562
   2563	bcmgenet_intr_disable(priv);
   2564
   2565	/* Configure backpressure vectors for MoCA */
   2566	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
   2567		reg = bcmgenet_bp_mc_get(priv);
   2568		reg |= BIT(priv->hw_params->bp_in_en_shift);
   2569
   2570		/* bp_mask: back pressure mask */
   2571		if (netif_is_multiqueue(priv->dev))
   2572			reg |= priv->hw_params->bp_in_mask;
   2573		else
   2574			reg &= ~priv->hw_params->bp_in_mask;
   2575		bcmgenet_bp_mc_set(priv, reg);
   2576	}
   2577
   2578	/* Enable MDIO interrupts on GENET v3+ */
   2579	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
   2580		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
   2581
   2582	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
   2583
   2584	dev_dbg(kdev, "done init umac\n");
   2585}
   2586
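/* Dynamic Interrupt Moderation (DIM): the net_dim library consumes the
 * per-ring packet/byte samples collected in bcmgenet_rx_poll() and schedules
 * bcmgenet_dim_work() to retune the Rx coalescing parameters.
 */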
   2587static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring,
   2588			      void (*cb)(struct work_struct *work))
   2589{
   2590	struct bcmgenet_net_dim *dim = &ring->dim;
   2591
   2592	INIT_WORK(&dim->dim.work, cb);
   2593	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
   2594	dim->event_ctr = 0;
   2595	dim->packets = 0;
   2596	dim->bytes = 0;
   2597}
   2598
   2599static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
   2600{
   2601	struct bcmgenet_net_dim *dim = &ring->dim;
   2602	struct dim_cq_moder moder;
   2603	u32 usecs, pkts;
   2604
   2605	usecs = ring->rx_coalesce_usecs;
   2606	pkts = ring->rx_max_coalesced_frames;
   2607
   2608	/* If DIM was enabled, re-apply default parameters */
   2609	if (dim->use_dim) {
   2610		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
   2611		usecs = moder.usec;
   2612		pkts = moder.pkts;
   2613	}
   2614
   2615	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
   2616}
   2617
   2618/* Initialize a Tx ring along with corresponding hardware registers */
   2619static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
   2620				  unsigned int index, unsigned int size,
   2621				  unsigned int start_ptr, unsigned int end_ptr)
   2622{
   2623	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
   2624	u32 words_per_bd = WORDS_PER_BD(priv);
   2625	u32 flow_period_val = 0;
   2626
   2627	spin_lock_init(&ring->lock);
   2628	ring->priv = priv;
   2629	ring->index = index;
   2630	if (index == DESC_INDEX) {
   2631		ring->queue = 0;
   2632		ring->int_enable = bcmgenet_tx_ring16_int_enable;
   2633		ring->int_disable = bcmgenet_tx_ring16_int_disable;
   2634	} else {
   2635		ring->queue = index + 1;
   2636		ring->int_enable = bcmgenet_tx_ring_int_enable;
   2637		ring->int_disable = bcmgenet_tx_ring_int_disable;
   2638	}
   2639	ring->cbs = priv->tx_cbs + start_ptr;
   2640	ring->size = size;
   2641	ring->clean_ptr = start_ptr;
   2642	ring->c_index = 0;
   2643	ring->free_bds = size;
   2644	ring->write_ptr = start_ptr;
   2645	ring->cb_ptr = start_ptr;
   2646	ring->end_ptr = end_ptr - 1;
   2647	ring->prod_index = 0;
   2648
   2649	/* Set flow period for ring != 16 */
   2650	if (index != DESC_INDEX)
   2651		flow_period_val = ENET_MAX_MTU_SIZE << 16;
   2652
   2653	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
   2654	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
   2655	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
   2656	/* Disable rate control for now */
   2657	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
   2658				  TDMA_FLOW_PERIOD);
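	/* DMA_RING_BUF_SIZE combines the descriptor count (shifted by
	 * DMA_RING_SIZE_SHIFT) with the buffer length; the Tx ring reuses
	 * RX_BUF_LENGTH for the buffer-size field.
	 */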
   2659	bcmgenet_tdma_ring_writel(priv, index,
   2660				  ((size << DMA_RING_SIZE_SHIFT) |
   2661				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
   2662
   2663	/* Set start and end address, read and write pointers */
   2664	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
   2665				  DMA_START_ADDR);
   2666	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
   2667				  TDMA_READ_PTR);
   2668	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
   2669				  TDMA_WRITE_PTR);
   2670	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
   2671				  DMA_END_ADDR);
   2672
   2673	/* Initialize Tx NAPI */
   2674	netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll);
   2675}
   2676
    2677	/* Initialize an RDMA ring */
   2678static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
   2679				 unsigned int index, unsigned int size,
   2680				 unsigned int start_ptr, unsigned int end_ptr)
   2681{
   2682	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
   2683	u32 words_per_bd = WORDS_PER_BD(priv);
   2684	int ret;
   2685
   2686	ring->priv = priv;
   2687	ring->index = index;
   2688	if (index == DESC_INDEX) {
   2689		ring->int_enable = bcmgenet_rx_ring16_int_enable;
   2690		ring->int_disable = bcmgenet_rx_ring16_int_disable;
   2691	} else {
   2692		ring->int_enable = bcmgenet_rx_ring_int_enable;
   2693		ring->int_disable = bcmgenet_rx_ring_int_disable;
   2694	}
   2695	ring->cbs = priv->rx_cbs + start_ptr;
   2696	ring->size = size;
   2697	ring->c_index = 0;
   2698	ring->read_ptr = start_ptr;
   2699	ring->cb_ptr = start_ptr;
   2700	ring->end_ptr = end_ptr - 1;
   2701
   2702	ret = bcmgenet_alloc_rx_buffers(priv, ring);
   2703	if (ret)
   2704		return ret;
   2705
   2706	bcmgenet_init_dim(ring, bcmgenet_dim_work);
   2707	bcmgenet_init_rx_coalesce(ring);
   2708
   2709	/* Initialize Rx NAPI */
   2710	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll,
   2711		       NAPI_POLL_WEIGHT);
   2712
   2713	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
   2714	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
   2715	bcmgenet_rdma_ring_writel(priv, index,
   2716				  ((size << DMA_RING_SIZE_SHIFT) |
   2717				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
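	/* Rx flow-control thresholds: the XOFF threshold (DMA_FC_THRESH_LO)
	 * sits in the DMA_XOFF_THRESHOLD_SHIFT field, the XON threshold
	 * (DMA_FC_THRESH_HI) in the low bits of RDMA_XON_XOFF_THRESH.
	 */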
   2718	bcmgenet_rdma_ring_writel(priv, index,
   2719				  (DMA_FC_THRESH_LO <<
   2720				   DMA_XOFF_THRESHOLD_SHIFT) |
   2721				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
   2722
   2723	/* Set start and end address, read and write pointers */
   2724	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
   2725				  DMA_START_ADDR);
   2726	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
   2727				  RDMA_READ_PTR);
   2728	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
   2729				  RDMA_WRITE_PTR);
   2730	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
   2731				  DMA_END_ADDR);
   2732
   2733	return ret;
   2734}
   2735
   2736static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
   2737{
   2738	unsigned int i;
   2739	struct bcmgenet_tx_ring *ring;
   2740
   2741	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
   2742		ring = &priv->tx_rings[i];
   2743		napi_enable(&ring->napi);
   2744		ring->int_enable(ring);
   2745	}
   2746
   2747	ring = &priv->tx_rings[DESC_INDEX];
   2748	napi_enable(&ring->napi);
   2749	ring->int_enable(ring);
   2750}
   2751
   2752static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
   2753{
   2754	unsigned int i;
   2755	struct bcmgenet_tx_ring *ring;
   2756
   2757	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
   2758		ring = &priv->tx_rings[i];
   2759		napi_disable(&ring->napi);
   2760	}
   2761
   2762	ring = &priv->tx_rings[DESC_INDEX];
   2763	napi_disable(&ring->napi);
   2764}
   2765
   2766static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
   2767{
   2768	unsigned int i;
   2769	struct bcmgenet_tx_ring *ring;
   2770
   2771	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
   2772		ring = &priv->tx_rings[i];
   2773		netif_napi_del(&ring->napi);
   2774	}
   2775
   2776	ring = &priv->tx_rings[DESC_INDEX];
   2777	netif_napi_del(&ring->napi);
   2778}
   2779
   2780/* Initialize Tx queues
   2781 *
   2782 * Queues 0-3 are priority-based, each one has 32 descriptors,
   2783 * with queue 0 being the highest priority queue.
   2784 *
   2785 * Queue 16 is the default Tx queue with
   2786 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
   2787 *
   2788 * The transmit control block pool is then partitioned as follows:
   2789 * - Tx queue 0 uses tx_cbs[0..31]
   2790 * - Tx queue 1 uses tx_cbs[32..63]
   2791 * - Tx queue 2 uses tx_cbs[64..95]
   2792 * - Tx queue 3 uses tx_cbs[96..127]
   2793 * - Tx queue 16 uses tx_cbs[128..255]
   2794 */
   2795static void bcmgenet_init_tx_queues(struct net_device *dev)
   2796{
   2797	struct bcmgenet_priv *priv = netdev_priv(dev);
   2798	u32 i, dma_enable;
   2799	u32 dma_ctrl, ring_cfg;
   2800	u32 dma_priority[3] = {0, 0, 0};
   2801
   2802	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
   2803	dma_enable = dma_ctrl & DMA_EN;
   2804	dma_ctrl &= ~DMA_EN;
   2805	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
   2806
   2807	dma_ctrl = 0;
   2808	ring_cfg = 0;
   2809
   2810	/* Enable strict priority arbiter mode */
   2811	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
   2812
   2813	/* Initialize Tx priority queues */
   2814	for (i = 0; i < priv->hw_params->tx_queues; i++) {
   2815		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
   2816				      i * priv->hw_params->tx_bds_per_q,
   2817				      (i + 1) * priv->hw_params->tx_bds_per_q);
   2818		ring_cfg |= (1 << i);
   2819		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
   2820		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
   2821			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
   2822	}
   2823
   2824	/* Initialize Tx default queue 16 */
   2825	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
   2826			      priv->hw_params->tx_queues *
   2827			      priv->hw_params->tx_bds_per_q,
   2828			      TOTAL_DESC);
   2829	ring_cfg |= (1 << DESC_INDEX);
   2830	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
   2831	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
   2832		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
   2833		 DMA_PRIO_REG_SHIFT(DESC_INDEX));
   2834
   2835	/* Set Tx queue priorities */
   2836	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
   2837	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
   2838	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
   2839
   2840	/* Enable Tx queues */
   2841	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
   2842
   2843	/* Enable Tx DMA */
   2844	if (dma_enable)
   2845		dma_ctrl |= DMA_EN;
   2846	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
   2847}
   2848
   2849static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
   2850{
   2851	unsigned int i;
   2852	struct bcmgenet_rx_ring *ring;
   2853
   2854	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
   2855		ring = &priv->rx_rings[i];
   2856		napi_enable(&ring->napi);
   2857		ring->int_enable(ring);
   2858	}
   2859
   2860	ring = &priv->rx_rings[DESC_INDEX];
   2861	napi_enable(&ring->napi);
   2862	ring->int_enable(ring);
   2863}
   2864
   2865static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
   2866{
   2867	unsigned int i;
   2868	struct bcmgenet_rx_ring *ring;
   2869
   2870	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
   2871		ring = &priv->rx_rings[i];
   2872		napi_disable(&ring->napi);
   2873		cancel_work_sync(&ring->dim.dim.work);
   2874	}
   2875
   2876	ring = &priv->rx_rings[DESC_INDEX];
   2877	napi_disable(&ring->napi);
   2878	cancel_work_sync(&ring->dim.dim.work);
   2879}
   2880
   2881static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
   2882{
   2883	unsigned int i;
   2884	struct bcmgenet_rx_ring *ring;
   2885
   2886	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
   2887		ring = &priv->rx_rings[i];
   2888		netif_napi_del(&ring->napi);
   2889	}
   2890
   2891	ring = &priv->rx_rings[DESC_INDEX];
   2892	netif_napi_del(&ring->napi);
   2893}
   2894
   2895/* Initialize Rx queues
   2896 *
   2897 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
   2898 * used to direct traffic to these queues.
   2899 *
   2900 * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
   2901 */
   2902static int bcmgenet_init_rx_queues(struct net_device *dev)
   2903{
   2904	struct bcmgenet_priv *priv = netdev_priv(dev);
   2905	u32 i;
   2906	u32 dma_enable;
   2907	u32 dma_ctrl;
   2908	u32 ring_cfg;
   2909	int ret;
   2910
   2911	dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
   2912	dma_enable = dma_ctrl & DMA_EN;
   2913	dma_ctrl &= ~DMA_EN;
   2914	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
   2915
   2916	dma_ctrl = 0;
   2917	ring_cfg = 0;
   2918
   2919	/* Initialize Rx priority queues */
   2920	for (i = 0; i < priv->hw_params->rx_queues; i++) {
   2921		ret = bcmgenet_init_rx_ring(priv, i,
   2922					    priv->hw_params->rx_bds_per_q,
   2923					    i * priv->hw_params->rx_bds_per_q,
   2924					    (i + 1) *
   2925					    priv->hw_params->rx_bds_per_q);
   2926		if (ret)
   2927			return ret;
   2928
   2929		ring_cfg |= (1 << i);
   2930		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
   2931	}
   2932
   2933	/* Initialize Rx default queue 16 */
   2934	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
   2935				    priv->hw_params->rx_queues *
   2936				    priv->hw_params->rx_bds_per_q,
   2937				    TOTAL_DESC);
   2938	if (ret)
   2939		return ret;
   2940
   2941	ring_cfg |= (1 << DESC_INDEX);
   2942	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
   2943
   2944	/* Enable rings */
   2945	bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
   2946
    2947	/* Configure the rings as descriptor rings and re-enable DMA if it was previously enabled */
   2948	if (dma_enable)
   2949		dma_ctrl |= DMA_EN;
   2950	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
   2951
   2952	return 0;
   2953}
   2954
   2955static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
   2956{
   2957	int ret = 0;
   2958	int timeout = 0;
   2959	u32 reg;
   2960	u32 dma_ctrl;
   2961	int i;
   2962
    2963	/* Disable TDMA so no more frames are added to the TX DMA */
   2964	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
   2965	reg &= ~DMA_EN;
   2966	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
   2967
   2968	/* Check TDMA status register to confirm TDMA is disabled */
   2969	while (timeout++ < DMA_TIMEOUT_VAL) {
   2970		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
   2971		if (reg & DMA_DISABLED)
   2972			break;
   2973
   2974		udelay(1);
   2975	}
   2976
   2977	if (timeout == DMA_TIMEOUT_VAL) {
   2978		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
   2979		ret = -ETIMEDOUT;
   2980	}
   2981
   2982	/* Wait 10ms for packet drain in both tx and rx dma */
   2983	usleep_range(10000, 20000);
   2984
   2985	/* Disable RDMA */
   2986	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
   2987	reg &= ~DMA_EN;
   2988	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
   2989
   2990	timeout = 0;
   2991	/* Check RDMA status register to confirm RDMA is disabled */
   2992	while (timeout++ < DMA_TIMEOUT_VAL) {
   2993		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
   2994		if (reg & DMA_DISABLED)
   2995			break;
   2996
   2997		udelay(1);
   2998	}
   2999
   3000	if (timeout == DMA_TIMEOUT_VAL) {
   3001		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
   3002		ret = -ETIMEDOUT;
   3003	}
   3004
   3005	dma_ctrl = 0;
   3006	for (i = 0; i < priv->hw_params->rx_queues; i++)
   3007		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
   3008	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
   3009	reg &= ~dma_ctrl;
   3010	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
   3011
   3012	dma_ctrl = 0;
   3013	for (i = 0; i < priv->hw_params->tx_queues; i++)
   3014		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
   3015	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
   3016	reg &= ~dma_ctrl;
   3017	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
   3018
   3019	return ret;
   3020}
   3021
   3022static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
   3023{
   3024	struct netdev_queue *txq;
   3025	int i;
   3026
   3027	bcmgenet_fini_rx_napi(priv);
   3028	bcmgenet_fini_tx_napi(priv);
   3029
   3030	for (i = 0; i < priv->num_tx_bds; i++)
   3031		dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
   3032						  priv->tx_cbs + i));
   3033
   3034	for (i = 0; i < priv->hw_params->tx_queues; i++) {
   3035		txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
   3036		netdev_tx_reset_queue(txq);
   3037	}
   3038
   3039	txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
   3040	netdev_tx_reset_queue(txq);
   3041
   3042	bcmgenet_free_rx_buffers(priv);
   3043	kfree(priv->rx_cbs);
   3044	kfree(priv->tx_cbs);
   3045}
   3046
    3047	/* bcmgenet_init_dma: initialize the DMA rings and control registers */
   3048static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
   3049{
   3050	int ret;
   3051	unsigned int i;
   3052	struct enet_cb *cb;
   3053
   3054	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
   3055
   3056	/* Initialize common Rx ring structures */
   3057	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
   3058	priv->num_rx_bds = TOTAL_DESC;
   3059	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
   3060			       GFP_KERNEL);
   3061	if (!priv->rx_cbs)
   3062		return -ENOMEM;
   3063
   3064	for (i = 0; i < priv->num_rx_bds; i++) {
   3065		cb = priv->rx_cbs + i;
   3066		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
   3067	}
   3068
   3069	/* Initialize common TX ring structures */
   3070	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
   3071	priv->num_tx_bds = TOTAL_DESC;
   3072	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
   3073			       GFP_KERNEL);
   3074	if (!priv->tx_cbs) {
   3075		kfree(priv->rx_cbs);
   3076		return -ENOMEM;
   3077	}
   3078
   3079	for (i = 0; i < priv->num_tx_bds; i++) {
   3080		cb = priv->tx_cbs + i;
   3081		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
   3082	}
   3083
   3084	/* Init rDma */
   3085	bcmgenet_rdma_writel(priv, priv->dma_max_burst_length,
   3086			     DMA_SCB_BURST_SIZE);
   3087
   3088	/* Initialize Rx queues */
   3089	ret = bcmgenet_init_rx_queues(priv->dev);
   3090	if (ret) {
   3091		netdev_err(priv->dev, "failed to initialize Rx queues\n");
   3092		bcmgenet_free_rx_buffers(priv);
   3093		kfree(priv->rx_cbs);
   3094		kfree(priv->tx_cbs);
   3095		return ret;
   3096	}
   3097
   3098	/* Init tDma */
   3099	bcmgenet_tdma_writel(priv, priv->dma_max_burst_length,
   3100			     DMA_SCB_BURST_SIZE);
   3101
   3102	/* Initialize Tx queues */
   3103	bcmgenet_init_tx_queues(priv->dev);
   3104
   3105	return 0;
   3106}
   3107
   3108/* Interrupt bottom half */
   3109static void bcmgenet_irq_task(struct work_struct *work)
   3110{
   3111	unsigned int status;
   3112	struct bcmgenet_priv *priv = container_of(
   3113			work, struct bcmgenet_priv, bcmgenet_irq_work);
   3114
   3115	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
   3116
   3117	spin_lock_irq(&priv->lock);
   3118	status = priv->irq0_stat;
   3119	priv->irq0_stat = 0;
   3120	spin_unlock_irq(&priv->lock);
   3121
   3122	if (status & UMAC_IRQ_PHY_DET_R &&
   3123	    priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
   3124		phy_init_hw(priv->dev->phydev);
   3125		genphy_config_aneg(priv->dev->phydev);
   3126	}
   3127
   3128	/* Link UP/DOWN event */
   3129	if (status & UMAC_IRQ_LINK_EVENT)
   3130		phy_mac_interrupt(priv->dev->phydev);
   3131
   3132}
   3133
   3134/* bcmgenet_isr1: handle Rx and Tx priority queues */
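/* In INTRL2_1 each priority ring has its own bit: Tx ring interrupts map
 * directly onto bit <index>, Rx ring interrupts onto
 * bit (UMAC_IRQ1_RX_INTR_SHIFT + index).
 */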
   3135static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
   3136{
   3137	struct bcmgenet_priv *priv = dev_id;
   3138	struct bcmgenet_rx_ring *rx_ring;
   3139	struct bcmgenet_tx_ring *tx_ring;
   3140	unsigned int index, status;
   3141
   3142	/* Read irq status */
   3143	status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
   3144		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
   3145
   3146	/* clear interrupts */
   3147	bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
   3148
   3149	netif_dbg(priv, intr, priv->dev,
   3150		  "%s: IRQ=0x%x\n", __func__, status);
   3151
   3152	/* Check Rx priority queue interrupts */
   3153	for (index = 0; index < priv->hw_params->rx_queues; index++) {
   3154		if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
   3155			continue;
   3156
   3157		rx_ring = &priv->rx_rings[index];
   3158		rx_ring->dim.event_ctr++;
   3159
   3160		if (likely(napi_schedule_prep(&rx_ring->napi))) {
   3161			rx_ring->int_disable(rx_ring);
   3162			__napi_schedule_irqoff(&rx_ring->napi);
   3163		}
   3164	}
   3165
   3166	/* Check Tx priority queue interrupts */
   3167	for (index = 0; index < priv->hw_params->tx_queues; index++) {
   3168		if (!(status & BIT(index)))
   3169			continue;
   3170
   3171		tx_ring = &priv->tx_rings[index];
   3172
   3173		if (likely(napi_schedule_prep(&tx_ring->napi))) {
   3174			tx_ring->int_disable(tx_ring);
   3175			__napi_schedule_irqoff(&tx_ring->napi);
   3176		}
   3177	}
   3178
   3179	return IRQ_HANDLED;
   3180}
   3181
   3182/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
   3183static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
   3184{
   3185	struct bcmgenet_priv *priv = dev_id;
   3186	struct bcmgenet_rx_ring *rx_ring;
   3187	struct bcmgenet_tx_ring *tx_ring;
   3188	unsigned int status;
   3189	unsigned long flags;
   3190
   3191	/* Read irq status */
   3192	status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
   3193		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
   3194
   3195	/* clear interrupts */
   3196	bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
   3197
   3198	netif_dbg(priv, intr, priv->dev,
   3199		  "IRQ=0x%x\n", status);
   3200
   3201	if (status & UMAC_IRQ_RXDMA_DONE) {
   3202		rx_ring = &priv->rx_rings[DESC_INDEX];
   3203		rx_ring->dim.event_ctr++;
   3204
   3205		if (likely(napi_schedule_prep(&rx_ring->napi))) {
   3206			rx_ring->int_disable(rx_ring);
   3207			__napi_schedule_irqoff(&rx_ring->napi);
   3208		}
   3209	}
   3210
   3211	if (status & UMAC_IRQ_TXDMA_DONE) {
   3212		tx_ring = &priv->tx_rings[DESC_INDEX];
   3213
   3214		if (likely(napi_schedule_prep(&tx_ring->napi))) {
   3215			tx_ring->int_disable(tx_ring);
   3216			__napi_schedule_irqoff(&tx_ring->napi);
   3217		}
   3218	}
   3219
   3220	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
   3221		status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
   3222		wake_up(&priv->wq);
   3223	}
   3224
    3225	/* all other interrupts of interest are handled in the bottom half */
   3226	status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
   3227	if (status) {
   3228		/* Save irq status for bottom-half processing. */
   3229		spin_lock_irqsave(&priv->lock, flags);
   3230		priv->irq0_stat |= status;
   3231		spin_unlock_irqrestore(&priv->lock, flags);
   3232
   3233		schedule_work(&priv->bcmgenet_irq_work);
   3234	}
   3235
   3236	return IRQ_HANDLED;
   3237}
   3238
   3239static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
   3240{
   3241	/* Acknowledge the interrupt */
   3242	return IRQ_HANDLED;
   3243}
   3244
   3245#ifdef CONFIG_NET_POLL_CONTROLLER
   3246static void bcmgenet_poll_controller(struct net_device *dev)
   3247{
   3248	struct bcmgenet_priv *priv = netdev_priv(dev);
   3249
   3250	/* Invoke the main RX/TX interrupt handler */
   3251	disable_irq(priv->irq0);
   3252	bcmgenet_isr0(priv->irq0, priv);
   3253	enable_irq(priv->irq0);
   3254
   3255	/* And the interrupt handler for RX/TX priority queues */
   3256	disable_irq(priv->irq1);
   3257	bcmgenet_isr1(priv->irq1, priv);
   3258	enable_irq(priv->irq1);
   3259}
   3260#endif
   3261
   3262static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
   3263{
   3264	u32 reg;
   3265
   3266	reg = bcmgenet_rbuf_ctrl_get(priv);
   3267	reg |= BIT(1);
   3268	bcmgenet_rbuf_ctrl_set(priv, reg);
   3269	udelay(10);
   3270
   3271	reg &= ~BIT(1);
   3272	bcmgenet_rbuf_ctrl_set(priv, reg);
   3273	udelay(10);
   3274}
   3275
   3276static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
   3277				 const unsigned char *addr)
   3278{
   3279	bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0);
   3280	bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1);
   3281}
   3282
   3283static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
   3284				 unsigned char *addr)
   3285{
   3286	u32 addr_tmp;
   3287
   3288	addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0);
   3289	put_unaligned_be32(addr_tmp, &addr[0]);
   3290	addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1);
   3291	put_unaligned_be16(addr_tmp, &addr[4]);
   3292}
   3293
   3294/* Returns a reusable dma control register value */
   3295static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
   3296{
   3297	unsigned int i;
   3298	u32 reg;
   3299	u32 dma_ctrl;
   3300
   3301	/* disable DMA */
   3302	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
   3303	for (i = 0; i < priv->hw_params->tx_queues; i++)
   3304		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
   3305	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
   3306	reg &= ~dma_ctrl;
   3307	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
   3308
   3309	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
   3310	for (i = 0; i < priv->hw_params->rx_queues; i++)
   3311		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
   3312	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
   3313	reg &= ~dma_ctrl;
   3314	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
   3315
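	/* Pulse the UniMAC Tx flush bit to discard any frames still queued in
	 * the Tx FIFO before the rings are reprogrammed.
	 */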
   3316	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
   3317	udelay(10);
   3318	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
   3319
   3320	return dma_ctrl;
   3321}
   3322
   3323static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
   3324{
   3325	u32 reg;
   3326
   3327	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
   3328	reg |= dma_ctrl;
   3329	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
   3330
   3331	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
   3332	reg |= dma_ctrl;
   3333	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
   3334}
   3335
   3336static void bcmgenet_netif_start(struct net_device *dev)
   3337{
   3338	struct bcmgenet_priv *priv = netdev_priv(dev);
   3339
   3340	/* Start the network engine */
   3341	bcmgenet_set_rx_mode(dev);
   3342	bcmgenet_enable_rx_napi(priv);
   3343
   3344	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
   3345
   3346	bcmgenet_enable_tx_napi(priv);
   3347
   3348	/* Monitor link interrupts now */
   3349	bcmgenet_link_intr_enable(priv);
   3350
   3351	phy_start(dev->phydev);
   3352}
   3353
   3354static int bcmgenet_open(struct net_device *dev)
   3355{
   3356	struct bcmgenet_priv *priv = netdev_priv(dev);
   3357	unsigned long dma_ctrl;
   3358	int ret;
   3359
   3360	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
   3361
   3362	/* Turn on the clock */
   3363	clk_prepare_enable(priv->clk);
   3364
   3365	/* If this is an internal GPHY, power it back on now, before UniMAC is
   3366	 * brought out of reset as absolutely no UniMAC activity is allowed
   3367	 */
   3368	if (priv->internal_phy)
   3369		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
   3370
   3371	/* take MAC out of reset */
   3372	bcmgenet_umac_reset(priv);
   3373
   3374	init_umac(priv);
   3375
   3376	/* Apply features again in case we changed them while interface was
   3377	 * down
   3378	 */
   3379	bcmgenet_set_features(dev, dev->features);
   3380
   3381	bcmgenet_set_hw_addr(priv, dev->dev_addr);
   3382
   3383	/* Disable RX/TX DMA and flush TX queues */
   3384	dma_ctrl = bcmgenet_dma_disable(priv);
   3385
   3386	/* Reinitialize TDMA and RDMA and SW housekeeping */
   3387	ret = bcmgenet_init_dma(priv);
   3388	if (ret) {
   3389		netdev_err(dev, "failed to initialize DMA\n");
   3390		goto err_clk_disable;
   3391	}
   3392
   3393	/* Always enable ring 16 - descriptor ring */
   3394	bcmgenet_enable_dma(priv, dma_ctrl);
   3395
   3396	/* HFB init */
   3397	bcmgenet_hfb_init(priv);
   3398
   3399	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
   3400			  dev->name, priv);
   3401	if (ret < 0) {
   3402		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
   3403		goto err_fini_dma;
   3404	}
   3405
   3406	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
   3407			  dev->name, priv);
   3408	if (ret < 0) {
   3409		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
   3410		goto err_irq0;
   3411	}
   3412
   3413	ret = bcmgenet_mii_probe(dev);
   3414	if (ret) {
   3415		netdev_err(dev, "failed to connect to PHY\n");
   3416		goto err_irq1;
   3417	}
   3418
   3419	bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
   3420
   3421	bcmgenet_netif_start(dev);
   3422
   3423	netif_tx_start_all_queues(dev);
   3424
   3425	return 0;
   3426
   3427err_irq1:
   3428	free_irq(priv->irq1, priv);
   3429err_irq0:
   3430	free_irq(priv->irq0, priv);
   3431err_fini_dma:
   3432	bcmgenet_dma_teardown(priv);
   3433	bcmgenet_fini_dma(priv);
   3434err_clk_disable:
   3435	if (priv->internal_phy)
   3436		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
   3437	clk_disable_unprepare(priv->clk);
   3438	return ret;
   3439}
   3440
   3441static void bcmgenet_netif_stop(struct net_device *dev)
   3442{
   3443	struct bcmgenet_priv *priv = netdev_priv(dev);
   3444
   3445	bcmgenet_disable_tx_napi(priv);
   3446	netif_tx_disable(dev);
   3447
   3448	/* Disable MAC receive */
   3449	umac_enable_set(priv, CMD_RX_EN, false);
   3450
   3451	bcmgenet_dma_teardown(priv);
   3452
    3453	/* Disable MAC transmit. TX DMA must be disabled before this. */
   3454	umac_enable_set(priv, CMD_TX_EN, false);
   3455
   3456	phy_stop(dev->phydev);
   3457	bcmgenet_disable_rx_napi(priv);
   3458	bcmgenet_intr_disable(priv);
   3459
   3460	/* Wait for pending work items to complete. Since interrupts are
   3461	 * disabled no new work will be scheduled.
   3462	 */
   3463	cancel_work_sync(&priv->bcmgenet_irq_work);
   3464
   3465	/* tx reclaim */
   3466	bcmgenet_tx_reclaim_all(dev);
   3467	bcmgenet_fini_dma(priv);
   3468}
   3469
   3470static int bcmgenet_close(struct net_device *dev)
   3471{
   3472	struct bcmgenet_priv *priv = netdev_priv(dev);
   3473	int ret = 0;
   3474
   3475	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
   3476
   3477	bcmgenet_netif_stop(dev);
   3478
   3479	/* Really kill the PHY state machine and disconnect from it */
   3480	phy_disconnect(dev->phydev);
   3481
   3482	free_irq(priv->irq0, priv);
   3483	free_irq(priv->irq1, priv);
   3484
   3485	if (priv->internal_phy)
   3486		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
   3487
   3488	clk_disable_unprepare(priv->clk);
   3489
   3490	return ret;
   3491}
   3492
   3493static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
   3494{
   3495	struct bcmgenet_priv *priv = ring->priv;
   3496	u32 p_index, c_index, intsts, intmsk;
   3497	struct netdev_queue *txq;
   3498	unsigned int free_bds;
   3499	bool txq_stopped;
   3500
   3501	if (!netif_msg_tx_err(priv))
   3502		return;
   3503
   3504	txq = netdev_get_tx_queue(priv->dev, ring->queue);
   3505
   3506	spin_lock(&ring->lock);
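        	/* The default ring (DESC_INDEX) is signalled through intrl2_0 via
        	 * the TXDMA_DONE interrupts, while each priority ring has its own
        	 * bit in intrl2_1.
        	 */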
   3507	if (ring->index == DESC_INDEX) {
   3508		intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
   3509		intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
   3510	} else {
   3511		intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
   3512		intmsk = 1 << ring->index;
   3513	}
   3514	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
   3515	p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
   3516	txq_stopped = netif_tx_queue_stopped(txq);
   3517	free_bds = ring->free_bds;
   3518	spin_unlock(&ring->lock);
   3519
   3520	netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
   3521		  "TX queue status: %s, interrupts: %s\n"
   3522		  "(sw)free_bds: %d (sw)size: %d\n"
   3523		  "(sw)p_index: %d (hw)p_index: %d\n"
   3524		  "(sw)c_index: %d (hw)c_index: %d\n"
   3525		  "(sw)clean_p: %d (sw)write_p: %d\n"
   3526		  "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
   3527		  ring->index, ring->queue,
   3528		  txq_stopped ? "stopped" : "active",
   3529		  intsts & intmsk ? "enabled" : "disabled",
   3530		  free_bds, ring->size,
   3531		  ring->prod_index, p_index & DMA_P_INDEX_MASK,
   3532		  ring->c_index, c_index & DMA_C_INDEX_MASK,
   3533		  ring->clean_ptr, ring->write_ptr,
   3534		  ring->cb_ptr, ring->end_ptr);
   3535}
   3536
   3537static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
   3538{
   3539	struct bcmgenet_priv *priv = netdev_priv(dev);
   3540	u32 int0_enable = 0;
   3541	u32 int1_enable = 0;
   3542	unsigned int q;
   3543
   3544	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
   3545
   3546	for (q = 0; q < priv->hw_params->tx_queues; q++)
   3547		bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
   3548	bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
   3549
   3550	bcmgenet_tx_reclaim_all(dev);
   3551
   3552	for (q = 0; q < priv->hw_params->tx_queues; q++)
   3553		int1_enable |= (1 << q);
   3554
   3555	int0_enable = UMAC_IRQ_TXDMA_DONE;
   3556
   3557	/* Re-enable TX interrupts if disabled */
   3558	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
   3559	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
   3560
   3561	netif_trans_update(dev);
   3562
   3563	dev->stats.tx_errors++;
   3564
   3565	netif_tx_wake_all_queues(dev);
   3566}
   3567
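        /* Number of UniMAC MDF (MAC address filter) slots available; two of
         * them are always consumed by the broadcast address and the device's
         * own address in bcmgenet_set_rx_mode() below.
         */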
   3568#define MAX_MDF_FILTER	17
   3569
   3570static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
   3571					 const unsigned char *addr,
   3572					 int *i)
   3573{
   3574	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
   3575			     UMAC_MDF_ADDR + (*i * 4));
   3576	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
   3577			     addr[4] << 8 | addr[5],
   3578			     UMAC_MDF_ADDR + ((*i + 1) * 4));
   3579	*i += 2;
   3580}
   3581
   3582static void bcmgenet_set_rx_mode(struct net_device *dev)
   3583{
   3584	struct bcmgenet_priv *priv = netdev_priv(dev);
   3585	struct netdev_hw_addr *ha;
   3586	int i, nfilter;
   3587	u32 reg;
   3588
   3589	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
   3590
   3591	/* Number of filters needed */
   3592	nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;
   3593
    3594	/*
    3595	 * Turn on promiscuous mode in three scenarios:
    3596	 * 1. the IFF_PROMISC flag is set
    3597	 * 2. the IFF_ALLMULTI flag is set
    3598	 * 3. the number of filters needed exceeds the number of filters
    3599	 *    supported by the hardware.
    3600	 */
   3601	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
   3602	if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
   3603	    (nfilter > MAX_MDF_FILTER)) {
   3604		reg |= CMD_PROMISC;
   3605		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
   3606		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
   3607		return;
   3608	} else {
   3609		reg &= ~CMD_PROMISC;
   3610		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
   3611	}
   3612
   3613	/* update MDF filter */
   3614	i = 0;
   3615	/* Broadcast */
   3616	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
    3617	/* my own address */
   3618	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);
   3619
   3620	/* Unicast */
   3621	netdev_for_each_uc_addr(ha, dev)
   3622		bcmgenet_set_mdf_addr(priv, ha->addr, &i);
   3623
   3624	/* Multicast */
   3625	netdev_for_each_mc_addr(ha, dev)
   3626		bcmgenet_set_mdf_addr(priv, ha->addr, &i);
   3627
   3628	/* Enable filters */
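        	/* MDF_CTRL holds one enable bit per filter slot; set the top
        	 * nfilter bits of the 17-bit field to match the slots programmed
        	 * above.
        	 */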
   3629	reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
   3630	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
   3631}
   3632
   3633/* Set the hardware MAC address. */
   3634static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
   3635{
   3636	struct sockaddr *addr = p;
   3637
   3638	/* Setting the MAC address at the hardware level is not possible
   3639	 * without disabling the UniMAC RX/TX enable bits.
   3640	 */
   3641	if (netif_running(dev))
   3642		return -EBUSY;
   3643
   3644	eth_hw_addr_set(dev, addr->sa_data);
   3645
   3646	return 0;
   3647}
   3648
   3649static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
   3650{
   3651	struct bcmgenet_priv *priv = netdev_priv(dev);
   3652	unsigned long tx_bytes = 0, tx_packets = 0;
   3653	unsigned long rx_bytes = 0, rx_packets = 0;
   3654	unsigned long rx_errors = 0, rx_dropped = 0;
   3655	struct bcmgenet_tx_ring *tx_ring;
   3656	struct bcmgenet_rx_ring *rx_ring;
   3657	unsigned int q;
   3658
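        	/* Sum the per-ring software counters from every priority ring and
        	 * the default descriptor ring (DESC_INDEX).
        	 */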
   3659	for (q = 0; q < priv->hw_params->tx_queues; q++) {
   3660		tx_ring = &priv->tx_rings[q];
   3661		tx_bytes += tx_ring->bytes;
   3662		tx_packets += tx_ring->packets;
   3663	}
   3664	tx_ring = &priv->tx_rings[DESC_INDEX];
   3665	tx_bytes += tx_ring->bytes;
   3666	tx_packets += tx_ring->packets;
   3667
   3668	for (q = 0; q < priv->hw_params->rx_queues; q++) {
   3669		rx_ring = &priv->rx_rings[q];
   3670
   3671		rx_bytes += rx_ring->bytes;
   3672		rx_packets += rx_ring->packets;
   3673		rx_errors += rx_ring->errors;
   3674		rx_dropped += rx_ring->dropped;
   3675	}
   3676	rx_ring = &priv->rx_rings[DESC_INDEX];
   3677	rx_bytes += rx_ring->bytes;
   3678	rx_packets += rx_ring->packets;
   3679	rx_errors += rx_ring->errors;
   3680	rx_dropped += rx_ring->dropped;
   3681
   3682	dev->stats.tx_bytes = tx_bytes;
   3683	dev->stats.tx_packets = tx_packets;
   3684	dev->stats.rx_bytes = rx_bytes;
   3685	dev->stats.rx_packets = rx_packets;
   3686	dev->stats.rx_errors = rx_errors;
   3687	dev->stats.rx_missed_errors = rx_errors;
   3688	dev->stats.rx_dropped = rx_dropped;
   3689	return &dev->stats;
   3690}
   3691
   3692static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier)
   3693{
   3694	struct bcmgenet_priv *priv = netdev_priv(dev);
   3695
   3696	if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) ||
   3697	    priv->phy_interface != PHY_INTERFACE_MODE_MOCA)
   3698		return -EOPNOTSUPP;
   3699
   3700	if (new_carrier)
   3701		netif_carrier_on(dev);
   3702	else
   3703		netif_carrier_off(dev);
   3704
   3705	return 0;
   3706}
   3707
   3708static const struct net_device_ops bcmgenet_netdev_ops = {
   3709	.ndo_open		= bcmgenet_open,
   3710	.ndo_stop		= bcmgenet_close,
   3711	.ndo_start_xmit		= bcmgenet_xmit,
   3712	.ndo_tx_timeout		= bcmgenet_timeout,
   3713	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
   3714	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
   3715	.ndo_eth_ioctl		= phy_do_ioctl_running,
   3716	.ndo_set_features	= bcmgenet_set_features,
   3717#ifdef CONFIG_NET_POLL_CONTROLLER
   3718	.ndo_poll_controller	= bcmgenet_poll_controller,
   3719#endif
   3720	.ndo_get_stats		= bcmgenet_get_stats,
   3721	.ndo_change_carrier	= bcmgenet_change_carrier,
   3722};
   3723
   3724/* Array of GENET hardware parameters/characteristics */
   3725static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
   3726	[GENET_V1] = {
   3727		.tx_queues = 0,
   3728		.tx_bds_per_q = 0,
   3729		.rx_queues = 0,
   3730		.rx_bds_per_q = 0,
   3731		.bp_in_en_shift = 16,
   3732		.bp_in_mask = 0xffff,
   3733		.hfb_filter_cnt = 16,
   3734		.qtag_mask = 0x1F,
   3735		.hfb_offset = 0x1000,
   3736		.rdma_offset = 0x2000,
   3737		.tdma_offset = 0x3000,
   3738		.words_per_bd = 2,
   3739	},
   3740	[GENET_V2] = {
   3741		.tx_queues = 4,
   3742		.tx_bds_per_q = 32,
   3743		.rx_queues = 0,
   3744		.rx_bds_per_q = 0,
   3745		.bp_in_en_shift = 16,
   3746		.bp_in_mask = 0xffff,
   3747		.hfb_filter_cnt = 16,
   3748		.qtag_mask = 0x1F,
   3749		.tbuf_offset = 0x0600,
   3750		.hfb_offset = 0x1000,
   3751		.hfb_reg_offset = 0x2000,
   3752		.rdma_offset = 0x3000,
   3753		.tdma_offset = 0x4000,
   3754		.words_per_bd = 2,
   3755		.flags = GENET_HAS_EXT,
   3756	},
   3757	[GENET_V3] = {
   3758		.tx_queues = 4,
   3759		.tx_bds_per_q = 32,
   3760		.rx_queues = 0,
   3761		.rx_bds_per_q = 0,
   3762		.bp_in_en_shift = 17,
   3763		.bp_in_mask = 0x1ffff,
   3764		.hfb_filter_cnt = 48,
   3765		.hfb_filter_size = 128,
   3766		.qtag_mask = 0x3F,
   3767		.tbuf_offset = 0x0600,
   3768		.hfb_offset = 0x8000,
   3769		.hfb_reg_offset = 0xfc00,
   3770		.rdma_offset = 0x10000,
   3771		.tdma_offset = 0x11000,
   3772		.words_per_bd = 2,
   3773		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
   3774			 GENET_HAS_MOCA_LINK_DET,
   3775	},
   3776	[GENET_V4] = {
   3777		.tx_queues = 4,
   3778		.tx_bds_per_q = 32,
   3779		.rx_queues = 0,
   3780		.rx_bds_per_q = 0,
   3781		.bp_in_en_shift = 17,
   3782		.bp_in_mask = 0x1ffff,
   3783		.hfb_filter_cnt = 48,
   3784		.hfb_filter_size = 128,
   3785		.qtag_mask = 0x3F,
   3786		.tbuf_offset = 0x0600,
   3787		.hfb_offset = 0x8000,
   3788		.hfb_reg_offset = 0xfc00,
   3789		.rdma_offset = 0x2000,
   3790		.tdma_offset = 0x4000,
   3791		.words_per_bd = 3,
   3792		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
   3793			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
   3794	},
   3795	[GENET_V5] = {
   3796		.tx_queues = 4,
   3797		.tx_bds_per_q = 32,
   3798		.rx_queues = 0,
   3799		.rx_bds_per_q = 0,
   3800		.bp_in_en_shift = 17,
   3801		.bp_in_mask = 0x1ffff,
   3802		.hfb_filter_cnt = 48,
   3803		.hfb_filter_size = 128,
   3804		.qtag_mask = 0x3F,
   3805		.tbuf_offset = 0x0600,
   3806		.hfb_offset = 0x8000,
   3807		.hfb_reg_offset = 0xfc00,
   3808		.rdma_offset = 0x2000,
   3809		.tdma_offset = 0x4000,
   3810		.words_per_bd = 3,
   3811		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
   3812			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
   3813	},
   3814};
   3815
   3816/* Infer hardware parameters from the detected GENET version */
   3817static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
   3818{
   3819	struct bcmgenet_hw_params *params;
   3820	u32 reg;
   3821	u8 major;
   3822	u16 gphy_rev;
   3823
   3824	if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
   3825		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
   3826		genet_dma_ring_regs = genet_dma_ring_regs_v4;
   3827	} else if (GENET_IS_V3(priv)) {
   3828		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
   3829		genet_dma_ring_regs = genet_dma_ring_regs_v123;
   3830	} else if (GENET_IS_V2(priv)) {
   3831		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
   3832		genet_dma_ring_regs = genet_dma_ring_regs_v123;
   3833	} else if (GENET_IS_V1(priv)) {
   3834		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
   3835		genet_dma_ring_regs = genet_dma_ring_regs_v123;
   3836	}
   3837
   3838	/* enum genet_version starts at 1 */
   3839	priv->hw_params = &bcmgenet_hw_params[priv->version];
   3840	params = priv->hw_params;
   3841
   3842	/* Read GENET HW version */
   3843	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
   3844	major = (reg >> 24 & 0x0f);
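        	/* The raw major field does not map 1:1 onto enum bcmgenet_version
        	 * (the hardware reports 6 for V5, 5 for V4 and 0 for V1), so
        	 * normalize it before comparing against the configured version.
        	 */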
   3845	if (major == 6)
   3846		major = 5;
   3847	else if (major == 5)
   3848		major = 4;
   3849	else if (major == 0)
   3850		major = 1;
   3851	if (major != priv->version) {
   3852		dev_err(&priv->pdev->dev,
   3853			"GENET version mismatch, got: %d, configured for: %d\n",
   3854			major, priv->version);
   3855	}
   3856
   3857	/* Print the GENET core version */
   3858	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
   3859		 major, (reg >> 16) & 0x0f, reg & 0xffff);
   3860
   3861	/* Store the integrated PHY revision for the MDIO probing function
   3862	 * to pass this information to the PHY driver. The PHY driver expects
   3863	 * to find the PHY major revision in bits 15:8 while the GENET register
   3864	 * stores that information in bits 7:0, account for that.
   3865	 *
   3866	 * On newer chips, starting with PHY revision G0, a new scheme is
   3867	 * deployed similar to the Starfighter 2 switch with GPHY major
   3868	 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
    3869	 * is reserved, as is the special value 0x01ff, so we have a small
   3870	 * heuristic to check for the new GPHY revision and re-arrange things
   3871	 * so the GPHY driver is happy.
   3872	 */
   3873	gphy_rev = reg & 0xffff;
   3874
   3875	if (GENET_IS_V5(priv)) {
   3876		/* The EPHY revision should come from the MDIO registers of
    3877		 * the PHY, not from GENET.
   3878		 */
   3879		if (gphy_rev != 0) {
   3880			pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
   3881				gphy_rev);
   3882		}
    3883	/* This is reserved, so it requires special treatment */
   3884	} else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
   3885		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
   3886		return;
   3887	/* This is the good old scheme, just GPHY major, no minor nor patch */
   3888	} else if ((gphy_rev & 0xf0) != 0) {
   3889		priv->gphy_rev = gphy_rev << 8;
   3890	/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
   3891	} else if ((gphy_rev & 0xff00) != 0) {
   3892		priv->gphy_rev = gphy_rev;
   3893	}
   3894
   3895#ifdef CONFIG_PHYS_ADDR_T_64BIT
   3896	if (!(params->flags & GENET_HAS_40BITS))
    3897		pr_warn("GENET does not support 40-bit PA\n");
   3898#endif
   3899
   3900	pr_debug("Configuration for version: %d\n"
   3901		"TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
   3902		"BP << en: %2d, BP msk: 0x%05x\n"
   3903		"HFB count: %2d, QTAQ msk: 0x%05x\n"
   3904		"TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
   3905		"RDMA: 0x%05x, TDMA: 0x%05x\n"
   3906		"Words/BD: %d\n",
   3907		priv->version,
   3908		params->tx_queues, params->tx_bds_per_q,
   3909		params->rx_queues, params->rx_bds_per_q,
   3910		params->bp_in_en_shift, params->bp_in_mask,
   3911		params->hfb_filter_cnt, params->qtag_mask,
   3912		params->tbuf_offset, params->hfb_offset,
   3913		params->hfb_reg_offset,
   3914		params->rdma_offset, params->tdma_offset,
   3915		params->words_per_bd);
   3916}
   3917
   3918struct bcmgenet_plat_data {
   3919	enum bcmgenet_version version;
   3920	u32 dma_max_burst_length;
   3921	bool ephy_16nm;
   3922};
   3923
   3924static const struct bcmgenet_plat_data v1_plat_data = {
   3925	.version = GENET_V1,
   3926	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
   3927};
   3928
   3929static const struct bcmgenet_plat_data v2_plat_data = {
   3930	.version = GENET_V2,
   3931	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
   3932};
   3933
   3934static const struct bcmgenet_plat_data v3_plat_data = {
   3935	.version = GENET_V3,
   3936	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
   3937};
   3938
   3939static const struct bcmgenet_plat_data v4_plat_data = {
   3940	.version = GENET_V4,
   3941	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
   3942};
   3943
   3944static const struct bcmgenet_plat_data v5_plat_data = {
   3945	.version = GENET_V5,
   3946	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
   3947};
   3948
   3949static const struct bcmgenet_plat_data bcm2711_plat_data = {
   3950	.version = GENET_V5,
   3951	.dma_max_burst_length = 0x08,
   3952};
   3953
   3954static const struct bcmgenet_plat_data bcm7712_plat_data = {
   3955	.version = GENET_V5,
   3956	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
   3957	.ephy_16nm = true,
   3958};
   3959
   3960static const struct of_device_id bcmgenet_match[] = {
   3961	{ .compatible = "brcm,genet-v1", .data = &v1_plat_data },
   3962	{ .compatible = "brcm,genet-v2", .data = &v2_plat_data },
   3963	{ .compatible = "brcm,genet-v3", .data = &v3_plat_data },
   3964	{ .compatible = "brcm,genet-v4", .data = &v4_plat_data },
   3965	{ .compatible = "brcm,genet-v5", .data = &v5_plat_data },
   3966	{ .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
   3967	{ .compatible = "brcm,bcm7712-genet-v5", .data = &bcm7712_plat_data },
   3968	{ },
   3969};
   3970MODULE_DEVICE_TABLE(of, bcmgenet_match);
   3971
   3972static int bcmgenet_probe(struct platform_device *pdev)
   3973{
   3974	struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
   3975	const struct bcmgenet_plat_data *pdata;
   3976	struct bcmgenet_priv *priv;
   3977	struct net_device *dev;
   3978	unsigned int i;
   3979	int err = -EIO;
   3980
   3981	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
   3982	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
   3983				 GENET_MAX_MQ_CNT + 1);
   3984	if (!dev) {
   3985		dev_err(&pdev->dev, "can't allocate net device\n");
   3986		return -ENOMEM;
   3987	}
   3988
   3989	priv = netdev_priv(dev);
   3990	priv->irq0 = platform_get_irq(pdev, 0);
   3991	if (priv->irq0 < 0) {
   3992		err = priv->irq0;
   3993		goto err;
   3994	}
   3995	priv->irq1 = platform_get_irq(pdev, 1);
   3996	if (priv->irq1 < 0) {
   3997		err = priv->irq1;
   3998		goto err;
   3999	}
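        	/* The wake-on-LAN interrupt is optional; abort only when the
        	 * interrupt provider itself is not ready yet (-EPROBE_DEFER).
        	 */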
   4000	priv->wol_irq = platform_get_irq_optional(pdev, 2);
   4001	if (priv->wol_irq == -EPROBE_DEFER) {
   4002		err = priv->wol_irq;
   4003		goto err;
   4004	}
   4005
   4006	priv->base = devm_platform_ioremap_resource(pdev, 0);
   4007	if (IS_ERR(priv->base)) {
   4008		err = PTR_ERR(priv->base);
   4009		goto err;
   4010	}
   4011
   4012	spin_lock_init(&priv->lock);
   4013
   4014	/* Set default pause parameters */
   4015	priv->autoneg_pause = 1;
   4016	priv->tx_pause = 1;
   4017	priv->rx_pause = 1;
   4018
   4019	SET_NETDEV_DEV(dev, &pdev->dev);
   4020	dev_set_drvdata(&pdev->dev, dev);
   4021	dev->watchdog_timeo = 2 * HZ;
   4022	dev->ethtool_ops = &bcmgenet_ethtool_ops;
   4023	dev->netdev_ops = &bcmgenet_netdev_ops;
   4024
   4025	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
   4026
   4027	/* Set default features */
   4028	dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
   4029			 NETIF_F_RXCSUM;
   4030	dev->hw_features |= dev->features;
   4031	dev->vlan_features |= dev->features;
   4032
   4033	/* Request the WOL interrupt and advertise suspend if available */
   4034	priv->wol_irq_disabled = true;
   4035	if (priv->wol_irq > 0) {
   4036		err = devm_request_irq(&pdev->dev, priv->wol_irq,
   4037				       bcmgenet_wol_isr, 0, dev->name, priv);
   4038		if (!err)
   4039			device_set_wakeup_capable(&pdev->dev, 1);
   4040	}
   4041
    4042	/* Set the needed headroom to account for any possible
    4043	 * features being enabled or disabled at runtime
    4044	 */
   4045	dev->needed_headroom += 64;
   4046
   4047	priv->dev = dev;
   4048	priv->pdev = pdev;
   4049
   4050	pdata = device_get_match_data(&pdev->dev);
   4051	if (pdata) {
   4052		priv->version = pdata->version;
   4053		priv->dma_max_burst_length = pdata->dma_max_burst_length;
   4054		priv->ephy_16nm = pdata->ephy_16nm;
   4055	} else {
   4056		priv->version = pd->genet_version;
   4057		priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
   4058	}
   4059
   4060	priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet");
   4061	if (IS_ERR(priv->clk)) {
   4062		dev_dbg(&priv->pdev->dev, "failed to get enet clock\n");
   4063		err = PTR_ERR(priv->clk);
   4064		goto err;
   4065	}
   4066
   4067	err = clk_prepare_enable(priv->clk);
   4068	if (err)
   4069		goto err;
   4070
   4071	bcmgenet_set_hw_params(priv);
   4072
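        	/* Prefer a 40-bit DMA mask where the hardware supports it and
        	 * fall back to 32-bit addressing otherwise.
        	 */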
   4073	err = -EIO;
   4074	if (priv->hw_params->flags & GENET_HAS_40BITS)
   4075		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
   4076	if (err)
   4077		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
   4078	if (err)
   4079		goto err_clk_disable;
   4080
    4081	/* MII wait queue */
   4082	init_waitqueue_head(&priv->wq);
   4083	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
   4084	priv->rx_buf_len = RX_BUF_LENGTH;
   4085	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
   4086
   4087	priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol");
   4088	if (IS_ERR(priv->clk_wol)) {
   4089		dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
   4090		err = PTR_ERR(priv->clk_wol);
   4091		goto err_clk_disable;
   4092	}
   4093
   4094	priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
   4095	if (IS_ERR(priv->clk_eee)) {
   4096		dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
   4097		err = PTR_ERR(priv->clk_eee);
   4098		goto err_clk_disable;
   4099	}
   4100
   4101	/* If this is an internal GPHY, power it on now, before UniMAC is
   4102	 * brought out of reset as absolutely no UniMAC activity is allowed
   4103	 */
   4104	if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
   4105		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
   4106
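        	/* MAC address precedence: platform data first, then the address
        	 * provided by firmware (DT/ACPI), then the value already held in
        	 * the UMAC registers (ACPI only), and finally a random address
        	 * below.
        	 */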
   4107	if (pd && !IS_ERR_OR_NULL(pd->mac_address))
   4108		eth_hw_addr_set(dev, pd->mac_address);
   4109	else
   4110		if (device_get_ethdev_address(&pdev->dev, dev))
   4111			if (has_acpi_companion(&pdev->dev)) {
   4112				u8 addr[ETH_ALEN];
   4113
   4114				bcmgenet_get_hw_addr(priv, addr);
   4115				eth_hw_addr_set(dev, addr);
   4116			}
   4117
   4118	if (!is_valid_ether_addr(dev->dev_addr)) {
   4119		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
   4120		eth_hw_addr_random(dev);
   4121	}
   4122
   4123	reset_umac(priv);
   4124
   4125	err = bcmgenet_mii_init(dev);
   4126	if (err)
   4127		goto err_clk_disable;
   4128
    4129	/* Set up the number of real queues + 1 (GENET_V1 has 0 hardware
    4130	 * queues, just the ring 16 descriptor-based TX queue)
    4131	 */
   4132	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
   4133	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
   4134
   4135	/* Set default coalescing parameters */
   4136	for (i = 0; i < priv->hw_params->rx_queues; i++)
   4137		priv->rx_rings[i].rx_max_coalesced_frames = 1;
   4138	priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;
   4139
   4140	/* libphy will determine the link state */
   4141	netif_carrier_off(dev);
   4142
   4143	/* Turn off the main clock, WOL clock is handled separately */
   4144	clk_disable_unprepare(priv->clk);
   4145
   4146	err = register_netdev(dev);
   4147	if (err) {
   4148		bcmgenet_mii_exit(dev);
   4149		goto err;
   4150	}
   4151
   4152	return err;
   4153
   4154err_clk_disable:
   4155	clk_disable_unprepare(priv->clk);
   4156err:
   4157	free_netdev(dev);
   4158	return err;
   4159}
   4160
   4161static int bcmgenet_remove(struct platform_device *pdev)
   4162{
   4163	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
   4164
   4165	dev_set_drvdata(&pdev->dev, NULL);
   4166	unregister_netdev(priv->dev);
   4167	bcmgenet_mii_exit(priv->dev);
   4168	free_netdev(priv->dev);
   4169
   4170	return 0;
   4171}
   4172
   4173static void bcmgenet_shutdown(struct platform_device *pdev)
   4174{
   4175	bcmgenet_remove(pdev);
   4176}
   4177
   4178#ifdef CONFIG_PM_SLEEP
   4179static int bcmgenet_resume_noirq(struct device *d)
   4180{
   4181	struct net_device *dev = dev_get_drvdata(d);
   4182	struct bcmgenet_priv *priv = netdev_priv(dev);
   4183	int ret;
   4184	u32 reg;
   4185
   4186	if (!netif_running(dev))
   4187		return 0;
   4188
   4189	/* Turn on the clock */
   4190	ret = clk_prepare_enable(priv->clk);
   4191	if (ret)
   4192		return ret;
   4193
   4194	if (device_may_wakeup(d) && priv->wolopts) {
    4195		/* Account for Wake-on-LAN events and clear those events
    4196		 * (some devices need more time between enabling the clocks
    4197		 *  and the interrupt register reflecting the wake event, so
    4198		 *  read the register twice)
    4199		 */
   4200		reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
   4201		reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
   4202		if (reg & UMAC_IRQ_WAKE_EVENT)
   4203			pm_wakeup_event(&priv->pdev->dev, 0);
   4204	}
   4205
   4206	bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR);
   4207
   4208	return 0;
   4209}
   4210
   4211static int bcmgenet_resume(struct device *d)
   4212{
   4213	struct net_device *dev = dev_get_drvdata(d);
   4214	struct bcmgenet_priv *priv = netdev_priv(dev);
   4215	struct bcmgenet_rxnfc_rule *rule;
   4216	unsigned long dma_ctrl;
   4217	int ret;
   4218
   4219	if (!netif_running(dev))
   4220		return 0;
   4221
   4222	/* From WOL-enabled suspend, switch to regular clock */
   4223	if (device_may_wakeup(d) && priv->wolopts)
   4224		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
   4225
   4226	/* If this is an internal GPHY, power it back on now, before UniMAC is
   4227	 * brought out of reset as absolutely no UniMAC activity is allowed
   4228	 */
   4229	if (priv->internal_phy)
   4230		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
   4231
   4232	bcmgenet_umac_reset(priv);
   4233
   4234	init_umac(priv);
   4235
   4236	phy_init_hw(dev->phydev);
   4237
   4238	/* Speed settings must be restored */
   4239	genphy_config_aneg(dev->phydev);
   4240	bcmgenet_mii_config(priv->dev, false);
   4241
   4242	/* Restore enabled features */
   4243	bcmgenet_set_features(dev, dev->features);
   4244
   4245	bcmgenet_set_hw_addr(priv, dev->dev_addr);
   4246
   4247	/* Restore hardware filters */
   4248	bcmgenet_hfb_clear(priv);
   4249	list_for_each_entry(rule, &priv->rxnfc_list, list)
   4250		if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
   4251			bcmgenet_hfb_create_rxnfc_filter(priv, rule);
   4252
   4253	/* Disable RX/TX DMA and flush TX queues */
   4254	dma_ctrl = bcmgenet_dma_disable(priv);
   4255
   4256	/* Reinitialize TDMA and RDMA and SW housekeeping */
   4257	ret = bcmgenet_init_dma(priv);
   4258	if (ret) {
   4259		netdev_err(dev, "failed to initialize DMA\n");
   4260		goto out_clk_disable;
   4261	}
   4262
   4263	/* Always enable ring 16 - descriptor ring */
   4264	bcmgenet_enable_dma(priv, dma_ctrl);
   4265
   4266	if (!device_may_wakeup(d))
   4267		phy_resume(dev->phydev);
   4268
   4269	if (priv->eee.eee_enabled)
   4270		bcmgenet_eee_enable_set(dev, true);
   4271
   4272	bcmgenet_netif_start(dev);
   4273
   4274	netif_device_attach(dev);
   4275
   4276	return 0;
   4277
   4278out_clk_disable:
   4279	if (priv->internal_phy)
   4280		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
   4281	clk_disable_unprepare(priv->clk);
   4282	return ret;
   4283}
   4284
   4285static int bcmgenet_suspend(struct device *d)
   4286{
   4287	struct net_device *dev = dev_get_drvdata(d);
   4288	struct bcmgenet_priv *priv = netdev_priv(dev);
   4289
   4290	if (!netif_running(dev))
   4291		return 0;
   4292
   4293	netif_device_detach(dev);
   4294
   4295	bcmgenet_netif_stop(dev);
   4296
   4297	if (!device_may_wakeup(d))
   4298		phy_suspend(dev->phydev);
   4299
   4300	/* Disable filtering */
   4301	bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
   4302
   4303	return 0;
   4304}
   4305
   4306static int bcmgenet_suspend_noirq(struct device *d)
   4307{
   4308	struct net_device *dev = dev_get_drvdata(d);
   4309	struct bcmgenet_priv *priv = netdev_priv(dev);
   4310	int ret = 0;
   4311
   4312	if (!netif_running(dev))
   4313		return 0;
   4314
   4315	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
   4316	if (device_may_wakeup(d) && priv->wolopts)
   4317		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
   4318	else if (priv->internal_phy)
   4319		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
   4320
   4321	/* Let the framework handle resumption and leave the clocks on */
   4322	if (ret)
   4323		return ret;
   4324
   4325	/* Turn off the clocks */
   4326	clk_disable_unprepare(priv->clk);
   4327
   4328	return 0;
   4329}
   4330#else
   4331#define bcmgenet_suspend	NULL
   4332#define bcmgenet_suspend_noirq	NULL
   4333#define bcmgenet_resume		NULL
   4334#define bcmgenet_resume_noirq	NULL
   4335#endif /* CONFIG_PM_SLEEP */
   4336
   4337static const struct dev_pm_ops bcmgenet_pm_ops = {
   4338	.suspend	= bcmgenet_suspend,
   4339	.suspend_noirq	= bcmgenet_suspend_noirq,
   4340	.resume		= bcmgenet_resume,
   4341	.resume_noirq	= bcmgenet_resume_noirq,
   4342};
   4343
   4344static const struct acpi_device_id genet_acpi_match[] = {
   4345	{ "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data },
   4346	{ },
   4347};
   4348MODULE_DEVICE_TABLE(acpi, genet_acpi_match);
   4349
   4350static struct platform_driver bcmgenet_driver = {
   4351	.probe	= bcmgenet_probe,
   4352	.remove	= bcmgenet_remove,
   4353	.shutdown = bcmgenet_shutdown,
   4354	.driver	= {
   4355		.name	= "bcmgenet",
   4356		.of_match_table = bcmgenet_match,
   4357		.pm	= &bcmgenet_pm_ops,
   4358		.acpi_match_table = genet_acpi_match,
   4359	},
   4360};
   4361module_platform_driver(bcmgenet_driver);
   4362
   4363MODULE_AUTHOR("Broadcom Corporation");
   4364MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
   4365MODULE_ALIAS("platform:bcmgenet");
   4366MODULE_LICENSE("GPL");
   4367MODULE_SOFTDEP("pre: mdio-bcm-unimac");