cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

fec_main.c (110936B)


      1// SPDX-License-Identifier: GPL-2.0+
      2/*
      3 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
      4 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
      5 *
      6 * Right now, I am very wasteful with the buffers.  I allocate memory
      7 * pages and then divide them into 2K frame buffers.  This way I know I
      8 * have buffers large enough to hold one frame within one buffer descriptor.
      9 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
     10 * will be much more memory efficient and will easily handle lots of
     11 * small packets.
     12 *
     13 * Much better multiple PHY support by Magnus Damm.
     14 * Copyright (c) 2000 Ericsson Radio Systems AB.
     15 *
     16 * Support for FEC controller of ColdFire processors.
     17 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
     18 *
     19 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
     20 * Copyright (c) 2004-2006 Macq Electronique SA.
     21 *
     22 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
     23 */
     24
     25#include <linux/module.h>
     26#include <linux/kernel.h>
     27#include <linux/string.h>
     28#include <linux/pm_runtime.h>
     29#include <linux/ptrace.h>
     30#include <linux/errno.h>
     31#include <linux/ioport.h>
     32#include <linux/slab.h>
     33#include <linux/interrupt.h>
     34#include <linux/delay.h>
     35#include <linux/netdevice.h>
     36#include <linux/etherdevice.h>
     37#include <linux/skbuff.h>
     38#include <linux/in.h>
     39#include <linux/ip.h>
     40#include <net/ip.h>
     41#include <net/selftests.h>
     42#include <net/tso.h>
     43#include <linux/tcp.h>
     44#include <linux/udp.h>
     45#include <linux/icmp.h>
     46#include <linux/spinlock.h>
     47#include <linux/workqueue.h>
     48#include <linux/bitops.h>
     49#include <linux/io.h>
     50#include <linux/irq.h>
     51#include <linux/clk.h>
     52#include <linux/crc32.h>
     53#include <linux/platform_device.h>
     54#include <linux/mdio.h>
     55#include <linux/phy.h>
     56#include <linux/fec.h>
     57#include <linux/of.h>
     58#include <linux/of_device.h>
     59#include <linux/of_gpio.h>
     60#include <linux/of_mdio.h>
     61#include <linux/of_net.h>
     62#include <linux/regulator/consumer.h>
     63#include <linux/if_vlan.h>
     64#include <linux/pinctrl/consumer.h>
     65#include <linux/prefetch.h>
     66#include <linux/mfd/syscon.h>
     67#include <linux/regmap.h>
     68#include <soc/imx/cpuidle.h>
     69
     70#include <asm/cacheflush.h>
     71
     72#include "fec.h"
     73
     74static void set_multicast_list(struct net_device *ndev);
     75static void fec_enet_itr_coal_init(struct net_device *ndev);
     76
     77#define DRIVER_NAME	"fec"
     78
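/* Map VLAN priority (PCP 0-7) onto the three hardware queues; presumably
 * consumed by the driver's queue selection path (not shown in this excerpt). */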
     79static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
     80
      81/* Pause frame field and FIFO threshold */
     82#define FEC_ENET_FCE	(1 << 5)
     83#define FEC_ENET_RSEM_V	0x84
     84#define FEC_ENET_RSFL_V	16
     85#define FEC_ENET_RAEM_V	0x8
     86#define FEC_ENET_RAFL_V	0x8
     87#define FEC_ENET_OPD_V	0xFFF0
     88#define FEC_MDIO_PM_TIMEOUT  100 /* ms */
     89
     90struct fec_devinfo {
     91	u32 quirks;
     92};
     93
     94static const struct fec_devinfo fec_imx25_info = {
     95	.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
     96		  FEC_QUIRK_HAS_FRREG,
     97};
     98
     99static const struct fec_devinfo fec_imx27_info = {
    100	.quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
    101};
    102
    103static const struct fec_devinfo fec_imx28_info = {
    104	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
    105		  FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
    106		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
    107		  FEC_QUIRK_NO_HARD_RESET,
    108};
    109
    110static const struct fec_devinfo fec_imx6q_info = {
    111	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
    112		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
    113		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
    114		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII,
    115};
    116
    117static const struct fec_devinfo fec_mvf600_info = {
    118	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
    119};
    120
    121static const struct fec_devinfo fec_imx6x_info = {
    122	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
    123		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
    124		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
    125		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
    126		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
    127		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES,
    128};
    129
    130static const struct fec_devinfo fec_imx6ul_info = {
    131	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
    132		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
    133		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
    134		  FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
    135		  FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII,
    136};
    137
    138static const struct fec_devinfo fec_imx8mq_info = {
    139	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
    140		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
    141		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
    142		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
    143		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
    144		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
    145		  FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2,
    146};
    147
    148static const struct fec_devinfo fec_imx8qm_info = {
    149	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
    150		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
    151		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
    152		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
    153		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
    154		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
    155		  FEC_QUIRK_DELAYED_CLKS_SUPPORT,
    156};
    157
    158static struct platform_device_id fec_devtype[] = {
    159	{
    160		/* keep it for coldfire */
    161		.name = DRIVER_NAME,
    162		.driver_data = 0,
    163	}, {
    164		.name = "imx25-fec",
    165		.driver_data = (kernel_ulong_t)&fec_imx25_info,
    166	}, {
    167		.name = "imx27-fec",
    168		.driver_data = (kernel_ulong_t)&fec_imx27_info,
    169	}, {
    170		.name = "imx28-fec",
    171		.driver_data = (kernel_ulong_t)&fec_imx28_info,
    172	}, {
    173		.name = "imx6q-fec",
    174		.driver_data = (kernel_ulong_t)&fec_imx6q_info,
    175	}, {
    176		.name = "mvf600-fec",
    177		.driver_data = (kernel_ulong_t)&fec_mvf600_info,
    178	}, {
    179		.name = "imx6sx-fec",
    180		.driver_data = (kernel_ulong_t)&fec_imx6x_info,
    181	}, {
    182		.name = "imx6ul-fec",
    183		.driver_data = (kernel_ulong_t)&fec_imx6ul_info,
    184	}, {
    185		.name = "imx8mq-fec",
    186		.driver_data = (kernel_ulong_t)&fec_imx8mq_info,
    187	}, {
    188		.name = "imx8qm-fec",
    189		.driver_data = (kernel_ulong_t)&fec_imx8qm_info,
    190	}, {
    191		/* sentinel */
    192	}
    193};
    194MODULE_DEVICE_TABLE(platform, fec_devtype);
    195
    196enum imx_fec_type {
    197	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
    198	IMX27_FEC,	/* runs on i.mx27/35/51 */
    199	IMX28_FEC,
    200	IMX6Q_FEC,
    201	MVF600_FEC,
    202	IMX6SX_FEC,
    203	IMX6UL_FEC,
    204	IMX8MQ_FEC,
    205	IMX8QM_FEC,
    206};
    207
    208static const struct of_device_id fec_dt_ids[] = {
    209	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
    210	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
    211	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
    212	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
    213	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
    214	{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
    215	{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
    216	{ .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], },
    217	{ .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
    218	{ /* sentinel */ }
    219};
    220MODULE_DEVICE_TABLE(of, fec_dt_ids);
    221
    222static unsigned char macaddr[ETH_ALEN];
    223module_param_array(macaddr, byte, NULL, 0);
    224MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
    225
    226#if defined(CONFIG_M5272)
    227/*
     228 * Some hardware gets its MAC address out of local flash memory.
     229 * If this is non-zero then assume it is the address to get the MAC from.
    230 */
    231#if defined(CONFIG_NETtel)
    232#define	FEC_FLASHMAC	0xf0006006
    233#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
    234#define	FEC_FLASHMAC	0xf0006000
    235#elif defined(CONFIG_CANCam)
    236#define	FEC_FLASHMAC	0xf0020000
    237#elif defined (CONFIG_M5272C3)
    238#define	FEC_FLASHMAC	(0xffe04000 + 4)
    239#elif defined(CONFIG_MOD5272)
    240#define FEC_FLASHMAC	0xffc0406b
    241#else
    242#define	FEC_FLASHMAC	0
    243#endif
    244#endif /* CONFIG_M5272 */
    245
    246/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
    247 *
    248 * 2048 byte skbufs are allocated. However, alignment requirements
     249 * vary between FEC variants. Worst case is 64, so round down by 64.
    250 */
    251#define PKT_MAXBUF_SIZE		(round_down(2048 - 64, 64))
    252#define PKT_MINBUF_SIZE		64
    253
    254/* FEC receive acceleration */
    255#define FEC_RACC_IPDIS		(1 << 1)
    256#define FEC_RACC_PRODIS		(1 << 2)
    257#define FEC_RACC_SHIFT16	BIT(7)
    258#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)
    259
    260/* MIB Control Register */
    261#define FEC_MIB_CTRLSTAT_DISABLE	BIT(31)
    262
    263/*
    264 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
    265 * size bits. Other FEC hardware does not, so we need to take that into
    266 * account when setting it.
    267 */
    268#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    269    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
    270    defined(CONFIG_ARM64)
    271#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
    272#else
    273#define	OPT_FRAME_SIZE	0
    274#endif
    275
    276/* FEC MII MMFR bits definition */
    277#define FEC_MMFR_ST		(1 << 30)
    278#define FEC_MMFR_ST_C45		(0)
    279#define FEC_MMFR_OP_READ	(2 << 28)
    280#define FEC_MMFR_OP_READ_C45	(3 << 28)
    281#define FEC_MMFR_OP_WRITE	(1 << 28)
    282#define FEC_MMFR_OP_ADDR_WRITE	(0)
    283#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
    284#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
    285#define FEC_MMFR_TA		(2 << 16)
    286#define FEC_MMFR_DATA(v)	(v & 0xffff)
    287/* FEC ECR bits definition */
    288#define FEC_ECR_MAGICEN		(1 << 2)
    289#define FEC_ECR_SLEEP		(1 << 3)
    290
    291#define FEC_MII_TIMEOUT		30000 /* us */
    292
    293/* Transmitter timeout */
    294#define TX_TIMEOUT (2 * HZ)
    295
    296#define FEC_PAUSE_FLAG_AUTONEG	0x1
    297#define FEC_PAUSE_FLAG_ENABLE	0x2
    298#define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
    299#define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
    300#define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)
    301
    302#define COPYBREAK_DEFAULT	256
    303
    304/* Max number of allowed TCP segments for software TSO */
    305#define FEC_MAX_TSO_SEGS	100
    306#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
    307
    308#define IS_TSO_HEADER(txq, addr) \
    309	((addr >= txq->tso_hdrs_dma) && \
    310	(addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
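/* True when @addr falls inside the preallocated DMA region holding TSO
 * headers; such buffers belong to the driver and are not dma_unmap()ed
 * per packet (see fec_enet_bd_init() and fec_enet_tx_queue()). */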
    311
    312static int mii_cnt;
    313
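/* Descriptor ring walkers: bd->dsize is the per-descriptor stride (larger
 * when extended bufdesc_ex descriptors are in use), and traversal wraps
 * between bd->base and bd->last. */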
    314static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
    315					     struct bufdesc_prop *bd)
    316{
    317	return (bdp >= bd->last) ? bd->base
    318			: (struct bufdesc *)(((void *)bdp) + bd->dsize);
    319}
    320
    321static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
    322					     struct bufdesc_prop *bd)
    323{
    324	return (bdp <= bd->base) ? bd->last
    325			: (struct bufdesc *)(((void *)bdp) - bd->dsize);
    326}
    327
    328static int fec_enet_get_bd_index(struct bufdesc *bdp,
    329				 struct bufdesc_prop *bd)
    330{
    331	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
    332}
    333
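/* Free slots between bd.cur (next descriptor software will fill) and
 * dirty_tx (last descriptor reclaimed); one slot stays unused so a full
 * ring can be told apart from an empty one. */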
    334static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
    335{
    336	int entries;
    337
    338	entries = (((const char *)txq->dirty_tx -
    339			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
    340
    341	return entries >= 0 ? entries : entries + txq->bd.ring_size;
    342}
    343
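/* In-place 32-bit byte swap for parts with FEC_QUIRK_SWAP_FRAME (e.g.
 * i.MX28, see fec_imx28_info above), where the controller expects frame
 * data in the opposite byte order from the CPU. */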
    344static void swap_buffer(void *bufaddr, int len)
    345{
    346	int i;
    347	unsigned int *buf = bufaddr;
    348
    349	for (i = 0; i < len; i += 4, buf++)
    350		swab32s(buf);
    351}
    352
    353static void swap_buffer2(void *dst_buf, void *src_buf, int len)
    354{
    355	int i;
    356	unsigned int *src = src_buf;
    357	unsigned int *dst = dst_buf;
    358
    359	for (i = 0; i < len; i += 4, src++, dst++)
    360		*dst = swab32p(src);
    361}
    362
    363static void fec_dump(struct net_device *ndev)
    364{
    365	struct fec_enet_private *fep = netdev_priv(ndev);
    366	struct bufdesc *bdp;
    367	struct fec_enet_priv_tx_q *txq;
    368	int index = 0;
    369
    370	netdev_info(ndev, "TX ring dump\n");
    371	pr_info("Nr     SC     addr       len  SKB\n");
    372
    373	txq = fep->tx_queue[0];
    374	bdp = txq->bd.base;
    375
    376	do {
    377		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
    378			index,
    379			bdp == txq->bd.cur ? 'S' : ' ',
    380			bdp == txq->dirty_tx ? 'H' : ' ',
    381			fec16_to_cpu(bdp->cbd_sc),
    382			fec32_to_cpu(bdp->cbd_bufaddr),
    383			fec16_to_cpu(bdp->cbd_datlen),
    384			txq->tx_skbuff[index]);
    385		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
    386		index++;
    387	} while (bdp != txq->bd.base);
    388}
    389
    390static inline bool is_ipv4_pkt(struct sk_buff *skb)
    391{
    392	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
    393}
    394
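/* The controller inserts IP and protocol checksums itself when
 * BD_ENET_TX_IINS/BD_ENET_TX_PINS are set, so the checksum fields in the
 * frame must be cleared to zero beforehand. */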
    395static int
    396fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
    397{
    398	/* Only run for packets requiring a checksum. */
    399	if (skb->ip_summed != CHECKSUM_PARTIAL)
    400		return 0;
    401
    402	if (unlikely(skb_cow_head(skb, 0)))
    403		return -1;
    404
    405	if (is_ipv4_pkt(skb))
    406		ip_hdr(skb)->check = 0;
    407	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
    408
    409	return 0;
    410}
    411
    412static struct bufdesc *
    413fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
    414			     struct sk_buff *skb,
    415			     struct net_device *ndev)
    416{
    417	struct fec_enet_private *fep = netdev_priv(ndev);
    418	struct bufdesc *bdp = txq->bd.cur;
    419	struct bufdesc_ex *ebdp;
    420	int nr_frags = skb_shinfo(skb)->nr_frags;
    421	int frag, frag_len;
    422	unsigned short status;
    423	unsigned int estatus = 0;
    424	skb_frag_t *this_frag;
    425	unsigned int index;
    426	void *bufaddr;
    427	dma_addr_t addr;
    428	int i;
    429
    430	for (frag = 0; frag < nr_frags; frag++) {
    431		this_frag = &skb_shinfo(skb)->frags[frag];
    432		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
    433		ebdp = (struct bufdesc_ex *)bdp;
    434
    435		status = fec16_to_cpu(bdp->cbd_sc);
    436		status &= ~BD_ENET_TX_STATS;
    437		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
    438		frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);
    439
    440		/* Handle the last BD specially */
    441		if (frag == nr_frags - 1) {
    442			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
    443			if (fep->bufdesc_ex) {
    444				estatus |= BD_ENET_TX_INT;
    445				if (unlikely(skb_shinfo(skb)->tx_flags &
    446					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
    447					estatus |= BD_ENET_TX_TS;
    448			}
    449		}
    450
    451		if (fep->bufdesc_ex) {
    452			if (fep->quirks & FEC_QUIRK_HAS_AVB)
    453				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
    454			if (skb->ip_summed == CHECKSUM_PARTIAL)
    455				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
    456
    457			ebdp->cbd_bdu = 0;
    458			ebdp->cbd_esc = cpu_to_fec32(estatus);
    459		}
    460
    461		bufaddr = skb_frag_address(this_frag);
    462
    463		index = fec_enet_get_bd_index(bdp, &txq->bd);
    464		if (((unsigned long) bufaddr) & fep->tx_align ||
    465			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
    466			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
    467			bufaddr = txq->tx_bounce[index];
    468
    469			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
    470				swap_buffer(bufaddr, frag_len);
    471		}
    472
    473		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
    474				      DMA_TO_DEVICE);
    475		if (dma_mapping_error(&fep->pdev->dev, addr)) {
    476			if (net_ratelimit())
    477				netdev_err(ndev, "Tx DMA memory map failed\n");
    478			goto dma_mapping_error;
    479		}
    480
    481		bdp->cbd_bufaddr = cpu_to_fec32(addr);
    482		bdp->cbd_datlen = cpu_to_fec16(frag_len);
    483		/* Make sure the updates to rest of the descriptor are
    484		 * performed before transferring ownership.
    485		 */
    486		wmb();
    487		bdp->cbd_sc = cpu_to_fec16(status);
    488	}
    489
    490	return bdp;
    491dma_mapping_error:
    492	bdp = txq->bd.cur;
    493	for (i = 0; i < frag; i++) {
    494		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
    495		dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
    496				 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
    497	}
    498	return ERR_PTR(-ENOMEM);
    499}
    500
    501static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
    502				   struct sk_buff *skb, struct net_device *ndev)
    503{
    504	struct fec_enet_private *fep = netdev_priv(ndev);
    505	int nr_frags = skb_shinfo(skb)->nr_frags;
    506	struct bufdesc *bdp, *last_bdp;
    507	void *bufaddr;
    508	dma_addr_t addr;
    509	unsigned short status;
    510	unsigned short buflen;
    511	unsigned int estatus = 0;
    512	unsigned int index;
    513	int entries_free;
    514
    515	entries_free = fec_enet_get_free_txdesc_num(txq);
    516	if (entries_free < MAX_SKB_FRAGS + 1) {
    517		dev_kfree_skb_any(skb);
    518		if (net_ratelimit())
    519			netdev_err(ndev, "NOT enough BD for SG!\n");
    520		return NETDEV_TX_OK;
    521	}
    522
    523	/* Protocol checksum off-load for TCP and UDP. */
    524	if (fec_enet_clear_csum(skb, ndev)) {
    525		dev_kfree_skb_any(skb);
    526		return NETDEV_TX_OK;
    527	}
    528
    529	/* Fill in a Tx ring entry */
    530	bdp = txq->bd.cur;
    531	last_bdp = bdp;
    532	status = fec16_to_cpu(bdp->cbd_sc);
    533	status &= ~BD_ENET_TX_STATS;
    534
    535	/* Set buffer length and buffer pointer */
    536	bufaddr = skb->data;
    537	buflen = skb_headlen(skb);
    538
    539	index = fec_enet_get_bd_index(bdp, &txq->bd);
    540	if (((unsigned long) bufaddr) & fep->tx_align ||
    541		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
    542		memcpy(txq->tx_bounce[index], skb->data, buflen);
    543		bufaddr = txq->tx_bounce[index];
    544
    545		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
    546			swap_buffer(bufaddr, buflen);
    547	}
    548
    549	/* Push the data cache so the CPM does not get stale memory data. */
    550	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
    551	if (dma_mapping_error(&fep->pdev->dev, addr)) {
    552		dev_kfree_skb_any(skb);
    553		if (net_ratelimit())
    554			netdev_err(ndev, "Tx DMA memory map failed\n");
    555		return NETDEV_TX_OK;
    556	}
    557
    558	if (nr_frags) {
    559		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
    560		if (IS_ERR(last_bdp)) {
    561			dma_unmap_single(&fep->pdev->dev, addr,
    562					 buflen, DMA_TO_DEVICE);
    563			dev_kfree_skb_any(skb);
    564			return NETDEV_TX_OK;
    565		}
    566	} else {
    567		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
    568		if (fep->bufdesc_ex) {
    569			estatus = BD_ENET_TX_INT;
    570			if (unlikely(skb_shinfo(skb)->tx_flags &
    571				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
    572				estatus |= BD_ENET_TX_TS;
    573		}
    574	}
    575	bdp->cbd_bufaddr = cpu_to_fec32(addr);
    576	bdp->cbd_datlen = cpu_to_fec16(buflen);
    577
    578	if (fep->bufdesc_ex) {
    579
    580		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
    581
    582		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
    583			fep->hwts_tx_en))
    584			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
    585
    586		if (fep->quirks & FEC_QUIRK_HAS_AVB)
    587			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
    588
    589		if (skb->ip_summed == CHECKSUM_PARTIAL)
    590			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
    591
    592		ebdp->cbd_bdu = 0;
    593		ebdp->cbd_esc = cpu_to_fec32(estatus);
    594	}
    595
    596	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
    597	/* Save skb pointer */
    598	txq->tx_skbuff[index] = skb;
    599
    600	/* Make sure the updates to rest of the descriptor are performed before
    601	 * transferring ownership.
    602	 */
    603	wmb();
    604
    605	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
    606	 * it's the last BD of the frame, and to put the CRC on the end.
    607	 */
    608	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
    609	bdp->cbd_sc = cpu_to_fec16(status);
    610
    611	/* If this was the last BD in the ring, start at the beginning again. */
    612	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
    613
    614	skb_tx_timestamp(skb);
    615
    616	/* Make sure the update to bdp and tx_skbuff are performed before
    617	 * txq->bd.cur.
    618	 */
    619	wmb();
    620	txq->bd.cur = bdp;
    621
    622	/* Trigger transmission start */
    623	writel(0, txq->bd.reg_desc_active);
    624
    625	return 0;
    626}
    627
    628static int
    629fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
    630			  struct net_device *ndev,
    631			  struct bufdesc *bdp, int index, char *data,
    632			  int size, bool last_tcp, bool is_last)
    633{
    634	struct fec_enet_private *fep = netdev_priv(ndev);
    635	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
    636	unsigned short status;
    637	unsigned int estatus = 0;
    638	dma_addr_t addr;
    639
    640	status = fec16_to_cpu(bdp->cbd_sc);
    641	status &= ~BD_ENET_TX_STATS;
    642
    643	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
    644
    645	if (((unsigned long) data) & fep->tx_align ||
    646		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
    647		memcpy(txq->tx_bounce[index], data, size);
    648		data = txq->tx_bounce[index];
    649
    650		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
    651			swap_buffer(data, size);
    652	}
    653
    654	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
    655	if (dma_mapping_error(&fep->pdev->dev, addr)) {
    656		dev_kfree_skb_any(skb);
    657		if (net_ratelimit())
    658			netdev_err(ndev, "Tx DMA memory map failed\n");
    659		return NETDEV_TX_BUSY;
    660	}
    661
    662	bdp->cbd_datlen = cpu_to_fec16(size);
    663	bdp->cbd_bufaddr = cpu_to_fec32(addr);
    664
    665	if (fep->bufdesc_ex) {
    666		if (fep->quirks & FEC_QUIRK_HAS_AVB)
    667			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
    668		if (skb->ip_summed == CHECKSUM_PARTIAL)
    669			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
    670		ebdp->cbd_bdu = 0;
    671		ebdp->cbd_esc = cpu_to_fec32(estatus);
    672	}
    673
    674	/* Handle the last BD specially */
    675	if (last_tcp)
    676		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
    677	if (is_last) {
    678		status |= BD_ENET_TX_INTR;
    679		if (fep->bufdesc_ex)
    680			ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
    681	}
    682
    683	bdp->cbd_sc = cpu_to_fec16(status);
    684
    685	return 0;
    686}
    687
    688static int
    689fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
    690			 struct sk_buff *skb, struct net_device *ndev,
    691			 struct bufdesc *bdp, int index)
    692{
    693	struct fec_enet_private *fep = netdev_priv(ndev);
    694	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
    695	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
    696	void *bufaddr;
    697	unsigned long dmabuf;
    698	unsigned short status;
    699	unsigned int estatus = 0;
    700
    701	status = fec16_to_cpu(bdp->cbd_sc);
    702	status &= ~BD_ENET_TX_STATS;
    703	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
    704
    705	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
    706	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
    707	if (((unsigned long)bufaddr) & fep->tx_align ||
    708		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
    709		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
    710		bufaddr = txq->tx_bounce[index];
    711
    712		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
    713			swap_buffer(bufaddr, hdr_len);
    714
    715		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
    716					hdr_len, DMA_TO_DEVICE);
    717		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
    718			dev_kfree_skb_any(skb);
    719			if (net_ratelimit())
    720				netdev_err(ndev, "Tx DMA memory map failed\n");
    721			return NETDEV_TX_BUSY;
    722		}
    723	}
    724
    725	bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
    726	bdp->cbd_datlen = cpu_to_fec16(hdr_len);
    727
    728	if (fep->bufdesc_ex) {
    729		if (fep->quirks & FEC_QUIRK_HAS_AVB)
    730			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
    731		if (skb->ip_summed == CHECKSUM_PARTIAL)
    732			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
    733		ebdp->cbd_bdu = 0;
    734		ebdp->cbd_esc = cpu_to_fec32(estatus);
    735	}
    736
    737	bdp->cbd_sc = cpu_to_fec16(status);
    738
    739	return 0;
    740}
    741
    742static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
    743				   struct sk_buff *skb,
    744				   struct net_device *ndev)
    745{
    746	struct fec_enet_private *fep = netdev_priv(ndev);
    747	int hdr_len, total_len, data_left;
    748	struct bufdesc *bdp = txq->bd.cur;
    749	struct tso_t tso;
    750	unsigned int index = 0;
    751	int ret;
    752
    753	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
    754		dev_kfree_skb_any(skb);
    755		if (net_ratelimit())
    756			netdev_err(ndev, "NOT enough BD for TSO!\n");
    757		return NETDEV_TX_OK;
    758	}
    759
    760	/* Protocol checksum off-load for TCP and UDP. */
    761	if (fec_enet_clear_csum(skb, ndev)) {
    762		dev_kfree_skb_any(skb);
    763		return NETDEV_TX_OK;
    764	}
    765
    766	/* Initialize the TSO handler, and prepare the first payload */
    767	hdr_len = tso_start(skb, &tso);
    768
    769	total_len = skb->len - hdr_len;
    770	while (total_len > 0) {
    771		char *hdr;
    772
    773		index = fec_enet_get_bd_index(bdp, &txq->bd);
    774		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
    775		total_len -= data_left;
    776
    777		/* prepare packet headers: MAC + IP + TCP */
    778		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
    779		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
    780		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
    781		if (ret)
    782			goto err_release;
    783
    784		while (data_left > 0) {
    785			int size;
    786
    787			size = min_t(int, tso.size, data_left);
    788			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
    789			index = fec_enet_get_bd_index(bdp, &txq->bd);
    790			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
    791							bdp, index,
    792							tso.data, size,
    793							size == data_left,
    794							total_len == 0);
    795			if (ret)
    796				goto err_release;
    797
    798			data_left -= size;
    799			tso_build_data(skb, &tso, size);
    800		}
    801
    802		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
    803	}
    804
    805	/* Save skb pointer */
    806	txq->tx_skbuff[index] = skb;
    807
    808	skb_tx_timestamp(skb);
    809	txq->bd.cur = bdp;
    810
    811	/* Trigger transmission start */
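	/* ERR007885 workaround: only re-arm TDAR once one of up to four reads
	 * has seen it inactive, guarding against the write being lost while
	 * the hardware is still clearing the bit. */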
    812	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
    813	    !readl(txq->bd.reg_desc_active) ||
    814	    !readl(txq->bd.reg_desc_active) ||
    815	    !readl(txq->bd.reg_desc_active) ||
    816	    !readl(txq->bd.reg_desc_active))
    817		writel(0, txq->bd.reg_desc_active);
    818
    819	return 0;
    820
    821err_release:
    822	/* TODO: Release all used data descriptors for TSO */
    823	return ret;
    824}
    825
    826static netdev_tx_t
    827fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
    828{
    829	struct fec_enet_private *fep = netdev_priv(ndev);
    830	int entries_free;
    831	unsigned short queue;
    832	struct fec_enet_priv_tx_q *txq;
    833	struct netdev_queue *nq;
    834	int ret;
    835
    836	queue = skb_get_queue_mapping(skb);
    837	txq = fep->tx_queue[queue];
    838	nq = netdev_get_tx_queue(ndev, queue);
    839
    840	if (skb_is_gso(skb))
    841		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
    842	else
    843		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
    844	if (ret)
    845		return ret;
    846
    847	entries_free = fec_enet_get_free_txdesc_num(txq);
    848	if (entries_free <= txq->tx_stop_threshold)
    849		netif_tx_stop_queue(nq);
    850
    851	return NETDEV_TX_OK;
    852}
    853
    854/* Init RX & TX buffer descriptors
    855 */
    856static void fec_enet_bd_init(struct net_device *dev)
    857{
    858	struct fec_enet_private *fep = netdev_priv(dev);
    859	struct fec_enet_priv_tx_q *txq;
    860	struct fec_enet_priv_rx_q *rxq;
    861	struct bufdesc *bdp;
    862	unsigned int i;
    863	unsigned int q;
    864
    865	for (q = 0; q < fep->num_rx_queues; q++) {
    866		/* Initialize the receive buffer descriptors. */
    867		rxq = fep->rx_queue[q];
    868		bdp = rxq->bd.base;
    869
    870		for (i = 0; i < rxq->bd.ring_size; i++) {
    871
    872			/* Initialize the BD for every fragment in the page. */
    873			if (bdp->cbd_bufaddr)
    874				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
    875			else
    876				bdp->cbd_sc = cpu_to_fec16(0);
    877			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
    878		}
    879
    880		/* Set the last buffer to wrap */
    881		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
    882		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
    883
    884		rxq->bd.cur = rxq->bd.base;
    885	}
    886
    887	for (q = 0; q < fep->num_tx_queues; q++) {
    888		/* ...and the same for transmit */
    889		txq = fep->tx_queue[q];
    890		bdp = txq->bd.base;
    891		txq->bd.cur = bdp;
    892
    893		for (i = 0; i < txq->bd.ring_size; i++) {
    894			/* Initialize the BD for every fragment in the page. */
    895			bdp->cbd_sc = cpu_to_fec16(0);
    896			if (bdp->cbd_bufaddr &&
    897			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
    898				dma_unmap_single(&fep->pdev->dev,
    899						 fec32_to_cpu(bdp->cbd_bufaddr),
    900						 fec16_to_cpu(bdp->cbd_datlen),
    901						 DMA_TO_DEVICE);
    902			if (txq->tx_skbuff[i]) {
    903				dev_kfree_skb_any(txq->tx_skbuff[i]);
    904				txq->tx_skbuff[i] = NULL;
    905			}
    906			bdp->cbd_bufaddr = cpu_to_fec32(0);
    907			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
    908		}
    909
    910		/* Set the last buffer to wrap */
    911		bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
    912		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
    913		txq->dirty_tx = bdp;
    914	}
    915}
    916
    917static void fec_enet_active_rxring(struct net_device *ndev)
    918{
    919	struct fec_enet_private *fep = netdev_priv(ndev);
    920	int i;
    921
    922	for (i = 0; i < fep->num_rx_queues; i++)
    923		writel(0, fep->rx_queue[i]->bd.reg_desc_active);
    924}
    925
    926static void fec_enet_enable_ring(struct net_device *ndev)
    927{
    928	struct fec_enet_private *fep = netdev_priv(ndev);
    929	struct fec_enet_priv_tx_q *txq;
    930	struct fec_enet_priv_rx_q *rxq;
    931	int i;
    932
    933	for (i = 0; i < fep->num_rx_queues; i++) {
    934		rxq = fep->rx_queue[i];
    935		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
    936		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
    937
    938		/* enable DMA1/2 */
    939		if (i)
    940			writel(RCMR_MATCHEN | RCMR_CMP(i),
    941			       fep->hwp + FEC_RCMR(i));
    942	}
    943
    944	for (i = 0; i < fep->num_tx_queues; i++) {
    945		txq = fep->tx_queue[i];
    946		writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
    947
    948		/* enable DMA1/2 */
    949		if (i)
    950			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
    951			       fep->hwp + FEC_DMA_CFG(i));
    952	}
    953}
    954
    955static void fec_enet_reset_skb(struct net_device *ndev)
    956{
    957	struct fec_enet_private *fep = netdev_priv(ndev);
    958	struct fec_enet_priv_tx_q *txq;
    959	int i, j;
    960
    961	for (i = 0; i < fep->num_tx_queues; i++) {
    962		txq = fep->tx_queue[i];
    963
    964		for (j = 0; j < txq->bd.ring_size; j++) {
    965			if (txq->tx_skbuff[j]) {
    966				dev_kfree_skb_any(txq->tx_skbuff[j]);
    967				txq->tx_skbuff[j] = NULL;
    968			}
    969		}
    970	}
    971}
    972
    973/*
    974 * This function is called to start or restart the FEC during a link
    975 * change, transmit timeout, or to reconfigure the FEC.  The network
    976 * packet processing for this device must be stopped before this call.
    977 */
    978static void
    979fec_restart(struct net_device *ndev)
    980{
    981	struct fec_enet_private *fep = netdev_priv(ndev);
    982	u32 temp_mac[2];
    983	u32 rcntl = OPT_FRAME_SIZE | 0x04;
    984	u32 ecntl = 0x2; /* ETHEREN */
    985
    986	/* Whack a reset.  We should wait for this.
     987	 * For the i.MX6SX SoC, the ENET block is on the AXI bus, so we
     988	 * disable the MAC instead of resetting it.
    989	 */
    990	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
    991	    ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
    992		writel(0, fep->hwp + FEC_ECNTRL);
    993	} else {
    994		writel(1, fep->hwp + FEC_ECNTRL);
    995		udelay(10);
    996	}
    997
    998	/*
     999	 * enet-mac reset will reset the MAC address registers too,
    1000	 * so we need to reconfigure them.
   1001	 */
   1002	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
   1003	writel((__force u32)cpu_to_be32(temp_mac[0]),
   1004	       fep->hwp + FEC_ADDR_LOW);
   1005	writel((__force u32)cpu_to_be32(temp_mac[1]),
   1006	       fep->hwp + FEC_ADDR_HIGH);
   1007
   1008	/* Clear any outstanding interrupt, except MDIO. */
   1009	writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
   1010
   1011	fec_enet_bd_init(ndev);
   1012
   1013	fec_enet_enable_ring(ndev);
   1014
   1015	/* Reset tx SKB buffers. */
   1016	fec_enet_reset_skb(ndev);
   1017
   1018	/* Enable MII mode */
   1019	if (fep->full_duplex == DUPLEX_FULL) {
   1020		/* FD enable */
   1021		writel(0x04, fep->hwp + FEC_X_CNTRL);
   1022	} else {
   1023		/* No Rcv on Xmit */
   1024		rcntl |= 0x02;
   1025		writel(0x0, fep->hwp + FEC_X_CNTRL);
   1026	}
   1027
   1028	/* Set MII speed */
   1029	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
   1030
   1031#if !defined(CONFIG_M5272)
   1032	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
   1033		u32 val = readl(fep->hwp + FEC_RACC);
   1034
   1035		/* align IP header */
   1036		val |= FEC_RACC_SHIFT16;
   1037		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
   1038			/* set RX checksum */
   1039			val |= FEC_RACC_OPTIONS;
   1040		else
   1041			val &= ~FEC_RACC_OPTIONS;
   1042		writel(val, fep->hwp + FEC_RACC);
   1043		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
   1044	}
   1045#endif
   1046
   1047	/*
   1048	 * The phy interface and speed need to get configured
   1049	 * differently on enet-mac.
   1050	 */
   1051	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
   1052		/* Enable flow control and length check */
   1053		rcntl |= 0x40000000 | 0x00000020;
   1054
   1055		/* RGMII, RMII or MII */
   1056		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
   1057		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
   1058		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
   1059		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
   1060			rcntl |= (1 << 6);
   1061		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
   1062			rcntl |= (1 << 8);
   1063		else
   1064			rcntl &= ~(1 << 8);
   1065
   1066		/* 1G, 100M or 10M */
   1067		if (ndev->phydev) {
   1068			if (ndev->phydev->speed == SPEED_1000)
   1069				ecntl |= (1 << 5);
   1070			else if (ndev->phydev->speed == SPEED_100)
   1071				rcntl &= ~(1 << 9);
   1072			else
   1073				rcntl |= (1 << 9);
   1074		}
   1075	} else {
   1076#ifdef FEC_MIIGSK_ENR
   1077		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
   1078			u32 cfgr;
   1079			/* disable the gasket and wait */
   1080			writel(0, fep->hwp + FEC_MIIGSK_ENR);
   1081			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
   1082				udelay(1);
   1083
   1084			/*
   1085			 * configure the gasket:
   1086			 *   RMII, 50 MHz, no loopback, no echo
   1087			 *   MII, 25 MHz, no loopback, no echo
   1088			 */
   1089			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
   1090				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
   1091			if (ndev->phydev && ndev->phydev->speed == SPEED_10)
   1092				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
   1093			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
   1094
   1095			/* re-enable the gasket */
   1096			writel(2, fep->hwp + FEC_MIIGSK_ENR);
   1097		}
   1098#endif
   1099	}
   1100
   1101#if !defined(CONFIG_M5272)
    1102	/* enable pause frame */
   1103	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
   1104	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
   1105	     ndev->phydev && ndev->phydev->pause)) {
   1106		rcntl |= FEC_ENET_FCE;
   1107
   1108		/* set FIFO threshold parameter to reduce overrun */
   1109		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
   1110		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
   1111		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
   1112		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
   1113
   1114		/* OPD */
   1115		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
   1116	} else {
   1117		rcntl &= ~FEC_ENET_FCE;
   1118	}
   1119#endif /* !defined(CONFIG_M5272) */
   1120
   1121	writel(rcntl, fep->hwp + FEC_R_CNTRL);
   1122
   1123	/* Setup multicast filter. */
   1124	set_multicast_list(ndev);
   1125#ifndef CONFIG_M5272
   1126	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
   1127	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
   1128#endif
   1129
   1130	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
   1131		/* enable ENET endian swap */
   1132		ecntl |= (1 << 8);
   1133		/* enable ENET store and forward mode */
   1134		writel(1 << 8, fep->hwp + FEC_X_WMRK);
   1135	}
   1136
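	/* ECR bit 4 is EN1588 in the i.MX reference manuals: it enables the
	 * enhanced (1588) buffer descriptor format. */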
   1137	if (fep->bufdesc_ex)
   1138		ecntl |= (1 << 4);
   1139
   1140	if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
   1141	    fep->rgmii_txc_dly)
   1142		ecntl |= FEC_ENET_TXC_DLY;
   1143	if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
   1144	    fep->rgmii_rxc_dly)
   1145		ecntl |= FEC_ENET_RXC_DLY;
   1146
   1147#ifndef CONFIG_M5272
   1148	/* Enable the MIB statistic event counters */
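	/* (0 << 31 is simply 0: writing it clears FEC_MIB_CTRLSTAT_DISABLE,
	 * BIT(31) above, taking the counters out of the disabled state.) */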
   1149	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
   1150#endif
   1151
   1152	/* And last, enable the transmit and receive processing */
   1153	writel(ecntl, fep->hwp + FEC_ECNTRL);
   1154	fec_enet_active_rxring(ndev);
   1155
   1156	if (fep->bufdesc_ex)
   1157		fec_ptp_start_cyclecounter(ndev);
   1158
   1159	/* Enable interrupts we wish to service */
   1160	if (fep->link)
   1161		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
   1162	else
   1163		writel(0, fep->hwp + FEC_IMASK);
   1164
   1165	/* Init the interrupt coalescing */
   1166	fec_enet_itr_coal_init(ndev);
   1167
   1168}
   1169
   1170static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
   1171{
   1172	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
   1173	struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;
   1174
   1175	if (stop_gpr->gpr) {
   1176		if (enabled)
   1177			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
   1178					   BIT(stop_gpr->bit),
   1179					   BIT(stop_gpr->bit));
   1180		else
   1181			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
   1182					   BIT(stop_gpr->bit), 0);
   1183	} else if (pdata && pdata->sleep_mode_enable) {
   1184		pdata->sleep_mode_enable(enabled);
   1185	}
   1186}
   1187
   1188static void fec_irqs_disable(struct net_device *ndev)
   1189{
   1190	struct fec_enet_private *fep = netdev_priv(ndev);
   1191
   1192	writel(0, fep->hwp + FEC_IMASK);
   1193}
   1194
   1195static void fec_irqs_disable_except_wakeup(struct net_device *ndev)
   1196{
   1197	struct fec_enet_private *fep = netdev_priv(ndev);
   1198
   1199	writel(0, fep->hwp + FEC_IMASK);
   1200	writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
   1201}
   1202
   1203static void
   1204fec_stop(struct net_device *ndev)
   1205{
   1206	struct fec_enet_private *fep = netdev_priv(ndev);
   1207	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
   1208	u32 val;
   1209
   1210	/* We cannot expect a graceful transmit stop without link !!! */
   1211	if (fep->link) {
   1212		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
   1213		udelay(10);
   1214		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
   1215			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
   1216	}
   1217
   1218	/* Whack a reset.  We should wait for this.
    1219	 * For the i.MX6SX SoC, the ENET block is on the AXI bus, so we
    1220	 * disable the MAC instead of resetting it.
   1221	 */
   1222	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
   1223		if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
   1224			writel(0, fep->hwp + FEC_ECNTRL);
   1225		} else {
   1226			writel(1, fep->hwp + FEC_ECNTRL);
   1227			udelay(10);
   1228		}
   1229	} else {
   1230		val = readl(fep->hwp + FEC_ECNTRL);
   1231		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
   1232		writel(val, fep->hwp + FEC_ECNTRL);
   1233	}
   1234	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
   1235	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
   1236
   1237	/* We have to keep ENET enabled to have MII interrupt stay working */
   1238	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
   1239		!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
   1240		writel(2, fep->hwp + FEC_ECNTRL);
   1241		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
   1242	}
   1243}
   1244
   1245
   1246static void
   1247fec_timeout(struct net_device *ndev, unsigned int txqueue)
   1248{
   1249	struct fec_enet_private *fep = netdev_priv(ndev);
   1250
   1251	fec_dump(ndev);
   1252
   1253	ndev->stats.tx_errors++;
   1254
   1255	schedule_work(&fep->tx_timeout_work);
   1256}
   1257
   1258static void fec_enet_timeout_work(struct work_struct *work)
   1259{
   1260	struct fec_enet_private *fep =
   1261		container_of(work, struct fec_enet_private, tx_timeout_work);
   1262	struct net_device *ndev = fep->netdev;
   1263
   1264	rtnl_lock();
   1265	if (netif_device_present(ndev) || netif_running(ndev)) {
   1266		napi_disable(&fep->napi);
   1267		netif_tx_lock_bh(ndev);
   1268		fec_restart(ndev);
   1269		netif_tx_wake_all_queues(ndev);
   1270		netif_tx_unlock_bh(ndev);
   1271		napi_enable(&fep->napi);
   1272	}
   1273	rtnl_unlock();
   1274}
   1275
   1276static void
   1277fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
   1278	struct skb_shared_hwtstamps *hwtstamps)
   1279{
   1280	unsigned long flags;
   1281	u64 ns;
   1282
   1283	spin_lock_irqsave(&fep->tmreg_lock, flags);
   1284	ns = timecounter_cyc2time(&fep->tc, ts);
   1285	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
   1286
   1287	memset(hwtstamps, 0, sizeof(*hwtstamps));
   1288	hwtstamps->hwtstamp = ns_to_ktime(ns);
   1289}
   1290
   1291static void
   1292fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
   1293{
   1294	struct	fec_enet_private *fep;
   1295	struct bufdesc *bdp;
   1296	unsigned short status;
   1297	struct	sk_buff	*skb;
   1298	struct fec_enet_priv_tx_q *txq;
   1299	struct netdev_queue *nq;
   1300	int	index = 0;
   1301	int	entries_free;
   1302
   1303	fep = netdev_priv(ndev);
   1304
   1305	txq = fep->tx_queue[queue_id];
   1306	/* get next bdp of dirty_tx */
   1307	nq = netdev_get_tx_queue(ndev, queue_id);
   1308	bdp = txq->dirty_tx;
   1309
   1310	/* get next bdp of dirty_tx */
   1311	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
   1312
   1313	while (bdp != READ_ONCE(txq->bd.cur)) {
   1314		/* Order the load of bd.cur and cbd_sc */
   1315		rmb();
   1316		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
   1317		if (status & BD_ENET_TX_READY)
   1318			break;
   1319
   1320		index = fec_enet_get_bd_index(bdp, &txq->bd);
   1321
   1322		skb = txq->tx_skbuff[index];
   1323		txq->tx_skbuff[index] = NULL;
   1324		if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
   1325			dma_unmap_single(&fep->pdev->dev,
   1326					 fec32_to_cpu(bdp->cbd_bufaddr),
   1327					 fec16_to_cpu(bdp->cbd_datlen),
   1328					 DMA_TO_DEVICE);
   1329		bdp->cbd_bufaddr = cpu_to_fec32(0);
   1330		if (!skb)
   1331			goto skb_done;
   1332
   1333		/* Check for errors. */
   1334		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
   1335				   BD_ENET_TX_RL | BD_ENET_TX_UN |
   1336				   BD_ENET_TX_CSL)) {
   1337			ndev->stats.tx_errors++;
   1338			if (status & BD_ENET_TX_HB)  /* No heartbeat */
   1339				ndev->stats.tx_heartbeat_errors++;
   1340			if (status & BD_ENET_TX_LC)  /* Late collision */
   1341				ndev->stats.tx_window_errors++;
   1342			if (status & BD_ENET_TX_RL)  /* Retrans limit */
   1343				ndev->stats.tx_aborted_errors++;
   1344			if (status & BD_ENET_TX_UN)  /* Underrun */
   1345				ndev->stats.tx_fifo_errors++;
   1346			if (status & BD_ENET_TX_CSL) /* Carrier lost */
   1347				ndev->stats.tx_carrier_errors++;
   1348		} else {
   1349			ndev->stats.tx_packets++;
   1350			ndev->stats.tx_bytes += skb->len;
   1351		}
   1352
    1353		/* NOTE: SKBTX_IN_PROGRESS being set does not imply that we are the
    1354		 * ones meant to time stamp the packet, so we still need to check the
    1355		 * time stamping enabled flag.
   1356		 */
   1357		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
   1358			     fep->hwts_tx_en) &&
   1359		    fep->bufdesc_ex) {
   1360			struct skb_shared_hwtstamps shhwtstamps;
   1361			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
   1362
   1363			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
   1364			skb_tstamp_tx(skb, &shhwtstamps);
   1365		}
   1366
   1367		/* Deferred means some collisions occurred during transmit,
   1368		 * but we eventually sent the packet OK.
   1369		 */
   1370		if (status & BD_ENET_TX_DEF)
   1371			ndev->stats.collisions++;
   1372
   1373		/* Free the sk buffer associated with this last transmit */
   1374		dev_kfree_skb_any(skb);
   1375skb_done:
   1376		/* Make sure the update to bdp and tx_skbuff are performed
   1377		 * before dirty_tx
   1378		 */
   1379		wmb();
   1380		txq->dirty_tx = bdp;
   1381
   1382		/* Update pointer to next buffer descriptor to be transmitted */
   1383		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
   1384
   1385		/* Since we have freed up a buffer, the ring is no longer full
   1386		 */
   1387		if (netif_tx_queue_stopped(nq)) {
   1388			entries_free = fec_enet_get_free_txdesc_num(txq);
   1389			if (entries_free >= txq->tx_wake_threshold)
   1390				netif_tx_wake_queue(nq);
   1391		}
   1392	}
   1393
   1394	/* ERR006358: Keep the transmitter going */
   1395	if (bdp != txq->bd.cur &&
   1396	    readl(txq->bd.reg_desc_active) == 0)
   1397		writel(0, txq->bd.reg_desc_active);
   1398}
   1399
   1400static void fec_enet_tx(struct net_device *ndev)
   1401{
   1402	struct fec_enet_private *fep = netdev_priv(ndev);
   1403	int i;
   1404
   1405	/* Make sure that AVB queues are processed first. */
   1406	for (i = fep->num_tx_queues - 1; i >= 0; i--)
   1407		fec_enet_tx_queue(ndev, i);
   1408}
   1409
   1410static int
   1411fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
   1412{
   1413	struct  fec_enet_private *fep = netdev_priv(ndev);
   1414	int off;
   1415
   1416	off = ((unsigned long)skb->data) & fep->rx_align;
   1417	if (off)
   1418		skb_reserve(skb, fep->rx_align + 1 - off);
   1419
   1420	bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
   1421	if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
   1422		if (net_ratelimit())
   1423			netdev_err(ndev, "Rx DMA memory map failed\n");
   1424		return -ENOMEM;
   1425	}
   1426
   1427	return 0;
   1428}
   1429
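/* Copybreak: frames up to rx_copybreak bytes are copied into a freshly
 * allocated skb and the original DMA buffer is handed straight back to
 * the ring, avoiding an unmap/realloc/remap cycle per small packet. */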
   1430static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
   1431			       struct bufdesc *bdp, u32 length, bool swap)
   1432{
   1433	struct  fec_enet_private *fep = netdev_priv(ndev);
   1434	struct sk_buff *new_skb;
   1435
   1436	if (length > fep->rx_copybreak)
   1437		return false;
   1438
   1439	new_skb = netdev_alloc_skb(ndev, length);
   1440	if (!new_skb)
   1441		return false;
   1442
   1443	dma_sync_single_for_cpu(&fep->pdev->dev,
   1444				fec32_to_cpu(bdp->cbd_bufaddr),
   1445				FEC_ENET_RX_FRSIZE - fep->rx_align,
   1446				DMA_FROM_DEVICE);
   1447	if (!swap)
   1448		memcpy(new_skb->data, (*skb)->data, length);
   1449	else
   1450		swap_buffer2(new_skb->data, (*skb)->data, length);
   1451	*skb = new_skb;
   1452
   1453	return true;
   1454}
   1455
   1456/* During a receive, the bd_rx.cur points to the current incoming buffer.
   1457 * When we update through the ring, if the next incoming buffer has
   1458 * not been given to the system, we just set the empty indicator,
   1459 * effectively tossing the packet.
   1460 */
   1461static int
   1462fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
   1463{
   1464	struct fec_enet_private *fep = netdev_priv(ndev);
   1465	struct fec_enet_priv_rx_q *rxq;
   1466	struct bufdesc *bdp;
   1467	unsigned short status;
   1468	struct  sk_buff *skb_new = NULL;
   1469	struct  sk_buff *skb;
   1470	ushort	pkt_len;
   1471	__u8 *data;
   1472	int	pkt_received = 0;
   1473	struct	bufdesc_ex *ebdp = NULL;
   1474	bool	vlan_packet_rcvd = false;
   1475	u16	vlan_tag;
   1476	int	index = 0;
   1477	bool	is_copybreak;
   1478	bool	need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
   1479
   1480#ifdef CONFIG_M532x
   1481	flush_cache_all();
   1482#endif
   1483	rxq = fep->rx_queue[queue_id];
   1484
   1485	/* First, grab all of the stats for the incoming packet.
   1486	 * These get messed up if we get called due to a busy condition.
   1487	 */
   1488	bdp = rxq->bd.cur;
   1489
   1490	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
   1491
   1492		if (pkt_received >= budget)
   1493			break;
   1494		pkt_received++;
   1495
   1496		writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
   1497
   1498		/* Check for errors. */
   1499		status ^= BD_ENET_RX_LAST;
   1500		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
   1501			   BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
   1502			   BD_ENET_RX_CL)) {
   1503			ndev->stats.rx_errors++;
   1504			if (status & BD_ENET_RX_OV) {
   1505				/* FIFO overrun */
   1506				ndev->stats.rx_fifo_errors++;
   1507				goto rx_processing_done;
   1508			}
   1509			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
   1510						| BD_ENET_RX_LAST)) {
   1511				/* Frame too long or too short. */
   1512				ndev->stats.rx_length_errors++;
   1513				if (status & BD_ENET_RX_LAST)
   1514					netdev_err(ndev, "rcv is not +last\n");
   1515			}
   1516			if (status & BD_ENET_RX_CR)	/* CRC Error */
   1517				ndev->stats.rx_crc_errors++;
   1518			/* Report late collisions as a frame error. */
   1519			if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
   1520				ndev->stats.rx_frame_errors++;
   1521			goto rx_processing_done;
   1522		}
   1523
   1524		/* Process the incoming frame. */
   1525		ndev->stats.rx_packets++;
   1526		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
   1527		ndev->stats.rx_bytes += pkt_len;
   1528
   1529		index = fec_enet_get_bd_index(bdp, &rxq->bd);
   1530		skb = rxq->rx_skbuff[index];
   1531
   1532		/* The packet length includes FCS, but we don't want to
   1533		 * include that when passing upstream as it messes up
   1534		 * bridging applications.
   1535		 */
   1536		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
   1537						  need_swap);
   1538		if (!is_copybreak) {
   1539			skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
   1540			if (unlikely(!skb_new)) {
   1541				ndev->stats.rx_dropped++;
   1542				goto rx_processing_done;
   1543			}
   1544			dma_unmap_single(&fep->pdev->dev,
   1545					 fec32_to_cpu(bdp->cbd_bufaddr),
   1546					 FEC_ENET_RX_FRSIZE - fep->rx_align,
   1547					 DMA_FROM_DEVICE);
   1548		}
   1549
   1550		prefetch(skb->data - NET_IP_ALIGN);
   1551		skb_put(skb, pkt_len - 4);
   1552		data = skb->data;
   1553
   1554		if (!is_copybreak && need_swap)
   1555			swap_buffer(data, pkt_len);
   1556
   1557#if !defined(CONFIG_M5272)
   1558		if (fep->quirks & FEC_QUIRK_HAS_RACC)
   1559			data = skb_pull_inline(skb, 2);
   1560#endif
   1561
   1562		/* Extract the enhanced buffer descriptor */
   1563		ebdp = NULL;
   1564		if (fep->bufdesc_ex)
   1565			ebdp = (struct bufdesc_ex *)bdp;
   1566
   1567		/* If this is a VLAN packet remove the VLAN Tag */
   1568		vlan_packet_rcvd = false;
   1569		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
   1570		    fep->bufdesc_ex &&
   1571		    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
   1572			/* Push and remove the vlan tag */
   1573			struct vlan_hdr *vlan_header =
   1574					(struct vlan_hdr *) (data + ETH_HLEN);
   1575			vlan_tag = ntohs(vlan_header->h_vlan_TCI);
   1576
   1577			vlan_packet_rcvd = true;
   1578
   1579			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
   1580			skb_pull(skb, VLAN_HLEN);
   1581		}
   1582
   1583		skb->protocol = eth_type_trans(skb, ndev);
   1584
   1585		/* Get receive timestamp from the skb */
   1586		if (fep->hwts_rx_en && fep->bufdesc_ex)
   1587			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
   1588					  skb_hwtstamps(skb));
   1589
   1590		if (fep->bufdesc_ex &&
   1591		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
   1592			if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
   1593				/* don't check it */
   1594				skb->ip_summed = CHECKSUM_UNNECESSARY;
   1595			} else {
   1596				skb_checksum_none_assert(skb);
   1597			}
   1598		}
   1599
   1600		/* Handle received VLAN packets */
   1601		if (vlan_packet_rcvd)
   1602			__vlan_hwaccel_put_tag(skb,
   1603					       htons(ETH_P_8021Q),
   1604					       vlan_tag);
   1605
   1606		skb_record_rx_queue(skb, queue_id);
   1607		napi_gro_receive(&fep->napi, skb);
   1608
   1609		if (is_copybreak) {
   1610			dma_sync_single_for_device(&fep->pdev->dev,
   1611						   fec32_to_cpu(bdp->cbd_bufaddr),
   1612						   FEC_ENET_RX_FRSIZE - fep->rx_align,
   1613						   DMA_FROM_DEVICE);
   1614		} else {
   1615			rxq->rx_skbuff[index] = skb_new;
   1616			fec_enet_new_rxbdp(ndev, bdp, skb_new);
   1617		}
   1618
   1619rx_processing_done:
   1620		/* Clear the status flags for this buffer */
   1621		status &= ~BD_ENET_RX_STATS;
   1622
   1623		/* Mark the buffer empty */
   1624		status |= BD_ENET_RX_EMPTY;
   1625
   1626		if (fep->bufdesc_ex) {
   1627			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
   1628
   1629			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
   1630			ebdp->cbd_prot = 0;
   1631			ebdp->cbd_bdu = 0;
   1632		}
   1633		/* Make sure the updates to rest of the descriptor are
   1634		 * performed before transferring ownership.
   1635		 */
   1636		wmb();
   1637		bdp->cbd_sc = cpu_to_fec16(status);
   1638
   1639		/* Update BD pointer to next entry */
   1640		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
   1641
   1642		/* Doing this here will keep the FEC running while we process
   1643		 * incoming frames.  On a heavily loaded network, we should be
   1644		 * able to keep up at the expense of system resources.
   1645		 */
   1646		writel(0, rxq->bd.reg_desc_active);
   1647	}
   1648	rxq->bd.cur = bdp;
   1649	return pkt_received;
   1650}
   1651
   1652static int fec_enet_rx(struct net_device *ndev, int budget)
   1653{
   1654	struct fec_enet_private *fep = netdev_priv(ndev);
   1655	int i, done = 0;
   1656
   1657	/* Make sure that AVB queues are processed first. */
   1658	for (i = fep->num_rx_queues - 1; i >= 0; i--)
   1659		done += fec_enet_rx_queue(ndev, budget - done, i);
   1660
   1661	return done;
   1662}
   1663
   1664static bool fec_enet_collect_events(struct fec_enet_private *fep)
   1665{
   1666	uint int_events;
   1667
   1668	int_events = readl(fep->hwp + FEC_IEVENT);
   1669
   1670	/* Don't clear MDIO events, we poll for those */
   1671	int_events &= ~FEC_ENET_MII;
   1672
   1673	writel(int_events, fep->hwp + FEC_IEVENT);
   1674
   1675	return int_events != 0;
   1676}
   1677
   1678static irqreturn_t
   1679fec_enet_interrupt(int irq, void *dev_id)
   1680{
   1681	struct net_device *ndev = dev_id;
   1682	struct fec_enet_private *fep = netdev_priv(ndev);
   1683	irqreturn_t ret = IRQ_NONE;
   1684
   1685	if (fec_enet_collect_events(fep) && fep->link) {
   1686		ret = IRQ_HANDLED;
   1687
   1688		if (napi_schedule_prep(&fep->napi)) {
   1689			/* Disable interrupts */
   1690			writel(0, fep->hwp + FEC_IMASK);
   1691			__napi_schedule(&fep->napi);
   1692		}
   1693	}
   1694
   1695	return ret;
   1696}
   1697
   1698static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
   1699{
   1700	struct net_device *ndev = napi->dev;
   1701	struct fec_enet_private *fep = netdev_priv(ndev);
   1702	int done = 0;
   1703
   1704	do {
   1705		done += fec_enet_rx(ndev, budget - done);
   1706		fec_enet_tx(ndev);
   1707	} while ((done < budget) && fec_enet_collect_events(fep));
   1708
   1709	if (done < budget) {
   1710		napi_complete_done(napi, done);
   1711		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
   1712	}
   1713
   1714	return done;
   1715}
   1716
   1717/* ------------------------------------------------------------------------- */
   1718static int fec_get_mac(struct net_device *ndev)
   1719{
   1720	struct fec_enet_private *fep = netdev_priv(ndev);
   1721	unsigned char *iap, tmpaddr[ETH_ALEN];
   1722	int ret;
   1723
   1724	/*
   1725	 * try to get the MAC address in the following order:
   1726	 *
   1727	 * 1) module parameter via kernel command line in form
   1728	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
   1729	 */
   1730	iap = macaddr;
   1731
   1732	/*
   1733	 * 2) from device tree data
   1734	 */
   1735	if (!is_valid_ether_addr(iap)) {
   1736		struct device_node *np = fep->pdev->dev.of_node;
   1737		if (np) {
   1738			ret = of_get_mac_address(np, tmpaddr);
   1739			if (!ret)
   1740				iap = tmpaddr;
   1741			else if (ret == -EPROBE_DEFER)
   1742				return ret;
   1743		}
   1744	}
   1745
   1746	/*
   1747	 * 3) from flash or fuse (via platform data)
   1748	 */
   1749	if (!is_valid_ether_addr(iap)) {
   1750#ifdef CONFIG_M5272
   1751		if (FEC_FLASHMAC)
   1752			iap = (unsigned char *)FEC_FLASHMAC;
   1753#else
   1754		struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
   1755
   1756		if (pdata)
   1757			iap = (unsigned char *)&pdata->mac;
   1758#endif
   1759	}
   1760
   1761	/*
   1762	 * 4) FEC mac registers set by bootloader
   1763	 */
   1764	if (!is_valid_ether_addr(iap)) {
   1765		*((__be32 *) &tmpaddr[0]) =
   1766			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
   1767		*((__be16 *) &tmpaddr[4]) =
   1768			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
   1769		iap = &tmpaddr[0];
   1770	}
   1771
   1772	/*
   1773	 * 5) random mac address
   1774	 */
   1775	if (!is_valid_ether_addr(iap)) {
   1776		/* Report it and use a random ethernet address instead */
   1777		dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
   1778		eth_hw_addr_random(ndev);
   1779		dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
   1780			 ndev->dev_addr);
   1781		return 0;
   1782	}
   1783
   1784	/* Adjust MAC if using macaddr */
   1785	eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0);
   1786
   1787	return 0;
   1788}
   1789
   1790/* ------------------------------------------------------------------------- */
   1791
   1792/*
   1793 * Phy section
   1794 */
   1795static void fec_enet_adjust_link(struct net_device *ndev)
   1796{
   1797	struct fec_enet_private *fep = netdev_priv(ndev);
   1798	struct phy_device *phy_dev = ndev->phydev;
   1799	int status_change = 0;
   1800
   1801	/*
   1802	 * If the netdev is down, or is going down, we're not interested
   1803	 * in link state events, so just mark our idea of the link as down
   1804	 * and ignore the event.
   1805	 */
   1806	if (!netif_running(ndev) || !netif_device_present(ndev)) {
   1807		fep->link = 0;
   1808	} else if (phy_dev->link) {
   1809		if (!fep->link) {
   1810			fep->link = phy_dev->link;
   1811			status_change = 1;
   1812		}
   1813
   1814		if (fep->full_duplex != phy_dev->duplex) {
   1815			fep->full_duplex = phy_dev->duplex;
   1816			status_change = 1;
   1817		}
   1818
   1819		if (phy_dev->speed != fep->speed) {
   1820			fep->speed = phy_dev->speed;
   1821			status_change = 1;
   1822		}
   1823
   1824		/* if any of the above changed restart the FEC */
   1825		if (status_change) {
   1826			napi_disable(&fep->napi);
   1827			netif_tx_lock_bh(ndev);
   1828			fec_restart(ndev);
   1829			netif_tx_wake_all_queues(ndev);
   1830			netif_tx_unlock_bh(ndev);
   1831			napi_enable(&fep->napi);
   1832		}
   1833	} else {
   1834		if (fep->link) {
   1835			napi_disable(&fep->napi);
   1836			netif_tx_lock_bh(ndev);
   1837			fec_stop(ndev);
   1838			netif_tx_unlock_bh(ndev);
   1839			napi_enable(&fep->napi);
   1840			fep->link = phy_dev->link;
   1841			status_change = 1;
   1842		}
   1843	}
   1844
   1845	if (status_change)
   1846		phy_print_status(phy_dev);
   1847}
   1848
   1849static int fec_enet_mdio_wait(struct fec_enet_private *fep)
   1850{
   1851	uint ievent;
   1852	int ret;
   1853
   1854	ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent,
   1855					ievent & FEC_ENET_MII, 2, 30000);
   1856
   1857	if (!ret)
   1858		writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
   1859
   1860	return ret;
   1861}
   1862
   1863static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
   1864{
   1865	struct fec_enet_private *fep = bus->priv;
   1866	struct device *dev = &fep->pdev->dev;
   1867	int ret = 0, frame_start, frame_addr, frame_op;
   1868	bool is_c45 = !!(regnum & MII_ADDR_C45);
   1869
   1870	ret = pm_runtime_resume_and_get(dev);
   1871	if (ret < 0)
   1872		return ret;
   1873
   1874	if (is_c45) {
   1875		frame_start = FEC_MMFR_ST_C45;
   1876
   1877		/* write address */
   1878		frame_addr = (regnum >> 16);
   1879		writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
   1880		       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
   1881		       FEC_MMFR_TA | (regnum & 0xFFFF),
   1882		       fep->hwp + FEC_MII_DATA);
   1883
   1884		/* wait for end of transfer */
   1885		ret = fec_enet_mdio_wait(fep);
   1886		if (ret) {
   1887			netdev_err(fep->netdev, "MDIO address write timeout\n");
   1888			goto out;
   1889		}
   1890
   1891		frame_op = FEC_MMFR_OP_READ_C45;
   1892
   1893	} else {
   1894		/* C22 read */
   1895		frame_op = FEC_MMFR_OP_READ;
   1896		frame_start = FEC_MMFR_ST;
   1897		frame_addr = regnum;
   1898	}
   1899
   1900	/* start a read op */
   1901	writel(frame_start | frame_op |
   1902		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
   1903		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
   1904
   1905	/* wait for end of transfer */
   1906	ret = fec_enet_mdio_wait(fep);
   1907	if (ret) {
   1908		netdev_err(fep->netdev, "MDIO read timeout\n");
   1909		goto out;
   1910	}
   1911
   1912	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
   1913
   1914out:
   1915	pm_runtime_mark_last_busy(dev);
   1916	pm_runtime_put_autosuspend(dev);
   1917
   1918	return ret;
   1919}
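/* Worked example (illustrative, not from the original source): a Clause 45
 * read of MMD 7 (AN), register 0x0020 arrives encoded as
 * regnum = MII_ADDR_C45 | (7 << 16) | 0x0020.  The address frame above
 * writes 0x0020 with frame_addr = regnum >> 16; assuming FEC_MMFR_RA()
 * keeps only the 5-bit address field, the MMD address 7 goes out on the
 * wire and the C45 flag bit is discarded.
 */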
   1920
   1921static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
   1922			   u16 value)
   1923{
   1924	struct fec_enet_private *fep = bus->priv;
   1925	struct device *dev = &fep->pdev->dev;
   1926	int ret, frame_start, frame_addr;
   1927	bool is_c45 = !!(regnum & MII_ADDR_C45);
   1928
   1929	ret = pm_runtime_resume_and_get(dev);
   1930	if (ret < 0)
   1931		return ret;
   1932
   1933	if (is_c45) {
   1934		frame_start = FEC_MMFR_ST_C45;
   1935
   1936		/* write address */
   1937		frame_addr = (regnum >> 16);
   1938		writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
   1939		       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
   1940		       FEC_MMFR_TA | (regnum & 0xFFFF),
   1941		       fep->hwp + FEC_MII_DATA);
   1942
   1943		/* wait for end of transfer */
   1944		ret = fec_enet_mdio_wait(fep);
   1945		if (ret) {
   1946			netdev_err(fep->netdev, "MDIO address write timeout\n");
   1947			goto out;
   1948		}
   1949	} else {
   1950		/* C22 write */
   1951		frame_start = FEC_MMFR_ST;
   1952		frame_addr = regnum;
   1953	}
   1954
   1955	/* start a write op */
   1956	writel(frame_start | FEC_MMFR_OP_WRITE |
   1957		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
   1958		FEC_MMFR_TA | FEC_MMFR_DATA(value),
   1959		fep->hwp + FEC_MII_DATA);
   1960
   1961	/* wait for end of transfer */
   1962	ret = fec_enet_mdio_wait(fep);
   1963	if (ret)
   1964		netdev_err(fep->netdev, "MDIO write timeout\n");
   1965
   1966out:
   1967	pm_runtime_mark_last_busy(dev);
   1968	pm_runtime_put_autosuspend(dev);
   1969
   1970	return ret;
   1971}
   1972
   1973static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
   1974{
   1975	struct fec_enet_private *fep = netdev_priv(ndev);
   1976	struct phy_device *phy_dev = ndev->phydev;
   1977
   1978	if (phy_dev) {
   1979		phy_reset_after_clk_enable(phy_dev);
   1980	} else if (fep->phy_node) {
   1981		/*
   1982		 * If the PHY is still not bound to the MAC, but there is an
   1983		 * OF PHY node and a matching PHY device instance already,
   1984		 * use the OF PHY node to obtain the PHY device instance,
   1985		 * and then use that PHY device instance when triggering
   1986		 * the PHY reset.
   1987		 */
   1988		phy_dev = of_phy_find_device(fep->phy_node);
   1989		phy_reset_after_clk_enable(phy_dev);
   1990		put_device(&phy_dev->mdio.dev);
   1991	}
   1992}
   1993
   1994static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
   1995{
   1996	struct fec_enet_private *fep = netdev_priv(ndev);
   1997	int ret;
   1998
   1999	if (enable) {
   2000		ret = clk_prepare_enable(fep->clk_enet_out);
   2001		if (ret)
   2002			return ret;
   2003
   2004		if (fep->clk_ptp) {
   2005			mutex_lock(&fep->ptp_clk_mutex);
   2006			ret = clk_prepare_enable(fep->clk_ptp);
   2007			if (ret) {
   2008				mutex_unlock(&fep->ptp_clk_mutex);
   2009				goto failed_clk_ptp;
   2010			} else {
   2011				fep->ptp_clk_on = true;
   2012			}
   2013			mutex_unlock(&fep->ptp_clk_mutex);
   2014		}
   2015
   2016		ret = clk_prepare_enable(fep->clk_ref);
   2017		if (ret)
   2018			goto failed_clk_ref;
   2019
   2020		ret = clk_prepare_enable(fep->clk_2x_txclk);
   2021		if (ret)
   2022			goto failed_clk_2x_txclk;
   2023
   2024		fec_enet_phy_reset_after_clk_enable(ndev);
   2025	} else {
   2026		clk_disable_unprepare(fep->clk_enet_out);
   2027		if (fep->clk_ptp) {
   2028			mutex_lock(&fep->ptp_clk_mutex);
   2029			clk_disable_unprepare(fep->clk_ptp);
   2030			fep->ptp_clk_on = false;
   2031			mutex_unlock(&fep->ptp_clk_mutex);
   2032		}
   2033		clk_disable_unprepare(fep->clk_ref);
   2034		clk_disable_unprepare(fep->clk_2x_txclk);
   2035	}
   2036
   2037	return 0;
   2038
   2039failed_clk_2x_txclk:
   2040	if (fep->clk_ref)
   2041		clk_disable_unprepare(fep->clk_ref);
   2042failed_clk_ref:
   2043	if (fep->clk_ptp) {
   2044		mutex_lock(&fep->ptp_clk_mutex);
   2045		clk_disable_unprepare(fep->clk_ptp);
   2046		fep->ptp_clk_on = false;
   2047		mutex_unlock(&fep->ptp_clk_mutex);
   2048	}
   2049failed_clk_ptp:
   2050	clk_disable_unprepare(fep->clk_enet_out);
   2051
   2052	return ret;
   2053}
   2054
   2055static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep,
   2056				      struct device_node *np)
   2057{
   2058	u32 rgmii_tx_delay, rgmii_rx_delay;
   2059
   2060	/* For rgmii tx internal delay, valid values are 0ps and 2000ps */
   2061	if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) {
   2062		if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) {
   2063			dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps");
   2064			return -EINVAL;
   2065		} else if (rgmii_tx_delay == 2000) {
   2066			fep->rgmii_txc_dly = true;
   2067		}
   2068	}
   2069
   2070	/* For rgmii rx internal delay, valid values are 0ps and 2000ps */
   2071	if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) {
   2072		if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) {
   2073			dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps");
   2074			return -EINVAL;
   2075		} else if (rgmii_rx_delay == 2000) {
   2076			fep->rgmii_rxc_dly = true;
   2077		}
   2078	}
   2079
   2080	return 0;
   2081}
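/* Illustrative device tree usage (assumed, not from this file): an
 * ethernet node containing
 *	tx-internal-delay-ps = <2000>;
 *	rx-internal-delay-ps = <0>;
 * would set fep->rgmii_txc_dly = true and leave fep->rgmii_rxc_dly
 * false; any value other than 0 or 2000 fails the parse with -EINVAL.
 */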
   2082
   2083static int fec_enet_mii_probe(struct net_device *ndev)
   2084{
   2085	struct fec_enet_private *fep = netdev_priv(ndev);
   2086	struct phy_device *phy_dev = NULL;
   2087	char mdio_bus_id[MII_BUS_ID_SIZE];
   2088	char phy_name[MII_BUS_ID_SIZE + 3];
   2089	int phy_id;
   2090	int dev_id = fep->dev_id;
   2091
   2092	if (fep->phy_node) {
   2093		phy_dev = of_phy_connect(ndev, fep->phy_node,
   2094					 &fec_enet_adjust_link, 0,
   2095					 fep->phy_interface);
   2096		if (!phy_dev) {
   2097			netdev_err(ndev, "Unable to connect to phy\n");
   2098			return -ENODEV;
   2099		}
   2100	} else {
   2101		/* check for attached phy */
   2102		for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
   2103			if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
   2104				continue;
   2105			if (dev_id--)
   2106				continue;
   2107			strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
   2108			break;
   2109		}
   2110
   2111		if (phy_id >= PHY_MAX_ADDR) {
   2112			netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
   2113			strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
   2114			phy_id = 0;
   2115		}
   2116
   2117		snprintf(phy_name, sizeof(phy_name),
   2118			 PHY_ID_FMT, mdio_bus_id, phy_id);
   2119		phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
   2120				      fep->phy_interface);
   2121	}
   2122
   2123	if (IS_ERR(phy_dev)) {
   2124		netdev_err(ndev, "could not attach to PHY\n");
   2125		return PTR_ERR(phy_dev);
   2126	}
   2127
   2128	/* mask with MAC supported features */
   2129	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
   2130		phy_set_max_speed(phy_dev, 1000);
   2131		phy_remove_link_mode(phy_dev,
   2132				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
   2133#if !defined(CONFIG_M5272)
   2134		phy_support_sym_pause(phy_dev);
   2135#endif
   2136	} else {
   2137		phy_set_max_speed(phy_dev, 100);
   2138	}
   2139
   2140	fep->link = 0;
   2141	fep->full_duplex = 0;
   2142
   2143	phy_dev->mac_managed_pm = 1;
   2144
   2145	phy_attached_info(phy_dev);
   2146
   2147	return 0;
   2148}
   2149
   2150static int fec_enet_mii_init(struct platform_device *pdev)
   2151{
   2152	static struct mii_bus *fec0_mii_bus;
   2153	struct net_device *ndev = platform_get_drvdata(pdev);
   2154	struct fec_enet_private *fep = netdev_priv(ndev);
   2155	bool suppress_preamble = false;
   2156	struct device_node *node;
   2157	int err = -ENXIO;
   2158	u32 mii_speed, holdtime;
   2159	u32 bus_freq;
   2160
   2161	/*
   2162	 * The i.MX28 dual fec interfaces are not equal.
   2163	 * Here are the differences:
   2164	 *
   2165	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
   2166	 *  - fec0 acts as the 1588 time master while fec1 is slave
   2167	 *  - external phys can only be configured by fec0
   2168	 *
   2169	 * That is to say, fec1 cannot work independently. It only works
   2170	 * when fec0 is working. The reason behind this design is that the
   2171	 * second interface is added primarily for Switch mode.
   2172	 *
   2173	 * Because of the last point above, both PHYs are attached to the
   2174	 * fec0 MDIO interface in the board design, and need to be
   2175	 * configured through the fec0 mii_bus.
   2176	 */
   2177	if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
   2178		/* fec1 uses fec0 mii_bus */
   2179		if (mii_cnt && fec0_mii_bus) {
   2180			fep->mii_bus = fec0_mii_bus;
   2181			mii_cnt++;
   2182			return 0;
   2183		}
   2184		return -ENOENT;
   2185	}
   2186
   2187	bus_freq = 2500000; /* 2.5MHz by default */
   2188	node = of_get_child_by_name(pdev->dev.of_node, "mdio");
   2189	if (node) {
   2190		of_property_read_u32(node, "clock-frequency", &bus_freq);
   2191		suppress_preamble = of_property_read_bool(node,
   2192							  "suppress-preamble");
   2193	}
   2194
   2195	/*
   2196	 * Set MII speed (= clk_get_rate() / 2 * phy_speed)
   2197	 *
   2198	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
   2199	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.  The i.MX28
   2200	 * Reference Manual documents this incorrectly; the error is fixed
   2201	 * in the i.MX6Q documentation.
   2202	 */
   2203	mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2);
   2204	if (fep->quirks & FEC_QUIRK_ENET_MAC)
   2205		mii_speed--;
   2206	if (mii_speed > 63) {
   2207		dev_err(&pdev->dev,
   2208			"fec clock (%lu) too fast to get right mii speed\n",
   2209			clk_get_rate(fep->clk_ipg));
   2210		err = -EINVAL;
   2211		goto err_out;
   2212	}
   2213
   2214	/*
   2215	 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
   2216	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
   2217	 * versions are RAZ there, so just ignore the difference and write the
   2218	 * register always.
   2219	 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
   2220	 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
   2221	 * output.
   2222	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
   2223	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
   2224	 * holdtime cannot result in a value greater than 3.
   2225	 */
   2226	holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
   2227
   2228	fep->phy_speed = mii_speed << 1 | holdtime << 8;
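       	/* Worked example (illustrative, not from the original source):
       	 * with a 66 MHz clk_ipg and the default 2.5 MHz bus_freq,
       	 * mii_speed = DIV_ROUND_UP(66000000, 5000000) = 14, minus one on
       	 * ENET-MAC cores = 13, so MDC = 66 MHz / ((13 + 1) * 2) ~ 2.36 MHz,
       	 * just under the 2.5 MHz target.  holdtime =
       	 * DIV_ROUND_UP(66000000, 100000000) - 1 = 0, i.e. one clock cycle
       	 * (~15 ns) of hold, satisfying the 10 ns minimum, giving
       	 * phy_speed = 13 << 1 | 0 << 8 = 0x1a.
       	 */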
   2229
   2230	if (suppress_preamble)
   2231		fep->phy_speed |= BIT(7);
   2232
   2233	if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
   2234		/* Clear MMFR to avoid generating an MII event when writing MSCR.
   2235		 * MII event generation condition:
   2236		 * - writing MSCR:
   2237		 *	- mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
   2238		 *	  mscr_reg_data_in[7:0] != 0
   2239		 * - writing MMFR:
   2240		 *	- mscr[7:0]_not_zero
   2241		 */
   2242		writel(0, fep->hwp + FEC_MII_DATA);
   2243	}
   2244
   2245	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
   2246
   2247	/* Clear any pending transaction complete indication */
   2248	writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
   2249
   2250	fep->mii_bus = mdiobus_alloc();
   2251	if (fep->mii_bus == NULL) {
   2252		err = -ENOMEM;
   2253		goto err_out;
   2254	}
   2255
   2256	fep->mii_bus->name = "fec_enet_mii_bus";
   2257	fep->mii_bus->read = fec_enet_mdio_read;
   2258	fep->mii_bus->write = fec_enet_mdio_write;
   2259	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
   2260		pdev->name, fep->dev_id + 1);
   2261	fep->mii_bus->priv = fep;
   2262	fep->mii_bus->parent = &pdev->dev;
   2263
   2264	err = of_mdiobus_register(fep->mii_bus, node);
   2265	if (err)
   2266		goto err_out_free_mdiobus;
   2267	of_node_put(node);
   2268
   2269	mii_cnt++;
   2270
   2271	/* save fec0 mii_bus */
   2272	if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
   2273		fec0_mii_bus = fep->mii_bus;
   2274
   2275	return 0;
   2276
   2277err_out_free_mdiobus:
   2278	mdiobus_free(fep->mii_bus);
   2279err_out:
   2280	of_node_put(node);
   2281	return err;
   2282}
   2283
   2284static void fec_enet_mii_remove(struct fec_enet_private *fep)
   2285{
   2286	if (--mii_cnt == 0) {
   2287		mdiobus_unregister(fep->mii_bus);
   2288		mdiobus_free(fep->mii_bus);
   2289	}
   2290}
   2291
   2292static void fec_enet_get_drvinfo(struct net_device *ndev,
   2293				 struct ethtool_drvinfo *info)
   2294{
   2295	struct fec_enet_private *fep = netdev_priv(ndev);
   2296
   2297	strlcpy(info->driver, fep->pdev->dev.driver->name,
   2298		sizeof(info->driver));
   2299	strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
   2300}
   2301
   2302static int fec_enet_get_regs_len(struct net_device *ndev)
   2303{
   2304	struct fec_enet_private *fep = netdev_priv(ndev);
   2305	struct resource *r;
   2306	int s = 0;
   2307
   2308	r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
   2309	if (r)
   2310		s = resource_size(r);
   2311
   2312	return s;
   2313}
   2314
   2315/* List of registers that can safely be read to dump them with ethtool */
   2316#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
   2317	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
   2318	defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
   2319static __u32 fec_enet_register_version = 2;
   2320static u32 fec_enet_register_offset[] = {
   2321	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
   2322	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
   2323	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
   2324	FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
   2325	FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
   2326	FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
   2327	FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
   2328	FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
   2329	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
   2330	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
   2331	FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
   2332	FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
   2333	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
   2334	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
   2335	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
   2336	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
   2337	RMON_T_P_GTE2048, RMON_T_OCTETS,
   2338	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
   2339	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
   2340	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
   2341	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
   2342	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
   2343	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
   2344	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
   2345	RMON_R_P_GTE2048, RMON_R_OCTETS,
   2346	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
   2347	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
   2348};
   2349#else
   2350static __u32 fec_enet_register_version = 1;
   2351static u32 fec_enet_register_offset[] = {
   2352	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
   2353	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
   2354	FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
   2355	FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
   2356	FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
   2357	FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
   2358	FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
   2359	FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
   2360	FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
   2361};
   2362#endif
   2363
   2364static void fec_enet_get_regs(struct net_device *ndev,
   2365			      struct ethtool_regs *regs, void *regbuf)
   2366{
   2367	struct fec_enet_private *fep = netdev_priv(ndev);
   2368	u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
   2369	struct device *dev = &fep->pdev->dev;
   2370	u32 *buf = (u32 *)regbuf;
   2371	u32 i, off;
   2372	int ret;
   2373
   2374	ret = pm_runtime_resume_and_get(dev);
   2375	if (ret < 0)
   2376		return;
   2377
   2378	regs->version = fec_enet_register_version;
   2379
   2380	memset(buf, 0, regs->len);
   2381
   2382	for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
   2383		off = fec_enet_register_offset[i];
   2384
   2385		if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
   2386		    !(fep->quirks & FEC_QUIRK_HAS_FRREG))
   2387			continue;
   2388
   2389		off >>= 2;
   2390		buf[off] = readl(&theregs[off]);
   2391	}
   2392
   2393	pm_runtime_mark_last_busy(dev);
   2394	pm_runtime_put_autosuspend(dev);
   2395}
   2396
   2397static int fec_enet_get_ts_info(struct net_device *ndev,
   2398				struct ethtool_ts_info *info)
   2399{
   2400	struct fec_enet_private *fep = netdev_priv(ndev);
   2401
   2402	if (fep->bufdesc_ex) {
   2403
   2404		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
   2405					SOF_TIMESTAMPING_RX_SOFTWARE |
   2406					SOF_TIMESTAMPING_SOFTWARE |
   2407					SOF_TIMESTAMPING_TX_HARDWARE |
   2408					SOF_TIMESTAMPING_RX_HARDWARE |
   2409					SOF_TIMESTAMPING_RAW_HARDWARE;
   2410		if (fep->ptp_clock)
   2411			info->phc_index = ptp_clock_index(fep->ptp_clock);
   2412		else
   2413			info->phc_index = -1;
   2414
   2415		info->tx_types = (1 << HWTSTAMP_TX_OFF) |
   2416				 (1 << HWTSTAMP_TX_ON);
   2417
   2418		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
   2419				   (1 << HWTSTAMP_FILTER_ALL);
   2420		return 0;
   2421	} else {
   2422		return ethtool_op_get_ts_info(ndev, info);
   2423	}
   2424}
   2425
   2426#if !defined(CONFIG_M5272)
   2427
   2428static void fec_enet_get_pauseparam(struct net_device *ndev,
   2429				    struct ethtool_pauseparam *pause)
   2430{
   2431	struct fec_enet_private *fep = netdev_priv(ndev);
   2432
   2433	pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
   2434	pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
   2435	pause->rx_pause = pause->tx_pause;
   2436}
   2437
   2438static int fec_enet_set_pauseparam(struct net_device *ndev,
   2439				   struct ethtool_pauseparam *pause)
   2440{
   2441	struct fec_enet_private *fep = netdev_priv(ndev);
   2442
   2443	if (!ndev->phydev)
   2444		return -ENODEV;
   2445
   2446	if (pause->tx_pause != pause->rx_pause) {
   2447		netdev_info(ndev,
   2448			"hardware only supports enabling/disabling both tx and rx");
   2449		return -EINVAL;
   2450	}
   2451
   2452	fep->pause_flag = 0;
   2453
   2454	/* tx pause must be the same as rx pause */
   2455	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
   2456	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
   2457
   2458	phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause,
   2459			  pause->autoneg);
   2460
   2461	if (pause->autoneg) {
   2462		if (netif_running(ndev))
   2463			fec_stop(ndev);
   2464		phy_start_aneg(ndev->phydev);
   2465	}
   2466	if (netif_running(ndev)) {
   2467		napi_disable(&fep->napi);
   2468		netif_tx_lock_bh(ndev);
   2469		fec_restart(ndev);
   2470		netif_tx_wake_all_queues(ndev);
   2471		netif_tx_unlock_bh(ndev);
   2472		napi_enable(&fep->napi);
   2473	}
   2474
   2475	return 0;
   2476}
   2477
   2478static const struct fec_stat {
   2479	char name[ETH_GSTRING_LEN];
   2480	u16 offset;
   2481} fec_stats[] = {
   2482	/* RMON TX */
   2483	{ "tx_dropped", RMON_T_DROP },
   2484	{ "tx_packets", RMON_T_PACKETS },
   2485	{ "tx_broadcast", RMON_T_BC_PKT },
   2486	{ "tx_multicast", RMON_T_MC_PKT },
   2487	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
   2488	{ "tx_undersize", RMON_T_UNDERSIZE },
   2489	{ "tx_oversize", RMON_T_OVERSIZE },
   2490	{ "tx_fragment", RMON_T_FRAG },
   2491	{ "tx_jabber", RMON_T_JAB },
   2492	{ "tx_collision", RMON_T_COL },
   2493	{ "tx_64byte", RMON_T_P64 },
   2494	{ "tx_65to127byte", RMON_T_P65TO127 },
   2495	{ "tx_128to255byte", RMON_T_P128TO255 },
   2496	{ "tx_256to511byte", RMON_T_P256TO511 },
   2497	{ "tx_512to1023byte", RMON_T_P512TO1023 },
   2498	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
   2499	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
   2500	{ "tx_octets", RMON_T_OCTETS },
   2501
   2502	/* IEEE TX */
   2503	{ "IEEE_tx_drop", IEEE_T_DROP },
   2504	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
   2505	{ "IEEE_tx_1col", IEEE_T_1COL },
   2506	{ "IEEE_tx_mcol", IEEE_T_MCOL },
   2507	{ "IEEE_tx_def", IEEE_T_DEF },
   2508	{ "IEEE_tx_lcol", IEEE_T_LCOL },
   2509	{ "IEEE_tx_excol", IEEE_T_EXCOL },
   2510	{ "IEEE_tx_macerr", IEEE_T_MACERR },
   2511	{ "IEEE_tx_cserr", IEEE_T_CSERR },
   2512	{ "IEEE_tx_sqe", IEEE_T_SQE },
   2513	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
   2514	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
   2515
   2516	/* RMON RX */
   2517	{ "rx_packets", RMON_R_PACKETS },
   2518	{ "rx_broadcast", RMON_R_BC_PKT },
   2519	{ "rx_multicast", RMON_R_MC_PKT },
   2520	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
   2521	{ "rx_undersize", RMON_R_UNDERSIZE },
   2522	{ "rx_oversize", RMON_R_OVERSIZE },
   2523	{ "rx_fragment", RMON_R_FRAG },
   2524	{ "rx_jabber", RMON_R_JAB },
   2525	{ "rx_64byte", RMON_R_P64 },
   2526	{ "rx_65to127byte", RMON_R_P65TO127 },
   2527	{ "rx_128to255byte", RMON_R_P128TO255 },
   2528	{ "rx_256to511byte", RMON_R_P256TO511 },
   2529	{ "rx_512to1023byte", RMON_R_P512TO1023 },
   2530	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
   2531	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
   2532	{ "rx_octets", RMON_R_OCTETS },
   2533
   2534	/* IEEE RX */
   2535	{ "IEEE_rx_drop", IEEE_R_DROP },
   2536	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
   2537	{ "IEEE_rx_crc", IEEE_R_CRC },
   2538	{ "IEEE_rx_align", IEEE_R_ALIGN },
   2539	{ "IEEE_rx_macerr", IEEE_R_MACERR },
   2540	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
   2541	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
   2542};
   2543
   2544#define FEC_STATS_SIZE		(ARRAY_SIZE(fec_stats) * sizeof(u64))
   2545
   2546static void fec_enet_update_ethtool_stats(struct net_device *dev)
   2547{
   2548	struct fec_enet_private *fep = netdev_priv(dev);
   2549	int i;
   2550
   2551	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
   2552		fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
   2553}
   2554
   2555static void fec_enet_get_ethtool_stats(struct net_device *dev,
   2556				       struct ethtool_stats *stats, u64 *data)
   2557{
   2558	struct fec_enet_private *fep = netdev_priv(dev);
   2559
   2560	if (netif_running(dev))
   2561		fec_enet_update_ethtool_stats(dev);
   2562
   2563	memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
   2564}
   2565
   2566static void fec_enet_get_strings(struct net_device *netdev,
   2567	u32 stringset, u8 *data)
   2568{
   2569	int i;
   2570	switch (stringset) {
   2571	case ETH_SS_STATS:
   2572		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
   2573			memcpy(data + i * ETH_GSTRING_LEN,
   2574				fec_stats[i].name, ETH_GSTRING_LEN);
   2575		break;
   2576	case ETH_SS_TEST:
   2577		net_selftest_get_strings(data);
   2578		break;
   2579	}
   2580}
   2581
   2582static int fec_enet_get_sset_count(struct net_device *dev, int sset)
   2583{
   2584	switch (sset) {
   2585	case ETH_SS_STATS:
   2586		return ARRAY_SIZE(fec_stats);
   2587	case ETH_SS_TEST:
   2588		return net_selftest_get_count();
   2589	default:
   2590		return -EOPNOTSUPP;
   2591	}
   2592}
   2593
   2594static void fec_enet_clear_ethtool_stats(struct net_device *dev)
   2595{
   2596	struct fec_enet_private *fep = netdev_priv(dev);
   2597	int i;
   2598
   2599	/* Disable MIB statistics counters */
   2600	writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
   2601
   2602	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
   2603		writel(0, fep->hwp + fec_stats[i].offset);
   2604
   2605	/* Re-enable the MIB statistics counters (clear the disable bit) */
   2606	writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
   2607}
   2608
   2609#else	/* !defined(CONFIG_M5272) */
   2610#define FEC_STATS_SIZE	0
   2611static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
   2612{
   2613}
   2614
   2615static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
   2616{
   2617}
   2618#endif /* !defined(CONFIG_M5272) */
   2619
   2620/* The ITR clock source is the enet system clock (clk_ahb).
   2621 * One TCTT unit is 64 clock cycles, i.e. cycle_ns * 64.
   2622 * So, the ICTT value = X us / (cycle_ns * 64).
   2623 */
   2624static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
   2625{
   2626	struct fec_enet_private *fep = netdev_priv(ndev);
   2627
   2628	return us * (fep->itr_clk_rate / 64000) / 1000;
   2629}
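/* Worked example (illustrative, not from the original source): with
 * itr_clk_rate = 66 MHz, one ITR tick is 64 AHB cycles ~ 0.97 us, so a
 * 1000 us coalescing interval maps to
 * 1000 * (66000000 / 64000) / 1000 = 1031 ICTT ticks.
 */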
   2630
   2631/* Set threshold for interrupt coalescing */
   2632static void fec_enet_itr_coal_set(struct net_device *ndev)
   2633{
   2634	struct fec_enet_private *fep = netdev_priv(ndev);
   2635	int rx_itr, tx_itr;
   2636
   2637	/* Must be greater than zero to avoid unpredictable behavior */
   2638	if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
   2639	    !fep->tx_time_itr || !fep->tx_pkts_itr)
   2640		return;
   2641
   2642	/* Select the enet system clock as the interrupt
   2643	 * coalescing timer clock source
   2644	 */
   2645	rx_itr = FEC_ITR_CLK_SEL;
   2646	tx_itr = FEC_ITR_CLK_SEL;
   2647
   2648	/* set ICFT and ICTT */
   2649	rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
   2650	rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
   2651	tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
   2652	tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
   2653
   2654	rx_itr |= FEC_ITR_EN;
   2655	tx_itr |= FEC_ITR_EN;
   2656
   2657	writel(tx_itr, fep->hwp + FEC_TXIC0);
   2658	writel(rx_itr, fep->hwp + FEC_RXIC0);
   2659	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
   2660		writel(tx_itr, fep->hwp + FEC_TXIC1);
   2661		writel(rx_itr, fep->hwp + FEC_RXIC1);
   2662		writel(tx_itr, fep->hwp + FEC_TXIC2);
   2663		writel(rx_itr, fep->hwp + FEC_RXIC2);
   2664	}
   2665}
   2666
   2667static int fec_enet_get_coalesce(struct net_device *ndev,
   2668				 struct ethtool_coalesce *ec,
   2669				 struct kernel_ethtool_coalesce *kernel_coal,
   2670				 struct netlink_ext_ack *extack)
   2671{
   2672	struct fec_enet_private *fep = netdev_priv(ndev);
   2673
   2674	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
   2675		return -EOPNOTSUPP;
   2676
   2677	ec->rx_coalesce_usecs = fep->rx_time_itr;
   2678	ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
   2679
   2680	ec->tx_coalesce_usecs = fep->tx_time_itr;
   2681	ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
   2682
   2683	return 0;
   2684}
   2685
   2686static int fec_enet_set_coalesce(struct net_device *ndev,
   2687				 struct ethtool_coalesce *ec,
   2688				 struct kernel_ethtool_coalesce *kernel_coal,
   2689				 struct netlink_ext_ack *extack)
   2690{
   2691	struct fec_enet_private *fep = netdev_priv(ndev);
   2692	struct device *dev = &fep->pdev->dev;
   2693	unsigned int cycle;
   2694
   2695	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
   2696		return -EOPNOTSUPP;
   2697
   2698	if (ec->rx_max_coalesced_frames > 255) {
   2699		dev_err(dev, "Rx coalesced frames exceed hardware limitation\n");
   2700		return -EINVAL;
   2701	}
   2702
   2703	if (ec->tx_max_coalesced_frames > 255) {
   2704		dev_err(dev, "Tx coalesced frames exceed hardware limitation\n");
   2705		return -EINVAL;
   2706	}
   2707
   2708	cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
   2709	if (cycle > 0xFFFF) {
   2710		dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
   2711		return -EINVAL;
   2712	}
   2713
   2714	cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
   2715	if (cycle > 0xFFFF) {
   2716		dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
   2717		return -EINVAL;
   2718	}
   2719
   2720	fep->rx_time_itr = ec->rx_coalesce_usecs;
   2721	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
   2722
   2723	fep->tx_time_itr = ec->tx_coalesce_usecs;
   2724	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
   2725
   2726	fec_enet_itr_coal_set(ndev);
   2727
   2728	return 0;
   2729}
   2730
   2731static void fec_enet_itr_coal_init(struct net_device *ndev)
   2732{
   2733	struct ethtool_coalesce ec;
   2734
   2735	ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
   2736	ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
   2737
   2738	ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
   2739	ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
   2740
   2741	fec_enet_set_coalesce(ndev, &ec, NULL, NULL);
   2742}
   2743
   2744static int fec_enet_get_tunable(struct net_device *netdev,
   2745				const struct ethtool_tunable *tuna,
   2746				void *data)
   2747{
   2748	struct fec_enet_private *fep = netdev_priv(netdev);
   2749	int ret = 0;
   2750
   2751	switch (tuna->id) {
   2752	case ETHTOOL_RX_COPYBREAK:
   2753		*(u32 *)data = fep->rx_copybreak;
   2754		break;
   2755	default:
   2756		ret = -EINVAL;
   2757		break;
   2758	}
   2759
   2760	return ret;
   2761}
   2762
   2763static int fec_enet_set_tunable(struct net_device *netdev,
   2764				const struct ethtool_tunable *tuna,
   2765				const void *data)
   2766{
   2767	struct fec_enet_private *fep = netdev_priv(netdev);
   2768	int ret = 0;
   2769
   2770	switch (tuna->id) {
   2771	case ETHTOOL_RX_COPYBREAK:
   2772		fep->rx_copybreak = *(u32 *)data;
   2773		break;
   2774	default:
   2775		ret = -EINVAL;
   2776		break;
   2777	}
   2778
   2779	return ret;
   2780}
   2781
   2782/* The LPI Sleep Ts count is based on the tx clock (clk_ref).
   2783 * The LPI sleep cnt value = X us / cycle_ns.
   2784 */
   2785static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
   2786{
   2787	struct fec_enet_private *fep = netdev_priv(ndev);
   2788
   2789	return us * (fep->clk_ref_rate / 1000) / 1000;
   2790}
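/* Worked example (illustrative, not from the original source): with
 * clk_ref_rate = 50 MHz, a tx_lpi_timer of 100 us becomes
 * 100 * (50000000 / 1000) / 1000 = 5000 tx clock cycles for the
 * FEC_LPI_SLEEP/FEC_LPI_WAKE registers.
 */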
   2791
   2792static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
   2793{
   2794	struct fec_enet_private *fep = netdev_priv(ndev);
   2795	struct ethtool_eee *p = &fep->eee;
   2796	unsigned int sleep_cycle, wake_cycle;
   2797	int ret = 0;
   2798
   2799	if (enable) {
   2800		ret = phy_init_eee(ndev->phydev, false);
   2801		if (ret)
   2802			return ret;
   2803
   2804		sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
   2805		wake_cycle = sleep_cycle;
   2806	} else {
   2807		sleep_cycle = 0;
   2808		wake_cycle = 0;
   2809	}
   2810
   2811	p->tx_lpi_enabled = enable;
   2812	p->eee_enabled = enable;
   2813	p->eee_active = enable;
   2814
   2815	writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
   2816	writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
   2817
   2818	return 0;
   2819}
   2820
   2821static int
   2822fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
   2823{
   2824	struct fec_enet_private *fep = netdev_priv(ndev);
   2825	struct ethtool_eee *p = &fep->eee;
   2826
   2827	if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
   2828		return -EOPNOTSUPP;
   2829
   2830	if (!netif_running(ndev))
   2831		return -ENETDOWN;
   2832
   2833	edata->eee_enabled = p->eee_enabled;
   2834	edata->eee_active = p->eee_active;
   2835	edata->tx_lpi_timer = p->tx_lpi_timer;
   2836	edata->tx_lpi_enabled = p->tx_lpi_enabled;
   2837
   2838	return phy_ethtool_get_eee(ndev->phydev, edata);
   2839}
   2840
   2841static int
   2842fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
   2843{
   2844	struct fec_enet_private *fep = netdev_priv(ndev);
   2845	struct ethtool_eee *p = &fep->eee;
   2846	int ret = 0;
   2847
   2848	if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
   2849		return -EOPNOTSUPP;
   2850
   2851	if (!netif_running(ndev))
   2852		return -ENETDOWN;
   2853
   2854	p->tx_lpi_timer = edata->tx_lpi_timer;
   2855
   2856	if (!edata->eee_enabled || !edata->tx_lpi_enabled ||
   2857	    !edata->tx_lpi_timer)
   2858		ret = fec_enet_eee_mode_set(ndev, false);
   2859	else
   2860		ret = fec_enet_eee_mode_set(ndev, true);
   2861
   2862	if (ret)
   2863		return ret;
   2864
   2865	return phy_ethtool_set_eee(ndev->phydev, edata);
   2866}
   2867
   2868static void
   2869fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
   2870{
   2871	struct fec_enet_private *fep = netdev_priv(ndev);
   2872
   2873	if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
   2874		wol->supported = WAKE_MAGIC;
   2875		wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
   2876	} else {
   2877		wol->supported = wol->wolopts = 0;
   2878	}
   2879}
   2880
   2881static int
   2882fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
   2883{
   2884	struct fec_enet_private *fep = netdev_priv(ndev);
   2885
   2886	if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
   2887		return -EINVAL;
   2888
   2889	if (wol->wolopts & ~WAKE_MAGIC)
   2890		return -EINVAL;
   2891
   2892	device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
   2893	if (device_may_wakeup(&ndev->dev))
   2894		fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
   2895	else
   2896		fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
   2897
   2898	return 0;
   2899}
   2900
   2901static const struct ethtool_ops fec_enet_ethtool_ops = {
   2902	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
   2903				     ETHTOOL_COALESCE_MAX_FRAMES,
   2904	.get_drvinfo		= fec_enet_get_drvinfo,
   2905	.get_regs_len		= fec_enet_get_regs_len,
   2906	.get_regs		= fec_enet_get_regs,
   2907	.nway_reset		= phy_ethtool_nway_reset,
   2908	.get_link		= ethtool_op_get_link,
   2909	.get_coalesce		= fec_enet_get_coalesce,
   2910	.set_coalesce		= fec_enet_set_coalesce,
   2911#ifndef CONFIG_M5272
   2912	.get_pauseparam		= fec_enet_get_pauseparam,
   2913	.set_pauseparam		= fec_enet_set_pauseparam,
   2914	.get_strings		= fec_enet_get_strings,
   2915	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
   2916	.get_sset_count		= fec_enet_get_sset_count,
   2917#endif
   2918	.get_ts_info		= fec_enet_get_ts_info,
   2919	.get_tunable		= fec_enet_get_tunable,
   2920	.set_tunable		= fec_enet_set_tunable,
   2921	.get_wol		= fec_enet_get_wol,
   2922	.set_wol		= fec_enet_set_wol,
   2923	.get_eee		= fec_enet_get_eee,
   2924	.set_eee		= fec_enet_set_eee,
   2925	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
   2926	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
   2927	.self_test		= net_selftest,
   2928};
   2929
   2930static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
   2931{
   2932	struct fec_enet_private *fep = netdev_priv(ndev);
   2933	struct phy_device *phydev = ndev->phydev;
   2934
   2935	if (!netif_running(ndev))
   2936		return -EINVAL;
   2937
   2938	if (!phydev)
   2939		return -ENODEV;
   2940
   2941	if (fep->bufdesc_ex) {
   2942		bool use_fec_hwts = !phy_has_hwtstamp(phydev);
   2943
   2944		if (cmd == SIOCSHWTSTAMP) {
   2945			if (use_fec_hwts)
   2946				return fec_ptp_set(ndev, rq);
   2947			fec_ptp_disable_hwts(ndev);
   2948		} else if (cmd == SIOCGHWTSTAMP) {
   2949			if (use_fec_hwts)
   2950				return fec_ptp_get(ndev, rq);
   2951		}
   2952	}
   2953
   2954	return phy_mii_ioctl(phydev, rq, cmd);
   2955}
   2956
   2957static void fec_enet_free_buffers(struct net_device *ndev)
   2958{
   2959	struct fec_enet_private *fep = netdev_priv(ndev);
   2960	unsigned int i;
   2961	struct sk_buff *skb;
   2962	struct bufdesc	*bdp;
   2963	struct fec_enet_priv_tx_q *txq;
   2964	struct fec_enet_priv_rx_q *rxq;
   2965	unsigned int q;
   2966
   2967	for (q = 0; q < fep->num_rx_queues; q++) {
   2968		rxq = fep->rx_queue[q];
   2969		bdp = rxq->bd.base;
   2970		for (i = 0; i < rxq->bd.ring_size; i++) {
   2971			skb = rxq->rx_skbuff[i];
   2972			rxq->rx_skbuff[i] = NULL;
   2973			if (skb) {
   2974				dma_unmap_single(&fep->pdev->dev,
   2975						 fec32_to_cpu(bdp->cbd_bufaddr),
   2976						 FEC_ENET_RX_FRSIZE - fep->rx_align,
   2977						 DMA_FROM_DEVICE);
   2978				dev_kfree_skb(skb);
   2979			}
   2980			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
   2981		}
   2982	}
   2983
   2984	for (q = 0; q < fep->num_tx_queues; q++) {
   2985		txq = fep->tx_queue[q];
   2986		for (i = 0; i < txq->bd.ring_size; i++) {
   2987			kfree(txq->tx_bounce[i]);
   2988			txq->tx_bounce[i] = NULL;
   2989			skb = txq->tx_skbuff[i];
   2990			txq->tx_skbuff[i] = NULL;
   2991			dev_kfree_skb(skb);
   2992		}
   2993	}
   2994}
   2995
   2996static void fec_enet_free_queue(struct net_device *ndev)
   2997{
   2998	struct fec_enet_private *fep = netdev_priv(ndev);
   2999	int i;
   3000	struct fec_enet_priv_tx_q *txq;
   3001
   3002	for (i = 0; i < fep->num_tx_queues; i++)
   3003		if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
   3004			txq = fep->tx_queue[i];
   3005			dma_free_coherent(&fep->pdev->dev,
   3006					  txq->bd.ring_size * TSO_HEADER_SIZE,
   3007					  txq->tso_hdrs,
   3008					  txq->tso_hdrs_dma);
   3009		}
   3010
   3011	for (i = 0; i < fep->num_rx_queues; i++)
   3012		kfree(fep->rx_queue[i]);
   3013	for (i = 0; i < fep->num_tx_queues; i++)
   3014		kfree(fep->tx_queue[i]);
   3015}
   3016
   3017static int fec_enet_alloc_queue(struct net_device *ndev)
   3018{
   3019	struct fec_enet_private *fep = netdev_priv(ndev);
   3020	int i;
   3021	int ret = 0;
   3022	struct fec_enet_priv_tx_q *txq;
   3023
   3024	for (i = 0; i < fep->num_tx_queues; i++) {
   3025		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
   3026		if (!txq) {
   3027			ret = -ENOMEM;
   3028			goto alloc_failed;
   3029		}
   3030
   3031		fep->tx_queue[i] = txq;
   3032		txq->bd.ring_size = TX_RING_SIZE;
   3033		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
   3034
   3035		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
   3036		txq->tx_wake_threshold =
   3037			(txq->bd.ring_size - txq->tx_stop_threshold) / 2;
   3038
   3039		txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
   3040					txq->bd.ring_size * TSO_HEADER_SIZE,
   3041					&txq->tso_hdrs_dma,
   3042					GFP_KERNEL);
   3043		if (!txq->tso_hdrs) {
   3044			ret = -ENOMEM;
   3045			goto alloc_failed;
   3046		}
   3047	}
   3048
   3049	for (i = 0; i < fep->num_rx_queues; i++) {
   3050		fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
   3051					   GFP_KERNEL);
   3052		if (!fep->rx_queue[i]) {
   3053			ret = -ENOMEM;
   3054			goto alloc_failed;
   3055		}
   3056
   3057		fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
   3058		fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
   3059	}
   3060	return ret;
   3061
   3062alloc_failed:
   3063	fec_enet_free_queue(ndev);
   3064	return ret;
   3065}
   3066
   3067static int
   3068fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
   3069{
   3070	struct fec_enet_private *fep = netdev_priv(ndev);
   3071	unsigned int i;
   3072	struct sk_buff *skb;
   3073	struct bufdesc	*bdp;
   3074	struct fec_enet_priv_rx_q *rxq;
   3075
   3076	rxq = fep->rx_queue[queue];
   3077	bdp = rxq->bd.base;
   3078	for (i = 0; i < rxq->bd.ring_size; i++) {
   3079		skb = __netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE, GFP_KERNEL);
   3080		if (!skb)
   3081			goto err_alloc;
   3082
   3083		if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
   3084			dev_kfree_skb(skb);
   3085			goto err_alloc;
   3086		}
   3087
   3088		rxq->rx_skbuff[i] = skb;
   3089		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
   3090
   3091		if (fep->bufdesc_ex) {
   3092			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
   3093			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
   3094		}
   3095
   3096		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
   3097	}
   3098
   3099	/* Set the last buffer to wrap. */
   3100	bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
   3101	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
   3102	return 0;
   3103
   3104 err_alloc:
   3105	fec_enet_free_buffers(ndev);
   3106	return -ENOMEM;
   3107}
   3108
   3109static int
   3110fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
   3111{
   3112	struct fec_enet_private *fep = netdev_priv(ndev);
   3113	unsigned int i;
   3114	struct bufdesc  *bdp;
   3115	struct fec_enet_priv_tx_q *txq;
   3116
   3117	txq = fep->tx_queue[queue];
   3118	bdp = txq->bd.base;
   3119	for (i = 0; i < txq->bd.ring_size; i++) {
   3120		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
   3121		if (!txq->tx_bounce[i])
   3122			goto err_alloc;
   3123
   3124		bdp->cbd_sc = cpu_to_fec16(0);
   3125		bdp->cbd_bufaddr = cpu_to_fec32(0);
   3126
   3127		if (fep->bufdesc_ex) {
   3128			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
   3129			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
   3130		}
   3131
   3132		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
   3133	}
   3134
   3135	/* Set the last buffer to wrap. */
   3136	bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
   3137	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
   3138
   3139	return 0;
   3140
   3141 err_alloc:
   3142	fec_enet_free_buffers(ndev);
   3143	return -ENOMEM;
   3144}
   3145
   3146static int fec_enet_alloc_buffers(struct net_device *ndev)
   3147{
   3148	struct fec_enet_private *fep = netdev_priv(ndev);
   3149	unsigned int i;
   3150
   3151	for (i = 0; i < fep->num_rx_queues; i++)
   3152		if (fec_enet_alloc_rxq_buffers(ndev, i))
   3153			return -ENOMEM;
   3154
   3155	for (i = 0; i < fep->num_tx_queues; i++)
   3156		if (fec_enet_alloc_txq_buffers(ndev, i))
   3157			return -ENOMEM;
   3158	return 0;
   3159}
   3160
   3161static int
   3162fec_enet_open(struct net_device *ndev)
   3163{
   3164	struct fec_enet_private *fep = netdev_priv(ndev);
   3165	int ret;
   3166	bool reset_again;
   3167
   3168	ret = pm_runtime_resume_and_get(&fep->pdev->dev);
   3169	if (ret < 0)
   3170		return ret;
   3171
   3172	pinctrl_pm_select_default_state(&fep->pdev->dev);
   3173	ret = fec_enet_clk_enable(ndev, true);
   3174	if (ret)
   3175		goto clk_enable;
   3176
   3177	/* During the first fec_enet_open call the PHY is not yet probed.
   3178	 * Therefore the phy_reset_after_clk_enable() call within
   3179	 * fec_enet_clk_enable() fails. As we need this reset to be sure the
   3180	 * PHY is working correctly, we check whether we need to reset again
   3181	 * later, once the PHY has been probed.
   3182	 */
   3183	if (ndev->phydev && ndev->phydev->drv)
   3184		reset_again = false;
   3185	else
   3186		reset_again = true;
   3187
   3188	/* I should reset the ring buffers here, but I don't yet know
   3189	 * a simple way to do that.
   3190	 */
   3191
   3192	ret = fec_enet_alloc_buffers(ndev);
   3193	if (ret)
   3194		goto err_enet_alloc;
   3195
   3196	/* Init MAC prior to mii bus probe */
   3197	fec_restart(ndev);
   3198
   3199	/* Call phy_reset_after_clk_enable() again if the earlier call (from
   3200	 * fec_enet_clk_enable()) failed because the PHY wasn't probed yet.
   3201	 */
   3202	if (reset_again)
   3203		fec_enet_phy_reset_after_clk_enable(ndev);
   3204
   3205	/* Probe and connect to the PHY when opening the interface */
   3206	ret = fec_enet_mii_probe(ndev);
   3207	if (ret)
   3208		goto err_enet_mii_probe;
   3209
   3210	if (fep->quirks & FEC_QUIRK_ERR006687)
   3211		imx6q_cpuidle_fec_irqs_used();
   3212
   3213	napi_enable(&fep->napi);
   3214	phy_start(ndev->phydev);
   3215	netif_tx_start_all_queues(ndev);
   3216
   3217	device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
   3218				 FEC_WOL_FLAG_ENABLE);
   3219
   3220	return 0;
   3221
   3222err_enet_mii_probe:
   3223	fec_enet_free_buffers(ndev);
   3224err_enet_alloc:
   3225	fec_enet_clk_enable(ndev, false);
   3226clk_enable:
   3227	pm_runtime_mark_last_busy(&fep->pdev->dev);
   3228	pm_runtime_put_autosuspend(&fep->pdev->dev);
   3229	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
   3230	return ret;
   3231}
   3232
   3233static int
   3234fec_enet_close(struct net_device *ndev)
   3235{
   3236	struct fec_enet_private *fep = netdev_priv(ndev);
   3237
   3238	phy_stop(ndev->phydev);
   3239
   3240	if (netif_device_present(ndev)) {
   3241		napi_disable(&fep->napi);
   3242		netif_tx_disable(ndev);
   3243		fec_stop(ndev);
   3244	}
   3245
   3246	phy_disconnect(ndev->phydev);
   3247
   3248	if (fep->quirks & FEC_QUIRK_ERR006687)
   3249		imx6q_cpuidle_fec_irqs_unused();
   3250
   3251	fec_enet_update_ethtool_stats(ndev);
   3252
   3253	fec_enet_clk_enable(ndev, false);
   3254	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
   3255	pm_runtime_mark_last_busy(&fep->pdev->dev);
   3256	pm_runtime_put_autosuspend(&fep->pdev->dev);
   3257
   3258	fec_enet_free_buffers(ndev);
   3259
   3260	return 0;
   3261}
   3262
   3263/* Set or clear the multicast filter for this adaptor.
   3264 * Skeleton taken from sunlance driver.
   3265 * The CPM Ethernet implementation allows Multicast as well as individual
   3266 * MAC address filtering.  Some of the drivers check to make sure it is
   3267 * a group multicast address, and discard those that are not.  I guess I
   3268 * will do the same for now, but just remove the test if you want
   3269 * individual filtering as well (do the upper net layers want or support
   3270 * this kind of feature?).
   3271 */
   3272
   3273#define FEC_HASH_BITS	6		/* #bits in hash */
   3274
   3275static void set_multicast_list(struct net_device *ndev)
   3276{
   3277	struct fec_enet_private *fep = netdev_priv(ndev);
   3278	struct netdev_hw_addr *ha;
   3279	unsigned int crc, tmp;
   3280	unsigned char hash;
   3281	unsigned int hash_high = 0, hash_low = 0;
   3282
   3283	if (ndev->flags & IFF_PROMISC) {
   3284		tmp = readl(fep->hwp + FEC_R_CNTRL);
   3285		tmp |= 0x8;
   3286		writel(tmp, fep->hwp + FEC_R_CNTRL);
   3287		return;
   3288	}
   3289
   3290	tmp = readl(fep->hwp + FEC_R_CNTRL);
   3291	tmp &= ~0x8;
   3292	writel(tmp, fep->hwp + FEC_R_CNTRL);
   3293
   3294	if (ndev->flags & IFF_ALLMULTI) {
   3295		/* Catch all multicast addresses, so set the
   3296		 * filter to all 1's
   3297		 */
   3298		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
   3299		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
   3300
   3301		return;
   3302	}
   3303
   3304	/* Add the addresses to the hash registers */
   3305	netdev_for_each_mc_addr(ha, ndev) {
   3306		/* calculate crc32 value of mac address */
   3307		crc = ether_crc_le(ndev->addr_len, ha->addr);
   3308
   3309		/* only the upper 6 bits (FEC_HASH_BITS) are used,
   3310		 * selecting a specific bit in the hash registers
   3311		 */
   3312		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
   3313
   3314		if (hash > 31)
   3315			hash_high |= 1 << (hash - 32);
   3316		else
   3317			hash_low |= 1 << hash;
   3318	}
   3319
   3320	writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
   3321	writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
   3322}
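/* Worked example (illustrative, not from the original source): if the
 * top six CRC bits of an address come out as 0b100101 = 37, then
 * hash > 31, so bit (37 - 32) = 5 of FEC_GRP_HASH_TABLE_HIGH is set; a
 * hash of 12 would instead set bit 12 of FEC_GRP_HASH_TABLE_LOW.
 */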
   3323
   3324/* Set a MAC change in hardware. */
   3325static int
   3326fec_set_mac_address(struct net_device *ndev, void *p)
   3327{
   3328	struct fec_enet_private *fep = netdev_priv(ndev);
   3329	struct sockaddr *addr = p;
   3330
   3331	if (addr) {
   3332		if (!is_valid_ether_addr(addr->sa_data))
   3333			return -EADDRNOTAVAIL;
   3334		eth_hw_addr_set(ndev, addr->sa_data);
   3335	}
   3336
   3337	/* Add a netif status check here to avoid a system hang in this case:
   3338	 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
   3339	 * After ethx is down, all FEC clocks are gated off, so a subsequent
   3340	 * register access would hang the system.
   3341	 */
   3342	if (!netif_running(ndev))
   3343		return 0;
   3344
   3345	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
   3346		(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
   3347		fep->hwp + FEC_ADDR_LOW);
   3348	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
   3349		fep->hwp + FEC_ADDR_HIGH);
   3350	return 0;
   3351}
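/* Worked example (illustrative, not from the original source): for the
 * MAC address 00:04:9f:01:30:e0 used in the macaddr module parameter
 * example above, the two writes yield FEC_ADDR_LOW = 0x00049f01
 * (bytes 0-3) and FEC_ADDR_HIGH = 0x30e00000 (bytes 4-5 in the upper
 * 16 bits).
 */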
   3352
   3353#ifdef CONFIG_NET_POLL_CONTROLLER
   3354/**
   3355 * fec_poll_controller - FEC Poll controller function
   3356 * @dev: The FEC network adapter
   3357 *
   3358 * Polled functionality used by netconsole and others in non-interrupt mode
   3359 *
   3360 */
   3361static void fec_poll_controller(struct net_device *dev)
   3362{
   3363	int i;
   3364	struct fec_enet_private *fep = netdev_priv(dev);
   3365
   3366	for (i = 0; i < FEC_IRQ_NUM; i++) {
   3367		if (fep->irq[i] > 0) {
   3368			disable_irq(fep->irq[i]);
   3369			fec_enet_interrupt(fep->irq[i], dev);
   3370			enable_irq(fep->irq[i]);
   3371		}
   3372	}
   3373}
   3374#endif
   3375
   3376static inline void fec_enet_set_netdev_features(struct net_device *netdev,
   3377	netdev_features_t features)
   3378{
   3379	struct fec_enet_private *fep = netdev_priv(netdev);
   3380	netdev_features_t changed = features ^ netdev->features;
   3381
   3382	netdev->features = features;
   3383
   3384	/* Receive checksum has been changed */
   3385	if (changed & NETIF_F_RXCSUM) {
   3386		if (features & NETIF_F_RXCSUM)
   3387			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
   3388		else
   3389			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
   3390	}
   3391}
   3392
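       /* Toggling RX checksum offload (e.g. via "ethtool -K eth0 rx off")
        * on a running interface cannot be done on the fly: the MAC is
        * quiesced (NAPI off, TX locked, fec_stop()), the feature flags are
        * applied, and fec_restart() reprograms the hardware.
        */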
   3393static int fec_set_features(struct net_device *netdev,
   3394	netdev_features_t features)
   3395{
   3396	struct fec_enet_private *fep = netdev_priv(netdev);
   3397	netdev_features_t changed = features ^ netdev->features;
   3398
   3399	if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
   3400		napi_disable(&fep->napi);
   3401		netif_tx_lock_bh(netdev);
   3402		fec_stop(netdev);
   3403		fec_enet_set_netdev_features(netdev, features);
   3404		fec_restart(netdev);
   3405		netif_tx_wake_all_queues(netdev);
   3406		netif_tx_unlock_bh(netdev);
   3407		napi_enable(&fep->napi);
   3408	} else {
   3409		fec_enet_set_netdev_features(netdev, features);
   3410	}
   3411
   3412	return 0;
   3413}
   3414
   3415static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb)
   3416{
   3417	struct vlan_ethhdr *vhdr;
   3418	unsigned short vlan_TCI = 0;
   3419
   3420	if (skb->protocol == htons(ETH_P_ALL)) {
   3421		vhdr = (struct vlan_ethhdr *)(skb->data);
   3422		vlan_TCI = ntohs(vhdr->h_vlan_TCI);
   3423	}
   3424
   3425	return vlan_TCI;
   3426}
   3427
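       /* On AVB-capable SoCs the TX queue is chosen from the VLAN PCP (the
        * top 3 bits of the TCI, hence the ">> 13") through the
        * fec_enet_vlan_pri_to_queue lookup table; untagged traffic falls
        * back to queue 0.
        */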
   3428static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
   3429				 struct net_device *sb_dev)
   3430{
   3431	struct fec_enet_private *fep = netdev_priv(ndev);
   3432	u16 vlan_tag;
   3433
   3434	if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
   3435		return netdev_pick_tx(ndev, skb, NULL);
   3436
   3437	vlan_tag = fec_enet_get_raw_vlan_tci(skb);
   3438	if (!vlan_tag)
   3439		return vlan_tag;
   3440
   3441	return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
   3442}
   3443
   3444static const struct net_device_ops fec_netdev_ops = {
   3445	.ndo_open		= fec_enet_open,
   3446	.ndo_stop		= fec_enet_close,
   3447	.ndo_start_xmit		= fec_enet_start_xmit,
   3448	.ndo_select_queue       = fec_enet_select_queue,
   3449	.ndo_set_rx_mode	= set_multicast_list,
   3450	.ndo_validate_addr	= eth_validate_addr,
   3451	.ndo_tx_timeout		= fec_timeout,
   3452	.ndo_set_mac_address	= fec_set_mac_address,
   3453	.ndo_eth_ioctl		= fec_enet_ioctl,
   3454#ifdef CONFIG_NET_POLL_CONTROLLER
   3455	.ndo_poll_controller	= fec_poll_controller,
   3456#endif
   3457	.ndo_set_features	= fec_set_features,
   3458};
   3459
   3460static const unsigned short offset_des_active_rxq[] = {
   3461	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
   3462};
   3463
   3464static const unsigned short offset_des_active_txq[] = {
   3465	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
   3466};
   3467
   3468/*
   3469 * XXX: We need to clean up on failure exits here.
   3470 */
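       /* Layout note (inferred from the setup loops below): the buffer
        * descriptors for all RX queues, followed by all TX queues, are
        * carved out of a single dmam_alloc_coherent() region; each queue's
        * bd.last is pointed at its final descriptor so the ring end can be
        * found cheaply.
        */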
   3472static int fec_enet_init(struct net_device *ndev)
   3473{
   3474	struct fec_enet_private *fep = netdev_priv(ndev);
   3475	struct bufdesc *cbd_base;
   3476	dma_addr_t bd_dma;
   3477	int bd_size;
   3478	unsigned int i;
   3479	unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
   3480			sizeof(struct bufdesc);
   3481	unsigned dsize_log2 = __fls(dsize);
   3482	int ret;
   3483
   3484	WARN_ON(dsize != (1 << dsize_log2));
   3485#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
   3486	fep->rx_align = 0xf;
   3487	fep->tx_align = 0xf;
   3488#else
   3489	fep->rx_align = 0x3;
   3490	fep->tx_align = 0x3;
   3491#endif
   3492
   3493	/* Set the DMA mask for both the streaming and coherent APIs */
   3494	ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
   3495	if (ret < 0) {
   3496		dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
   3497		return ret;
   3498	}
   3499
   3500	ret = fec_enet_alloc_queue(ndev);
   3501	if (ret)
   3502		return ret;
   3503
   3504	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
   3505
   3506	/* Allocate memory for buffer descriptors. */
   3507	cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
   3508				       GFP_KERNEL);
   3509	if (!cbd_base) {
   3510		ret = -ENOMEM;
   3511		goto free_queue_mem;
   3512	}
   3513
   3514	/* Get the Ethernet address */
   3515	ret = fec_get_mac(ndev);
   3516	if (ret)
   3517		goto free_queue_mem;
   3518
   3519	/* make sure the MAC address we just acquired is programmed into the hw */
   3520	fec_set_mac_address(ndev, NULL);
   3521
   3522	/* Set receive and transmit descriptor base. */
   3523	for (i = 0; i < fep->num_rx_queues; i++) {
   3524		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
   3525		unsigned size = dsize * rxq->bd.ring_size;
   3526
   3527		rxq->bd.qid = i;
   3528		rxq->bd.base = cbd_base;
   3529		rxq->bd.cur = cbd_base;
   3530		rxq->bd.dma = bd_dma;
   3531		rxq->bd.dsize = dsize;
   3532		rxq->bd.dsize_log2 = dsize_log2;
   3533		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
   3534		bd_dma += size;
   3535		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
   3536		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
   3537	}
   3538
   3539	for (i = 0; i < fep->num_tx_queues; i++) {
   3540		struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
   3541		unsigned size = dsize * txq->bd.ring_size;
   3542
   3543		txq->bd.qid = i;
   3544		txq->bd.base = cbd_base;
   3545		txq->bd.cur = cbd_base;
   3546		txq->bd.dma = bd_dma;
   3547		txq->bd.dsize = dsize;
   3548		txq->bd.dsize_log2 = dsize_log2;
   3549		txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
   3550		bd_dma += size;
   3551		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
   3552		txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
   3553	}
   3554
   3556	/* The FEC Ethernet specific entries in the device structure */
   3557	ndev->watchdog_timeo = TX_TIMEOUT;
   3558	ndev->netdev_ops = &fec_netdev_ops;
   3559	ndev->ethtool_ops = &fec_enet_ethtool_ops;
   3560
   3561	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
   3562	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
   3563
   3564	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
   3565		/* enable hw VLAN support */
   3566		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
   3567
   3568	if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
   3569		netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS);
   3570
   3571		/* enable hw checksum and TSO acceleration */
   3572		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
   3573				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
   3574		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
   3575	}
   3576
   3577	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
   3578		fep->tx_align = 0;
   3579		fep->rx_align = 0x3f;
   3580	}
   3581
   3582	ndev->hw_features = ndev->features;
   3583
   3584	fec_restart(ndev);
   3585
   3586	if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
   3587		fec_enet_clear_ethtool_stats(ndev);
   3588	else
   3589		fec_enet_update_ethtool_stats(ndev);
   3590
   3591	return 0;
   3592
   3593free_queue_mem:
   3594	fec_enet_free_queue(ndev);
   3595	return ret;
   3596}
   3597
   3598#ifdef CONFIG_OF
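       /* Reset the PHY via an optional GPIO described in the device tree.
        * An illustrative node fragment (the property names are the ones
        * parsed below; delays are in milliseconds, and the GPIO specifier
        * shown is hypothetical):
        *
        *	phy-reset-gpios = <&gpio1 2 0>;
        *	phy-reset-duration = <10>;
        *	phy-reset-post-delay = <5>;
        */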
   3599static int fec_reset_phy(struct platform_device *pdev)
   3600{
   3601	int err, phy_reset;
   3602	bool active_high = false;
   3603	int msec = 1, phy_post_delay = 0;
   3604	struct device_node *np = pdev->dev.of_node;
   3605
   3606	if (!np)
   3607		return 0;
   3608
   3609	err = of_property_read_u32(np, "phy-reset-duration", &msec);
   3610	/* A sane reset duration should not be longer than 1s */
   3611	if (!err && msec > 1000)
   3612		msec = 1;
   3613
   3614	phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
   3615	if (phy_reset == -EPROBE_DEFER)
   3616		return phy_reset;
   3617	else if (!gpio_is_valid(phy_reset))
   3618		return 0;
   3619
   3620	err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
   3621	/* a valid post-reset delay should be less than 1s */
   3622	if (!err && phy_post_delay > 1000)
   3623		return -EINVAL;
   3624
   3625	active_high = of_property_read_bool(np, "phy-reset-active-high");
   3626
   3627	err = devm_gpio_request_one(&pdev->dev, phy_reset,
   3628			active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
   3629			"phy-reset");
   3630	if (err) {
   3631		dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
   3632		return err;
   3633	}
   3634
   3635	if (msec > 20)
   3636		msleep(msec);
   3637	else
   3638		usleep_range(msec * 1000, msec * 1000 + 1000);
   3639
   3640	gpio_set_value_cansleep(phy_reset, !active_high);
   3641
   3642	if (!phy_post_delay)
   3643		return 0;
   3644
   3645	if (phy_post_delay > 20)
   3646		msleep(phy_post_delay);
   3647	else
   3648		usleep_range(phy_post_delay * 1000,
   3649			     phy_post_delay * 1000 + 1000);
   3650
   3651	return 0;
   3652}
   3653#else /* CONFIG_OF */
   3654static int fec_reset_phy(struct platform_device *pdev)
   3655{
   3656	/*
   3657	 * When probed via platform data, the reset has already been done
   3658	 * by the board code.
   3659	 */
   3660	return 0;
   3661}
   3662#endif /* CONFIG_OF */
   3663
   3664static void
   3665fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
   3666{
   3667	struct device_node *np = pdev->dev.of_node;
   3668
   3669	*num_tx = *num_rx = 1;
   3670
   3671	if (!np || !of_device_is_available(np))
   3672		return;
   3673
   3674	/* parse the number of tx and rx queues */
   3675	of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
   3676
   3677	of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
   3678
   3679	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
   3680		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
   3681			 *num_tx);
   3682		*num_tx = 1;
   3683		return;
   3684	}
   3685
   3686	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
   3687		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
   3688			 *num_rx);
   3689		*num_rx = 1;
   3690		return;
   3691	}
   3693}
   3694
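       /* Determine how many event IRQs to request.  On platforms exposing
        * more than FEC_IRQ_NUM interrupts the count is clamped, and a
        * two-interrupt platform keeps only the first, since the last line
        * is reserved for the PPS timer (see the "last for pps" notes
        * below).
        */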
   3695static int fec_enet_get_irq_cnt(struct platform_device *pdev)
   3696{
   3697	int irq_cnt = platform_irq_count(pdev);
   3698
   3699	if (irq_cnt > FEC_IRQ_NUM)
   3700		irq_cnt = FEC_IRQ_NUM;	/* last for pps */
   3701	else if (irq_cnt == 2)
   3702		irq_cnt = 1;	/* last for pps */
   3703	else if (irq_cnt <= 0)
   3704		irq_cnt = 1;	/* At least 1 irq is needed */
   3705	return irq_cnt;
   3706}
   3707
   3708static void fec_enet_get_wakeup_irq(struct platform_device *pdev)
   3709{
   3710	struct net_device *ndev = platform_get_drvdata(pdev);
   3711	struct fec_enet_private *fep = netdev_priv(ndev);
   3712
   3713	if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2)
   3714		fep->wake_irq = fep->irq[2];
   3715	else
   3716		fep->wake_irq = fep->irq[0];
   3717}
   3718
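       /* Parse the "fsl,stop-mode" property, which (as consumed below) is a
        * <phandle register-offset bit> triplet: the phandle names the
        * syscon/GPR block, out_val[1] the register offset and out_val[2]
        * the bit used to request stop mode.
        */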
   3719static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
   3720				   struct device_node *np)
   3721{
   3722	struct device_node *gpr_np;
   3723	u32 out_val[3];
   3724	int ret = 0;
   3725
   3726	gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0);
   3727	if (!gpr_np)
   3728		return 0;
   3729
   3730	ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
   3731					 ARRAY_SIZE(out_val));
   3732	if (ret) {
   3733		dev_dbg(&fep->pdev->dev, "no stop mode property\n");
   3734		goto out;
   3735	}
   3736
   3737	fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
   3738	if (IS_ERR(fep->stop_gpr.gpr)) {
   3739		dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
   3740		ret = PTR_ERR(fep->stop_gpr.gpr);
   3741		fep->stop_gpr.gpr = NULL;
   3742		goto out;
   3743	}
   3744
   3745	fep->stop_gpr.reg = out_val[1];
   3746	fep->stop_gpr.bit = out_val[2];
   3747
   3748out:
   3749	of_node_put(gpr_np);
   3750
   3751	return ret;
   3752}
   3753
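       /* Probe-time clock roles, as acquired below: "ipg" conventionally
        * gates register access and "ahb" the bus/DMA side, while
        * "enet_out", "enet_clk_ref", "enet_2x_txclk" and "ptp" are
        * optional and board-dependent; a missing "ptp" clock merely
        * disables the extended (timestamping) buffer descriptors.
        */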
   3754static int
   3755fec_probe(struct platform_device *pdev)
   3756{
   3757	struct fec_enet_private *fep;
   3758	struct fec_platform_data *pdata;
   3759	phy_interface_t interface;
   3760	struct net_device *ndev;
   3761	int i, irq, ret = 0;
   3762	const struct of_device_id *of_id;
   3763	static int dev_id;
   3764	struct device_node *np = pdev->dev.of_node, *phy_node;
   3765	int num_tx_qs;
   3766	int num_rx_qs;
   3767	char irq_name[8];
   3768	int irq_cnt;
   3769	struct fec_devinfo *dev_info;
   3770
   3771	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
   3772
   3773	/* Init network device */
   3774	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
   3775				  FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
   3776	if (!ndev)
   3777		return -ENOMEM;
   3778
   3779	SET_NETDEV_DEV(ndev, &pdev->dev);
   3780
   3781	/* setup board info structure */
   3782	fep = netdev_priv(ndev);
   3783
   3784	of_id = of_match_device(fec_dt_ids, &pdev->dev);
   3785	if (of_id)
   3786		pdev->id_entry = of_id->data;
   3787	dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data;
   3788	if (dev_info)
   3789		fep->quirks = dev_info->quirks;
   3790
   3791	fep->netdev = ndev;
   3792	fep->num_rx_queues = num_rx_qs;
   3793	fep->num_tx_queues = num_tx_qs;
   3794
   3795#if !defined(CONFIG_M5272)
   3796	/* default enable pause frame auto negotiation */
   3797	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
   3798		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
   3799#endif
   3800
   3801	/* Select default pin state */
   3802	pinctrl_pm_select_default_state(&pdev->dev);
   3803
   3804	fep->hwp = devm_platform_ioremap_resource(pdev, 0);
   3805	if (IS_ERR(fep->hwp)) {
   3806		ret = PTR_ERR(fep->hwp);
   3807		goto failed_ioremap;
   3808	}
   3809
   3810	fep->pdev = pdev;
   3811	fep->dev_id = dev_id++;
   3812
   3813	platform_set_drvdata(pdev, ndev);
   3814
   3815	if ((of_machine_is_compatible("fsl,imx6q") ||
   3816	     of_machine_is_compatible("fsl,imx6dl")) &&
   3817	    !of_property_read_bool(np, "fsl,err006687-workaround-present"))
   3818		fep->quirks |= FEC_QUIRK_ERR006687;
   3819
   3820	if (of_get_property(np, "fsl,magic-packet", NULL))
   3821		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
   3822
   3823	ret = fec_enet_init_stop_mode(fep, np);
   3824	if (ret)
   3825		goto failed_stop_mode;
   3826
   3827	phy_node = of_parse_phandle(np, "phy-handle", 0);
   3828	if (!phy_node && of_phy_is_fixed_link(np)) {
   3829		ret = of_phy_register_fixed_link(np);
   3830		if (ret < 0) {
   3831			dev_err(&pdev->dev,
   3832				"broken fixed-link specification\n");
   3833			goto failed_phy;
   3834		}
   3835		phy_node = of_node_get(np);
   3836	}
   3837	fep->phy_node = phy_node;
   3838
   3839	ret = of_get_phy_mode(pdev->dev.of_node, &interface);
   3840	if (ret) {
   3841		pdata = dev_get_platdata(&pdev->dev);
   3842		if (pdata)
   3843			fep->phy_interface = pdata->phy;
   3844		else
   3845			fep->phy_interface = PHY_INTERFACE_MODE_MII;
   3846	} else {
   3847		fep->phy_interface = interface;
   3848	}
   3849
   3850	ret = fec_enet_parse_rgmii_delay(fep, np);
   3851	if (ret)
   3852		goto failed_rgmii_delay;
   3853
   3854	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
   3855	if (IS_ERR(fep->clk_ipg)) {
   3856		ret = PTR_ERR(fep->clk_ipg);
   3857		goto failed_clk;
   3858	}
   3859
   3860	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
   3861	if (IS_ERR(fep->clk_ahb)) {
   3862		ret = PTR_ERR(fep->clk_ahb);
   3863		goto failed_clk;
   3864	}
   3865
   3866	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
   3867
   3868	/* enet_out is optional, depends on board */
   3869	fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out");
   3870	if (IS_ERR(fep->clk_enet_out)) {
   3871		ret = PTR_ERR(fep->clk_enet_out);
   3872		goto failed_clk;
   3873	}
   3874
   3875	fep->ptp_clk_on = false;
   3876	mutex_init(&fep->ptp_clk_mutex);
   3877
   3878	/* clk_ref is optional, depends on board */
   3879	fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
   3880	if (IS_ERR(fep->clk_ref)) {
   3881		ret = PTR_ERR(fep->clk_ref);
   3882		goto failed_clk;
   3883	}
   3884	fep->clk_ref_rate = clk_get_rate(fep->clk_ref);
   3885
   3886	/* clk_2x_txclk is optional, depends on board */
   3887	if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) {
   3888		fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk");
   3889		if (IS_ERR(fep->clk_2x_txclk))
   3890			fep->clk_2x_txclk = NULL;
   3891	}
   3892
   3893	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
   3894	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
   3895	if (IS_ERR(fep->clk_ptp)) {
   3896		fep->clk_ptp = NULL;
   3897		fep->bufdesc_ex = false;
   3898	}
   3899
   3900	ret = fec_enet_clk_enable(ndev, true);
   3901	if (ret)
   3902		goto failed_clk;
   3903
   3904	ret = clk_prepare_enable(fep->clk_ipg);
   3905	if (ret)
   3906		goto failed_clk_ipg;
   3907	ret = clk_prepare_enable(fep->clk_ahb);
   3908	if (ret)
   3909		goto failed_clk_ahb;
   3910
   3911	fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
   3912	if (!IS_ERR(fep->reg_phy)) {
   3913		ret = regulator_enable(fep->reg_phy);
   3914		if (ret) {
   3915			dev_err(&pdev->dev,
   3916				"Failed to enable phy regulator: %d\n", ret);
   3917			goto failed_regulator;
   3918		}
   3919	} else {
   3920		if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
   3921			ret = -EPROBE_DEFER;
   3922			goto failed_regulator;
   3923		}
   3924		fep->reg_phy = NULL;
   3925	}
   3926
   3927	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
   3928	pm_runtime_use_autosuspend(&pdev->dev);
   3929	pm_runtime_get_noresume(&pdev->dev);
   3930	pm_runtime_set_active(&pdev->dev);
   3931	pm_runtime_enable(&pdev->dev);
   3932
   3933	ret = fec_reset_phy(pdev);
   3934	if (ret)
   3935		goto failed_reset;
   3936
   3937	irq_cnt = fec_enet_get_irq_cnt(pdev);
   3938	if (fep->bufdesc_ex)
   3939		fec_ptp_init(pdev, irq_cnt);
   3940
   3941	ret = fec_enet_init(ndev);
   3942	if (ret)
   3943		goto failed_init;
   3944
   3945	for (i = 0; i < irq_cnt; i++) {
   3946		snprintf(irq_name, sizeof(irq_name), "int%d", i);
   3947		irq = platform_get_irq_byname_optional(pdev, irq_name);
   3948		if (irq < 0)
   3949			irq = platform_get_irq(pdev, i);
   3950		if (irq < 0) {
   3951			ret = irq;
   3952			goto failed_irq;
   3953		}
   3954		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
   3955				       0, pdev->name, ndev);
   3956		if (ret)
   3957			goto failed_irq;
   3958
   3959		fep->irq[i] = irq;
   3960	}
   3961
   3962	/* Decide which interrupt line is wakeup-capable */
   3963	fec_enet_get_wakeup_irq(pdev);
   3964
   3965	ret = fec_enet_mii_init(pdev);
   3966	if (ret)
   3967		goto failed_mii_init;
   3968
   3969	/* Carrier starts down, phylib will bring it up */
   3970	netif_carrier_off(ndev);
   3971	fec_enet_clk_enable(ndev, false);
   3972	pinctrl_pm_select_sleep_state(&pdev->dev);
   3973
   3974	ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;
   3975
   3976	ret = register_netdev(ndev);
   3977	if (ret)
   3978		goto failed_register;
   3979
   3980	device_init_wakeup(&ndev->dev, fep->wol_flag &
   3981			   FEC_WOL_HAS_MAGIC_PACKET);
   3982
   3983	if (fep->bufdesc_ex && fep->ptp_clock)
   3984		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
   3985
   3986	fep->rx_copybreak = COPYBREAK_DEFAULT;
   3987	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
   3988
   3989	pm_runtime_mark_last_busy(&pdev->dev);
   3990	pm_runtime_put_autosuspend(&pdev->dev);
   3991
   3992	return 0;
   3993
   3994failed_register:
   3995	fec_enet_mii_remove(fep);
   3996failed_mii_init:
   3997failed_irq:
   3998failed_init:
   3999	fec_ptp_stop(pdev);
   4000failed_reset:
   4001	pm_runtime_put_noidle(&pdev->dev);
   4002	pm_runtime_disable(&pdev->dev);
   4003	if (fep->reg_phy)
   4004		regulator_disable(fep->reg_phy);
   4005failed_regulator:
   4006	clk_disable_unprepare(fep->clk_ahb);
   4007failed_clk_ahb:
   4008	clk_disable_unprepare(fep->clk_ipg);
   4009failed_clk_ipg:
   4010	fec_enet_clk_enable(ndev, false);
   4011failed_clk:
   4012failed_rgmii_delay:
   4013	if (of_phy_is_fixed_link(np))
   4014		of_phy_deregister_fixed_link(np);
   4015	of_node_put(phy_node);
   4016failed_stop_mode:
   4017failed_phy:
   4018	dev_id--;
   4019failed_ioremap:
   4020	free_netdev(ndev);
   4021
   4022	return ret;
   4023}
   4024
   4025static int
   4026fec_drv_remove(struct platform_device *pdev)
   4027{
   4028	struct net_device *ndev = platform_get_drvdata(pdev);
   4029	struct fec_enet_private *fep = netdev_priv(ndev);
   4030	struct device_node *np = pdev->dev.of_node;
   4031	int ret;
   4032
   4033	ret = pm_runtime_resume_and_get(&pdev->dev);
   4034	if (ret < 0)
   4035		return ret;
   4036
   4037	cancel_work_sync(&fep->tx_timeout_work);
   4038	fec_ptp_stop(pdev);
   4039	unregister_netdev(ndev);
   4040	fec_enet_mii_remove(fep);
   4041	if (fep->reg_phy)
   4042		regulator_disable(fep->reg_phy);
   4043
   4044	if (of_phy_is_fixed_link(np))
   4045		of_phy_deregister_fixed_link(np);
   4046	of_node_put(fep->phy_node);
   4047
   4048	clk_disable_unprepare(fep->clk_ahb);
   4049	clk_disable_unprepare(fep->clk_ipg);
   4050	pm_runtime_put_noidle(&pdev->dev);
   4051	pm_runtime_disable(&pdev->dev);
   4052
   4053	free_netdev(ndev);
   4054	return 0;
   4055}
   4056
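       /* System suspend: with Wake-on-LAN enabled the wake IRQ stays armed
        * (enable_irq_wake()) and the MAC is parked in stop mode; otherwise
        * all FEC interrupts are disabled and the pins drop to their sleep
        * state.  Either way the clocks are gated afterwards.
        */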
   4057static int __maybe_unused fec_suspend(struct device *dev)
   4058{
   4059	struct net_device *ndev = dev_get_drvdata(dev);
   4060	struct fec_enet_private *fep = netdev_priv(ndev);
   4061
   4062	rtnl_lock();
   4063	if (netif_running(ndev)) {
   4064		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
   4065			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
   4066		phy_stop(ndev->phydev);
   4067		napi_disable(&fep->napi);
   4068		netif_tx_lock_bh(ndev);
   4069		netif_device_detach(ndev);
   4070		netif_tx_unlock_bh(ndev);
   4071		fec_stop(ndev);
   4072		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
   4073			fec_irqs_disable(ndev);
   4074			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
   4075		} else {
   4076			fec_irqs_disable_except_wakeup(ndev);
   4077			if (fep->wake_irq > 0) {
   4078				disable_irq(fep->wake_irq);
   4079				enable_irq_wake(fep->wake_irq);
   4080			}
   4081			fec_enet_stop_mode(fep, true);
   4082		}
   4083		/* It's safe to disable clocks since interrupts are masked */
   4084		fec_enet_clk_enable(ndev, false);
   4085	}
   4086	rtnl_unlock();
   4087
   4088	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
   4089		regulator_disable(fep->reg_phy);
   4090
   4091	/* The SoC supplies the PHY clock and controls the PHY regulator;
   4092	 * disabling either one takes the PHY link down.
   4093	 */
   4094	if (fep->clk_enet_out || fep->reg_phy)
   4095		fep->link = 0;
   4096
   4097	return 0;
   4098}
   4099
   4100static int __maybe_unused fec_resume(struct device *dev)
   4101{
   4102	struct net_device *ndev = dev_get_drvdata(dev);
   4103	struct fec_enet_private *fep = netdev_priv(ndev);
   4104	int ret;
   4105	int val;
   4106
   4107	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
   4108		ret = regulator_enable(fep->reg_phy);
   4109		if (ret)
   4110			return ret;
   4111	}
   4112
   4113	rtnl_lock();
   4114	if (netif_running(ndev)) {
   4115		ret = fec_enet_clk_enable(ndev, true);
   4116		if (ret) {
   4117			rtnl_unlock();
   4118			goto failed_clk;
   4119		}
   4120		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
   4121			fec_enet_stop_mode(fep, false);
   4122			if (fep->wake_irq) {
   4123				disable_irq_wake(fep->wake_irq);
   4124				enable_irq(fep->wake_irq);
   4125			}
   4126
   4127			val = readl(fep->hwp + FEC_ECNTRL);
   4128			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
   4129			writel(val, fep->hwp + FEC_ECNTRL);
   4130			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
   4131		} else {
   4132			pinctrl_pm_select_default_state(&fep->pdev->dev);
   4133		}
   4134		fec_restart(ndev);
   4135		netif_tx_lock_bh(ndev);
   4136		netif_device_attach(ndev);
   4137		netif_tx_unlock_bh(ndev);
   4138		napi_enable(&fep->napi);
   4139		phy_init_hw(ndev->phydev);
   4140		phy_start(ndev->phydev);
   4141	}
   4142	rtnl_unlock();
   4143
   4144	return 0;
   4145
   4146failed_clk:
   4147	if (fep->reg_phy)
   4148		regulator_disable(fep->reg_phy);
   4149	return ret;
   4150}
   4151
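       /* Runtime PM simply gates the ipg/ahb clocks between MDIO and
        * register accesses; FEC_MDIO_PM_TIMEOUT sets the autosuspend delay
        * used with these callbacks.
        */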
   4152static int __maybe_unused fec_runtime_suspend(struct device *dev)
   4153{
   4154	struct net_device *ndev = dev_get_drvdata(dev);
   4155	struct fec_enet_private *fep = netdev_priv(ndev);
   4156
   4157	clk_disable_unprepare(fep->clk_ahb);
   4158	clk_disable_unprepare(fep->clk_ipg);
   4159
   4160	return 0;
   4161}
   4162
   4163static int __maybe_unused fec_runtime_resume(struct device *dev)
   4164{
   4165	struct net_device *ndev = dev_get_drvdata(dev);
   4166	struct fec_enet_private *fep = netdev_priv(ndev);
   4167	int ret;
   4168
   4169	ret = clk_prepare_enable(fep->clk_ahb);
   4170	if (ret)
   4171		return ret;
   4172	ret = clk_prepare_enable(fep->clk_ipg);
   4173	if (ret)
   4174		goto failed_clk_ipg;
   4175
   4176	return 0;
   4177
   4178failed_clk_ipg:
   4179	clk_disable_unprepare(fep->clk_ahb);
   4180	return ret;
   4181}
   4182
   4183static const struct dev_pm_ops fec_pm_ops = {
   4184	SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
   4185	SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
   4186};
   4187
   4188static struct platform_driver fec_driver = {
   4189	.driver	= {
   4190		.name	= DRIVER_NAME,
   4191		.pm	= &fec_pm_ops,
   4192		.of_match_table = fec_dt_ids,
   4193		.suppress_bind_attrs = true,
   4194	},
   4195	.id_table = fec_devtype,
   4196	.probe	= fec_probe,
   4197	.remove	= fec_drv_remove,
   4198};
   4199
   4200module_platform_driver(fec_driver);
   4201
   4202MODULE_LICENSE("GPL");