cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ftgmac100.c (52885B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Faraday FTGMAC100 Gigabit Ethernet
      4 *
      5 * (C) Copyright 2009-2011 Faraday Technology
      6 * Po-Yu Chuang <ratbert@faraday-tech.com>
      7 */
      8
      9#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
     10
     11#include <linux/clk.h>
     12#include <linux/dma-mapping.h>
     13#include <linux/etherdevice.h>
     14#include <linux/ethtool.h>
     15#include <linux/interrupt.h>
     16#include <linux/io.h>
     17#include <linux/module.h>
     18#include <linux/netdevice.h>
     19#include <linux/of.h>
     20#include <linux/of_mdio.h>
     21#include <linux/phy.h>
     22#include <linux/platform_device.h>
     23#include <linux/property.h>
     24#include <linux/crc32.h>
     25#include <linux/if_vlan.h>
     26#include <linux/of_net.h>
     27#include <net/ip.h>
     28#include <net/ncsi.h>
     29
     30#include "ftgmac100.h"
     31
     32#define DRV_NAME	"ftgmac100"
     33
     34/* Arbitrary values, I am not sure the HW has limits */
     35#define MAX_RX_QUEUE_ENTRIES	1024
     36#define MAX_TX_QUEUE_ENTRIES	1024
     37#define MIN_RX_QUEUE_ENTRIES	32
     38#define MIN_TX_QUEUE_ENTRIES	32
     39
     40/* Defaults */
     41#define DEF_RX_QUEUE_ENTRIES	128
     42#define DEF_TX_QUEUE_ENTRIES	128
     43
     44#define MAX_PKT_SIZE		1536
     45#define RX_BUF_SIZE		MAX_PKT_SIZE	/* must be smaller than 0x3fff */
     46
     47/* Min number of tx ring entries before stopping queue */
     48#define TX_THRESHOLD		(MAX_SKB_FRAGS + 1)
     49
     50#define FTGMAC_100MHZ		100000000
     51#define FTGMAC_25MHZ		25000000
     52
     53struct ftgmac100 {
     54	/* Registers */
     55	struct resource *res;
     56	void __iomem *base;
     57
     58	/* Rx ring */
     59	unsigned int rx_q_entries;
     60	struct ftgmac100_rxdes *rxdes;
     61	dma_addr_t rxdes_dma;
     62	struct sk_buff **rx_skbs;
     63	unsigned int rx_pointer;
     64	u32 rxdes0_edorr_mask;
     65
     66	/* Tx ring */
     67	unsigned int tx_q_entries;
     68	struct ftgmac100_txdes *txdes;
     69	dma_addr_t txdes_dma;
     70	struct sk_buff **tx_skbs;
     71	unsigned int tx_clean_pointer;
     72	unsigned int tx_pointer;
     73	u32 txdes0_edotr_mask;
     74
      75	/* Used to signal the reset task of a ring change request */
     76	unsigned int new_rx_q_entries;
     77	unsigned int new_tx_q_entries;
     78
     79	/* Scratch page to use when rx skb alloc fails */
     80	void *rx_scratch;
     81	dma_addr_t rx_scratch_dma;
     82
     83	/* Component structures */
     84	struct net_device *netdev;
     85	struct device *dev;
     86	struct ncsi_dev *ndev;
     87	struct napi_struct napi;
     88	struct work_struct reset_task;
     89	struct mii_bus *mii_bus;
     90	struct clk *clk;
     91
     92	/* AST2500/AST2600 RMII ref clock gate */
     93	struct clk *rclk;
     94
     95	/* Link management */
     96	int cur_speed;
     97	int cur_duplex;
     98	bool use_ncsi;
     99
    100	/* Multicast filter settings */
    101	u32 maht0;
    102	u32 maht1;
    103
    104	/* Flow control settings */
    105	bool tx_pause;
    106	bool rx_pause;
    107	bool aneg_pause;
    108
    109	/* Misc */
    110	bool need_mac_restart;
    111	bool is_aspeed;
    112};
    113
    114static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
    115{
    116	struct net_device *netdev = priv->netdev;
    117	int i;
    118
    119	/* NOTE: reset clears all registers */
    120	iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
    121	iowrite32(maccr | FTGMAC100_MACCR_SW_RST,
    122		  priv->base + FTGMAC100_OFFSET_MACCR);
    123	for (i = 0; i < 200; i++) {
    124		unsigned int maccr;
    125
    126		maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
    127		if (!(maccr & FTGMAC100_MACCR_SW_RST))
    128			return 0;
    129
    130		udelay(1);
    131	}
    132
    133	netdev_err(netdev, "Hardware reset failed\n");
    134	return -EIO;
    135}
    136
    137static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv)
    138{
    139	u32 maccr = 0;
    140
    141	switch (priv->cur_speed) {
    142	case SPEED_10:
    143	case 0: /* no link */
    144		break;
    145
    146	case SPEED_100:
    147		maccr |= FTGMAC100_MACCR_FAST_MODE;
    148		break;
    149
    150	case SPEED_1000:
    151		maccr |= FTGMAC100_MACCR_GIGA_MODE;
    152		break;
    153	default:
    154		netdev_err(priv->netdev, "Unknown speed %d !\n",
    155			   priv->cur_speed);
    156		break;
    157	}
    158
    159	/* (Re)initialize the queue pointers */
    160	priv->rx_pointer = 0;
    161	priv->tx_clean_pointer = 0;
    162	priv->tx_pointer = 0;
    163
    164	/* The doc says reset twice with 10us interval */
    165	if (ftgmac100_reset_mac(priv, maccr))
    166		return -EIO;
    167	usleep_range(10, 1000);
    168	return ftgmac100_reset_mac(priv, maccr);
    169}
    170
    171static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac)
    172{
    173	unsigned int maddr = mac[0] << 8 | mac[1];
    174	unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
    175
    176	iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR);
    177	iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
    178}
    179
    180static void ftgmac100_initial_mac(struct ftgmac100 *priv)
    181{
    182	u8 mac[ETH_ALEN];
    183	unsigned int m;
    184	unsigned int l;
    185
    186	if (!device_get_ethdev_address(priv->dev, priv->netdev)) {
    187		dev_info(priv->dev, "Read MAC address %pM from device tree\n",
    188			 priv->netdev->dev_addr);
    189		return;
    190	}
    191
    192	m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR);
    193	l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR);
    194
    195	mac[0] = (m >> 8) & 0xff;
    196	mac[1] = m & 0xff;
    197	mac[2] = (l >> 24) & 0xff;
    198	mac[3] = (l >> 16) & 0xff;
    199	mac[4] = (l >> 8) & 0xff;
    200	mac[5] = l & 0xff;
    201
    202	if (is_valid_ether_addr(mac)) {
    203		eth_hw_addr_set(priv->netdev, mac);
    204		dev_info(priv->dev, "Read MAC address %pM from chip\n", mac);
    205	} else {
    206		eth_hw_addr_random(priv->netdev);
    207		dev_info(priv->dev, "Generated random MAC address %pM\n",
    208			 priv->netdev->dev_addr);
    209	}
    210}
    211
    212static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
    213{
    214	int ret;
    215
    216	ret = eth_prepare_mac_addr_change(dev, p);
    217	if (ret < 0)
    218		return ret;
    219
    220	eth_commit_mac_addr_change(dev, p);
    221	ftgmac100_write_mac_addr(netdev_priv(dev), dev->dev_addr);
    222
    223	return 0;
    224}
    225
    226static void ftgmac100_config_pause(struct ftgmac100 *priv)
    227{
    228	u32 fcr = FTGMAC100_FCR_PAUSE_TIME(16);
    229
    230	/* Throttle tx queue when receiving pause frames */
    231	if (priv->rx_pause)
    232		fcr |= FTGMAC100_FCR_FC_EN;
    233
    234	/* Enables sending pause frames when the RX queue is past a
    235	 * certain threshold.
    236	 */
    237	if (priv->tx_pause)
    238		fcr |= FTGMAC100_FCR_FCTHR_EN;
    239
    240	iowrite32(fcr, priv->base + FTGMAC100_OFFSET_FCR);
    241}
    242
    243static void ftgmac100_init_hw(struct ftgmac100 *priv)
    244{
    245	u32 reg, rfifo_sz, tfifo_sz;
    246
    247	/* Clear stale interrupts */
    248	reg = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
    249	iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR);
    250
    251	/* Setup RX ring buffer base */
    252	iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR);
    253
    254	/* Setup TX ring buffer base */
    255	iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
    256
    257	/* Configure RX buffer size */
    258	iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE),
    259		  priv->base + FTGMAC100_OFFSET_RBSR);
    260
    261	/* Set RX descriptor autopoll */
    262	iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1),
    263		  priv->base + FTGMAC100_OFFSET_APTC);
    264
    265	/* Write MAC address */
    266	ftgmac100_write_mac_addr(priv, priv->netdev->dev_addr);
    267
    268	/* Write multicast filter */
    269	iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
    270	iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);
    271
    272	/* Configure descriptor sizes and increase burst sizes according
    273	 * to values in Aspeed SDK. The FIFO arbitration is enabled and
    274	 * the thresholds set based on the recommended values in the
    275	 * AST2400 specification.
    276	 */
    277	iowrite32(FTGMAC100_DBLAC_RXDES_SIZE(2) |   /* 2*8 bytes RX descs */
    278		  FTGMAC100_DBLAC_TXDES_SIZE(2) |   /* 2*8 bytes TX descs */
    279		  FTGMAC100_DBLAC_RXBURST_SIZE(3) | /* 512 bytes max RX bursts */
    280		  FTGMAC100_DBLAC_TXBURST_SIZE(3) | /* 512 bytes max TX bursts */
    281		  FTGMAC100_DBLAC_RX_THR_EN |       /* Enable fifo threshold arb */
    282		  FTGMAC100_DBLAC_RXFIFO_HTHR(6) |  /* 6/8 of FIFO high threshold */
    283		  FTGMAC100_DBLAC_RXFIFO_LTHR(2),   /* 2/8 of FIFO low threshold */
    284		  priv->base + FTGMAC100_OFFSET_DBLAC);
    285
    286	/* Interrupt mitigation configured for 1 interrupt/packet. HW interrupt
    287	 * mitigation doesn't seem to provide any benefit with NAPI so leave
    288	 * it at that.
    289	 */
    290	iowrite32(FTGMAC100_ITC_RXINT_THR(1) |
    291		  FTGMAC100_ITC_TXINT_THR(1),
    292		  priv->base + FTGMAC100_OFFSET_ITC);
    293
    294	/* Configure FIFO sizes in the TPAFCR register */
    295	reg = ioread32(priv->base + FTGMAC100_OFFSET_FEAR);
    296	rfifo_sz = reg & 0x00000007;
    297	tfifo_sz = (reg >> 3) & 0x00000007;
    298	reg = ioread32(priv->base + FTGMAC100_OFFSET_TPAFCR);
    299	reg &= ~0x3f000000;
    300	reg |= (tfifo_sz << 27);
    301	reg |= (rfifo_sz << 24);
    302	iowrite32(reg, priv->base + FTGMAC100_OFFSET_TPAFCR);
    303}
    304
    305static void ftgmac100_start_hw(struct ftgmac100 *priv)
    306{
    307	u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
    308
    309	/* Keep the original GMAC and FAST bits */
    310	maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE);
    311
    312	/* Add all the main enable bits */
    313	maccr |= FTGMAC100_MACCR_TXDMA_EN	|
    314		 FTGMAC100_MACCR_RXDMA_EN	|
    315		 FTGMAC100_MACCR_TXMAC_EN	|
    316		 FTGMAC100_MACCR_RXMAC_EN	|
    317		 FTGMAC100_MACCR_CRC_APD	|
    318		 FTGMAC100_MACCR_PHY_LINK_LEVEL	|
    319		 FTGMAC100_MACCR_RX_RUNT	|
    320		 FTGMAC100_MACCR_RX_BROADPKT;
    321
    322	/* Add other bits as needed */
    323	if (priv->cur_duplex == DUPLEX_FULL)
    324		maccr |= FTGMAC100_MACCR_FULLDUP;
    325	if (priv->netdev->flags & IFF_PROMISC)
    326		maccr |= FTGMAC100_MACCR_RX_ALL;
    327	if (priv->netdev->flags & IFF_ALLMULTI)
    328		maccr |= FTGMAC100_MACCR_RX_MULTIPKT;
    329	else if (netdev_mc_count(priv->netdev))
    330		maccr |= FTGMAC100_MACCR_HT_MULTI_EN;
    331
    332	/* Vlan filtering enabled */
    333	if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
    334		maccr |= FTGMAC100_MACCR_RM_VLAN;
    335
    336	/* Hit the HW */
    337	iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
    338}
    339
    340static void ftgmac100_stop_hw(struct ftgmac100 *priv)
    341{
    342	iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
    343}
    344
    345static void ftgmac100_calc_mc_hash(struct ftgmac100 *priv)
    346{
    347	struct netdev_hw_addr *ha;
    348
    349	priv->maht1 = 0;
    350	priv->maht0 = 0;
    351	netdev_for_each_mc_addr(ha, priv->netdev) {
    352		u32 crc_val = ether_crc_le(ETH_ALEN, ha->addr);
    353
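        		/* Reduce the CRC to a 6-bit hash index: values 32-63 set a bit in MAHT1, 0-31 in MAHT0 */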
    354		crc_val = (~(crc_val >> 2)) & 0x3f;
    355		if (crc_val >= 32)
    356			priv->maht1 |= 1ul << (crc_val - 32);
    357		else
    358			priv->maht0 |= 1ul << (crc_val);
    359	}
    360}
    361
    362static void ftgmac100_set_rx_mode(struct net_device *netdev)
    363{
    364	struct ftgmac100 *priv = netdev_priv(netdev);
    365
    366	/* Setup the hash filter */
    367	ftgmac100_calc_mc_hash(priv);
    368
    369	/* Interface down ? that's all there is to do */
    370	if (!netif_running(netdev))
    371		return;
    372
    373	/* Update the HW */
    374	iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
    375	iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);
    376
    377	/* Reconfigure MACCR */
    378	ftgmac100_start_hw(priv);
    379}
    380
    381static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
    382				  struct ftgmac100_rxdes *rxdes, gfp_t gfp)
    383{
    384	struct net_device *netdev = priv->netdev;
    385	struct sk_buff *skb;
    386	dma_addr_t map;
    387	int err = 0;
    388
    389	skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
    390	if (unlikely(!skb)) {
    391		if (net_ratelimit())
    392			netdev_warn(netdev, "failed to allocate rx skb\n");
    393		err = -ENOMEM;
    394		map = priv->rx_scratch_dma;
    395	} else {
    396		map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE,
    397				     DMA_FROM_DEVICE);
    398		if (unlikely(dma_mapping_error(priv->dev, map))) {
    399			if (net_ratelimit())
    400				netdev_err(netdev, "failed to map rx page\n");
    401			dev_kfree_skb_any(skb);
    402			map = priv->rx_scratch_dma;
    403			skb = NULL;
    404			err = -ENOMEM;
    405		}
    406	}
    407
    408	/* Store skb */
    409	priv->rx_skbs[entry] = skb;
    410
    411	/* Store DMA address into RX desc */
    412	rxdes->rxdes3 = cpu_to_le32(map);
    413
    414	/* Ensure the above is ordered vs clearing the OWN bit */
    415	dma_wmb();
    416
    417	/* Clean status (which resets own bit) */
    418	if (entry == (priv->rx_q_entries - 1))
    419		rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask);
    420	else
    421		rxdes->rxdes0 = 0;
    422
    423	return err;
    424}
    425
    426static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv,
    427					      unsigned int pointer)
    428{
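        	/* Ring sizes are powers of two (enforced in ftgmac100_set_ringparam), so masking wraps the index */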
    429	return (pointer + 1) & (priv->rx_q_entries - 1);
    430}
    431
    432static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status)
    433{
    434	struct net_device *netdev = priv->netdev;
    435
    436	if (status & FTGMAC100_RXDES0_RX_ERR)
    437		netdev->stats.rx_errors++;
    438
    439	if (status & FTGMAC100_RXDES0_CRC_ERR)
    440		netdev->stats.rx_crc_errors++;
    441
    442	if (status & (FTGMAC100_RXDES0_FTL |
    443		      FTGMAC100_RXDES0_RUNT |
    444		      FTGMAC100_RXDES0_RX_ODD_NB))
    445		netdev->stats.rx_length_errors++;
    446}
    447
    448static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
    449{
    450	struct net_device *netdev = priv->netdev;
    451	struct ftgmac100_rxdes *rxdes;
    452	struct sk_buff *skb;
    453	unsigned int pointer, size;
    454	u32 status, csum_vlan;
    455	dma_addr_t map;
    456
    457	/* Grab next RX descriptor */
    458	pointer = priv->rx_pointer;
    459	rxdes = &priv->rxdes[pointer];
    460
    461	/* Grab descriptor status */
    462	status = le32_to_cpu(rxdes->rxdes0);
    463
    464	/* Do we have a packet ? */
    465	if (!(status & FTGMAC100_RXDES0_RXPKT_RDY))
    466		return false;
    467
    468	/* Order subsequent reads with the test for the ready bit */
    469	dma_rmb();
    470
    471	/* We don't cope with fragmented RX packets */
    472	if (unlikely(!(status & FTGMAC100_RXDES0_FRS) ||
    473		     !(status & FTGMAC100_RXDES0_LRS)))
    474		goto drop;
    475
    476	/* Grab received size and csum vlan field in the descriptor */
    477	size = status & FTGMAC100_RXDES0_VDBC;
    478	csum_vlan = le32_to_cpu(rxdes->rxdes1);
    479
    480	/* Any error (other than csum offload) flagged ? */
    481	if (unlikely(status & RXDES0_ANY_ERROR)) {
    482		/* Correct for incorrect flagging of runt packets
    483		 * with vlan tags... Just accept a runt packet that
    484		 * has been flagged as vlan and whose size is at
    485		 * least 60 bytes.
    486		 */
    487		if ((status & FTGMAC100_RXDES0_RUNT) &&
    488		    (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) &&
    489		    (size >= 60))
    490			status &= ~FTGMAC100_RXDES0_RUNT;
    491
    492		/* Any error still in there ? */
    493		if (status & RXDES0_ANY_ERROR) {
    494			ftgmac100_rx_packet_error(priv, status);
    495			goto drop;
    496		}
    497	}
    498
    499	/* If the packet had no skb (failed to allocate earlier)
    500	 * then try to allocate one and skip
    501	 */
    502	skb = priv->rx_skbs[pointer];
     503	if (unlikely(!skb)) {
    504		ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
    505		goto drop;
    506	}
    507
    508	if (unlikely(status & FTGMAC100_RXDES0_MULTICAST))
    509		netdev->stats.multicast++;
    510
    511	/* If the HW found checksum errors, bounce it to software.
    512	 *
    513	 * If we didn't, we need to see if the packet was recognized
    514	 * by HW as one of the supported checksummed protocols before
    515	 * we accept the HW test results.
    516	 */
    517	if (netdev->features & NETIF_F_RXCSUM) {
    518		u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR |
    519			FTGMAC100_RXDES1_UDP_CHKSUM_ERR |
    520			FTGMAC100_RXDES1_IP_CHKSUM_ERR;
    521		if ((csum_vlan & err_bits) ||
    522		    !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK))
    523			skb->ip_summed = CHECKSUM_NONE;
    524		else
    525			skb->ip_summed = CHECKSUM_UNNECESSARY;
    526	}
    527
    528	/* Transfer received size to skb */
    529	skb_put(skb, size);
    530
    531	/* Extract vlan tag */
    532	if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
    533	    (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL))
    534		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
    535				       csum_vlan & 0xffff);
    536
    537	/* Tear down DMA mapping, do necessary cache management */
    538	map = le32_to_cpu(rxdes->rxdes3);
    539
    540#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU)
    541	/* When we don't have an iommu, we can save cycles by not
    542	 * invalidating the cache for the part of the packet that
    543	 * wasn't received.
    544	 */
    545	dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE);
    546#else
    547	dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
    548#endif
    549
    550
     551	/* Replenish rx ring */
    552	ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
    553	priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
    554
    555	skb->protocol = eth_type_trans(skb, netdev);
    556
    557	netdev->stats.rx_packets++;
    558	netdev->stats.rx_bytes += size;
    559
    560	/* push packet to protocol stack */
    561	if (skb->ip_summed == CHECKSUM_NONE)
    562		netif_receive_skb(skb);
    563	else
    564		napi_gro_receive(&priv->napi, skb);
    565
    566	(*processed)++;
    567	return true;
    568
    569 drop:
    570	/* Clean rxdes0 (which resets own bit) */
    571	rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
    572	priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
    573	netdev->stats.rx_dropped++;
    574	return true;
    575}
    576
    577static u32 ftgmac100_base_tx_ctlstat(struct ftgmac100 *priv,
    578				     unsigned int index)
    579{
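        	/* Only the last descriptor in the ring carries the end-of-ring (EDOTR) bit */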
    580	if (index == (priv->tx_q_entries - 1))
    581		return priv->txdes0_edotr_mask;
    582	else
    583		return 0;
    584}
    585
    586static unsigned int ftgmac100_next_tx_pointer(struct ftgmac100 *priv,
    587					      unsigned int pointer)
    588{
    589	return (pointer + 1) & (priv->tx_q_entries - 1);
    590}
    591
    592static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv)
    593{
    594	/* Returns the number of available slots in the TX queue
    595	 *
    596	 * This always leaves one free slot so we don't have to
    597	 * worry about empty vs. full, and this simplifies the
    598	 * test for ftgmac100_tx_buf_cleanable() below
    599	 */
    600	return (priv->tx_clean_pointer - priv->tx_pointer - 1) &
    601		(priv->tx_q_entries - 1);
    602}
    603
    604static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv)
    605{
    606	return priv->tx_pointer != priv->tx_clean_pointer;
    607}
    608
    609static void ftgmac100_free_tx_packet(struct ftgmac100 *priv,
    610				     unsigned int pointer,
    611				     struct sk_buff *skb,
    612				     struct ftgmac100_txdes *txdes,
    613				     u32 ctl_stat)
    614{
    615	dma_addr_t map = le32_to_cpu(txdes->txdes3);
    616	size_t len;
    617
    618	if (ctl_stat & FTGMAC100_TXDES0_FTS) {
    619		len = skb_headlen(skb);
    620		dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE);
    621	} else {
    622		len = FTGMAC100_TXDES0_TXBUF_SIZE(ctl_stat);
    623		dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE);
    624	}
    625
    626	/* Free SKB on last segment */
    627	if (ctl_stat & FTGMAC100_TXDES0_LTS)
    628		dev_kfree_skb(skb);
    629	priv->tx_skbs[pointer] = NULL;
    630}
    631
    632static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
    633{
    634	struct net_device *netdev = priv->netdev;
    635	struct ftgmac100_txdes *txdes;
    636	struct sk_buff *skb;
    637	unsigned int pointer;
    638	u32 ctl_stat;
    639
    640	pointer = priv->tx_clean_pointer;
    641	txdes = &priv->txdes[pointer];
    642
    643	ctl_stat = le32_to_cpu(txdes->txdes0);
    644	if (ctl_stat & FTGMAC100_TXDES0_TXDMA_OWN)
    645		return false;
    646
    647	skb = priv->tx_skbs[pointer];
    648	netdev->stats.tx_packets++;
    649	netdev->stats.tx_bytes += skb->len;
    650	ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
    651	txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
    652
    653	priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);
    654
    655	return true;
    656}
    657
    658static void ftgmac100_tx_complete(struct ftgmac100 *priv)
    659{
    660	struct net_device *netdev = priv->netdev;
    661
    662	/* Process all completed packets */
    663	while (ftgmac100_tx_buf_cleanable(priv) &&
    664	       ftgmac100_tx_complete_packet(priv))
    665		;
    666
    667	/* Restart queue if needed */
    668	smp_mb();
    669	if (unlikely(netif_queue_stopped(netdev) &&
    670		     ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) {
    671		struct netdev_queue *txq;
    672
    673		txq = netdev_get_tx_queue(netdev, 0);
    674		__netif_tx_lock(txq, smp_processor_id());
    675		if (netif_queue_stopped(netdev) &&
    676		    ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
    677			netif_wake_queue(netdev);
    678		__netif_tx_unlock(txq);
    679	}
    680}
    681
    682static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan)
    683{
    684	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
    685		u8 ip_proto = ip_hdr(skb)->protocol;
    686
    687		*csum_vlan |= FTGMAC100_TXDES1_IP_CHKSUM;
    688		switch(ip_proto) {
    689		case IPPROTO_TCP:
    690			*csum_vlan |= FTGMAC100_TXDES1_TCP_CHKSUM;
    691			return true;
    692		case IPPROTO_UDP:
    693			*csum_vlan |= FTGMAC100_TXDES1_UDP_CHKSUM;
    694			return true;
    695		case IPPROTO_IP:
    696			return true;
    697		}
    698	}
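        	/* Not an offloadable protocol: fall back to software checksumming */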
    699	return skb_checksum_help(skb) == 0;
    700}
    701
    702static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
    703					     struct net_device *netdev)
    704{
    705	struct ftgmac100 *priv = netdev_priv(netdev);
    706	struct ftgmac100_txdes *txdes, *first;
    707	unsigned int pointer, nfrags, len, i, j;
    708	u32 f_ctl_stat, ctl_stat, csum_vlan;
    709	dma_addr_t map;
    710
    711	/* The HW doesn't pad small frames */
    712	if (eth_skb_pad(skb)) {
    713		netdev->stats.tx_dropped++;
    714		return NETDEV_TX_OK;
    715	}
    716
    717	/* Reject oversize packets */
    718	if (unlikely(skb->len > MAX_PKT_SIZE)) {
    719		if (net_ratelimit())
    720			netdev_dbg(netdev, "tx packet too big\n");
    721		goto drop;
    722	}
    723
     724	/* Do we have a limit on #fragments ? I have yet to get a reply
     725	 * from Aspeed. If there is one, I haven't hit it.
    726	 */
    727	nfrags = skb_shinfo(skb)->nr_frags;
    728
    729	/* Setup HW checksumming */
    730	csum_vlan = 0;
    731	if (skb->ip_summed == CHECKSUM_PARTIAL &&
    732	    !ftgmac100_prep_tx_csum(skb, &csum_vlan))
    733		goto drop;
    734
    735	/* Add VLAN tag */
    736	if (skb_vlan_tag_present(skb)) {
    737		csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
    738		csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
    739	}
    740
    741	/* Get header len */
    742	len = skb_headlen(skb);
    743
    744	/* Map the packet head */
    745	map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
    746	if (dma_mapping_error(priv->dev, map)) {
    747		if (net_ratelimit())
    748			netdev_err(netdev, "map tx packet head failed\n");
    749		goto drop;
    750	}
    751
    752	/* Grab the next free tx descriptor */
    753	pointer = priv->tx_pointer;
    754	txdes = first = &priv->txdes[pointer];
    755
     756	/* Set it up with the packet head. Don't write the head to the
    757	 * ring just yet
    758	 */
    759	priv->tx_skbs[pointer] = skb;
    760	f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
    761	f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
    762	f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
    763	f_ctl_stat |= FTGMAC100_TXDES0_FTS;
    764	if (nfrags == 0)
    765		f_ctl_stat |= FTGMAC100_TXDES0_LTS;
    766	txdes->txdes3 = cpu_to_le32(map);
    767	txdes->txdes1 = cpu_to_le32(csum_vlan);
    768
    769	/* Next descriptor */
    770	pointer = ftgmac100_next_tx_pointer(priv, pointer);
    771
    772	/* Add the fragments */
    773	for (i = 0; i < nfrags; i++) {
    774		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
    775
    776		len = skb_frag_size(frag);
    777
    778		/* Map it */
    779		map = skb_frag_dma_map(priv->dev, frag, 0, len,
    780				       DMA_TO_DEVICE);
    781		if (dma_mapping_error(priv->dev, map))
    782			goto dma_err;
    783
    784		/* Setup descriptor */
    785		priv->tx_skbs[pointer] = skb;
    786		txdes = &priv->txdes[pointer];
    787		ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
    788		ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
    789		ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
    790		if (i == (nfrags - 1))
    791			ctl_stat |= FTGMAC100_TXDES0_LTS;
    792		txdes->txdes0 = cpu_to_le32(ctl_stat);
    793		txdes->txdes1 = 0;
    794		txdes->txdes3 = cpu_to_le32(map);
    795
    796		/* Next one */
    797		pointer = ftgmac100_next_tx_pointer(priv, pointer);
    798	}
    799
     800	/* Order the previous packet and descriptor updates
    801	 * before setting the OWN bit on the first descriptor.
    802	 */
    803	dma_wmb();
    804	first->txdes0 = cpu_to_le32(f_ctl_stat);
    805
    806	/* Update next TX pointer */
    807	priv->tx_pointer = pointer;
    808
    809	/* If there isn't enough room for all the fragments of a new packet
    810	 * in the TX ring, stop the queue. The sequence below is race free
    811	 * vs. a concurrent restart in ftgmac100_poll()
    812	 */
    813	if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) {
    814		netif_stop_queue(netdev);
    815		/* Order the queue stop with the test below */
    816		smp_mb();
    817		if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
    818			netif_wake_queue(netdev);
    819	}
    820
    821	/* Poke transmitter to read the updated TX descriptors */
    822	iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);
    823
    824	return NETDEV_TX_OK;
    825
    826 dma_err:
    827	if (net_ratelimit())
    828		netdev_err(netdev, "map tx fragment failed\n");
    829
    830	/* Free head */
    831	pointer = priv->tx_pointer;
    832	ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat);
    833	first->txdes0 = cpu_to_le32(f_ctl_stat & priv->txdes0_edotr_mask);
    834
    835	/* Then all fragments */
    836	for (j = 0; j < i; j++) {
    837		pointer = ftgmac100_next_tx_pointer(priv, pointer);
    838		txdes = &priv->txdes[pointer];
    839		ctl_stat = le32_to_cpu(txdes->txdes0);
    840		ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
    841		txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
    842	}
    843
    844	/* This cannot be reached if we successfully mapped the
    845	 * last fragment, so we know ftgmac100_free_tx_packet()
    846	 * hasn't freed the skb yet.
    847	 */
    848 drop:
    849	/* Drop the packet */
    850	dev_kfree_skb_any(skb);
    851	netdev->stats.tx_dropped++;
    852
    853	return NETDEV_TX_OK;
    854}
    855
    856static void ftgmac100_free_buffers(struct ftgmac100 *priv)
    857{
    858	int i;
    859
    860	/* Free all RX buffers */
    861	for (i = 0; i < priv->rx_q_entries; i++) {
    862		struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
    863		struct sk_buff *skb = priv->rx_skbs[i];
    864		dma_addr_t map = le32_to_cpu(rxdes->rxdes3);
    865
    866		if (!skb)
    867			continue;
    868
    869		priv->rx_skbs[i] = NULL;
    870		dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
    871		dev_kfree_skb_any(skb);
    872	}
    873
    874	/* Free all TX buffers */
    875	for (i = 0; i < priv->tx_q_entries; i++) {
    876		struct ftgmac100_txdes *txdes = &priv->txdes[i];
    877		struct sk_buff *skb = priv->tx_skbs[i];
    878
    879		if (!skb)
    880			continue;
    881		ftgmac100_free_tx_packet(priv, i, skb, txdes,
    882					 le32_to_cpu(txdes->txdes0));
    883	}
    884}
    885
    886static void ftgmac100_free_rings(struct ftgmac100 *priv)
    887{
    888	/* Free skb arrays */
    889	kfree(priv->rx_skbs);
    890	kfree(priv->tx_skbs);
    891
    892	/* Free descriptors */
    893	if (priv->rxdes)
    894		dma_free_coherent(priv->dev, MAX_RX_QUEUE_ENTRIES *
    895				  sizeof(struct ftgmac100_rxdes),
    896				  priv->rxdes, priv->rxdes_dma);
    897	priv->rxdes = NULL;
    898
    899	if (priv->txdes)
    900		dma_free_coherent(priv->dev, MAX_TX_QUEUE_ENTRIES *
    901				  sizeof(struct ftgmac100_txdes),
    902				  priv->txdes, priv->txdes_dma);
    903	priv->txdes = NULL;
    904
    905	/* Free scratch packet buffer */
    906	if (priv->rx_scratch)
    907		dma_free_coherent(priv->dev, RX_BUF_SIZE,
    908				  priv->rx_scratch, priv->rx_scratch_dma);
    909}
    910
    911static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
    912{
    913	/* Allocate skb arrays */
    914	priv->rx_skbs = kcalloc(MAX_RX_QUEUE_ENTRIES, sizeof(void *),
    915				GFP_KERNEL);
    916	if (!priv->rx_skbs)
    917		return -ENOMEM;
    918	priv->tx_skbs = kcalloc(MAX_TX_QUEUE_ENTRIES, sizeof(void *),
    919				GFP_KERNEL);
    920	if (!priv->tx_skbs)
    921		return -ENOMEM;
    922
    923	/* Allocate descriptors */
    924	priv->rxdes = dma_alloc_coherent(priv->dev,
    925					 MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes),
    926					 &priv->rxdes_dma, GFP_KERNEL);
    927	if (!priv->rxdes)
    928		return -ENOMEM;
    929	priv->txdes = dma_alloc_coherent(priv->dev,
    930					 MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes),
    931					 &priv->txdes_dma, GFP_KERNEL);
    932	if (!priv->txdes)
    933		return -ENOMEM;
    934
    935	/* Allocate scratch packet buffer */
    936	priv->rx_scratch = dma_alloc_coherent(priv->dev,
    937					      RX_BUF_SIZE,
    938					      &priv->rx_scratch_dma,
    939					      GFP_KERNEL);
    940	if (!priv->rx_scratch)
    941		return -ENOMEM;
    942
    943	return 0;
    944}
    945
    946static void ftgmac100_init_rings(struct ftgmac100 *priv)
    947{
    948	struct ftgmac100_rxdes *rxdes = NULL;
    949	struct ftgmac100_txdes *txdes = NULL;
    950	int i;
    951
    952	/* Update entries counts */
    953	priv->rx_q_entries = priv->new_rx_q_entries;
    954	priv->tx_q_entries = priv->new_tx_q_entries;
    955
    956	if (WARN_ON(priv->rx_q_entries < MIN_RX_QUEUE_ENTRIES))
    957		return;
    958
    959	/* Initialize RX ring */
    960	for (i = 0; i < priv->rx_q_entries; i++) {
    961		rxdes = &priv->rxdes[i];
    962		rxdes->rxdes0 = 0;
    963		rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma);
    964	}
    965	/* Mark the end of the ring */
    966	rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
    967
     968	if (WARN_ON(priv->tx_q_entries < MIN_TX_QUEUE_ENTRIES))
    969		return;
    970
    971	/* Initialize TX ring */
    972	for (i = 0; i < priv->tx_q_entries; i++) {
    973		txdes = &priv->txdes[i];
    974		txdes->txdes0 = 0;
    975	}
    976	txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
    977}
    978
    979static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
    980{
    981	int i;
    982
    983	for (i = 0; i < priv->rx_q_entries; i++) {
    984		struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
    985
    986		if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL))
    987			return -ENOMEM;
    988	}
    989	return 0;
    990}
    991
    992static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
    993{
    994	struct net_device *netdev = bus->priv;
    995	struct ftgmac100 *priv = netdev_priv(netdev);
    996	unsigned int phycr;
    997	int i;
    998
    999	phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
   1000
   1001	/* preserve MDC cycle threshold */
   1002	phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
   1003
   1004	phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
   1005		 FTGMAC100_PHYCR_REGAD(regnum) |
   1006		 FTGMAC100_PHYCR_MIIRD;
   1007
   1008	iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
   1009
   1010	for (i = 0; i < 10; i++) {
   1011		phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
   1012
   1013		if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) {
   1014			int data;
   1015
   1016			data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA);
   1017			return FTGMAC100_PHYDATA_MIIRDATA(data);
   1018		}
   1019
   1020		udelay(100);
   1021	}
   1022
   1023	netdev_err(netdev, "mdio read timed out\n");
   1024	return -EIO;
   1025}
   1026
   1027static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
   1028				   int regnum, u16 value)
   1029{
   1030	struct net_device *netdev = bus->priv;
   1031	struct ftgmac100 *priv = netdev_priv(netdev);
   1032	unsigned int phycr;
   1033	int data;
   1034	int i;
   1035
   1036	phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
   1037
   1038	/* preserve MDC cycle threshold */
   1039	phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
   1040
   1041	phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
   1042		 FTGMAC100_PHYCR_REGAD(regnum) |
   1043		 FTGMAC100_PHYCR_MIIWR;
   1044
   1045	data = FTGMAC100_PHYDATA_MIIWDATA(value);
   1046
   1047	iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA);
   1048	iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
   1049
   1050	for (i = 0; i < 10; i++) {
   1051		phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
   1052
   1053		if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0)
   1054			return 0;
   1055
   1056		udelay(100);
   1057	}
   1058
   1059	netdev_err(netdev, "mdio write timed out\n");
   1060	return -EIO;
   1061}
   1062
   1063static void ftgmac100_get_drvinfo(struct net_device *netdev,
   1064				  struct ethtool_drvinfo *info)
   1065{
   1066	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
   1067	strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
   1068}
   1069
   1070static void
   1071ftgmac100_get_ringparam(struct net_device *netdev,
   1072			struct ethtool_ringparam *ering,
   1073			struct kernel_ethtool_ringparam *kernel_ering,
   1074			struct netlink_ext_ack *extack)
   1075{
   1076	struct ftgmac100 *priv = netdev_priv(netdev);
   1077
   1078	memset(ering, 0, sizeof(*ering));
   1079	ering->rx_max_pending = MAX_RX_QUEUE_ENTRIES;
   1080	ering->tx_max_pending = MAX_TX_QUEUE_ENTRIES;
   1081	ering->rx_pending = priv->rx_q_entries;
   1082	ering->tx_pending = priv->tx_q_entries;
   1083}
   1084
   1085static int
   1086ftgmac100_set_ringparam(struct net_device *netdev,
   1087			struct ethtool_ringparam *ering,
   1088			struct kernel_ethtool_ringparam *kernel_ering,
   1089			struct netlink_ext_ack *extack)
   1090{
   1091	struct ftgmac100 *priv = netdev_priv(netdev);
   1092
   1093	if (ering->rx_pending > MAX_RX_QUEUE_ENTRIES ||
   1094	    ering->tx_pending > MAX_TX_QUEUE_ENTRIES ||
   1095	    ering->rx_pending < MIN_RX_QUEUE_ENTRIES ||
   1096	    ering->tx_pending < MIN_TX_QUEUE_ENTRIES ||
   1097	    !is_power_of_2(ering->rx_pending) ||
   1098	    !is_power_of_2(ering->tx_pending))
   1099		return -EINVAL;
   1100
   1101	priv->new_rx_q_entries = ering->rx_pending;
   1102	priv->new_tx_q_entries = ering->tx_pending;
   1103	if (netif_running(netdev))
   1104		schedule_work(&priv->reset_task);
   1105
   1106	return 0;
   1107}
   1108
   1109static void ftgmac100_get_pauseparam(struct net_device *netdev,
   1110				     struct ethtool_pauseparam *pause)
   1111{
   1112	struct ftgmac100 *priv = netdev_priv(netdev);
   1113
   1114	pause->autoneg = priv->aneg_pause;
   1115	pause->tx_pause = priv->tx_pause;
   1116	pause->rx_pause = priv->rx_pause;
   1117}
   1118
   1119static int ftgmac100_set_pauseparam(struct net_device *netdev,
   1120				    struct ethtool_pauseparam *pause)
   1121{
   1122	struct ftgmac100 *priv = netdev_priv(netdev);
   1123	struct phy_device *phydev = netdev->phydev;
   1124
   1125	priv->aneg_pause = pause->autoneg;
   1126	priv->tx_pause = pause->tx_pause;
   1127	priv->rx_pause = pause->rx_pause;
   1128
   1129	if (phydev)
   1130		phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
   1131
   1132	if (netif_running(netdev)) {
   1133		if (!(phydev && priv->aneg_pause))
   1134			ftgmac100_config_pause(priv);
   1135	}
   1136
   1137	return 0;
   1138}
   1139
   1140static const struct ethtool_ops ftgmac100_ethtool_ops = {
   1141	.get_drvinfo		= ftgmac100_get_drvinfo,
   1142	.get_link		= ethtool_op_get_link,
   1143	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
   1144	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
   1145	.nway_reset		= phy_ethtool_nway_reset,
   1146	.get_ringparam		= ftgmac100_get_ringparam,
   1147	.set_ringparam		= ftgmac100_set_ringparam,
   1148	.get_pauseparam		= ftgmac100_get_pauseparam,
   1149	.set_pauseparam		= ftgmac100_set_pauseparam,
   1150};
   1151
   1152static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
   1153{
   1154	struct net_device *netdev = dev_id;
   1155	struct ftgmac100 *priv = netdev_priv(netdev);
   1156	unsigned int status, new_mask = FTGMAC100_INT_BAD;
   1157
   1158	/* Fetch and clear interrupt bits, process abnormal ones */
   1159	status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
   1160	iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
   1161	if (unlikely(status & FTGMAC100_INT_BAD)) {
   1162
   1163		/* RX buffer unavailable */
   1164		if (status & FTGMAC100_INT_NO_RXBUF)
   1165			netdev->stats.rx_over_errors++;
   1166
   1167		/* received packet lost due to RX FIFO full */
   1168		if (status & FTGMAC100_INT_RPKT_LOST)
   1169			netdev->stats.rx_fifo_errors++;
   1170
   1171		/* sent packet lost due to excessive TX collision */
   1172		if (status & FTGMAC100_INT_XPKT_LOST)
   1173			netdev->stats.tx_fifo_errors++;
   1174
   1175		/* AHB error -> Reset the chip */
   1176		if (status & FTGMAC100_INT_AHB_ERR) {
   1177			if (net_ratelimit())
   1178				netdev_warn(netdev,
   1179					   "AHB bus error ! Resetting chip.\n");
   1180			iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
   1181			schedule_work(&priv->reset_task);
   1182			return IRQ_HANDLED;
   1183		}
   1184
   1185		/* We may need to restart the MAC after such errors, delay
   1186		 * this until after we have freed some Rx buffers though
   1187		 */
   1188		priv->need_mac_restart = true;
   1189
   1190		/* Disable those errors until we restart */
   1191		new_mask &= ~status;
   1192	}
   1193
   1194	/* Only enable "bad" interrupts while NAPI is on */
   1195	iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER);
   1196
   1197	/* Schedule NAPI bh */
   1198	napi_schedule_irqoff(&priv->napi);
   1199
   1200	return IRQ_HANDLED;
   1201}
   1202
   1203static bool ftgmac100_check_rx(struct ftgmac100 *priv)
   1204{
   1205	struct ftgmac100_rxdes *rxdes = &priv->rxdes[priv->rx_pointer];
   1206
   1207	/* Do we have a packet ? */
   1208	return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY));
   1209}
   1210
   1211static int ftgmac100_poll(struct napi_struct *napi, int budget)
   1212{
   1213	struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
   1214	int work_done = 0;
   1215	bool more;
   1216
   1217	/* Handle TX completions */
   1218	if (ftgmac100_tx_buf_cleanable(priv))
   1219		ftgmac100_tx_complete(priv);
   1220
   1221	/* Handle RX packets */
   1222	do {
   1223		more = ftgmac100_rx_packet(priv, &work_done);
   1224	} while (more && work_done < budget);
   1225
   1226
   1227	/* The interrupt is telling us to kick the MAC back to life
   1228	 * after an RX overflow
   1229	 */
   1230	if (unlikely(priv->need_mac_restart)) {
   1231		ftgmac100_start_hw(priv);
   1232		priv->need_mac_restart = false;
   1233
   1234		/* Re-enable "bad" interrupts */
   1235		iowrite32(FTGMAC100_INT_BAD,
   1236			  priv->base + FTGMAC100_OFFSET_IER);
   1237	}
   1238
   1239	/* As long as we are waiting for transmit packets to be
   1240	 * completed we keep NAPI going
   1241	 */
   1242	if (ftgmac100_tx_buf_cleanable(priv))
   1243		work_done = budget;
   1244
   1245	if (work_done < budget) {
   1246		/* We are about to re-enable all interrupts. However
   1247		 * the HW has been latching RX/TX packet interrupts while
   1248		 * they were masked. So we clear them first, then we need
   1249		 * to re-check if there's something to process
   1250		 */
   1251		iowrite32(FTGMAC100_INT_RXTX,
   1252			  priv->base + FTGMAC100_OFFSET_ISR);
   1253
    1254		/* Push the above (and provide a barrier vs. subsequent
   1255		 * reads of the descriptor).
   1256		 */
   1257		ioread32(priv->base + FTGMAC100_OFFSET_ISR);
   1258
   1259		/* Check RX and TX descriptors for more work to do */
   1260		if (ftgmac100_check_rx(priv) ||
   1261		    ftgmac100_tx_buf_cleanable(priv))
   1262			return budget;
   1263
   1264		/* deschedule NAPI */
   1265		napi_complete(napi);
   1266
   1267		/* enable all interrupts */
   1268		iowrite32(FTGMAC100_INT_ALL,
   1269			  priv->base + FTGMAC100_OFFSET_IER);
   1270	}
   1271
   1272	return work_done;
   1273}
   1274
   1275static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
   1276{
   1277	int err = 0;
   1278
   1279	/* Re-init descriptors (adjust queue sizes) */
   1280	ftgmac100_init_rings(priv);
   1281
    1282	/* Realloc rx buffers */
   1283	err = ftgmac100_alloc_rx_buffers(priv);
   1284	if (err && !ignore_alloc_err)
   1285		return err;
   1286
   1287	/* Reinit and restart HW */
   1288	ftgmac100_init_hw(priv);
   1289	ftgmac100_config_pause(priv);
   1290	ftgmac100_start_hw(priv);
   1291
   1292	/* Re-enable the device */
   1293	napi_enable(&priv->napi);
   1294	netif_start_queue(priv->netdev);
   1295
   1296	/* Enable all interrupts */
   1297	iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER);
   1298
   1299	return err;
   1300}
   1301
   1302static void ftgmac100_reset(struct ftgmac100 *priv)
   1303{
   1304	struct net_device *netdev = priv->netdev;
   1305	int err;
   1306
   1307	netdev_dbg(netdev, "Resetting NIC...\n");
   1308
   1309	/* Lock the world */
   1310	rtnl_lock();
   1311	if (netdev->phydev)
   1312		mutex_lock(&netdev->phydev->lock);
   1313	if (priv->mii_bus)
   1314		mutex_lock(&priv->mii_bus->mdio_lock);
   1315
   1316
   1317	/* Check if the interface is still up */
   1318	if (!netif_running(netdev))
   1319		goto bail;
   1320
   1321	/* Stop the network stack */
   1322	netif_trans_update(netdev);
   1323	napi_disable(&priv->napi);
   1324	netif_tx_disable(netdev);
   1325
   1326	/* Stop and reset the MAC */
   1327	ftgmac100_stop_hw(priv);
   1328	err = ftgmac100_reset_and_config_mac(priv);
   1329	if (err) {
   1330		/* Not much we can do ... it might come back... */
   1331		netdev_err(netdev, "attempting to continue...\n");
   1332	}
   1333
   1334	/* Free all rx and tx buffers */
   1335	ftgmac100_free_buffers(priv);
   1336
   1337	/* Setup everything again and restart chip */
   1338	ftgmac100_init_all(priv, true);
   1339
   1340	netdev_dbg(netdev, "Reset done !\n");
   1341 bail:
   1342	if (priv->mii_bus)
   1343		mutex_unlock(&priv->mii_bus->mdio_lock);
   1344	if (netdev->phydev)
   1345		mutex_unlock(&netdev->phydev->lock);
   1346	rtnl_unlock();
   1347}
   1348
   1349static void ftgmac100_reset_task(struct work_struct *work)
   1350{
   1351	struct ftgmac100 *priv = container_of(work, struct ftgmac100,
   1352					      reset_task);
   1353
   1354	ftgmac100_reset(priv);
   1355}
   1356
   1357static void ftgmac100_adjust_link(struct net_device *netdev)
   1358{
   1359	struct ftgmac100 *priv = netdev_priv(netdev);
   1360	struct phy_device *phydev = netdev->phydev;
   1361	bool tx_pause, rx_pause;
   1362	int new_speed;
   1363
   1364	/* We store "no link" as speed 0 */
   1365	if (!phydev->link)
   1366		new_speed = 0;
   1367	else
   1368		new_speed = phydev->speed;
   1369
   1370	/* Grab pause settings from PHY if configured to do so */
   1371	if (priv->aneg_pause) {
   1372		rx_pause = tx_pause = phydev->pause;
   1373		if (phydev->asym_pause)
   1374			tx_pause = !rx_pause;
   1375	} else {
   1376		rx_pause = priv->rx_pause;
   1377		tx_pause = priv->tx_pause;
   1378	}
   1379
   1380	/* Link hasn't changed, do nothing */
   1381	if (phydev->speed == priv->cur_speed &&
   1382	    phydev->duplex == priv->cur_duplex &&
   1383	    rx_pause == priv->rx_pause &&
   1384	    tx_pause == priv->tx_pause)
   1385		return;
   1386
   1387	/* Print status if we have a link or we had one and just lost it,
   1388	 * don't print otherwise.
   1389	 */
   1390	if (new_speed || priv->cur_speed)
   1391		phy_print_status(phydev);
   1392
   1393	priv->cur_speed = new_speed;
   1394	priv->cur_duplex = phydev->duplex;
   1395	priv->rx_pause = rx_pause;
   1396	priv->tx_pause = tx_pause;
   1397
   1398	/* Link is down, do nothing else */
   1399	if (!new_speed)
   1400		return;
   1401
   1402	/* Disable all interrupts */
   1403	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
   1404
    1405	/* Release the phy lock to allow ftgmac100_reset to acquire it, keeping
    1406	 * the lock order consistent to prevent deadlock.
   1407	 */
   1408	if (netdev->phydev)
   1409		mutex_unlock(&netdev->phydev->lock);
   1410
   1411	ftgmac100_reset(priv);
   1412
   1413	if (netdev->phydev)
   1414		mutex_lock(&netdev->phydev->lock);
   1415
   1416}
   1417
   1418static int ftgmac100_mii_probe(struct net_device *netdev)
   1419{
   1420	struct ftgmac100 *priv = netdev_priv(netdev);
   1421	struct platform_device *pdev = to_platform_device(priv->dev);
   1422	struct device_node *np = pdev->dev.of_node;
   1423	struct phy_device *phydev;
   1424	phy_interface_t phy_intf;
   1425	int err;
   1426
   1427	/* Default to RGMII. It's a gigabit part after all */
   1428	err = of_get_phy_mode(np, &phy_intf);
   1429	if (err)
   1430		phy_intf = PHY_INTERFACE_MODE_RGMII;
   1431
   1432	/* Aspeed only supports these. I don't know about other IP
   1433	 * block vendors so I'm going to just let them through for
   1434	 * now. Note that this is only a warning if for some obscure
   1435	 * reason the DT really means to lie about it or it's a newer
   1436	 * part we don't know about.
   1437	 *
   1438	 * On the Aspeed SoC there are additionally straps and SCU
   1439	 * control bits that could tell us what the interface is
   1440	 * (or allow us to configure it while the IP block is held
   1441	 * in reset). For now I chose to keep this driver away from
   1442	 * those SoC specific bits and assume the device-tree is
   1443	 * right and the SCU has been configured properly by pinmux
   1444	 * or the firmware.
   1445	 */
   1446	if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
   1447		netdev_warn(netdev,
   1448			    "Unsupported PHY mode %s !\n",
   1449			    phy_modes(phy_intf));
   1450	}
   1451
   1452	phydev = phy_find_first(priv->mii_bus);
   1453	if (!phydev) {
   1454		netdev_info(netdev, "%s: no PHY found\n", netdev->name);
   1455		return -ENODEV;
   1456	}
   1457
   1458	phydev = phy_connect(netdev, phydev_name(phydev),
   1459			     &ftgmac100_adjust_link, phy_intf);
   1460
   1461	if (IS_ERR(phydev)) {
   1462		netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
   1463		return PTR_ERR(phydev);
   1464	}
   1465
   1466	/* Indicate that we support PAUSE frames (see comment in
   1467	 * Documentation/networking/phy.rst)
   1468	 */
   1469	phy_support_asym_pause(phydev);
   1470
   1471	/* Display what we found */
   1472	phy_attached_info(phydev);
   1473
   1474	return 0;
   1475}
   1476
   1477static int ftgmac100_open(struct net_device *netdev)
   1478{
   1479	struct ftgmac100 *priv = netdev_priv(netdev);
   1480	int err;
   1481
   1482	/* Allocate ring buffers  */
   1483	err = ftgmac100_alloc_rings(priv);
   1484	if (err) {
   1485		netdev_err(netdev, "Failed to allocate descriptors\n");
   1486		return err;
   1487	}
   1488
    1489	/* When using NC-SI we force the speed to 100Mbit/s full duplex.
   1490	 *
    1491	 * Otherwise we leave it set to 0 (no link); the link
   1492	 * message from the PHY layer will handle setting it up to
   1493	 * something else if needed.
   1494	 */
   1495	if (priv->use_ncsi) {
   1496		priv->cur_duplex = DUPLEX_FULL;
   1497		priv->cur_speed = SPEED_100;
   1498	} else {
   1499		priv->cur_duplex = 0;
   1500		priv->cur_speed = 0;
   1501	}
   1502
   1503	/* Reset the hardware */
   1504	err = ftgmac100_reset_and_config_mac(priv);
   1505	if (err)
   1506		goto err_hw;
   1507
   1508	/* Initialize NAPI */
   1509	netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
   1510
   1511	/* Grab our interrupt */
   1512	err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
   1513	if (err) {
   1514		netdev_err(netdev, "failed to request irq %d\n", netdev->irq);
   1515		goto err_irq;
   1516	}
   1517
   1518	/* Start things up */
   1519	err = ftgmac100_init_all(priv, false);
   1520	if (err) {
   1521		netdev_err(netdev, "Failed to allocate packet buffers\n");
   1522		goto err_alloc;
   1523	}
   1524
   1525	if (netdev->phydev) {
   1526		/* If we have a PHY, start polling */
   1527		phy_start(netdev->phydev);
   1528	} else if (priv->use_ncsi) {
   1529		/* If using NC-SI, set our carrier on and start the stack */
   1530		netif_carrier_on(netdev);
   1531
   1532		/* Start the NCSI device */
   1533		err = ncsi_start_dev(priv->ndev);
   1534		if (err)
   1535			goto err_ncsi;
   1536	}
   1537
   1538	return 0;
   1539
   1540 err_ncsi:
   1541	napi_disable(&priv->napi);
   1542	netif_stop_queue(netdev);
   1543 err_alloc:
   1544	ftgmac100_free_buffers(priv);
   1545	free_irq(netdev->irq, netdev);
   1546 err_irq:
   1547	netif_napi_del(&priv->napi);
   1548 err_hw:
   1549	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
   1550	ftgmac100_free_rings(priv);
   1551	return err;
   1552}
   1553
   1554static int ftgmac100_stop(struct net_device *netdev)
   1555{
   1556	struct ftgmac100 *priv = netdev_priv(netdev);
   1557
   1558	/* Note about the reset task: We are called with the rtnl lock
   1559	 * held, so we are synchronized against the core of the reset
   1560	 * task. We must not try to synchronously cancel it otherwise
   1561	 * we can deadlock. But since it will test for netif_running()
   1562	 * which has already been cleared by the net core, we don't
    1563	 * which has already been cleared by the net core, we don't have
   1564	 */
   1565
   1566	/* disable all interrupts */
   1567	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
   1568
   1569	netif_stop_queue(netdev);
   1570	napi_disable(&priv->napi);
   1571	netif_napi_del(&priv->napi);
   1572	if (netdev->phydev)
   1573		phy_stop(netdev->phydev);
   1574	else if (priv->use_ncsi)
   1575		ncsi_stop_dev(priv->ndev);
   1576
   1577	ftgmac100_stop_hw(priv);
   1578	free_irq(netdev->irq, netdev);
   1579	ftgmac100_free_buffers(priv);
   1580	ftgmac100_free_rings(priv);
   1581
   1582	return 0;
   1583}
   1584
   1585static void ftgmac100_tx_timeout(struct net_device *netdev, unsigned int txqueue)
   1586{
   1587	struct ftgmac100 *priv = netdev_priv(netdev);
   1588
   1589	/* Disable all interrupts */
   1590	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
   1591
   1592	/* Do the reset outside of interrupt context */
   1593	schedule_work(&priv->reset_task);
   1594}
   1595
   1596static int ftgmac100_set_features(struct net_device *netdev,
   1597				  netdev_features_t features)
   1598{
   1599	struct ftgmac100 *priv = netdev_priv(netdev);
   1600	netdev_features_t changed = netdev->features ^ features;
   1601
   1602	if (!netif_running(netdev))
   1603		return 0;
   1604
   1605	/* Update the vlan filtering bit */
   1606	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
   1607		u32 maccr;
   1608
   1609		maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
    1610		if (features & NETIF_F_HW_VLAN_CTAG_RX)
   1611			maccr |= FTGMAC100_MACCR_RM_VLAN;
   1612		else
   1613			maccr &= ~FTGMAC100_MACCR_RM_VLAN;
   1614		iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
   1615	}
   1616
   1617	return 0;
   1618}
   1619
   1620#ifdef CONFIG_NET_POLL_CONTROLLER
   1621static void ftgmac100_poll_controller(struct net_device *netdev)
   1622{
   1623	unsigned long flags;
   1624
   1625	local_irq_save(flags);
   1626	ftgmac100_interrupt(netdev->irq, netdev);
   1627	local_irq_restore(flags);
   1628}
   1629#endif
   1630
   1631static const struct net_device_ops ftgmac100_netdev_ops = {
   1632	.ndo_open		= ftgmac100_open,
   1633	.ndo_stop		= ftgmac100_stop,
   1634	.ndo_start_xmit		= ftgmac100_hard_start_xmit,
   1635	.ndo_set_mac_address	= ftgmac100_set_mac_addr,
   1636	.ndo_validate_addr	= eth_validate_addr,
   1637	.ndo_eth_ioctl		= phy_do_ioctl,
   1638	.ndo_tx_timeout		= ftgmac100_tx_timeout,
   1639	.ndo_set_rx_mode	= ftgmac100_set_rx_mode,
   1640	.ndo_set_features	= ftgmac100_set_features,
   1641#ifdef CONFIG_NET_POLL_CONTROLLER
   1642	.ndo_poll_controller	= ftgmac100_poll_controller,
   1643#endif
   1644	.ndo_vlan_rx_add_vid	= ncsi_vlan_rx_add_vid,
   1645	.ndo_vlan_rx_kill_vid	= ncsi_vlan_rx_kill_vid,
   1646};
   1647
   1648static int ftgmac100_setup_mdio(struct net_device *netdev)
   1649{
   1650	struct ftgmac100 *priv = netdev_priv(netdev);
   1651	struct platform_device *pdev = to_platform_device(priv->dev);
   1652	struct device_node *np = pdev->dev.of_node;
   1653	struct device_node *mdio_np;
   1654	int i, err = 0;
   1655	u32 reg;
   1656
   1657	/* initialize mdio bus */
   1658	priv->mii_bus = mdiobus_alloc();
   1659	if (!priv->mii_bus)
   1660		return -EIO;
   1661
   1662	if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
   1663	    of_device_is_compatible(np, "aspeed,ast2500-mac")) {
   1664		/* The AST2600 has a separate MDIO controller */
   1665
   1666		/* For the AST2400 and AST2500 this driver only supports the
   1667		 * old MDIO interface
   1668		 */
   1669		reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
   1670		reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
   1671		iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
   1672	}
   1673
   1674	priv->mii_bus->name = "ftgmac100_mdio";
   1675	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
   1676		 pdev->name, pdev->id);
   1677	priv->mii_bus->parent = priv->dev;
   1678	priv->mii_bus->priv = priv->netdev;
   1679	priv->mii_bus->read = ftgmac100_mdiobus_read;
   1680	priv->mii_bus->write = ftgmac100_mdiobus_write;
   1681
   1682	for (i = 0; i < PHY_MAX_ADDR; i++)
   1683		priv->mii_bus->irq[i] = PHY_POLL;
   1684
   1685	mdio_np = of_get_child_by_name(np, "mdio");
   1686
   1687	err = of_mdiobus_register(priv->mii_bus, mdio_np);
   1688	if (err) {
   1689		dev_err(priv->dev, "Cannot register MDIO bus!\n");
   1690		goto err_register_mdiobus;
   1691	}
   1692
   1693	of_node_put(mdio_np);
   1694
   1695	return 0;
   1696
   1697err_register_mdiobus:
   1698	mdiobus_free(priv->mii_bus);
   1699	return err;
   1700}
   1701
   1702static void ftgmac100_phy_disconnect(struct net_device *netdev)
   1703{
   1704	if (!netdev->phydev)
   1705		return;
   1706
   1707	phy_disconnect(netdev->phydev);
   1708}
   1709
   1710static void ftgmac100_destroy_mdio(struct net_device *netdev)
   1711{
   1712	struct ftgmac100 *priv = netdev_priv(netdev);
   1713
   1714	if (!priv->mii_bus)
   1715		return;
   1716
   1717	mdiobus_unregister(priv->mii_bus);
   1718	mdiobus_free(priv->mii_bus);
   1719}
   1720
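        /* Note: this handler is passed to ncsi_register_dev() in probe below;
         * the NCSI stack invokes it on state changes, and the link state is
         * only reported once channel configuration has completed
         * (ncsi_dev_state_functional). The NCSI device itself is started and
         * stopped from ndo_open/ndo_stop.
         */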
   1721static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
   1722{
   1723	if (unlikely(nd->state != ncsi_dev_state_functional))
   1724		return;
   1725
   1726	netdev_dbg(nd->dev, "NCSI interface %s\n",
   1727		   nd->link_up ? "up" : "down");
   1728}
   1729
   1730static int ftgmac100_setup_clk(struct ftgmac100 *priv)
   1731{
   1732	struct clk *clk;
   1733	int rc;
   1734
   1735	clk = devm_clk_get(priv->dev, NULL /* MACCLK */);
   1736	if (IS_ERR(clk))
   1737		return PTR_ERR(clk);
   1738	priv->clk = clk;
   1739	rc = clk_prepare_enable(priv->clk);
   1740	if (rc)
   1741		return rc;
   1742
    1743	/* Aspeed specifies that a 100MHz clock is required for link
    1744	 * speeds of up to 1000Mbit. As NCSI is limited to 100Mbit,
    1745	 * 25MHz is sufficient there.
    1746	 */
   1747	rc = clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ :
   1748			  FTGMAC_100MHZ);
   1749	if (rc)
   1750		goto cleanup_clk;
   1751
    1752	/* RCLK is the RMII reference clock, typically used for NCSI. It is
    1753	 * optional: it is not needed on the AST2400, when the MAC is
    1754	 * configured for RGMII, or when the controller is not ASPEED-based.
    1755	 */
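        	/* Note: devm_clk_get_optional() returns NULL rather than an error
        	 * when no "RCLK" clock is described, and clk_prepare_enable(NULL)
        	 * is a no-op that returns 0, so the non-RMII case falls through
        	 * here without special handling.
        	 */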
   1756	priv->rclk = devm_clk_get_optional(priv->dev, "RCLK");
   1757	rc = clk_prepare_enable(priv->rclk);
   1758	if (!rc)
   1759		return 0;
   1760
   1761cleanup_clk:
   1762	clk_disable_unprepare(priv->clk);
   1763
   1764	return rc;
   1765}
   1766
   1767static int ftgmac100_probe(struct platform_device *pdev)
   1768{
   1769	struct resource *res;
   1770	int irq;
   1771	struct net_device *netdev;
   1772	struct ftgmac100 *priv;
   1773	struct device_node *np;
   1774	int err = 0;
   1775
   1776	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   1777	if (!res)
   1778		return -ENXIO;
   1779
   1780	irq = platform_get_irq(pdev, 0);
   1781	if (irq < 0)
   1782		return irq;
   1783
   1784	/* setup net_device */
   1785	netdev = alloc_etherdev(sizeof(*priv));
   1786	if (!netdev) {
   1787		err = -ENOMEM;
   1788		goto err_alloc_etherdev;
   1789	}
   1790
   1791	SET_NETDEV_DEV(netdev, &pdev->dev);
   1792
   1793	netdev->ethtool_ops = &ftgmac100_ethtool_ops;
   1794	netdev->netdev_ops = &ftgmac100_netdev_ops;
   1795	netdev->watchdog_timeo = 5 * HZ;
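        	/* Note: watchdog_timeo is the TX watchdog interval; if a TX queue
        	 * stays stopped for longer than this, the core calls
        	 * .ndo_tx_timeout (ftgmac100_tx_timeout), which schedules the
        	 * reset task.
        	 */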
   1796
   1797	platform_set_drvdata(pdev, netdev);
   1798
   1799	/* setup private data */
   1800	priv = netdev_priv(netdev);
   1801	priv->netdev = netdev;
   1802	priv->dev = &pdev->dev;
   1803	INIT_WORK(&priv->reset_task, ftgmac100_reset_task);
   1804
   1805	/* map io memory */
   1806	priv->res = request_mem_region(res->start, resource_size(res),
   1807				       dev_name(&pdev->dev));
   1808	if (!priv->res) {
   1809		dev_err(&pdev->dev, "Could not reserve memory region\n");
   1810		err = -ENOMEM;
   1811		goto err_req_mem;
   1812	}
   1813
   1814	priv->base = ioremap(res->start, resource_size(res));
   1815	if (!priv->base) {
   1816		dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
   1817		err = -EIO;
   1818		goto err_ioremap;
   1819	}
   1820
   1821	netdev->irq = irq;
   1822
   1823	/* Enable pause */
   1824	priv->tx_pause = true;
   1825	priv->rx_pause = true;
   1826	priv->aneg_pause = true;
   1827
   1828	/* MAC address from chip or random one */
   1829	ftgmac100_initial_mac(priv);
   1830
   1831	np = pdev->dev.of_node;
   1832	if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
   1833		   of_device_is_compatible(np, "aspeed,ast2500-mac") ||
   1834		   of_device_is_compatible(np, "aspeed,ast2600-mac"))) {
   1835		priv->rxdes0_edorr_mask = BIT(30);
   1836		priv->txdes0_edotr_mask = BIT(30);
   1837		priv->is_aspeed = true;
   1838	} else {
   1839		priv->rxdes0_edorr_mask = BIT(15);
   1840		priv->txdes0_edotr_mask = BIT(15);
   1841	}
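        	/* Note: EDORR/EDOTR are the "end of descriptor ring" flags in RX/TX
        	 * descriptor word 0 that make the DMA engine wrap to the first
        	 * descriptor; the ASPEED variants moved them from bit 15 (original
        	 * Faraday layout) to bit 30.
        	 */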
   1842
   1843	if (np && of_get_property(np, "use-ncsi", NULL)) {
   1844		if (!IS_ENABLED(CONFIG_NET_NCSI)) {
   1845			dev_err(&pdev->dev, "NCSI stack not enabled\n");
   1846			err = -EINVAL;
   1847			goto err_phy_connect;
   1848		}
   1849
   1850		dev_info(&pdev->dev, "Using NCSI interface\n");
   1851		priv->use_ncsi = true;
   1852		priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
   1853		if (!priv->ndev) {
   1854			err = -EINVAL;
   1855			goto err_phy_connect;
   1856		}
   1857	} else if (np && of_get_property(np, "phy-handle", NULL)) {
   1858		struct phy_device *phy;
   1859
   1860		/* Support "mdio"/"phy" child nodes for ast2400/2500 with
   1861		 * an embedded MDIO controller. Automatically scan the DTS for
   1862		 * available PHYs and register them.
   1863		 */
   1864		if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
   1865		    of_device_is_compatible(np, "aspeed,ast2500-mac")) {
   1866			err = ftgmac100_setup_mdio(netdev);
   1867			if (err)
   1868				goto err_setup_mdio;
   1869		}
   1870
   1871		phy = of_phy_get_and_connect(priv->netdev, np,
   1872					     &ftgmac100_adjust_link);
   1873		if (!phy) {
   1874			dev_err(&pdev->dev, "Failed to connect to phy\n");
   1875			err = -EINVAL;
   1876			goto err_phy_connect;
   1877		}
   1878
   1879		/* Indicate that we support PAUSE frames (see comment in
   1880		 * Documentation/networking/phy.rst)
   1881		 */
   1882		phy_support_asym_pause(phy);
   1883
   1884		/* Display what we found */
   1885		phy_attached_info(phy);
   1886	} else if (np && !of_get_child_by_name(np, "mdio")) {
    1887		/* Support legacy ASPEED devicetree descriptions that describe a
   1888		 * MAC with an embedded MDIO controller but have no "mdio"
   1889		 * child node. Automatically scan the MDIO bus for available
   1890		 * PHYs.
   1891		 */
   1892		priv->use_ncsi = false;
   1893		err = ftgmac100_setup_mdio(netdev);
   1894		if (err)
   1895			goto err_setup_mdio;
   1896
   1897		err = ftgmac100_mii_probe(netdev);
   1898		if (err) {
   1899			dev_err(priv->dev, "MII probe failed!\n");
   1900			goto err_ncsi_dev;
   1901		}
   1902
   1903	}
   1904
   1905	if (priv->is_aspeed) {
   1906		err = ftgmac100_setup_clk(priv);
   1907		if (err)
   1908			goto err_phy_connect;
   1909
    1910		/* Disable the AST2600's problematic HW arbitration */
   1911		if (of_device_is_compatible(np, "aspeed,ast2600-mac"))
   1912			iowrite32(FTGMAC100_TM_DEFAULT,
   1913				  priv->base + FTGMAC100_OFFSET_TM);
   1914	}
   1915
   1916	/* Default ring sizes */
   1917	priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES;
   1918	priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES;
   1919
   1920	/* Base feature set */
   1921	netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
   1922		NETIF_F_GRO | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX |
   1923		NETIF_F_HW_VLAN_CTAG_TX;
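        	/* Note: NETIF_F_HW_CSUM/NETIF_F_RXCSUM advertise TX/RX checksum
        	 * offload, NETIF_F_SG scatter-gather transmit, NETIF_F_GRO software
        	 * receive aggregation, and NETIF_F_HW_VLAN_CTAG_RX/TX hardware VLAN
        	 * tag stripping/insertion; the quirks below mask out whatever a
        	 * given SoC revision cannot do reliably.
        	 */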
   1924
   1925	if (priv->use_ncsi)
   1926		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
   1927
    1928	/* AST2400 doesn't have working HW checksum generation */
   1929	if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
   1930		netdev->hw_features &= ~NETIF_F_HW_CSUM;
   1931
   1932	/* AST2600 tx checksum with NCSI is broken */
   1933	if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
   1934		netdev->hw_features &= ~NETIF_F_HW_CSUM;
   1935
   1936	if (np && of_get_property(np, "no-hw-checksum", NULL))
   1937		netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
   1938	netdev->features |= netdev->hw_features;
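        	/* Note: everything left in hw_features is enabled by default;
        	 * runtime toggling (e.g. "ethtool -K") lands in
        	 * ftgmac100_set_features() above.
        	 */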
   1939
   1940	/* register network device */
   1941	err = register_netdev(netdev);
   1942	if (err) {
   1943		dev_err(&pdev->dev, "Failed to register netdev\n");
   1944		goto err_register_netdev;
   1945	}
   1946
   1947	netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base);
   1948
   1949	return 0;
   1950
   1951err_register_netdev:
   1952	clk_disable_unprepare(priv->rclk);
   1953	clk_disable_unprepare(priv->clk);
   1954err_phy_connect:
   1955	ftgmac100_phy_disconnect(netdev);
   1956err_ncsi_dev:
   1957	if (priv->ndev)
   1958		ncsi_unregister_dev(priv->ndev);
   1959	ftgmac100_destroy_mdio(netdev);
   1960err_setup_mdio:
   1961	iounmap(priv->base);
   1962err_ioremap:
   1963	release_resource(priv->res);
   1964err_req_mem:
   1965	free_netdev(netdev);
   1966err_alloc_etherdev:
   1967	return err;
   1968}
   1969
   1970static int ftgmac100_remove(struct platform_device *pdev)
   1971{
   1972	struct net_device *netdev;
   1973	struct ftgmac100 *priv;
   1974
   1975	netdev = platform_get_drvdata(pdev);
   1976	priv = netdev_priv(netdev);
   1977
   1978	if (priv->ndev)
   1979		ncsi_unregister_dev(priv->ndev);
   1980	unregister_netdev(netdev);
   1981
   1982	clk_disable_unprepare(priv->rclk);
   1983	clk_disable_unprepare(priv->clk);
   1984
    1985	/* There's a small chance the reset task will have been re-queued
    1986	 * during stop; make sure it's gone before we free the structure.
    1987	 */
   1988	cancel_work_sync(&priv->reset_task);
   1989
   1990	ftgmac100_phy_disconnect(netdev);
   1991	ftgmac100_destroy_mdio(netdev);
   1992
   1993	iounmap(priv->base);
   1994	release_resource(priv->res);
   1995
   1996	netif_napi_del(&priv->napi);
   1997	free_netdev(netdev);
   1998	return 0;
   1999}
   2000
   2001static const struct of_device_id ftgmac100_of_match[] = {
   2002	{ .compatible = "faraday,ftgmac100" },
   2003	{ }
   2004};
   2005MODULE_DEVICE_TABLE(of, ftgmac100_of_match);
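        /* Note: ASPEED device trees typically list "faraday,ftgmac100" as a
         * fallback compatible next to the SoC-specific string, so this single
         * entry is enough to bind, while probe() distinguishes SoC generations
         * with of_device_is_compatible().
         */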
   2006
   2007static struct platform_driver ftgmac100_driver = {
   2008	.probe	= ftgmac100_probe,
   2009	.remove	= ftgmac100_remove,
   2010	.driver	= {
   2011		.name		= DRV_NAME,
   2012		.of_match_table	= ftgmac100_of_match,
   2013	},
   2014};
   2015module_platform_driver(ftgmac100_driver);
   2016
   2017MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
   2018MODULE_DESCRIPTION("FTGMAC100 driver");
   2019MODULE_LICENSE("GPL");