cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ll_temac_main.c (45873B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Driver for Xilinx TEMAC Ethernet device
      4 *
      5 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
      6 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
      7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
      8 *
      9 * This is a driver for the Xilinx ll_temac ipcore which is often used
     10 * in the Virtex and Spartan series of chips.
     11 *
     12 * Notes:
     13 * - The ll_temac hardware uses indirect access for many of the TEMAC
      14 *   registers, including the MDIO bus.  However, indirect access to MDIO
      15 *   registers takes considerably more clock cycles than to TEMAC registers.
     16 *   MDIO accesses are long, so threads doing them should probably sleep
     17 *   rather than busywait.  However, since only one indirect access can be
     18 *   in progress at any given time, that means that *all* indirect accesses
     19 *   could end up sleeping (to wait for an MDIO access to complete).
     20 *   Fortunately none of the indirect accesses are on the 'hot' path for tx
     21 *   or rx, so this should be okay.
     22 *
     23 * TODO:
     24 * - Factor out locallink DMA code into separate driver
     25 * - Fix support for hardware checksumming.
     26 * - Testing.  Lots and lots of testing.
     27 *
     28 */
     29
     30#include <linux/delay.h>
     31#include <linux/etherdevice.h>
     32#include <linux/mii.h>
     33#include <linux/module.h>
     34#include <linux/mutex.h>
     35#include <linux/netdevice.h>
     36#include <linux/if_ether.h>
     37#include <linux/of.h>
     38#include <linux/of_device.h>
     39#include <linux/of_irq.h>
     40#include <linux/of_mdio.h>
     41#include <linux/of_net.h>
     42#include <linux/of_platform.h>
     43#include <linux/of_address.h>
     44#include <linux/skbuff.h>
     45#include <linux/spinlock.h>
     46#include <linux/tcp.h>      /* needed for sizeof(tcphdr) */
     47#include <linux/udp.h>      /* needed for sizeof(udphdr) */
     48#include <linux/phy.h>
     49#include <linux/in.h>
     50#include <linux/io.h>
     51#include <linux/ip.h>
     52#include <linux/slab.h>
     53#include <linux/interrupt.h>
     54#include <linux/workqueue.h>
     55#include <linux/dma-mapping.h>
     56#include <linux/processor.h>
     57#include <linux/platform_data/xilinx-ll-temac.h>
     58
     59#include "ll_temac.h"
     60
     61/* Descriptors defines for Tx and Rx DMA */
     62#define TX_BD_NUM_DEFAULT		64
     63#define RX_BD_NUM_DEFAULT		1024
     64#define TX_BD_NUM_MAX			4096
     65#define RX_BD_NUM_MAX			4096
     66
     67/* ---------------------------------------------------------------------
     68 * Low level register access functions
     69 */
     70
     71static u32 _temac_ior_be(struct temac_local *lp, int offset)
     72{
     73	return ioread32be(lp->regs + offset);
     74}
     75
     76static void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
     77{
     78	return iowrite32be(value, lp->regs + offset);
     79}
     80
     81static u32 _temac_ior_le(struct temac_local *lp, int offset)
     82{
     83	return ioread32(lp->regs + offset);
     84}
     85
     86static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
     87{
     88	return iowrite32(value, lp->regs + offset);
     89}
     90
     91static bool hard_acs_rdy(struct temac_local *lp)
     92{
     93	return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
     94}
     95
     96static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
     97{
     98	ktime_t cur = ktime_get();
     99
    100	return hard_acs_rdy(lp) || ktime_after(cur, timeout);
    101}
    102
    103/* Poll for maximum 20 ms.  This is similar to the 2 jiffies @ 100 Hz
    104 * that was used before, and should cover MDIO bus speed down to 3200
    105 * Hz.
    106 */
    107#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)
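/*
 * Illustrative arithmetic (not part of the original driver): a full
 * Clause-22 MDIO transaction is 64 MDC bit times (32-bit preamble plus
 * a 32-bit frame), so at the slowest assumed MDC rate of 3200 Hz one
 * access takes 64 / 3200 Hz = 20 ms, which is where the 20 ms poll
 * bound above comes from.  A hypothetical helper making that explicit:
 */
#if 0
static u32 mdio_worst_case_ms(u32 mdc_hz)
{
	return (64 * MSEC_PER_SEC) / mdc_hz;	/* 64 bit times per access */
}
/* mdio_worst_case_ms(3200) == 20, matching HARD_ACS_RDY_POLL_NS */
#endif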
    108
    109/*
    110 * temac_indirect_busywait - Wait for current indirect register access
    111 * to complete.
    112 */
    113int temac_indirect_busywait(struct temac_local *lp)
    114{
    115	ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);
    116
    117	spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
    118	if (WARN_ON(!hard_acs_rdy(lp)))
    119		return -ETIMEDOUT;
    120	else
    121		return 0;
    122}
    123
    124/*
    125 * temac_indirect_in32 - Indirect register read access.  This function
    126 * must be called without lp->indirect_lock being held.
    127 */
    128u32 temac_indirect_in32(struct temac_local *lp, int reg)
    129{
    130	unsigned long flags;
    131	int val;
    132
    133	spin_lock_irqsave(lp->indirect_lock, flags);
    134	val = temac_indirect_in32_locked(lp, reg);
    135	spin_unlock_irqrestore(lp->indirect_lock, flags);
    136	return val;
    137}
    138
    139/*
    140 * temac_indirect_in32_locked - Indirect register read access.  This
    141 * function must be called with lp->indirect_lock being held.  Use
    142 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
    143 * repeated lock/unlock and to ensure uninterrupted access to indirect
    144 * registers.
    145 */
    146u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
    147{
    148	/* This initial wait should normally not spin, as we always
    149	 * try to wait for indirect access to complete before
    150	 * releasing the indirect_lock.
    151	 */
    152	if (WARN_ON(temac_indirect_busywait(lp)))
    153		return -ETIMEDOUT;
    154	/* Initiate read from indirect register */
    155	temac_iow(lp, XTE_CTL0_OFFSET, reg);
    156	/* Wait for indirect register access to complete.  We really
    157	 * should not see timeouts, and could even end up causing
     158	 * problems for the following indirect access, so let's make a bit
    159	 * of WARN noise.
    160	 */
    161	if (WARN_ON(temac_indirect_busywait(lp)))
    162		return -ETIMEDOUT;
    163	/* Value is ready now */
    164	return temac_ior(lp, XTE_LSW0_OFFSET);
    165}
    166
    167/*
    168 * temac_indirect_out32 - Indirect register write access.  This function
    169 * must be called without lp->indirect_lock being held.
    170 */
    171void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
    172{
    173	unsigned long flags;
    174
    175	spin_lock_irqsave(lp->indirect_lock, flags);
    176	temac_indirect_out32_locked(lp, reg, value);
    177	spin_unlock_irqrestore(lp->indirect_lock, flags);
    178}
    179
    180/*
    181 * temac_indirect_out32_locked - Indirect register write access.  This
    182 * function must be called with lp->indirect_lock being held.  Use
    183 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
    184 * repeated lock/unlock and to ensure uninterrupted access to indirect
    185 * registers.
    186 */
    187void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
    188{
    189	/* As in temac_indirect_in32_locked(), we should normally not
    190	 * spin here.  And if it happens, we actually end up silently
    191	 * ignoring the write request.  Ouch.
    192	 */
    193	if (WARN_ON(temac_indirect_busywait(lp)))
    194		return;
    195	/* Initiate write to indirect register */
    196	temac_iow(lp, XTE_LSW0_OFFSET, value);
    197	temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
    198	/* As in temac_indirect_in32_locked(), we should not see timeouts
    199	 * here.  And if it happens, we continue before the write has
    200	 * completed.  Not good.
    201	 */
    202	WARN_ON(temac_indirect_busywait(lp));
    203}
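/*
 * Illustrative usage sketch (hypothetical function, not part of the
 * original driver): several indirect registers can be accessed under a
 * single spin_lock_irqsave() of lp->indirect_lock by using the *_locked
 * variants above, which is the pattern the driver itself follows in
 * e.g. temac_do_set_mac_address() and temac_set_multicast_list():
 */
#if 0
static void temac_example_batched_access(struct temac_local *lp)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, XTE_AFM_OFFSET);
	temac_indirect_out32_locked(lp, XTE_AFM_OFFSET,
				    val | XTE_AFM_EPPRM_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}
#endif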
    204
    205/*
     206 * temac_dma_in32_* - Memory mapped DMA read.  These functions expect a
     207 * register input that is based on DCR word addresses, which are then
     208 * converted to memory mapped byte addresses.  To be assigned to
     209 * lp->dma_in.
    210 */
    211static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
    212{
    213	return ioread32be(lp->sdma_regs + (reg << 2));
    214}
    215
    216static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
    217{
    218	return ioread32(lp->sdma_regs + (reg << 2));
    219}
    220
    221/*
     222 * temac_dma_out32_* - Memory mapped DMA write.  These functions expect
     223 * a register input that is based on DCR word addresses, which are then
     224 * converted to memory mapped byte addresses.  To be assigned to
     225 * lp->dma_out.
    226 */
    227static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
    228{
    229	iowrite32be(value, lp->sdma_regs + (reg << 2));
    230}
    231
    232static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
    233{
    234	iowrite32(value, lp->sdma_regs + (reg << 2));
    235}
    236
    237/* DMA register access functions can be DCR based or memory mapped.
    238 * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
    239 * memory mapped.
    240 */
    241#ifdef CONFIG_PPC_DCR
    242
    243/*
    244 * temac_dma_dcr_in32 - DCR based DMA read
    245 */
    246static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
    247{
    248	return dcr_read(lp->sdma_dcrs, reg);
    249}
    250
    251/*
    252 * temac_dma_dcr_out32 - DCR based DMA write
    253 */
    254static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
    255{
    256	dcr_write(lp->sdma_dcrs, reg, value);
    257}
    258
    259/*
    260 * temac_dcr_setup - If the DMA is DCR based, then setup the address and
    261 * I/O  functions
    262 */
    263static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
    264				struct device_node *np)
    265{
    266	unsigned int dcrs;
    267
    268	/* setup the dcr address mapping if it's in the device tree */
    269
    270	dcrs = dcr_resource_start(np, 0);
    271	if (dcrs != 0) {
    272		lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
    273		lp->dma_in = temac_dma_dcr_in;
    274		lp->dma_out = temac_dma_dcr_out;
    275		dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
    276		return 0;
    277	}
    278	/* no DCR in the device tree, indicate a failure */
    279	return -1;
    280}
    281
    282#else
    283
    284/*
    285 * temac_dcr_setup - This is a stub for when DCR is not supported,
    286 * such as with MicroBlaze and x86
    287 */
    288static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
    289				struct device_node *np)
    290{
    291	return -1;
    292}
    293
    294#endif
    295
    296/*
    297 * temac_dma_bd_release - Release buffer descriptor rings
    298 */
    299static void temac_dma_bd_release(struct net_device *ndev)
    300{
    301	struct temac_local *lp = netdev_priv(ndev);
    302	int i;
    303
    304	/* Reset Local Link (DMA) */
    305	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
    306
    307	for (i = 0; i < lp->rx_bd_num; i++) {
    308		if (!lp->rx_skb[i])
    309			break;
    310		else {
    311			dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
    312					XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
    313			dev_kfree_skb(lp->rx_skb[i]);
    314		}
    315	}
    316	if (lp->rx_bd_v)
    317		dma_free_coherent(ndev->dev.parent,
    318				  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
    319				  lp->rx_bd_v, lp->rx_bd_p);
    320	if (lp->tx_bd_v)
    321		dma_free_coherent(ndev->dev.parent,
    322				  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
    323				  lp->tx_bd_v, lp->tx_bd_p);
    324}
    325
    326/*
    327 * temac_dma_bd_init - Setup buffer descriptor rings
    328 */
    329static int temac_dma_bd_init(struct net_device *ndev)
    330{
    331	struct temac_local *lp = netdev_priv(ndev);
    332	struct sk_buff *skb;
    333	dma_addr_t skb_dma_addr;
    334	int i;
    335
    336	lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
    337				  sizeof(*lp->rx_skb), GFP_KERNEL);
    338	if (!lp->rx_skb)
    339		goto out;
    340
    341	/* allocate the tx and rx ring buffer descriptors. */
    342	/* returns a virtual address and a physical address. */
    343	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
    344					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
    345					 &lp->tx_bd_p, GFP_KERNEL);
    346	if (!lp->tx_bd_v)
    347		goto out;
    348
    349	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
    350					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
    351					 &lp->rx_bd_p, GFP_KERNEL);
    352	if (!lp->rx_bd_v)
    353		goto out;
    354
    355	for (i = 0; i < lp->tx_bd_num; i++) {
    356		lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
    357			+ sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
    358	}
    359
    360	for (i = 0; i < lp->rx_bd_num; i++) {
    361		lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
    362			+ sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));
    363
    364		skb = __netdev_alloc_skb_ip_align(ndev,
    365						  XTE_MAX_JUMBO_FRAME_SIZE,
    366						  GFP_KERNEL);
    367		if (!skb)
    368			goto out;
    369
    370		lp->rx_skb[i] = skb;
    371		/* returns physical address of skb->data */
    372		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
    373					      XTE_MAX_JUMBO_FRAME_SIZE,
    374					      DMA_FROM_DEVICE);
    375		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
    376			goto out;
    377		lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
    378		lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
    379		lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
    380	}
    381
    382	/* Configure DMA channel (irq setup) */
    383	lp->dma_out(lp, TX_CHNL_CTRL,
    384		    lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
    385		    0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
    386		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
    387		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
    388	lp->dma_out(lp, RX_CHNL_CTRL,
    389		    lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
    390		    CHNL_CTRL_IRQ_IOE |
    391		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
    392		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
    393
    394	/* Init descriptor indexes */
    395	lp->tx_bd_ci = 0;
    396	lp->tx_bd_tail = 0;
    397	lp->rx_bd_ci = 0;
    398	lp->rx_bd_tail = lp->rx_bd_num - 1;
    399
    400	/* Enable RX DMA transfers */
    401	wmb();
    402	lp->dma_out(lp, RX_CURDESC_PTR,  lp->rx_bd_p);
    403	lp->dma_out(lp, RX_TAILDESC_PTR,
    404		       lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail));
    405
    406	/* Prepare for TX DMA transfer */
    407	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
    408
    409	return 0;
    410
    411out:
    412	temac_dma_bd_release(ndev);
    413	return -ENOMEM;
    414}
    415
    416/* ---------------------------------------------------------------------
    417 * net_device_ops
    418 */
    419
    420static void temac_do_set_mac_address(struct net_device *ndev)
    421{
    422	struct temac_local *lp = netdev_priv(ndev);
    423	unsigned long flags;
    424
     425	/* Set up the unicast MAC address filter with the device's MAC address */
    426	spin_lock_irqsave(lp->indirect_lock, flags);
    427	temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
    428				    (ndev->dev_addr[0]) |
    429				    (ndev->dev_addr[1] << 8) |
    430				    (ndev->dev_addr[2] << 16) |
    431				    (ndev->dev_addr[3] << 24));
     432	/* Set MAC bits [47:32] in EUAW1.  There are reserved bits in EUAW1,
     433	 * so don't affect them. */
    434	temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
    435				    (ndev->dev_addr[4] & 0x000000ff) |
    436				    (ndev->dev_addr[5] << 8));
    437	spin_unlock_irqrestore(lp->indirect_lock, flags);
    438}
    439
    440static int temac_init_mac_address(struct net_device *ndev, const void *address)
    441{
    442	eth_hw_addr_set(ndev, address);
    443	if (!is_valid_ether_addr(ndev->dev_addr))
    444		eth_hw_addr_random(ndev);
    445	temac_do_set_mac_address(ndev);
    446	return 0;
    447}
    448
    449static int temac_set_mac_address(struct net_device *ndev, void *p)
    450{
    451	struct sockaddr *addr = p;
    452
    453	if (!is_valid_ether_addr(addr->sa_data))
    454		return -EADDRNOTAVAIL;
    455	eth_hw_addr_set(ndev, addr->sa_data);
    456	temac_do_set_mac_address(ndev);
    457	return 0;
    458}
    459
    460static void temac_set_multicast_list(struct net_device *ndev)
    461{
    462	struct temac_local *lp = netdev_priv(ndev);
    463	u32 multi_addr_msw, multi_addr_lsw;
    464	int i = 0;
    465	unsigned long flags;
    466	bool promisc_mode_disabled = false;
    467
    468	if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
    469	    (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) {
    470		temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
    471		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
    472		return;
    473	}
    474
    475	spin_lock_irqsave(lp->indirect_lock, flags);
    476
    477	if (!netdev_mc_empty(ndev)) {
    478		struct netdev_hw_addr *ha;
    479
    480		netdev_for_each_mc_addr(ha, ndev) {
    481			if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM))
    482				break;
    483			multi_addr_msw = ((ha->addr[3] << 24) |
    484					  (ha->addr[2] << 16) |
    485					  (ha->addr[1] << 8) |
    486					  (ha->addr[0]));
    487			temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
    488						    multi_addr_msw);
    489			multi_addr_lsw = ((ha->addr[5] << 8) |
    490					  (ha->addr[4]) | (i << 16));
    491			temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
    492						    multi_addr_lsw);
    493			i++;
    494		}
    495	}
    496
    497	/* Clear all or remaining/unused address table entries */
    498	while (i < MULTICAST_CAM_TABLE_NUM) {
    499		temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
    500		temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
    501		i++;
    502	}
    503
    504	/* Enable address filter block if currently disabled */
    505	if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
    506	    & XTE_AFM_EPPRM_MASK) {
    507		temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
    508		promisc_mode_disabled = true;
    509	}
    510
    511	spin_unlock_irqrestore(lp->indirect_lock, flags);
    512
    513	if (promisc_mode_disabled)
    514		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
    515}
    516
    517static struct temac_option {
    518	int flg;
    519	u32 opt;
    520	u32 reg;
    521	u32 m_or;
    522	u32 m_and;
    523} temac_options[] = {
    524	/* Turn on jumbo packet support for both Rx and Tx */
    525	{
    526		.opt = XTE_OPTION_JUMBO,
    527		.reg = XTE_TXC_OFFSET,
    528		.m_or = XTE_TXC_TXJMBO_MASK,
    529	},
    530	{
    531		.opt = XTE_OPTION_JUMBO,
    532		.reg = XTE_RXC1_OFFSET,
     533		.m_or = XTE_RXC1_RXJMBO_MASK,
    534	},
    535	/* Turn on VLAN packet support for both Rx and Tx */
    536	{
    537		.opt = XTE_OPTION_VLAN,
    538		.reg = XTE_TXC_OFFSET,
     539		.m_or = XTE_TXC_TXVLAN_MASK,
    540	},
    541	{
    542		.opt = XTE_OPTION_VLAN,
    543		.reg = XTE_RXC1_OFFSET,
     544		.m_or = XTE_RXC1_RXVLAN_MASK,
    545	},
    546	/* Turn on FCS stripping on receive packets */
    547	{
    548		.opt = XTE_OPTION_FCS_STRIP,
    549		.reg = XTE_RXC1_OFFSET,
     550		.m_or = XTE_RXC1_RXFCS_MASK,
    551	},
    552	/* Turn on FCS insertion on transmit packets */
    553	{
    554		.opt = XTE_OPTION_FCS_INSERT,
    555		.reg = XTE_TXC_OFFSET,
     556		.m_or = XTE_TXC_TXFCS_MASK,
    557	},
    558	/* Turn on length/type field checking on receive packets */
    559	{
    560		.opt = XTE_OPTION_LENTYPE_ERR,
    561		.reg = XTE_RXC1_OFFSET,
     562		.m_or = XTE_RXC1_RXLT_MASK,
    563	},
    564	/* Turn on flow control */
    565	{
    566		.opt = XTE_OPTION_FLOW_CONTROL,
    567		.reg = XTE_FCC_OFFSET,
     568		.m_or = XTE_FCC_RXFLO_MASK,
    569	},
    570	/* Turn on flow control */
    571	{
    572		.opt = XTE_OPTION_FLOW_CONTROL,
    573		.reg = XTE_FCC_OFFSET,
     574		.m_or = XTE_FCC_TXFLO_MASK,
    575	},
     576	/* Turn on promiscuous frame filtering (all frames are received) */
    577	{
    578		.opt = XTE_OPTION_PROMISC,
    579		.reg = XTE_AFM_OFFSET,
     580		.m_or = XTE_AFM_EPPRM_MASK,
    581	},
    582	/* Enable transmitter if not already enabled */
    583	{
    584		.opt = XTE_OPTION_TXEN,
    585		.reg = XTE_TXC_OFFSET,
     586		.m_or = XTE_TXC_TXEN_MASK,
    587	},
     588	/* Enable receiver */
    589	{
    590		.opt = XTE_OPTION_RXEN,
    591		.reg = XTE_RXC1_OFFSET,
     592		.m_or = XTE_RXC1_RXEN_MASK,
    593	},
    594	{}
    595};
    596
    597/*
    598 * temac_setoptions
    599 */
    600static u32 temac_setoptions(struct net_device *ndev, u32 options)
    601{
    602	struct temac_local *lp = netdev_priv(ndev);
    603	struct temac_option *tp = &temac_options[0];
    604	int reg;
    605	unsigned long flags;
    606
    607	spin_lock_irqsave(lp->indirect_lock, flags);
    608	while (tp->opt) {
    609		reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
    610		if (options & tp->opt) {
    611			reg |= tp->m_or;
    612			temac_indirect_out32_locked(lp, tp->reg, reg);
    613		}
    614		tp++;
    615	}
    616	spin_unlock_irqrestore(lp->indirect_lock, flags);
    617	lp->options |= options;
    618
    619	return 0;
    620}
    621
    622/* Initialize temac */
    623static void temac_device_reset(struct net_device *ndev)
    624{
    625	struct temac_local *lp = netdev_priv(ndev);
    626	u32 timeout;
    627	u32 val;
    628	unsigned long flags;
    629
    630	/* Perform a software reset */
    631
    632	/* 0x300 host enable bit ? */
    633	/* reset PHY through control register ?:1 */
    634
    635	dev_dbg(&ndev->dev, "%s()\n", __func__);
    636
    637	/* Reset the receiver and wait for it to finish reset */
    638	temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
    639	timeout = 1000;
    640	while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
    641		udelay(1);
    642		if (--timeout == 0) {
    643			dev_err(&ndev->dev,
    644				"temac_device_reset RX reset timeout!!\n");
    645			break;
    646		}
    647	}
    648
    649	/* Reset the transmitter and wait for it to finish reset */
    650	temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
    651	timeout = 1000;
    652	while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
    653		udelay(1);
    654		if (--timeout == 0) {
    655			dev_err(&ndev->dev,
    656				"temac_device_reset TX reset timeout!!\n");
    657			break;
    658		}
    659	}
    660
    661	/* Disable the receiver */
    662	spin_lock_irqsave(lp->indirect_lock, flags);
    663	val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
    664	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
    665				    val & ~XTE_RXC1_RXEN_MASK);
    666	spin_unlock_irqrestore(lp->indirect_lock, flags);
    667
    668	/* Reset Local Link (DMA) */
    669	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
    670	timeout = 1000;
    671	while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
    672		udelay(1);
    673		if (--timeout == 0) {
    674			dev_err(&ndev->dev,
    675				"temac_device_reset DMA reset timeout!!\n");
    676			break;
    677		}
    678	}
    679	lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
    680
    681	if (temac_dma_bd_init(ndev)) {
    682		dev_err(&ndev->dev,
    683				"temac_device_reset descriptor allocation failed\n");
    684	}
    685
    686	spin_lock_irqsave(lp->indirect_lock, flags);
    687	temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
    688	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
    689	temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
    690	temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
    691	spin_unlock_irqrestore(lp->indirect_lock, flags);
    692
    693	/* Sync default options with HW
    694	 * but leave receiver and transmitter disabled.  */
    695	temac_setoptions(ndev,
    696			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
    697
    698	temac_do_set_mac_address(ndev);
    699
    700	/* Set address filter table */
    701	temac_set_multicast_list(ndev);
    702	if (temac_setoptions(ndev, lp->options))
    703		dev_err(&ndev->dev, "Error setting TEMAC options\n");
    704
    705	/* Init Driver variable */
    706	netif_trans_update(ndev); /* prevent tx timeout */
    707}
    708
    709static void temac_adjust_link(struct net_device *ndev)
    710{
    711	struct temac_local *lp = netdev_priv(ndev);
    712	struct phy_device *phy = ndev->phydev;
    713	u32 mii_speed;
    714	int link_state;
    715	unsigned long flags;
    716
    717	/* hash together the state values to decide if something has changed */
    718	link_state = phy->speed | (phy->duplex << 1) | phy->link;
    719
    720	if (lp->last_link != link_state) {
    721		spin_lock_irqsave(lp->indirect_lock, flags);
    722		mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
    723		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;
    724
    725		switch (phy->speed) {
    726		case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
    727		case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
    728		case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
    729		}
    730
    731		/* Write new speed setting out to TEMAC */
    732		temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
    733		spin_unlock_irqrestore(lp->indirect_lock, flags);
    734
    735		lp->last_link = link_state;
    736		phy_print_status(phy);
    737	}
    738}
    739
    740#ifdef CONFIG_64BIT
    741
    742static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
    743{
    744	bd->app3 = (u32)(((u64)p) >> 32);
    745	bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
    746}
    747
    748static void *ptr_from_txbd(struct cdmac_bd *bd)
    749{
    750	return (void *)(((u64)(bd->app3) << 32) | bd->app4);
    751}
    752
    753#else
    754
    755static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
    756{
    757	bd->app4 = (u32)p;
    758}
    759
    760static void *ptr_from_txbd(struct cdmac_bd *bd)
    761{
    762	return (void *)(bd->app4);
    763}
    764
    765#endif
    766
    767static void temac_start_xmit_done(struct net_device *ndev)
    768{
    769	struct temac_local *lp = netdev_priv(ndev);
    770	struct cdmac_bd *cur_p;
    771	unsigned int stat = 0;
    772	struct sk_buff *skb;
    773
    774	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
    775	stat = be32_to_cpu(cur_p->app0);
    776
    777	while (stat & STS_CTRL_APP0_CMPLT) {
    778		/* Make sure that the other fields are read after bd is
    779		 * released by dma
    780		 */
    781		rmb();
    782		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
    783				 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
    784		skb = (struct sk_buff *)ptr_from_txbd(cur_p);
    785		if (skb)
    786			dev_consume_skb_irq(skb);
    787		cur_p->app1 = 0;
    788		cur_p->app2 = 0;
    789		cur_p->app3 = 0;
    790		cur_p->app4 = 0;
    791
    792		ndev->stats.tx_packets++;
    793		ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
    794
    795		/* app0 must be visible last, as it is used to flag
    796		 * availability of the bd
    797		 */
    798		smp_mb();
    799		cur_p->app0 = 0;
    800
    801		lp->tx_bd_ci++;
    802		if (lp->tx_bd_ci >= lp->tx_bd_num)
    803			lp->tx_bd_ci = 0;
    804
    805		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
    806		stat = be32_to_cpu(cur_p->app0);
    807	}
    808
    809	/* Matches barrier in temac_start_xmit */
    810	smp_mb();
    811
    812	netif_wake_queue(ndev);
    813}
    814
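/* Check whether the num_frag + 1 buffer descriptors starting at the
 * current tail are free.  Returns NETDEV_TX_BUSY if any of them is
 * still in use (app0 != 0), 0 otherwise.
 */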
    815static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
    816{
    817	struct cdmac_bd *cur_p;
    818	int tail;
    819
    820	tail = lp->tx_bd_tail;
    821	cur_p = &lp->tx_bd_v[tail];
    822
    823	do {
    824		if (cur_p->app0)
    825			return NETDEV_TX_BUSY;
    826
    827		/* Make sure to read next bd app0 after this one */
    828		rmb();
    829
    830		tail++;
    831		if (tail >= lp->tx_bd_num)
    832			tail = 0;
    833
    834		cur_p = &lp->tx_bd_v[tail];
    835		num_frag--;
    836	} while (num_frag >= 0);
    837
    838	return 0;
    839}
    840
    841static netdev_tx_t
    842temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
    843{
    844	struct temac_local *lp = netdev_priv(ndev);
    845	struct cdmac_bd *cur_p;
    846	dma_addr_t tail_p, skb_dma_addr;
    847	int ii;
    848	unsigned long num_frag;
    849	skb_frag_t *frag;
    850
    851	num_frag = skb_shinfo(skb)->nr_frags;
    852	frag = &skb_shinfo(skb)->frags[0];
    853	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
    854
    855	if (temac_check_tx_bd_space(lp, num_frag + 1)) {
    856		if (netif_queue_stopped(ndev))
    857			return NETDEV_TX_BUSY;
    858
    859		netif_stop_queue(ndev);
    860
    861		/* Matches barrier in temac_start_xmit_done */
    862		smp_mb();
    863
    864		/* Space might have just been freed - check again */
    865		if (temac_check_tx_bd_space(lp, num_frag + 1))
    866			return NETDEV_TX_BUSY;
    867
    868		netif_wake_queue(ndev);
    869	}
    870
    871	cur_p->app0 = 0;
    872	if (skb->ip_summed == CHECKSUM_PARTIAL) {
    873		unsigned int csum_start_off = skb_checksum_start_offset(skb);
    874		unsigned int csum_index_off = csum_start_off + skb->csum_offset;
    875
    876		cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
    877		cur_p->app1 = cpu_to_be32((csum_start_off << 16)
    878					  | csum_index_off);
    879		cur_p->app2 = 0;  /* initial checksum seed */
    880	}
    881
    882	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
    883	skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
    884				      skb_headlen(skb), DMA_TO_DEVICE);
    885	cur_p->len = cpu_to_be32(skb_headlen(skb));
    886	if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) {
    887		dev_kfree_skb_any(skb);
    888		ndev->stats.tx_dropped++;
    889		return NETDEV_TX_OK;
    890	}
    891	cur_p->phys = cpu_to_be32(skb_dma_addr);
    892
    893	for (ii = 0; ii < num_frag; ii++) {
    894		if (++lp->tx_bd_tail >= lp->tx_bd_num)
    895			lp->tx_bd_tail = 0;
    896
    897		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
    898		skb_dma_addr = dma_map_single(ndev->dev.parent,
    899					      skb_frag_address(frag),
    900					      skb_frag_size(frag),
    901					      DMA_TO_DEVICE);
    902		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
    903			if (--lp->tx_bd_tail < 0)
    904				lp->tx_bd_tail = lp->tx_bd_num - 1;
    905			cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
    906			while (--ii >= 0) {
    907				--frag;
    908				dma_unmap_single(ndev->dev.parent,
    909						 be32_to_cpu(cur_p->phys),
    910						 skb_frag_size(frag),
    911						 DMA_TO_DEVICE);
    912				if (--lp->tx_bd_tail < 0)
    913					lp->tx_bd_tail = lp->tx_bd_num - 1;
    914				cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
    915			}
    916			dma_unmap_single(ndev->dev.parent,
    917					 be32_to_cpu(cur_p->phys),
    918					 skb_headlen(skb), DMA_TO_DEVICE);
    919			dev_kfree_skb_any(skb);
    920			ndev->stats.tx_dropped++;
    921			return NETDEV_TX_OK;
    922		}
    923		cur_p->phys = cpu_to_be32(skb_dma_addr);
    924		cur_p->len = cpu_to_be32(skb_frag_size(frag));
    925		cur_p->app0 = 0;
    926		frag++;
    927	}
    928	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
    929
    930	/* Mark last fragment with skb address, so it can be consumed
    931	 * in temac_start_xmit_done()
    932	 */
    933	ptr_to_txbd((void *)skb, cur_p);
    934
    935	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
    936	lp->tx_bd_tail++;
    937	if (lp->tx_bd_tail >= lp->tx_bd_num)
    938		lp->tx_bd_tail = 0;
    939
    940	skb_tx_timestamp(skb);
    941
    942	/* Kick off the transfer */
    943	wmb();
    944	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
    945
    946	if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
    947		netif_stop_queue(ndev);
    948
    949	return NETDEV_TX_OK;
    950}
    951
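/* Return the number of receive buffer descriptors currently available
 * to the DMA engine, i.e. those from rx_bd_ci up to and including
 * rx_bd_tail, or 0 if the current descriptor has no skb attached.
 */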
    952static int ll_temac_recv_buffers_available(struct temac_local *lp)
    953{
    954	int available;
    955
    956	if (!lp->rx_skb[lp->rx_bd_ci])
    957		return 0;
    958	available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
    959	if (available <= 0)
    960		available += lp->rx_bd_num;
    961	return available;
    962}
    963
    964static void ll_temac_recv(struct net_device *ndev)
    965{
    966	struct temac_local *lp = netdev_priv(ndev);
    967	unsigned long flags;
    968	int rx_bd;
    969	bool update_tail = false;
    970
    971	spin_lock_irqsave(&lp->rx_lock, flags);
    972
     973	/* Process all received buffers, passing them on to the network
     974	 * stack.  After this, the buffer descriptors will be in an
     975	 * un-allocated state, with no skb allocated for them, and
     976	 * they are therefore not available for TEMAC/DMA.
    977	 */
    978	do {
    979		struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci];
    980		struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
    981		unsigned int bdstat = be32_to_cpu(bd->app0);
    982		int length;
    983
     984		/* While this should not normally happen, we can end up
     985		 * here when GFP_ATOMIC allocations fail, and we
    986		 * therefore have un-allocated buffers.
    987		 */
    988		if (!skb)
    989			break;
    990
    991		/* Loop over all completed buffer descriptors */
    992		if (!(bdstat & STS_CTRL_APP0_CMPLT))
    993			break;
    994
    995		dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys),
    996				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
    997		/* The buffer is not valid for DMA anymore */
    998		bd->phys = 0;
    999		bd->len = 0;
   1000
   1001		length = be32_to_cpu(bd->app4) & 0x3FFF;
   1002		skb_put(skb, length);
   1003		skb->protocol = eth_type_trans(skb, ndev);
   1004		skb_checksum_none_assert(skb);
   1005
   1006		/* if we're doing rx csum offload, set it up */
   1007		if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
   1008		    (skb->protocol == htons(ETH_P_IP)) &&
   1009		    (skb->len > 64)) {
   1010
   1011			/* Convert from device endianness (be32) to cpu
   1012			 * endianness, and if necessary swap the bytes
   1013			 * (back) for proper IP checksum byte order
   1014			 * (be16).
   1015			 */
   1016			skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
   1017			skb->ip_summed = CHECKSUM_COMPLETE;
   1018		}
   1019
   1020		if (!skb_defer_rx_timestamp(skb))
   1021			netif_rx(skb);
   1022		/* The skb buffer is now owned by network stack above */
   1023		lp->rx_skb[lp->rx_bd_ci] = NULL;
   1024
   1025		ndev->stats.rx_packets++;
   1026		ndev->stats.rx_bytes += length;
   1027
   1028		rx_bd = lp->rx_bd_ci;
   1029		if (++lp->rx_bd_ci >= lp->rx_bd_num)
   1030			lp->rx_bd_ci = 0;
   1031	} while (rx_bd != lp->rx_bd_tail);
   1032
   1033	/* DMA operations will halt when the last buffer descriptor is
    1034	 * processed (i.e. the one pointed to by RX_TAILDESC_PTR).
   1035	 * When that happens, no more interrupt events will be
   1036	 * generated.  No IRQ_COAL or IRQ_DLY, and not even an
   1037	 * IRQ_ERR.  To avoid stalling, we schedule a delayed work
   1038	 * when there is a potential risk of that happening.  The work
   1039	 * will call this function, and thus re-schedule itself until
   1040	 * enough buffers are available again.
   1041	 */
   1042	if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
   1043		schedule_delayed_work(&lp->restart_work, HZ / 1000);
   1044
   1045	/* Allocate new buffers for those buffer descriptors that were
   1046	 * passed to network stack.  Note that GFP_ATOMIC allocations
   1047	 * can fail (e.g. when a larger burst of GFP_ATOMIC
   1048	 * allocations occurs), so while we try to allocate all
   1049	 * buffers in the same interrupt where they were processed, we
   1050	 * continue with what we could get in case of allocation
   1051	 * failure.  Allocation of remaining buffers will be retried
   1052	 * in following calls.
   1053	 */
   1054	while (1) {
   1055		struct sk_buff *skb;
   1056		struct cdmac_bd *bd;
   1057		dma_addr_t skb_dma_addr;
   1058
   1059		rx_bd = lp->rx_bd_tail + 1;
   1060		if (rx_bd >= lp->rx_bd_num)
   1061			rx_bd = 0;
   1062		bd = &lp->rx_bd_v[rx_bd];
   1063
   1064		if (bd->phys)
   1065			break;	/* All skb's allocated */
   1066
   1067		skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
   1068		if (!skb) {
   1069			dev_warn(&ndev->dev, "skb alloc failed\n");
   1070			break;
   1071		}
   1072
   1073		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
   1074					      XTE_MAX_JUMBO_FRAME_SIZE,
   1075					      DMA_FROM_DEVICE);
   1076		if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent,
   1077						   skb_dma_addr))) {
   1078			dev_kfree_skb_any(skb);
   1079			break;
   1080		}
   1081
   1082		bd->phys = cpu_to_be32(skb_dma_addr);
   1083		bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
   1084		bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
   1085		lp->rx_skb[rx_bd] = skb;
   1086
   1087		lp->rx_bd_tail = rx_bd;
   1088		update_tail = true;
   1089	}
   1090
   1091	/* Move tail pointer when buffers have been allocated */
   1092	if (update_tail) {
   1093		lp->dma_out(lp, RX_TAILDESC_PTR,
   1094			lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
   1095	}
   1096
   1097	spin_unlock_irqrestore(&lp->rx_lock, flags);
   1098}
   1099
   1100/* Function scheduled to ensure a restart in case of DMA halt
   1101 * condition caused by running out of buffer descriptors.
   1102 */
   1103static void ll_temac_restart_work_func(struct work_struct *work)
   1104{
   1105	struct temac_local *lp = container_of(work, struct temac_local,
   1106					      restart_work.work);
   1107	struct net_device *ndev = lp->ndev;
   1108
   1109	ll_temac_recv(ndev);
   1110}
   1111
   1112static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
   1113{
   1114	struct net_device *ndev = _ndev;
   1115	struct temac_local *lp = netdev_priv(ndev);
   1116	unsigned int status;
   1117
   1118	status = lp->dma_in(lp, TX_IRQ_REG);
   1119	lp->dma_out(lp, TX_IRQ_REG, status);
   1120
   1121	if (status & (IRQ_COAL | IRQ_DLY))
   1122		temac_start_xmit_done(lp->ndev);
   1123	if (status & (IRQ_ERR | IRQ_DMAERR))
   1124		dev_err_ratelimited(&ndev->dev,
   1125				    "TX error 0x%x TX_CHNL_STS=0x%08x\n",
   1126				    status, lp->dma_in(lp, TX_CHNL_STS));
   1127
   1128	return IRQ_HANDLED;
   1129}
   1130
   1131static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
   1132{
   1133	struct net_device *ndev = _ndev;
   1134	struct temac_local *lp = netdev_priv(ndev);
   1135	unsigned int status;
   1136
   1137	/* Read and clear the status registers */
   1138	status = lp->dma_in(lp, RX_IRQ_REG);
   1139	lp->dma_out(lp, RX_IRQ_REG, status);
   1140
   1141	if (status & (IRQ_COAL | IRQ_DLY))
   1142		ll_temac_recv(lp->ndev);
   1143	if (status & (IRQ_ERR | IRQ_DMAERR))
   1144		dev_err_ratelimited(&ndev->dev,
   1145				    "RX error 0x%x RX_CHNL_STS=0x%08x\n",
   1146				    status, lp->dma_in(lp, RX_CHNL_STS));
   1147
   1148	return IRQ_HANDLED;
   1149}
   1150
   1151static int temac_open(struct net_device *ndev)
   1152{
   1153	struct temac_local *lp = netdev_priv(ndev);
   1154	struct phy_device *phydev = NULL;
   1155	int rc;
   1156
   1157	dev_dbg(&ndev->dev, "temac_open()\n");
   1158
   1159	if (lp->phy_node) {
   1160		phydev = of_phy_connect(lp->ndev, lp->phy_node,
   1161					temac_adjust_link, 0, 0);
   1162		if (!phydev) {
   1163			dev_err(lp->dev, "of_phy_connect() failed\n");
   1164			return -ENODEV;
   1165		}
   1166		phy_start(phydev);
   1167	} else if (strlen(lp->phy_name) > 0) {
   1168		phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
   1169				     lp->phy_interface);
   1170		if (IS_ERR(phydev)) {
   1171			dev_err(lp->dev, "phy_connect() failed\n");
   1172			return PTR_ERR(phydev);
   1173		}
   1174		phy_start(phydev);
   1175	}
   1176
   1177	temac_device_reset(ndev);
   1178
   1179	rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
   1180	if (rc)
   1181		goto err_tx_irq;
   1182	rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
   1183	if (rc)
   1184		goto err_rx_irq;
   1185
   1186	return 0;
   1187
   1188 err_rx_irq:
   1189	free_irq(lp->tx_irq, ndev);
   1190 err_tx_irq:
   1191	if (phydev)
   1192		phy_disconnect(phydev);
   1193	dev_err(lp->dev, "request_irq() failed\n");
   1194	return rc;
   1195}
   1196
   1197static int temac_stop(struct net_device *ndev)
   1198{
   1199	struct temac_local *lp = netdev_priv(ndev);
   1200	struct phy_device *phydev = ndev->phydev;
   1201
   1202	dev_dbg(&ndev->dev, "temac_close()\n");
   1203
   1204	cancel_delayed_work_sync(&lp->restart_work);
   1205
   1206	free_irq(lp->tx_irq, ndev);
   1207	free_irq(lp->rx_irq, ndev);
   1208
   1209	if (phydev)
   1210		phy_disconnect(phydev);
   1211
   1212	temac_dma_bd_release(ndev);
   1213
   1214	return 0;
   1215}
   1216
   1217#ifdef CONFIG_NET_POLL_CONTROLLER
   1218static void
   1219temac_poll_controller(struct net_device *ndev)
   1220{
   1221	struct temac_local *lp = netdev_priv(ndev);
   1222
   1223	disable_irq(lp->tx_irq);
   1224	disable_irq(lp->rx_irq);
   1225
    1226	ll_temac_rx_irq(lp->rx_irq, ndev);
    1227	ll_temac_tx_irq(lp->tx_irq, ndev);
   1228
   1229	enable_irq(lp->tx_irq);
   1230	enable_irq(lp->rx_irq);
   1231}
   1232#endif
   1233
   1234static const struct net_device_ops temac_netdev_ops = {
   1235	.ndo_open = temac_open,
   1236	.ndo_stop = temac_stop,
   1237	.ndo_start_xmit = temac_start_xmit,
   1238	.ndo_set_rx_mode = temac_set_multicast_list,
   1239	.ndo_set_mac_address = temac_set_mac_address,
   1240	.ndo_validate_addr = eth_validate_addr,
   1241	.ndo_eth_ioctl = phy_do_ioctl_running,
   1242#ifdef CONFIG_NET_POLL_CONTROLLER
   1243	.ndo_poll_controller = temac_poll_controller,
   1244#endif
   1245};
   1246
   1247/* ---------------------------------------------------------------------
   1248 * SYSFS device attributes
   1249 */
   1250static ssize_t temac_show_llink_regs(struct device *dev,
   1251				     struct device_attribute *attr, char *buf)
   1252{
   1253	struct net_device *ndev = dev_get_drvdata(dev);
   1254	struct temac_local *lp = netdev_priv(ndev);
   1255	int i, len = 0;
   1256
   1257	for (i = 0; i < 0x11; i++)
   1258		len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
   1259			       (i % 8) == 7 ? "\n" : " ");
   1260	len += sprintf(buf + len, "\n");
   1261
   1262	return len;
   1263}
   1264
   1265static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);
   1266
   1267static struct attribute *temac_device_attrs[] = {
   1268	&dev_attr_llink_regs.attr,
   1269	NULL,
   1270};
   1271
   1272static const struct attribute_group temac_attr_group = {
   1273	.attrs = temac_device_attrs,
   1274};
   1275
   1276/* ---------------------------------------------------------------------
   1277 * ethtool support
   1278 */
   1279
   1280static void
   1281ll_temac_ethtools_get_ringparam(struct net_device *ndev,
   1282				struct ethtool_ringparam *ering,
   1283				struct kernel_ethtool_ringparam *kernel_ering,
   1284				struct netlink_ext_ack *extack)
   1285{
   1286	struct temac_local *lp = netdev_priv(ndev);
   1287
   1288	ering->rx_max_pending = RX_BD_NUM_MAX;
   1289	ering->rx_mini_max_pending = 0;
   1290	ering->rx_jumbo_max_pending = 0;
   1291	ering->tx_max_pending = TX_BD_NUM_MAX;
   1292	ering->rx_pending = lp->rx_bd_num;
   1293	ering->rx_mini_pending = 0;
   1294	ering->rx_jumbo_pending = 0;
   1295	ering->tx_pending = lp->tx_bd_num;
   1296}
   1297
   1298static int
   1299ll_temac_ethtools_set_ringparam(struct net_device *ndev,
   1300				struct ethtool_ringparam *ering,
   1301				struct kernel_ethtool_ringparam *kernel_ering,
   1302				struct netlink_ext_ack *extack)
   1303{
   1304	struct temac_local *lp = netdev_priv(ndev);
   1305
   1306	if (ering->rx_pending > RX_BD_NUM_MAX ||
   1307	    ering->rx_mini_pending ||
   1308	    ering->rx_jumbo_pending ||
    1309	    ering->tx_pending > TX_BD_NUM_MAX)
   1310		return -EINVAL;
   1311
   1312	if (netif_running(ndev))
   1313		return -EBUSY;
   1314
   1315	lp->rx_bd_num = ering->rx_pending;
   1316	lp->tx_bd_num = ering->tx_pending;
   1317	return 0;
   1318}
   1319
   1320static int
   1321ll_temac_ethtools_get_coalesce(struct net_device *ndev,
   1322			       struct ethtool_coalesce *ec,
   1323			       struct kernel_ethtool_coalesce *kernel_coal,
   1324			       struct netlink_ext_ack *extack)
   1325{
   1326	struct temac_local *lp = netdev_priv(ndev);
   1327
   1328	ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
   1329	ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
   1330	ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
   1331	ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
   1332	return 0;
   1333}
   1334
   1335static int
   1336ll_temac_ethtools_set_coalesce(struct net_device *ndev,
   1337			       struct ethtool_coalesce *ec,
   1338			       struct kernel_ethtool_coalesce *kernel_coal,
   1339			       struct netlink_ext_ack *extack)
   1340{
   1341	struct temac_local *lp = netdev_priv(ndev);
   1342
   1343	if (netif_running(ndev)) {
   1344		netdev_err(ndev,
   1345			   "Please stop netif before applying configuration\n");
   1346		return -EFAULT;
   1347	}
   1348
   1349	if (ec->rx_max_coalesced_frames)
   1350		lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
   1351	if (ec->tx_max_coalesced_frames)
   1352		lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
   1353	/* With typical LocalLink clock speed of 200 MHz and
   1354	 * C_PRESCALAR=1023, each delay count corresponds to 5.12 us.
   1355	 */
   1356	if (ec->rx_coalesce_usecs)
   1357		lp->coalesce_delay_rx =
   1358			min(255U, (ec->rx_coalesce_usecs * 100) / 512);
   1359	if (ec->tx_coalesce_usecs)
   1360		lp->coalesce_delay_tx =
   1361			min(255U, (ec->tx_coalesce_usecs * 100) / 512);
   1362
   1363	return 0;
   1364}
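/*
 * Illustrative arithmetic (hypothetical helpers, not part of the
 * original driver): with a 200 MHz LocalLink clock and C_PRESCALAR set
 * to 1023, one delay tick lasts 1024 / 200 MHz = 5.12 us, which is
 * where the "* 512 / 100" and "* 100 / 512" conversions above come
 * from (5.12 us == 512/100 us):
 */
#if 0
static u32 temac_delay_ticks_to_usecs(u32 ticks)
{
	return (ticks * 512) / 100;		/* 1 tick = 5.12 us */
}

static u32 temac_usecs_to_delay_ticks(u32 usecs)
{
	return min(255U, (usecs * 100) / 512);	/* clamp to 8-bit field */
}
#endif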
   1365
   1366static const struct ethtool_ops temac_ethtool_ops = {
   1367	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
   1368				     ETHTOOL_COALESCE_MAX_FRAMES,
   1369	.nway_reset = phy_ethtool_nway_reset,
   1370	.get_link = ethtool_op_get_link,
   1371	.get_ts_info = ethtool_op_get_ts_info,
   1372	.get_link_ksettings = phy_ethtool_get_link_ksettings,
   1373	.set_link_ksettings = phy_ethtool_set_link_ksettings,
   1374	.get_ringparam	= ll_temac_ethtools_get_ringparam,
   1375	.set_ringparam	= ll_temac_ethtools_set_ringparam,
   1376	.get_coalesce	= ll_temac_ethtools_get_coalesce,
   1377	.set_coalesce	= ll_temac_ethtools_set_coalesce,
   1378};
   1379
   1380static int temac_probe(struct platform_device *pdev)
   1381{
   1382	struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
   1383	struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
   1384	struct temac_local *lp;
   1385	struct net_device *ndev;
   1386	u8 addr[ETH_ALEN];
   1387	__be32 *p;
   1388	bool little_endian;
   1389	int rc = 0;
   1390
   1391	/* Init network device structure */
   1392	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
   1393	if (!ndev)
   1394		return -ENOMEM;
   1395
   1396	platform_set_drvdata(pdev, ndev);
   1397	SET_NETDEV_DEV(ndev, &pdev->dev);
   1398	ndev->features = NETIF_F_SG;
   1399	ndev->netdev_ops = &temac_netdev_ops;
   1400	ndev->ethtool_ops = &temac_ethtool_ops;
   1401#if 0
   1402	ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
   1403	ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
   1404	ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
   1405	ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
   1406	ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */
   1407	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */
   1408	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */
   1409	ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
   1410	ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
   1411	ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
   1412	ndev->features |= NETIF_F_LRO; /* large receive offload */
   1413#endif
   1414
   1415	/* setup temac private info structure */
   1416	lp = netdev_priv(ndev);
   1417	lp->ndev = ndev;
   1418	lp->dev = &pdev->dev;
   1419	lp->options = XTE_OPTION_DEFAULTS;
   1420	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
   1421	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
   1422	spin_lock_init(&lp->rx_lock);
   1423	INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);
   1424
   1425	/* Setup mutex for synchronization of indirect register access */
   1426	if (pdata) {
   1427		if (!pdata->indirect_lock) {
   1428			dev_err(&pdev->dev,
   1429				"indirect_lock missing in platform_data\n");
   1430			return -EINVAL;
   1431		}
   1432		lp->indirect_lock = pdata->indirect_lock;
   1433	} else {
   1434		lp->indirect_lock = devm_kmalloc(&pdev->dev,
   1435						 sizeof(*lp->indirect_lock),
   1436						 GFP_KERNEL);
   1437		if (!lp->indirect_lock)
   1438			return -ENOMEM;
   1439		spin_lock_init(lp->indirect_lock);
   1440	}
   1441
   1442	/* map device registers */
   1443	lp->regs = devm_platform_ioremap_resource_byname(pdev, 0);
   1444	if (IS_ERR(lp->regs)) {
   1445		dev_err(&pdev->dev, "could not map TEMAC registers\n");
   1446		return -ENOMEM;
   1447	}
   1448
   1449	/* Select register access functions with the specified
   1450	 * endianness mode.  Default for OF devices is big-endian.
   1451	 */
   1452	little_endian = false;
   1453	if (temac_np) {
   1454		if (of_get_property(temac_np, "little-endian", NULL))
   1455			little_endian = true;
   1456	} else if (pdata) {
   1457		little_endian = pdata->reg_little_endian;
   1458	}
   1459	if (little_endian) {
   1460		lp->temac_ior = _temac_ior_le;
   1461		lp->temac_iow = _temac_iow_le;
   1462	} else {
   1463		lp->temac_ior = _temac_ior_be;
   1464		lp->temac_iow = _temac_iow_be;
   1465	}
   1466
   1467	/* Setup checksum offload, but default to off if not specified */
   1468	lp->temac_features = 0;
   1469	if (temac_np) {
   1470		p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
   1471		if (p && be32_to_cpu(*p))
   1472			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
   1473		p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
   1474		if (p && be32_to_cpu(*p))
   1475			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
   1476	} else if (pdata) {
   1477		if (pdata->txcsum)
   1478			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
   1479		if (pdata->rxcsum)
   1480			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
   1481	}
   1482	if (lp->temac_features & TEMAC_FEATURE_TX_CSUM)
   1483		/* Can checksum TCP/UDP over IPv4. */
   1484		ndev->features |= NETIF_F_IP_CSUM;
   1485
   1486	/* Defaults for IRQ delay/coalescing setup.  These are
    1487	 * configuration values, so they do not belong in the device tree.
   1488	 */
   1489	lp->coalesce_delay_tx = 0x10;
   1490	lp->coalesce_count_tx = 0x22;
   1491	lp->coalesce_delay_rx = 0xff;
   1492	lp->coalesce_count_rx = 0x07;
   1493
   1494	/* Setup LocalLink DMA */
   1495	if (temac_np) {
   1496		/* Find the DMA node, map the DMA registers, and
   1497		 * decode the DMA IRQs.
   1498		 */
   1499		dma_np = of_parse_phandle(temac_np, "llink-connected", 0);
   1500		if (!dma_np) {
   1501			dev_err(&pdev->dev, "could not find DMA node\n");
   1502			return -ENODEV;
   1503		}
   1504
   1505		/* Setup the DMA register accesses, could be DCR or
   1506		 * memory mapped.
   1507		 */
   1508		if (temac_dcr_setup(lp, pdev, dma_np)) {
   1509			/* no DCR in the device tree, try non-DCR */
   1510			lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
   1511						      NULL);
   1512			if (IS_ERR(lp->sdma_regs)) {
   1513				dev_err(&pdev->dev,
   1514					"unable to map DMA registers\n");
   1515				of_node_put(dma_np);
   1516				return PTR_ERR(lp->sdma_regs);
   1517			}
   1518			if (of_property_read_bool(dma_np, "little-endian")) {
   1519				lp->dma_in = temac_dma_in32_le;
   1520				lp->dma_out = temac_dma_out32_le;
   1521			} else {
   1522				lp->dma_in = temac_dma_in32_be;
   1523				lp->dma_out = temac_dma_out32_be;
   1524			}
   1525			dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
   1526		}
   1527
   1528		/* Get DMA RX and TX interrupts */
   1529		lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
   1530		lp->tx_irq = irq_of_parse_and_map(dma_np, 1);
   1531
   1532		/* Finished with the DMA node; drop the reference */
   1533		of_node_put(dma_np);
   1534	} else if (pdata) {
   1535		/* 2nd memory resource specifies DMA registers */
   1536		lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
   1537		if (IS_ERR(lp->sdma_regs)) {
   1538			dev_err(&pdev->dev,
   1539				"could not map DMA registers\n");
   1540			return PTR_ERR(lp->sdma_regs);
   1541		}
   1542		if (pdata->dma_little_endian) {
   1543			lp->dma_in = temac_dma_in32_le;
   1544			lp->dma_out = temac_dma_out32_le;
   1545		} else {
   1546			lp->dma_in = temac_dma_in32_be;
   1547			lp->dma_out = temac_dma_out32_be;
   1548		}
   1549
   1550		/* Get DMA RX and TX interrupts */
   1551		lp->rx_irq = platform_get_irq(pdev, 0);
   1552		lp->tx_irq = platform_get_irq(pdev, 1);
   1553
   1554		/* IRQ delay/coalescing setup */
   1555		if (pdata->tx_irq_timeout || pdata->tx_irq_count) {
   1556			lp->coalesce_delay_tx = pdata->tx_irq_timeout;
   1557			lp->coalesce_count_tx = pdata->tx_irq_count;
   1558		}
   1559		if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
   1560			lp->coalesce_delay_rx = pdata->rx_irq_timeout;
   1561			lp->coalesce_count_rx = pdata->rx_irq_count;
   1562		}
   1563	}
   1564
    1565	/* Error handling for the DMA RX and TX interrupts obtained above */
   1566	if (lp->rx_irq < 0) {
   1567		if (lp->rx_irq != -EPROBE_DEFER)
   1568			dev_err(&pdev->dev, "could not get DMA RX irq\n");
   1569		return lp->rx_irq;
   1570	}
   1571	if (lp->tx_irq < 0) {
   1572		if (lp->tx_irq != -EPROBE_DEFER)
   1573			dev_err(&pdev->dev, "could not get DMA TX irq\n");
   1574		return lp->tx_irq;
   1575	}
   1576
   1577	if (temac_np) {
   1578		/* Retrieve the MAC address */
   1579		rc = of_get_mac_address(temac_np, addr);
   1580		if (rc) {
   1581			dev_err(&pdev->dev, "could not find MAC address\n");
   1582			return -ENODEV;
   1583		}
   1584		temac_init_mac_address(ndev, addr);
   1585	} else if (pdata) {
   1586		temac_init_mac_address(ndev, pdata->mac_addr);
   1587	}
   1588
   1589	rc = temac_mdio_setup(lp, pdev);
   1590	if (rc)
   1591		dev_warn(&pdev->dev, "error registering MDIO bus\n");
   1592
   1593	if (temac_np) {
   1594		lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
   1595		if (lp->phy_node)
    1596			dev_dbg(lp->dev, "using PHY node %pOF\n", lp->phy_node);
   1597	} else if (pdata) {
   1598		snprintf(lp->phy_name, sizeof(lp->phy_name),
   1599			 PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
   1600		lp->phy_interface = pdata->phy_interface;
   1601	}
   1602
   1603	/* Add the device attributes */
   1604	rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
   1605	if (rc) {
   1606		dev_err(lp->dev, "Error creating sysfs files\n");
   1607		goto err_sysfs_create;
   1608	}
   1609
   1610	rc = register_netdev(lp->ndev);
   1611	if (rc) {
   1612		dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
   1613		goto err_register_ndev;
   1614	}
   1615
   1616	return 0;
   1617
   1618err_register_ndev:
   1619	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
   1620err_sysfs_create:
   1621	if (lp->phy_node)
   1622		of_node_put(lp->phy_node);
   1623	temac_mdio_teardown(lp);
   1624	return rc;
   1625}
   1626
   1627static int temac_remove(struct platform_device *pdev)
   1628{
   1629	struct net_device *ndev = platform_get_drvdata(pdev);
   1630	struct temac_local *lp = netdev_priv(ndev);
   1631
   1632	unregister_netdev(ndev);
   1633	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
   1634	if (lp->phy_node)
   1635		of_node_put(lp->phy_node);
   1636	temac_mdio_teardown(lp);
   1637	return 0;
   1638}
   1639
   1640static const struct of_device_id temac_of_match[] = {
   1641	{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
   1642	{ .compatible = "xlnx,xps-ll-temac-2.00.a", },
   1643	{ .compatible = "xlnx,xps-ll-temac-2.02.a", },
   1644	{ .compatible = "xlnx,xps-ll-temac-2.03.a", },
   1645	{},
   1646};
   1647MODULE_DEVICE_TABLE(of, temac_of_match);
   1648
   1649static struct platform_driver temac_driver = {
   1650	.probe = temac_probe,
   1651	.remove = temac_remove,
   1652	.driver = {
   1653		.name = "xilinx_temac",
   1654		.of_match_table = temac_of_match,
   1655	},
   1656};
   1657
   1658module_platform_driver(temac_driver);
   1659
   1660MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
   1661MODULE_AUTHOR("Yoshio Kashiwagi");
   1662MODULE_LICENSE("GPL");