cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xilinx_axienet_main.c (64631B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Xilinx Axi Ethernet device driver
      4 *
      5 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
      6 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
      7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
      8 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
      9 * Copyright (c) 2010 - 2011 PetaLogix
     10 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
     11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
     12 *
     13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
     14 * and Spartan6.
     15 *
     16 * TODO:
     17 *  - Add Axi Fifo support.
     18 *  - Factor out Axi DMA code into separate driver.
     19 *  - Test and fix basic multicast filtering.
     20 *  - Add support for extended multicast filtering.
     21 *  - Test basic VLAN support.
     22 *  - Add support for extended VLAN support.
     23 */
     24
     25#include <linux/clk.h>
     26#include <linux/delay.h>
     27#include <linux/etherdevice.h>
     28#include <linux/module.h>
     29#include <linux/netdevice.h>
     30#include <linux/of_mdio.h>
     31#include <linux/of_net.h>
     32#include <linux/of_platform.h>
     33#include <linux/of_irq.h>
     34#include <linux/of_address.h>
     35#include <linux/skbuff.h>
     36#include <linux/math64.h>
     37#include <linux/phy.h>
     38#include <linux/mii.h>
     39#include <linux/ethtool.h>
     40
     41#include "xilinx_axienet.h"
     42
     43/* Descriptors defines for Tx and Rx DMA */
     44#define TX_BD_NUM_DEFAULT		128
     45#define RX_BD_NUM_DEFAULT		1024
     46#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
     47#define TX_BD_NUM_MAX			4096
     48#define RX_BD_NUM_MAX			4096
     49
     50/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
     51#define DRIVER_NAME		"xaxienet"
     52#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
     53#define DRIVER_VERSION		"1.00a"
     54
     55#define AXIENET_REGS_N		40
     56
     57/* Match table for of_platform binding */
     58static const struct of_device_id axienet_of_match[] = {
     59	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
     60	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
     61	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
     62	{},
     63};
     64
     65MODULE_DEVICE_TABLE(of, axienet_of_match);
     66
     67/* Option table for setting up Axi Ethernet hardware options */
     68static struct axienet_option axienet_options[] = {
     69	/* Turn on jumbo packet support for both Rx and Tx */
     70	{
     71		.opt = XAE_OPTION_JUMBO,
     72		.reg = XAE_TC_OFFSET,
     73		.m_or = XAE_TC_JUM_MASK,
     74	}, {
     75		.opt = XAE_OPTION_JUMBO,
     76		.reg = XAE_RCW1_OFFSET,
     77		.m_or = XAE_RCW1_JUM_MASK,
     78	}, { /* Turn on VLAN packet support for both Rx and Tx */
     79		.opt = XAE_OPTION_VLAN,
     80		.reg = XAE_TC_OFFSET,
     81		.m_or = XAE_TC_VLAN_MASK,
     82	}, {
     83		.opt = XAE_OPTION_VLAN,
     84		.reg = XAE_RCW1_OFFSET,
     85		.m_or = XAE_RCW1_VLAN_MASK,
     86	}, { /* Turn on FCS stripping on receive packets */
     87		.opt = XAE_OPTION_FCS_STRIP,
     88		.reg = XAE_RCW1_OFFSET,
     89		.m_or = XAE_RCW1_FCS_MASK,
     90	}, { /* Turn on FCS insertion on transmit packets */
     91		.opt = XAE_OPTION_FCS_INSERT,
     92		.reg = XAE_TC_OFFSET,
     93		.m_or = XAE_TC_FCS_MASK,
     94	}, { /* Turn off length/type field checking on receive packets */
     95		.opt = XAE_OPTION_LENTYPE_ERR,
     96		.reg = XAE_RCW1_OFFSET,
     97		.m_or = XAE_RCW1_LT_DIS_MASK,
     98	}, { /* Turn on Rx flow control */
     99		.opt = XAE_OPTION_FLOW_CONTROL,
    100		.reg = XAE_FCC_OFFSET,
    101		.m_or = XAE_FCC_FCRX_MASK,
    102	}, { /* Turn on Tx flow control */
    103		.opt = XAE_OPTION_FLOW_CONTROL,
    104		.reg = XAE_FCC_OFFSET,
    105		.m_or = XAE_FCC_FCTX_MASK,
    106	}, { /* Turn on promiscuous frame filtering */
    107		.opt = XAE_OPTION_PROMISC,
    108		.reg = XAE_FMI_OFFSET,
    109		.m_or = XAE_FMI_PM_MASK,
    110	}, { /* Enable transmitter */
    111		.opt = XAE_OPTION_TXEN,
    112		.reg = XAE_TC_OFFSET,
    113		.m_or = XAE_TC_TX_MASK,
    114	}, { /* Enable receiver */
    115		.opt = XAE_OPTION_RXEN,
    116		.reg = XAE_RCW1_OFFSET,
    117		.m_or = XAE_RCW1_RX_MASK,
    118	},
    119	{}
    120};
    121
    122/**
    123 * axienet_dma_in32 - Memory mapped Axi DMA register read
    124 * @lp:		Pointer to axienet local structure
    125 * @reg:	Address offset from the base address of the Axi DMA core
    126 *
    127 * Return: The contents of the Axi DMA register
    128 *
    129 * This function returns the contents of the corresponding Axi DMA register.
    130 */
    131static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
    132{
    133	return ioread32(lp->dma_regs + reg);
    134}
    135
    136static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
    137			       struct axidma_bd *desc)
    138{
    139	desc->phys = lower_32_bits(addr);
    140	if (lp->features & XAE_FEATURE_DMA_64BIT)
    141		desc->phys_msb = upper_32_bits(addr);
    142}
    143
    144static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
    145				     struct axidma_bd *desc)
    146{
    147	dma_addr_t ret = desc->phys;
    148
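	/* phys_msb holds the upper 32 bits of the address. Shifting it up
	 * in two 16-bit steps keeps the expression well defined even when
	 * dma_addr_t is only 32 bits wide (a direct shift by 32 would be
	 * undefined).
	 */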
    149	if (lp->features & XAE_FEATURE_DMA_64BIT)
    150		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
    151
    152	return ret;
    153}
    154
    155/**
    156 * axienet_dma_bd_release - Release buffer descriptor rings
    157 * @ndev:	Pointer to the net_device structure
    158 *
    159 * This function is used to release the descriptors allocated in
    160 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
     161 * driver stop API is called.
    162 */
    163static void axienet_dma_bd_release(struct net_device *ndev)
    164{
    165	int i;
    166	struct axienet_local *lp = netdev_priv(ndev);
    167
    168	/* If we end up here, tx_bd_v must have been DMA allocated. */
    169	dma_free_coherent(lp->dev,
    170			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
    171			  lp->tx_bd_v,
    172			  lp->tx_bd_p);
    173
    174	if (!lp->rx_bd_v)
    175		return;
    176
    177	for (i = 0; i < lp->rx_bd_num; i++) {
    178		dma_addr_t phys;
    179
    180		/* A NULL skb means this descriptor has not been initialised
    181		 * at all.
    182		 */
    183		if (!lp->rx_bd_v[i].skb)
    184			break;
    185
    186		dev_kfree_skb(lp->rx_bd_v[i].skb);
    187
    188		/* For each descriptor, we programmed cntrl with the (non-zero)
    189		 * descriptor size, after it had been successfully allocated.
    190		 * So a non-zero value in there means we need to unmap it.
    191		 */
    192		if (lp->rx_bd_v[i].cntrl) {
    193			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
    194			dma_unmap_single(lp->dev, phys,
    195					 lp->max_frm_size, DMA_FROM_DEVICE);
    196		}
    197	}
    198
    199	dma_free_coherent(lp->dev,
    200			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
    201			  lp->rx_bd_v,
    202			  lp->rx_bd_p);
    203}
    204
    205/**
    206 * axienet_usec_to_timer - Calculate IRQ delay timer value
    207 * @lp:		Pointer to the axienet_local structure
    208 * @coalesce_usec: Microseconds to convert into timer value
    209 */
    210static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
    211{
    212	u32 result;
    213	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
    214
    215	if (lp->axi_clk)
    216		clk_rate = clk_get_rate(lp->axi_clk);
    217
    218	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
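	/* That is, timer value = usecs * clk_rate / 125000000, clamped to
	 * the 8-bit field below; e.g. 50 us at a 100 MHz SG clock gives
	 * 50 * 100000000 / 125000000 = 40.
	 */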
    219	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
    220					 (u64)125000000);
    221	if (result > 255)
    222		result = 255;
    223
    224	return result;
    225}
    226
    227/**
    228 * axienet_dma_start - Set up DMA registers and start DMA operation
    229 * @lp:		Pointer to the axienet_local structure
    230 */
    231static void axienet_dma_start(struct axienet_local *lp)
    232{
    233	/* Start updating the Rx channel control register */
    234	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
    235			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
    236	/* Only set interrupt delay timer if not generating an interrupt on
    237	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
    238	 */
    239	if (lp->coalesce_count_rx > 1)
    240		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
    241					<< XAXIDMA_DELAY_SHIFT) |
    242				 XAXIDMA_IRQ_DELAY_MASK;
    243	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
    244
    245	/* Start updating the Tx channel control register */
    246	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
    247			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
    248	/* Only set interrupt delay timer if not generating an interrupt on
    249	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
    250	 */
    251	if (lp->coalesce_count_tx > 1)
    252		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
    253					<< XAXIDMA_DELAY_SHIFT) |
    254				 XAXIDMA_IRQ_DELAY_MASK;
    255	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
    256
    257	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
    258	 * halted state. This will make the Rx side ready for reception.
    259	 */
    260	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
    261	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
    262	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
    263	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
    264			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
    265
    266	/* Write to the RS (Run-stop) bit in the Tx channel control register.
    267	 * Tx channel is now ready to run. But only after we write to the
    268	 * tail pointer register that the Tx channel will start transmitting.
    269	 */
    270	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
    271	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
    272	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
    273}
    274
    275/**
    276 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
    277 * @ndev:	Pointer to the net_device structure
    278 *
     279 * Return: 0 on success, -ENOMEM on failure
    280 *
    281 * This function is called to initialize the Rx and Tx DMA descriptor
    282 * rings. This initializes the descriptors with required default values
    283 * and is called when Axi Ethernet driver reset is called.
    284 */
    285static int axienet_dma_bd_init(struct net_device *ndev)
    286{
    287	int i;
    288	struct sk_buff *skb;
    289	struct axienet_local *lp = netdev_priv(ndev);
    290
    291	/* Reset the indexes which are used for accessing the BDs */
    292	lp->tx_bd_ci = 0;
    293	lp->tx_bd_tail = 0;
    294	lp->rx_bd_ci = 0;
    295
    296	/* Allocate the Tx and Rx buffer descriptors. */
    297	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
    298					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
    299					 &lp->tx_bd_p, GFP_KERNEL);
    300	if (!lp->tx_bd_v)
    301		return -ENOMEM;
    302
    303	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
    304					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
    305					 &lp->rx_bd_p, GFP_KERNEL);
    306	if (!lp->rx_bd_v)
    307		goto out;
    308
    309	for (i = 0; i < lp->tx_bd_num; i++) {
    310		dma_addr_t addr = lp->tx_bd_p +
    311				  sizeof(*lp->tx_bd_v) *
    312				  ((i + 1) % lp->tx_bd_num);
    313
    314		lp->tx_bd_v[i].next = lower_32_bits(addr);
    315		if (lp->features & XAE_FEATURE_DMA_64BIT)
    316			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
    317	}
    318
    319	for (i = 0; i < lp->rx_bd_num; i++) {
    320		dma_addr_t addr;
    321
    322		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
    323			((i + 1) % lp->rx_bd_num);
    324		lp->rx_bd_v[i].next = lower_32_bits(addr);
    325		if (lp->features & XAE_FEATURE_DMA_64BIT)
    326			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
    327
    328		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
    329		if (!skb)
    330			goto out;
    331
    332		lp->rx_bd_v[i].skb = skb;
    333		addr = dma_map_single(lp->dev, skb->data,
    334				      lp->max_frm_size, DMA_FROM_DEVICE);
    335		if (dma_mapping_error(lp->dev, addr)) {
    336			netdev_err(ndev, "DMA mapping error\n");
    337			goto out;
    338		}
    339		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
    340
    341		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
    342	}
    343
    344	axienet_dma_start(lp);
    345
    346	return 0;
    347out:
    348	axienet_dma_bd_release(ndev);
    349	return -ENOMEM;
    350}
    351
    352/**
    353 * axienet_set_mac_address - Write the MAC address
    354 * @ndev:	Pointer to the net_device structure
    355 * @address:	6 byte Address to be written as MAC address
    356 *
    357 * This function is called to initialize the MAC address of the Axi Ethernet
    358 * core. It writes to the UAW0 and UAW1 registers of the core.
    359 */
    360static void axienet_set_mac_address(struct net_device *ndev,
    361				    const void *address)
    362{
    363	struct axienet_local *lp = netdev_priv(ndev);
    364
    365	if (address)
    366		eth_hw_addr_set(ndev, address);
    367	if (!is_valid_ether_addr(ndev->dev_addr))
    368		eth_hw_addr_random(ndev);
    369
     370	/* Set up the unicast MAC address filter and set its MAC address */
    371	axienet_iow(lp, XAE_UAW0_OFFSET,
    372		    (ndev->dev_addr[0]) |
    373		    (ndev->dev_addr[1] << 8) |
    374		    (ndev->dev_addr[2] << 16) |
    375		    (ndev->dev_addr[3] << 24));
    376	axienet_iow(lp, XAE_UAW1_OFFSET,
    377		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
    378		      ~XAE_UAW1_UNICASTADDR_MASK) |
    379		     (ndev->dev_addr[4] |
    380		     (ndev->dev_addr[5] << 8))));
    381}
    382
    383/**
    384 * netdev_set_mac_address - Write the MAC address (from outside the driver)
    385 * @ndev:	Pointer to the net_device structure
    386 * @p:		6 byte Address to be written as MAC address
    387 *
    388 * Return: 0 for all conditions. Presently, there is no failure case.
    389 *
    390 * This function is called to initialize the MAC address of the Axi Ethernet
    391 * core. It calls the core specific axienet_set_mac_address. This is the
    392 * function that goes into net_device_ops structure entry ndo_set_mac_address.
    393 */
    394static int netdev_set_mac_address(struct net_device *ndev, void *p)
    395{
    396	struct sockaddr *addr = p;
    397	axienet_set_mac_address(ndev, addr->sa_data);
    398	return 0;
    399}
    400
    401/**
    402 * axienet_set_multicast_list - Prepare the multicast table
    403 * @ndev:	Pointer to the net_device structure
    404 *
    405 * This function is called to initialize the multicast table during
    406 * initialization. The Axi Ethernet basic multicast support has a four-entry
    407 * multicast table which is initialized here. Additionally this function
     408 * goes into the net_device_ops structure entry ndo_set_rx_mode. This
    409 * means whenever the multicast table entries need to be updated this
    410 * function gets called.
    411 */
    412static void axienet_set_multicast_list(struct net_device *ndev)
    413{
    414	int i;
    415	u32 reg, af0reg, af1reg;
    416	struct axienet_local *lp = netdev_priv(ndev);
    417
    418	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
    419	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
    420		/* We must make the kernel realize we had to move into
     421		 * promiscuous mode. If it was a promiscuous mode request,
     422		 * the flag is already set. If not, we set it.
    423		 */
    424		ndev->flags |= IFF_PROMISC;
    425		reg = axienet_ior(lp, XAE_FMI_OFFSET);
    426		reg |= XAE_FMI_PM_MASK;
    427		axienet_iow(lp, XAE_FMI_OFFSET, reg);
    428		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
    429	} else if (!netdev_mc_empty(ndev)) {
    430		struct netdev_hw_addr *ha;
    431
    432		i = 0;
    433		netdev_for_each_mc_addr(ha, ndev) {
    434			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
    435				break;
    436
    437			af0reg = (ha->addr[0]);
    438			af0reg |= (ha->addr[1] << 8);
    439			af0reg |= (ha->addr[2] << 16);
    440			af0reg |= (ha->addr[3] << 24);
    441
    442			af1reg = (ha->addr[4]);
    443			af1reg |= (ha->addr[5] << 8);
    444
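			/* The low byte of FMI selects which of the four
			 * multicast table entries the AF0/AF1 writes below
			 * will program.
			 */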
    445			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
    446			reg |= i;
    447
    448			axienet_iow(lp, XAE_FMI_OFFSET, reg);
    449			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
    450			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
    451			i++;
    452		}
    453	} else {
    454		reg = axienet_ior(lp, XAE_FMI_OFFSET);
    455		reg &= ~XAE_FMI_PM_MASK;
    456
    457		axienet_iow(lp, XAE_FMI_OFFSET, reg);
    458
    459		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
    460			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
    461			reg |= i;
    462
    463			axienet_iow(lp, XAE_FMI_OFFSET, reg);
    464			axienet_iow(lp, XAE_AF0_OFFSET, 0);
    465			axienet_iow(lp, XAE_AF1_OFFSET, 0);
    466		}
    467
    468		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
    469	}
    470}
    471
    472/**
    473 * axienet_setoptions - Set an Axi Ethernet option
    474 * @ndev:	Pointer to the net_device structure
    475 * @options:	Option to be enabled/disabled
    476 *
    477 * The Axi Ethernet core has multiple features which can be selectively turned
    478 * on or off. The typical options could be jumbo frame option, basic VLAN
    479 * option, promiscuous mode option etc. This function is used to set or clear
    480 * these options in the Axi Ethernet hardware. This is done through
     481 * the axienet_option structure.
    482 */
    483static void axienet_setoptions(struct net_device *ndev, u32 options)
    484{
    485	int reg;
    486	struct axienet_local *lp = netdev_priv(ndev);
    487	struct axienet_option *tp = &axienet_options[0];
    488
    489	while (tp->opt) {
    490		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
    491		if (options & tp->opt)
    492			reg |= tp->m_or;
    493		axienet_iow(lp, tp->reg, reg);
    494		tp++;
    495	}
    496
    497	lp->options |= options;
    498}
    499
    500static int __axienet_device_reset(struct axienet_local *lp)
    501{
    502	u32 value;
    503	int ret;
    504
    505	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
    506	 * process of Axi DMA takes a while to complete as all pending
    507	 * commands/transfers will be flushed or completed during this
    508	 * reset process.
    509	 * Note that even though both TX and RX have their own reset register,
    510	 * they both reset the entire DMA core, so only one needs to be used.
    511	 */
    512	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
    513	ret = read_poll_timeout(axienet_dma_in32, value,
    514				!(value & XAXIDMA_CR_RESET_MASK),
    515				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
    516				XAXIDMA_TX_CR_OFFSET);
    517	if (ret) {
    518		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
    519		return ret;
    520	}
    521
    522	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
    523	ret = read_poll_timeout(axienet_ior, value,
    524				value & XAE_INT_PHYRSTCMPLT_MASK,
    525				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
    526				XAE_IS_OFFSET);
    527	if (ret) {
    528		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
    529		return ret;
    530	}
    531
    532	return 0;
    533}
    534
    535/**
    536 * axienet_dma_stop - Stop DMA operation
    537 * @lp:		Pointer to the axienet_local structure
    538 */
    539static void axienet_dma_stop(struct axienet_local *lp)
    540{
    541	int count;
    542	u32 cr, sr;
    543
    544	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
    545	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
    546	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
    547	synchronize_irq(lp->rx_irq);
    548
    549	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
    550	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
    551	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
    552	synchronize_irq(lp->tx_irq);
    553
    554	/* Give DMAs a chance to halt gracefully */
    555	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
    556	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
    557		msleep(20);
    558		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
    559	}
    560
    561	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
    562	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
    563		msleep(20);
    564		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
    565	}
    566
    567	/* Do a reset to ensure DMA is really stopped */
    568	axienet_lock_mii(lp);
    569	__axienet_device_reset(lp);
    570	axienet_unlock_mii(lp);
    571}
    572
    573/**
    574 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
    575 * @ndev:	Pointer to the net_device structure
    576 *
    577 * This function is called to reset and initialize the Axi Ethernet core. This
    578 * is typically called during initialization. It does a reset of the Axi DMA
    579 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
    580 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
    581 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
    582 * core.
     583 * Return: 0 on success or a negative error number otherwise.
    584 */
    585static int axienet_device_reset(struct net_device *ndev)
    586{
    587	u32 axienet_status;
    588	struct axienet_local *lp = netdev_priv(ndev);
    589	int ret;
    590
    591	ret = __axienet_device_reset(lp);
    592	if (ret)
    593		return ret;
    594
    595	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
    596	lp->options |= XAE_OPTION_VLAN;
    597	lp->options &= (~XAE_OPTION_JUMBO);
    598
    599	if ((ndev->mtu > XAE_MTU) &&
    600		(ndev->mtu <= XAE_JUMBO_MTU)) {
    601		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
    602					XAE_TRL_SIZE;
    603
    604		if (lp->max_frm_size <= lp->rxmem)
    605			lp->options |= XAE_OPTION_JUMBO;
    606	}
    607
    608	ret = axienet_dma_bd_init(ndev);
    609	if (ret) {
    610		netdev_err(ndev, "%s: descriptor allocation failed\n",
    611			   __func__);
    612		return ret;
    613	}
    614
    615	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
    616	axienet_status &= ~XAE_RCW1_RX_MASK;
    617	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
    618
    619	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
    620	if (axienet_status & XAE_INT_RXRJECT_MASK)
    621		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
    622	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
    623		    XAE_INT_RECV_ERROR_MASK : 0);
    624
    625	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
    626
    627	/* Sync default options with HW but leave receiver and
    628	 * transmitter disabled.
    629	 */
    630	axienet_setoptions(ndev, lp->options &
    631			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
    632	axienet_set_mac_address(ndev, NULL);
    633	axienet_set_multicast_list(ndev);
    634	axienet_setoptions(ndev, lp->options);
    635
    636	netif_trans_update(ndev);
    637
    638	return 0;
    639}
    640
    641/**
    642 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
    643 * @lp:		Pointer to the axienet_local structure
    644 * @first_bd:	Index of first descriptor to clean up
    645 * @nr_bds:	Max number of descriptors to clean up
    646 * @force:	Whether to clean descriptors even if not complete
    647 * @sizep:	Pointer to a u32 filled with the total sum of all bytes
    648 * 		in all cleaned-up descriptors. Ignored if NULL.
    649 * @budget:	NAPI budget (use 0 when not called from NAPI poll)
    650 *
    651 * Would either be called after a successful transmit operation, or after
    652 * there was an error when setting up the chain.
     653 * Return: the number of descriptors handled.
    654 */
    655static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
    656				 int nr_bds, bool force, u32 *sizep, int budget)
    657{
    658	struct axidma_bd *cur_p;
    659	unsigned int status;
    660	dma_addr_t phys;
    661	int i;
    662
    663	for (i = 0; i < nr_bds; i++) {
    664		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
    665		status = cur_p->status;
    666
    667		/* If force is not specified, clean up only descriptors
    668		 * that have been completed by the MAC.
    669		 */
    670		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
    671			break;
    672
    673		/* Ensure we see complete descriptor update */
    674		dma_rmb();
    675		phys = desc_get_phys_addr(lp, cur_p);
    676		dma_unmap_single(lp->dev, phys,
    677				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
    678				 DMA_TO_DEVICE);
    679
    680		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
    681			napi_consume_skb(cur_p->skb, budget);
    682
    683		cur_p->app0 = 0;
    684		cur_p->app1 = 0;
    685		cur_p->app2 = 0;
    686		cur_p->app4 = 0;
    687		cur_p->skb = NULL;
    688		/* ensure our transmit path and device don't prematurely see status cleared */
    689		wmb();
    690		cur_p->cntrl = 0;
    691		cur_p->status = 0;
    692
    693		if (sizep)
    694			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
    695	}
    696
    697	return i;
    698}
    699
    700/**
    701 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
    702 * @lp:		Pointer to the axienet_local structure
    703 * @num_frag:	The number of BDs to check for
    704 *
    705 * Return: 0, on success
    706 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
    707 *
    708 * This function is invoked before BDs are allocated and transmission starts.
    709 * This function returns 0 if a BD or group of BDs can be allocated for
    710 * transmission. If the BD or any of the BDs are not free the function
    711 * returns a busy status.
    712 */
    713static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
    714					    int num_frag)
    715{
    716	struct axidma_bd *cur_p;
    717
    718	/* Ensure we see all descriptor updates from device or TX polling */
    719	rmb();
    720	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
    721			     lp->tx_bd_num];
    722	if (cur_p->cntrl)
    723		return NETDEV_TX_BUSY;
    724	return 0;
    725}
    726
    727/**
    728 * axienet_tx_poll - Invoked once a transmit is completed by the
    729 * Axi DMA Tx channel.
    730 * @napi:	Pointer to NAPI structure.
    731 * @budget:	Max number of TX packets to process.
    732 *
    733 * Return: Number of TX packets processed.
    734 *
    735 * This function is invoked from the NAPI processing to notify the completion
    736 * of transmit operation. It clears fields in the corresponding Tx BDs and
    737 * unmaps the corresponding buffer so that CPU can regain ownership of the
    738 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
    739 * required.
    740 */
    741static int axienet_tx_poll(struct napi_struct *napi, int budget)
    742{
    743	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
    744	struct net_device *ndev = lp->ndev;
    745	u32 size = 0;
    746	int packets;
    747
    748	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);
    749
    750	if (packets) {
    751		lp->tx_bd_ci += packets;
    752		if (lp->tx_bd_ci >= lp->tx_bd_num)
    753			lp->tx_bd_ci %= lp->tx_bd_num;
    754
    755		ndev->stats.tx_packets += packets;
    756		ndev->stats.tx_bytes += size;
    757
    758		/* Matches barrier in axienet_start_xmit */
    759		smp_mb();
    760
    761		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
    762			netif_wake_queue(ndev);
    763	}
    764
    765	if (packets < budget && napi_complete_done(napi, packets)) {
    766		/* Re-enable TX completion interrupts. This should
    767		 * cause an immediate interrupt if any TX packets are
    768		 * already pending.
    769		 */
    770		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
    771	}
    772	return packets;
    773}
    774
    775/**
    776 * axienet_start_xmit - Starts the transmission.
    777 * @skb:	sk_buff pointer that contains data to be Txed.
    778 * @ndev:	Pointer to net_device structure.
    779 *
    780 * Return: NETDEV_TX_OK, on success
    781 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
    782 *
    783 * This function is invoked from upper layers to initiate transmission. The
    784 * function uses the next available free BDs and populates their fields to
    785 * start the transmission. Additionally if checksum offloading is supported,
    786 * it populates AXI Stream Control fields with appropriate values.
    787 */
    788static netdev_tx_t
    789axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
    790{
    791	u32 ii;
    792	u32 num_frag;
    793	u32 csum_start_off;
    794	u32 csum_index_off;
    795	skb_frag_t *frag;
    796	dma_addr_t tail_p, phys;
    797	u32 orig_tail_ptr, new_tail_ptr;
    798	struct axienet_local *lp = netdev_priv(ndev);
    799	struct axidma_bd *cur_p;
    800
    801	orig_tail_ptr = lp->tx_bd_tail;
    802	new_tail_ptr = orig_tail_ptr;
    803
    804	num_frag = skb_shinfo(skb)->nr_frags;
    805	cur_p = &lp->tx_bd_v[orig_tail_ptr];
    806
    807	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
    808		/* Should not happen as last start_xmit call should have
    809		 * checked for sufficient space and queue should only be
    810		 * woken when sufficient space is available.
    811		 */
    812		netif_stop_queue(ndev);
    813		if (net_ratelimit())
    814			netdev_warn(ndev, "TX ring unexpectedly full\n");
    815		return NETDEV_TX_BUSY;
    816	}
    817
    818	if (skb->ip_summed == CHECKSUM_PARTIAL) {
    819		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
    820			/* Tx Full Checksum Offload Enabled */
    821			cur_p->app0 |= 2;
    822		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
    823			csum_start_off = skb_transport_offset(skb);
    824			csum_index_off = csum_start_off + skb->csum_offset;
    825			/* Tx Partial Checksum Offload Enabled */
    826			cur_p->app0 |= 1;
    827			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
    828		}
    829	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
    830		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
    831	}
    832
    833	phys = dma_map_single(lp->dev, skb->data,
    834			      skb_headlen(skb), DMA_TO_DEVICE);
    835	if (unlikely(dma_mapping_error(lp->dev, phys))) {
    836		if (net_ratelimit())
    837			netdev_err(ndev, "TX DMA mapping error\n");
    838		ndev->stats.tx_dropped++;
    839		return NETDEV_TX_OK;
    840	}
    841	desc_set_phys_addr(lp, phys, cur_p);
    842	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
    843
    844	for (ii = 0; ii < num_frag; ii++) {
    845		if (++new_tail_ptr >= lp->tx_bd_num)
    846			new_tail_ptr = 0;
    847		cur_p = &lp->tx_bd_v[new_tail_ptr];
    848		frag = &skb_shinfo(skb)->frags[ii];
    849		phys = dma_map_single(lp->dev,
    850				      skb_frag_address(frag),
    851				      skb_frag_size(frag),
    852				      DMA_TO_DEVICE);
    853		if (unlikely(dma_mapping_error(lp->dev, phys))) {
    854			if (net_ratelimit())
    855				netdev_err(ndev, "TX DMA mapping error\n");
    856			ndev->stats.tx_dropped++;
    857			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
    858					      true, NULL, 0);
    859			return NETDEV_TX_OK;
    860		}
    861		desc_set_phys_addr(lp, phys, cur_p);
    862		cur_p->cntrl = skb_frag_size(frag);
    863	}
    864
    865	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
    866	cur_p->skb = skb;
    867
    868	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
    869	if (++new_tail_ptr >= lp->tx_bd_num)
    870		new_tail_ptr = 0;
    871	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
    872
    873	/* Start the transfer */
    874	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
    875
    876	/* Stop queue if next transmit may not have space */
    877	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
    878		netif_stop_queue(ndev);
    879
    880		/* Matches barrier in axienet_tx_poll */
    881		smp_mb();
    882
    883		/* Space might have just been freed - check again */
    884		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
    885			netif_wake_queue(ndev);
    886	}
    887
    888	return NETDEV_TX_OK;
    889}
    890
    891/**
    892 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
    893 * @napi:	Pointer to NAPI structure.
    894 * @budget:	Max number of RX packets to process.
    895 *
    896 * Return: Number of RX packets processed.
    897 */
    898static int axienet_rx_poll(struct napi_struct *napi, int budget)
    899{
    900	u32 length;
    901	u32 csumstatus;
    902	u32 size = 0;
    903	int packets = 0;
    904	dma_addr_t tail_p = 0;
    905	struct axidma_bd *cur_p;
    906	struct sk_buff *skb, *new_skb;
    907	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
    908
    909	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
    910
    911	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
    912		dma_addr_t phys;
    913
    914		/* Ensure we see complete descriptor update */
    915		dma_rmb();
    916
    917		skb = cur_p->skb;
    918		cur_p->skb = NULL;
    919
    920		/* skb could be NULL if a previous pass already received the
    921		 * packet for this slot in the ring, but failed to refill it
    922		 * with a newly allocated buffer. In this case, don't try to
    923		 * receive it again.
    924		 */
    925		if (likely(skb)) {
    926			length = cur_p->app4 & 0x0000FFFF;
    927
    928			phys = desc_get_phys_addr(lp, cur_p);
    929			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
    930					 DMA_FROM_DEVICE);
    931
    932			skb_put(skb, length);
    933			skb->protocol = eth_type_trans(skb, lp->ndev);
    934			/*skb_checksum_none_assert(skb);*/
    935			skb->ip_summed = CHECKSUM_NONE;
    936
    937			/* if we're doing Rx csum offload, set it up */
    938			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
    939				csumstatus = (cur_p->app2 &
    940					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
    941				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
    942				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
    943					skb->ip_summed = CHECKSUM_UNNECESSARY;
    944				}
    945			} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
    946				   skb->protocol == htons(ETH_P_IP) &&
    947				   skb->len > 64) {
    948				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
    949				skb->ip_summed = CHECKSUM_COMPLETE;
    950			}
    951
    952			napi_gro_receive(napi, skb);
    953
    954			size += length;
    955			packets++;
    956		}
    957
    958		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
    959		if (!new_skb)
    960			break;
    961
    962		phys = dma_map_single(lp->dev, new_skb->data,
    963				      lp->max_frm_size,
    964				      DMA_FROM_DEVICE);
    965		if (unlikely(dma_mapping_error(lp->dev, phys))) {
    966			if (net_ratelimit())
    967				netdev_err(lp->ndev, "RX DMA mapping error\n");
    968			dev_kfree_skb(new_skb);
    969			break;
    970		}
    971		desc_set_phys_addr(lp, phys, cur_p);
    972
    973		cur_p->cntrl = lp->max_frm_size;
    974		cur_p->status = 0;
    975		cur_p->skb = new_skb;
    976
    977		/* Only update tail_p to mark this slot as usable after it has
    978		 * been successfully refilled.
    979		 */
    980		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
    981
    982		if (++lp->rx_bd_ci >= lp->rx_bd_num)
    983			lp->rx_bd_ci = 0;
    984		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
    985	}
    986
    987	lp->ndev->stats.rx_packets += packets;
    988	lp->ndev->stats.rx_bytes += size;
    989
    990	if (tail_p)
    991		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
    992
    993	if (packets < budget && napi_complete_done(napi, packets)) {
    994		/* Re-enable RX completion interrupts. This should
    995		 * cause an immediate interrupt if any RX packets are
    996		 * already pending.
    997		 */
    998		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
    999	}
   1000	return packets;
   1001}
   1002
   1003/**
   1004 * axienet_tx_irq - Tx Done Isr.
   1005 * @irq:	irq number
   1006 * @_ndev:	net_device pointer
   1007 *
   1008 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
   1009 *
   1010 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
   1011 * TX BD processing.
   1012 */
   1013static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
   1014{
   1015	unsigned int status;
   1016	struct net_device *ndev = _ndev;
   1017	struct axienet_local *lp = netdev_priv(ndev);
   1018
   1019	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
   1020
   1021	if (!(status & XAXIDMA_IRQ_ALL_MASK))
   1022		return IRQ_NONE;
   1023
   1024	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
   1025
   1026	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
   1027		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
   1028		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
   1029			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
   1030			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
   1031		schedule_work(&lp->dma_err_task);
   1032	} else {
   1033		/* Disable further TX completion interrupts and schedule
   1034		 * NAPI to handle the completions.
   1035		 */
   1036		u32 cr = lp->tx_dma_cr;
   1037
   1038		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
   1039		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
   1040
   1041		napi_schedule(&lp->napi_tx);
   1042	}
   1043
   1044	return IRQ_HANDLED;
   1045}
   1046
   1047/**
   1048 * axienet_rx_irq - Rx Isr.
   1049 * @irq:	irq number
   1050 * @_ndev:	net_device pointer
   1051 *
   1052 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
   1053 *
   1054 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
   1055 * processing.
   1056 */
   1057static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
   1058{
   1059	unsigned int status;
   1060	struct net_device *ndev = _ndev;
   1061	struct axienet_local *lp = netdev_priv(ndev);
   1062
   1063	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
   1064
   1065	if (!(status & XAXIDMA_IRQ_ALL_MASK))
   1066		return IRQ_NONE;
   1067
   1068	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
   1069
   1070	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
   1071		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
   1072		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
   1073			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
   1074			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
   1075		schedule_work(&lp->dma_err_task);
   1076	} else {
   1077		/* Disable further RX completion interrupts and schedule
   1078		 * NAPI receive.
   1079		 */
   1080		u32 cr = lp->rx_dma_cr;
   1081
   1082		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
   1083		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
   1084
   1085		napi_schedule(&lp->napi_rx);
   1086	}
   1087
   1088	return IRQ_HANDLED;
   1089}
   1090
   1091/**
   1092 * axienet_eth_irq - Ethernet core Isr.
   1093 * @irq:	irq number
   1094 * @_ndev:	net_device pointer
   1095 *
   1096 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
   1097 *
   1098 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
   1099 */
   1100static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
   1101{
   1102	struct net_device *ndev = _ndev;
   1103	struct axienet_local *lp = netdev_priv(ndev);
   1104	unsigned int pending;
   1105
   1106	pending = axienet_ior(lp, XAE_IP_OFFSET);
   1107	if (!pending)
   1108		return IRQ_NONE;
   1109
   1110	if (pending & XAE_INT_RXFIFOOVR_MASK)
   1111		ndev->stats.rx_missed_errors++;
   1112
   1113	if (pending & XAE_INT_RXRJECT_MASK)
   1114		ndev->stats.rx_frame_errors++;
   1115
   1116	axienet_iow(lp, XAE_IS_OFFSET, pending);
   1117	return IRQ_HANDLED;
   1118}
   1119
   1120static void axienet_dma_err_handler(struct work_struct *work);
   1121
   1122/**
   1123 * axienet_open - Driver open routine.
   1124 * @ndev:	Pointer to net_device structure
   1125 *
   1126 * Return: 0, on success.
   1127 *	    non-zero error value on failure
   1128 *
   1129 * This is the driver open routine. It calls phylink_start to start the
   1130 * PHY device.
   1131 * It also allocates interrupt service routines, enables the interrupt lines
   1132 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
   1133 * descriptors are initialized.
   1134 */
   1135static int axienet_open(struct net_device *ndev)
   1136{
   1137	int ret;
   1138	struct axienet_local *lp = netdev_priv(ndev);
   1139
   1140	dev_dbg(&ndev->dev, "axienet_open()\n");
   1141
   1142	/* When we do an Axi Ethernet reset, it resets the complete core
   1143	 * including the MDIO. MDIO must be disabled before resetting.
   1144	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
   1145	 */
   1146	axienet_lock_mii(lp);
   1147	ret = axienet_device_reset(ndev);
   1148	axienet_unlock_mii(lp);
   1149
   1150	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
   1151	if (ret) {
   1152		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
   1153		return ret;
   1154	}
   1155
   1156	phylink_start(lp->phylink);
   1157
   1158	/* Enable worker thread for Axi DMA error handling */
   1159	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
   1160
   1161	napi_enable(&lp->napi_rx);
   1162	napi_enable(&lp->napi_tx);
   1163
   1164	/* Enable interrupts for Axi DMA Tx */
   1165	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
   1166			  ndev->name, ndev);
   1167	if (ret)
   1168		goto err_tx_irq;
   1169	/* Enable interrupts for Axi DMA Rx */
   1170	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
   1171			  ndev->name, ndev);
   1172	if (ret)
   1173		goto err_rx_irq;
   1174	/* Enable interrupts for Axi Ethernet core (if defined) */
   1175	if (lp->eth_irq > 0) {
   1176		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
   1177				  ndev->name, ndev);
   1178		if (ret)
   1179			goto err_eth_irq;
   1180	}
   1181
   1182	return 0;
   1183
   1184err_eth_irq:
   1185	free_irq(lp->rx_irq, ndev);
   1186err_rx_irq:
   1187	free_irq(lp->tx_irq, ndev);
   1188err_tx_irq:
   1189	napi_disable(&lp->napi_tx);
   1190	napi_disable(&lp->napi_rx);
   1191	phylink_stop(lp->phylink);
   1192	phylink_disconnect_phy(lp->phylink);
   1193	cancel_work_sync(&lp->dma_err_task);
   1194	dev_err(lp->dev, "request_irq() failed\n");
   1195	return ret;
   1196}
   1197
   1198/**
   1199 * axienet_stop - Driver stop routine.
   1200 * @ndev:	Pointer to net_device structure
   1201 *
   1202 * Return: 0, on success.
   1203 *
   1204 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
   1205 * device. It also removes the interrupt handlers and disables the interrupts.
   1206 * The Axi DMA Tx/Rx BDs are released.
   1207 */
   1208static int axienet_stop(struct net_device *ndev)
   1209{
   1210	struct axienet_local *lp = netdev_priv(ndev);
   1211
   1212	dev_dbg(&ndev->dev, "axienet_close()\n");
   1213
   1214	napi_disable(&lp->napi_tx);
   1215	napi_disable(&lp->napi_rx);
   1216
   1217	phylink_stop(lp->phylink);
   1218	phylink_disconnect_phy(lp->phylink);
   1219
   1220	axienet_setoptions(ndev, lp->options &
   1221			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
   1222
   1223	axienet_dma_stop(lp);
   1224
   1225	axienet_iow(lp, XAE_IE_OFFSET, 0);
   1226
   1227	cancel_work_sync(&lp->dma_err_task);
   1228
   1229	if (lp->eth_irq > 0)
   1230		free_irq(lp->eth_irq, ndev);
   1231	free_irq(lp->tx_irq, ndev);
   1232	free_irq(lp->rx_irq, ndev);
   1233
   1234	axienet_dma_bd_release(ndev);
   1235	return 0;
   1236}
   1237
   1238/**
   1239 * axienet_change_mtu - Driver change mtu routine.
   1240 * @ndev:	Pointer to net_device structure
   1241 * @new_mtu:	New mtu value to be applied
   1242 *
    1243 * Return: 0 on success, or a negative error value on failure.
   1244 *
   1245 * This is the change mtu driver routine. It checks if the Axi Ethernet
   1246 * hardware supports jumbo frames before changing the mtu. This can be
   1247 * called only when the device is not up.
   1248 */
   1249static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
   1250{
   1251	struct axienet_local *lp = netdev_priv(ndev);
   1252
   1253	if (netif_running(ndev))
   1254		return -EBUSY;
   1255
   1256	if ((new_mtu + VLAN_ETH_HLEN +
   1257		XAE_TRL_SIZE) > lp->rxmem)
   1258		return -EINVAL;
   1259
   1260	ndev->mtu = new_mtu;
   1261
   1262	return 0;
   1263}
   1264
   1265#ifdef CONFIG_NET_POLL_CONTROLLER
   1266/**
   1267 * axienet_poll_controller - Axi Ethernet poll mechanism.
   1268 * @ndev:	Pointer to net_device structure
   1269 *
   1270 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
   1271 * to polling the ISRs and are enabled back after the polling is done.
   1272 */
   1273static void axienet_poll_controller(struct net_device *ndev)
   1274{
   1275	struct axienet_local *lp = netdev_priv(ndev);
   1276	disable_irq(lp->tx_irq);
   1277	disable_irq(lp->rx_irq);
    1278	axienet_rx_irq(lp->rx_irq, ndev);
    1279	axienet_tx_irq(lp->tx_irq, ndev);
   1280	enable_irq(lp->tx_irq);
   1281	enable_irq(lp->rx_irq);
   1282}
   1283#endif
   1284
   1285static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
   1286{
   1287	struct axienet_local *lp = netdev_priv(dev);
   1288
   1289	if (!netif_running(dev))
   1290		return -EINVAL;
   1291
   1292	return phylink_mii_ioctl(lp->phylink, rq, cmd);
   1293}
   1294
   1295static const struct net_device_ops axienet_netdev_ops = {
   1296	.ndo_open = axienet_open,
   1297	.ndo_stop = axienet_stop,
   1298	.ndo_start_xmit = axienet_start_xmit,
   1299	.ndo_change_mtu	= axienet_change_mtu,
   1300	.ndo_set_mac_address = netdev_set_mac_address,
   1301	.ndo_validate_addr = eth_validate_addr,
   1302	.ndo_eth_ioctl = axienet_ioctl,
   1303	.ndo_set_rx_mode = axienet_set_multicast_list,
   1304#ifdef CONFIG_NET_POLL_CONTROLLER
   1305	.ndo_poll_controller = axienet_poll_controller,
   1306#endif
   1307};
   1308
   1309/**
   1310 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
   1311 * @ndev:	Pointer to net_device structure
   1312 * @ed:		Pointer to ethtool_drvinfo structure
   1313 *
   1314 * This implements ethtool command for getting the driver information.
   1315 * Issue "ethtool -i ethX" under linux prompt to execute this function.
   1316 */
   1317static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
   1318					 struct ethtool_drvinfo *ed)
   1319{
   1320	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
   1321	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
   1322}
   1323
   1324/**
   1325 * axienet_ethtools_get_regs_len - Get the total regs length present in the
   1326 *				   AxiEthernet core.
   1327 * @ndev:	Pointer to net_device structure
   1328 *
   1329 * This implements ethtool command for getting the total register length
   1330 * information.
   1331 *
   1332 * Return: the total regs length
   1333 */
   1334static int axienet_ethtools_get_regs_len(struct net_device *ndev)
   1335{
   1336	return sizeof(u32) * AXIENET_REGS_N;
   1337}
   1338
   1339/**
   1340 * axienet_ethtools_get_regs - Dump the contents of all registers present
   1341 *			       in AxiEthernet core.
   1342 * @ndev:	Pointer to net_device structure
   1343 * @regs:	Pointer to ethtool_regs structure
   1344 * @ret:	Void pointer used to return the contents of the registers.
   1345 *
   1346 * This implements ethtool command for getting the Axi Ethernet register dump.
   1347 * Issue "ethtool -d ethX" to execute this function.
   1348 */
   1349static void axienet_ethtools_get_regs(struct net_device *ndev,
   1350				      struct ethtool_regs *regs, void *ret)
   1351{
   1352	u32 *data = (u32 *) ret;
   1353	size_t len = sizeof(u32) * AXIENET_REGS_N;
   1354	struct axienet_local *lp = netdev_priv(ndev);
   1355
   1356	regs->version = 0;
   1357	regs->len = len;
   1358
   1359	memset(data, 0, len);
   1360	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
   1361	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
   1362	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
   1363	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
   1364	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
   1365	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
   1366	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
   1367	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
   1368	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
   1369	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
   1370	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
   1371	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
   1372	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
   1373	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
   1374	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
   1375	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
   1376	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
   1377	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
   1378	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
   1379	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
   1380	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
   1381	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
   1382	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
   1383	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
   1384	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
   1385	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
   1386	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
   1387	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
   1388	data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
   1389	data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
   1390	data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
   1391	data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
   1392	data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
   1393	data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
   1394	data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
   1395	data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
   1396}
   1397
   1398static void
   1399axienet_ethtools_get_ringparam(struct net_device *ndev,
   1400			       struct ethtool_ringparam *ering,
   1401			       struct kernel_ethtool_ringparam *kernel_ering,
   1402			       struct netlink_ext_ack *extack)
   1403{
   1404	struct axienet_local *lp = netdev_priv(ndev);
   1405
   1406	ering->rx_max_pending = RX_BD_NUM_MAX;
   1407	ering->rx_mini_max_pending = 0;
   1408	ering->rx_jumbo_max_pending = 0;
   1409	ering->tx_max_pending = TX_BD_NUM_MAX;
   1410	ering->rx_pending = lp->rx_bd_num;
   1411	ering->rx_mini_pending = 0;
   1412	ering->rx_jumbo_pending = 0;
   1413	ering->tx_pending = lp->tx_bd_num;
   1414}
   1415
   1416static int
   1417axienet_ethtools_set_ringparam(struct net_device *ndev,
   1418			       struct ethtool_ringparam *ering,
   1419			       struct kernel_ethtool_ringparam *kernel_ering,
   1420			       struct netlink_ext_ack *extack)
   1421{
   1422	struct axienet_local *lp = netdev_priv(ndev);
   1423
   1424	if (ering->rx_pending > RX_BD_NUM_MAX ||
   1425	    ering->rx_mini_pending ||
   1426	    ering->rx_jumbo_pending ||
   1427	    ering->tx_pending < TX_BD_NUM_MIN ||
   1428	    ering->tx_pending > TX_BD_NUM_MAX)
   1429		return -EINVAL;
   1430
   1431	if (netif_running(ndev))
   1432		return -EBUSY;
   1433
   1434	lp->rx_bd_num = ering->rx_pending;
   1435	lp->tx_bd_num = ering->tx_pending;
   1436	return 0;
   1437}
   1438
   1439/**
   1440 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
   1441 *				     Tx and Rx paths.
   1442 * @ndev:	Pointer to net_device structure
   1443 * @epauseparm:	Pointer to ethtool_pauseparam structure.
   1444 *
   1445 * This implements ethtool command for getting axi ethernet pause frame
   1446 * setting. Issue "ethtool -a ethX" to execute this function.
   1447 */
   1448static void
   1449axienet_ethtools_get_pauseparam(struct net_device *ndev,
   1450				struct ethtool_pauseparam *epauseparm)
   1451{
   1452	struct axienet_local *lp = netdev_priv(ndev);
   1453
   1454	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
   1455}
   1456
   1457/**
   1458 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
   1459 *				     settings.
   1460 * @ndev:	Pointer to net_device structure
   1461 * @epauseparm:Pointer to ethtool_pauseparam structure
   1462 *
   1463 * This implements ethtool command for enabling flow control on Rx and Tx
   1464 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
   1465 * function.
   1466 *
    1467 * Return: 0 on success, or a negative error value from phylink.
   1468 */
   1469static int
   1470axienet_ethtools_set_pauseparam(struct net_device *ndev,
   1471				struct ethtool_pauseparam *epauseparm)
   1472{
   1473	struct axienet_local *lp = netdev_priv(ndev);
   1474
   1475	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
   1476}
   1477
   1478/**
   1479 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
   1480 * @ndev:	Pointer to net_device structure
   1481 * @ecoalesce:	Pointer to ethtool_coalesce structure
   1482 * @kernel_coal: ethtool CQE mode setting structure
   1483 * @extack:	extack for reporting error messages
   1484 *
   1485 * This implements ethtool command for getting the DMA interrupt coalescing
   1486 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
   1487 * execute this function.
   1488 *
   1489 * Return: 0 always
   1490 */
   1491static int
   1492axienet_ethtools_get_coalesce(struct net_device *ndev,
   1493			      struct ethtool_coalesce *ecoalesce,
   1494			      struct kernel_ethtool_coalesce *kernel_coal,
   1495			      struct netlink_ext_ack *extack)
   1496{
   1497	struct axienet_local *lp = netdev_priv(ndev);
   1498
   1499	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
   1500	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
   1501	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
   1502	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
   1503	return 0;
   1504}
   1505
   1506/**
   1507 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
   1508 * @ndev:	Pointer to net_device structure
   1509 * @ecoalesce:	Pointer to ethtool_coalesce structure
   1510 * @kernel_coal: ethtool CQE mode setting structure
   1511 * @extack:	extack for reporting error messages
   1512 *
   1513 * This implements ethtool command for setting the DMA interrupt coalescing
   1514 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
   1515 * prompt to execute this function.
   1516 *
    1517 * Return: 0 on success, non-zero error value on failure.
   1518 */
   1519static int
   1520axienet_ethtools_set_coalesce(struct net_device *ndev,
   1521			      struct ethtool_coalesce *ecoalesce,
   1522			      struct kernel_ethtool_coalesce *kernel_coal,
   1523			      struct netlink_ext_ack *extack)
   1524{
   1525	struct axienet_local *lp = netdev_priv(ndev);
   1526
   1527	if (netif_running(ndev)) {
   1528		netdev_err(ndev,
   1529			   "Please stop netif before applying configuration\n");
   1530		return -EFAULT;
   1531	}
   1532
   1533	if (ecoalesce->rx_max_coalesced_frames)
   1534		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
   1535	if (ecoalesce->rx_coalesce_usecs)
   1536		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
   1537	if (ecoalesce->tx_max_coalesced_frames)
   1538		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
   1539	if (ecoalesce->tx_coalesce_usecs)
   1540		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
   1541
   1542	return 0;
   1543}
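
/* Illustrative usage (the interface name and values are placeholders, not
 * recommendations): the coalescing parameters can only be changed while the
 * interface is down, e.g.:
 *
 *	ip link set ethX down
 *	ethtool -C ethX rx-frames 16 rx-usecs 50 tx-frames 16 tx-usecs 50
 *	ip link set ethX up
 */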
   1544
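/* The link-settings and autonegotiation-restart operations below are thin
 * wrappers that delegate to phylink, which owns the PHY/PCS state for this
 * MAC.
 */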
   1545static int
   1546axienet_ethtools_get_link_ksettings(struct net_device *ndev,
   1547				    struct ethtool_link_ksettings *cmd)
   1548{
   1549	struct axienet_local *lp = netdev_priv(ndev);
   1550
   1551	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
   1552}
   1553
   1554static int
   1555axienet_ethtools_set_link_ksettings(struct net_device *ndev,
   1556				    const struct ethtool_link_ksettings *cmd)
   1557{
   1558	struct axienet_local *lp = netdev_priv(ndev);
   1559
   1560	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
   1561}
   1562
   1563static int axienet_ethtools_nway_reset(struct net_device *dev)
   1564{
   1565	struct axienet_local *lp = netdev_priv(dev);
   1566
   1567	return phylink_ethtool_nway_reset(lp->phylink);
   1568}
   1569
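/* Only the "max frames" and "usecs" coalescing parameters are advertised
 * here, so the ethtool core rejects requests for any other coalescing
 * fields before the callbacks above are invoked.
 */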
   1570static const struct ethtool_ops axienet_ethtool_ops = {
   1571	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
   1572				     ETHTOOL_COALESCE_USECS,
   1573	.get_drvinfo    = axienet_ethtools_get_drvinfo,
   1574	.get_regs_len   = axienet_ethtools_get_regs_len,
   1575	.get_regs       = axienet_ethtools_get_regs,
   1576	.get_link       = ethtool_op_get_link,
   1577	.get_ringparam	= axienet_ethtools_get_ringparam,
   1578	.set_ringparam	= axienet_ethtools_set_ringparam,
   1579	.get_pauseparam = axienet_ethtools_get_pauseparam,
   1580	.set_pauseparam = axienet_ethtools_set_pauseparam,
   1581	.get_coalesce   = axienet_ethtools_get_coalesce,
   1582	.set_coalesce   = axienet_ethtools_set_coalesce,
   1583	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
   1584	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
   1585	.nway_reset	= axienet_ethtools_nway_reset,
   1586};
   1587
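/* struct phylink_pcs is embedded in struct axienet_local, so container_of()
 * recovers the owning device from the pcs pointer that phylink passes to
 * the PCS callbacks below.
 */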
   1588static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
   1589{
   1590	return container_of(pcs, struct axienet_local, pcs);
   1591}
   1592
   1593static void axienet_pcs_get_state(struct phylink_pcs *pcs,
   1594				  struct phylink_link_state *state)
   1595{
   1596	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
   1597
   1598	phylink_mii_c22_pcs_get_state(pcs_phy, state);
   1599}
   1600
   1601static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
   1602{
   1603	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
   1604
   1605	phylink_mii_c22_pcs_an_restart(pcs_phy);
   1606}
   1607
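/* If the core was built with the SGMII/1000BASE-X switching option
 * (xlnx,switch-x-sgmii), select the requested standard through the Xilinx
 * MII standard-select register before handing the clause 22 PCS
 * configuration to phylink.
 */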
   1608static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
   1609			      phy_interface_t interface,
   1610			      const unsigned long *advertising,
   1611			      bool permit_pause_to_mac)
   1612{
   1613	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
   1614	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
   1615	struct axienet_local *lp = netdev_priv(ndev);
   1616	int ret;
   1617
   1618	if (lp->switch_x_sgmii) {
   1619		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
   1620				    interface == PHY_INTERFACE_MODE_SGMII ?
   1621					XLNX_MII_STD_SELECT_SGMII : 0);
   1622		if (ret < 0) {
   1623			netdev_warn(ndev,
   1624				    "Failed to switch PHY interface: %d\n",
   1625				    ret);
   1626			return ret;
   1627		}
   1628	}
   1629
   1630	ret = phylink_mii_c22_pcs_config(pcs_phy, mode, interface, advertising);
   1631	if (ret < 0)
   1632		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
   1633
   1634	return ret;
   1635}
   1636
   1637static const struct phylink_pcs_ops axienet_pcs_ops = {
   1638	.pcs_get_state = axienet_pcs_get_state,
   1639	.pcs_config = axienet_pcs_config,
   1640	.pcs_an_restart = axienet_pcs_an_restart,
   1641};
   1642
   1643static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
   1644						  phy_interface_t interface)
   1645{
   1646	struct net_device *ndev = to_net_dev(config->dev);
   1647	struct axienet_local *lp = netdev_priv(ndev);
   1648
   1649	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
   1650	    interface ==  PHY_INTERFACE_MODE_SGMII)
   1651		return &lp->pcs;
   1652
   1653	return NULL;
   1654}
   1655
   1656static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
   1657			       const struct phylink_link_state *state)
   1658{
   1659	/* nothing meaningful to do */
   1660}
   1661
   1662static void axienet_mac_link_down(struct phylink_config *config,
   1663				  unsigned int mode,
   1664				  phy_interface_t interface)
   1665{
   1666	/* nothing meaningful to do */
   1667}
   1668
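/* Program the resolved link parameters into the MAC: the EMMC register
 * carries the link speed and the FCC register carries the Tx/Rx pause
 * (flow control) enables.
 */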
   1669static void axienet_mac_link_up(struct phylink_config *config,
   1670				struct phy_device *phy,
   1671				unsigned int mode, phy_interface_t interface,
   1672				int speed, int duplex,
   1673				bool tx_pause, bool rx_pause)
   1674{
   1675	struct net_device *ndev = to_net_dev(config->dev);
   1676	struct axienet_local *lp = netdev_priv(ndev);
   1677	u32 emmc_reg, fcc_reg;
   1678
   1679	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
   1680	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
   1681
   1682	switch (speed) {
   1683	case SPEED_1000:
   1684		emmc_reg |= XAE_EMMC_LINKSPD_1000;
   1685		break;
   1686	case SPEED_100:
   1687		emmc_reg |= XAE_EMMC_LINKSPD_100;
   1688		break;
   1689	case SPEED_10:
   1690		emmc_reg |= XAE_EMMC_LINKSPD_10;
   1691		break;
   1692	default:
   1693		dev_err(&ndev->dev,
    1694			"Speed other than 10, 100 or 1000 Mbps is not supported\n");
   1695		break;
   1696	}
   1697
   1698	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
   1699
   1700	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
   1701	if (tx_pause)
   1702		fcc_reg |= XAE_FCC_FCTX_MASK;
   1703	else
   1704		fcc_reg &= ~XAE_FCC_FCTX_MASK;
   1705	if (rx_pause)
   1706		fcc_reg |= XAE_FCC_FCRX_MASK;
   1707	else
   1708		fcc_reg &= ~XAE_FCC_FCRX_MASK;
   1709	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
   1710}
   1711
   1712static const struct phylink_mac_ops axienet_phylink_ops = {
   1713	.validate = phylink_generic_validate,
   1714	.mac_select_pcs = axienet_mac_select_pcs,
   1715	.mac_config = axienet_mac_config,
   1716	.mac_link_down = axienet_mac_link_down,
   1717	.mac_link_up = axienet_mac_link_up,
   1718};
   1719
   1720/**
   1721 * axienet_dma_err_handler - Work queue task for Axi DMA Error
   1722 * @work:	pointer to work_struct
   1723 *
   1724 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
   1725 * Tx/Rx BDs.
   1726 */
   1727static void axienet_dma_err_handler(struct work_struct *work)
   1728{
   1729	u32 i;
   1730	u32 axienet_status;
   1731	struct axidma_bd *cur_p;
   1732	struct axienet_local *lp = container_of(work, struct axienet_local,
   1733						dma_err_task);
   1734	struct net_device *ndev = lp->ndev;
   1735
   1736	napi_disable(&lp->napi_tx);
   1737	napi_disable(&lp->napi_rx);
   1738
   1739	axienet_setoptions(ndev, lp->options &
   1740			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
   1741
   1742	axienet_dma_stop(lp);
   1743
   1744	for (i = 0; i < lp->tx_bd_num; i++) {
   1745		cur_p = &lp->tx_bd_v[i];
   1746		if (cur_p->cntrl) {
   1747			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
   1748
   1749			dma_unmap_single(lp->dev, addr,
   1750					 (cur_p->cntrl &
   1751					  XAXIDMA_BD_CTRL_LENGTH_MASK),
   1752					 DMA_TO_DEVICE);
   1753		}
   1754		if (cur_p->skb)
   1755			dev_kfree_skb_irq(cur_p->skb);
   1756		cur_p->phys = 0;
   1757		cur_p->phys_msb = 0;
   1758		cur_p->cntrl = 0;
   1759		cur_p->status = 0;
   1760		cur_p->app0 = 0;
   1761		cur_p->app1 = 0;
   1762		cur_p->app2 = 0;
   1763		cur_p->app3 = 0;
   1764		cur_p->app4 = 0;
   1765		cur_p->skb = NULL;
   1766	}
   1767
   1768	for (i = 0; i < lp->rx_bd_num; i++) {
   1769		cur_p = &lp->rx_bd_v[i];
   1770		cur_p->status = 0;
   1771		cur_p->app0 = 0;
   1772		cur_p->app1 = 0;
   1773		cur_p->app2 = 0;
   1774		cur_p->app3 = 0;
   1775		cur_p->app4 = 0;
   1776	}
   1777
   1778	lp->tx_bd_ci = 0;
   1779	lp->tx_bd_tail = 0;
   1780	lp->rx_bd_ci = 0;
   1781
   1782	axienet_dma_start(lp);
   1783
   1784	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
   1785	axienet_status &= ~XAE_RCW1_RX_MASK;
   1786	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
   1787
   1788	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
   1789	if (axienet_status & XAE_INT_RXRJECT_MASK)
   1790		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
   1791	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
   1792		    XAE_INT_RECV_ERROR_MASK : 0);
   1793	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
   1794
   1795	/* Sync default options with HW but leave receiver and
   1796	 * transmitter disabled.
   1797	 */
   1798	axienet_setoptions(ndev, lp->options &
   1799			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
   1800	axienet_set_mac_address(ndev, NULL);
   1801	axienet_set_multicast_list(ndev);
   1802	axienet_setoptions(ndev, lp->options);
   1803	napi_enable(&lp->napi_rx);
   1804	napi_enable(&lp->napi_tx);
   1805}
   1806
   1807/**
   1808 * axienet_probe - Axi Ethernet probe function.
   1809 * @pdev:	Pointer to platform device structure.
   1810 *
    1811 * Return: 0 on success,
    1812 *	    non-zero error value on failure.
    1813 *
    1814 * This is the probe routine for the Axi Ethernet driver. It is called before
    1815 * any other driver routine is invoked. It allocates and sets up the Ethernet
    1816 * device, parses the device tree to populate the fields of axienet_local,
    1817 * and registers the Ethernet device.
   1818 */
   1819static int axienet_probe(struct platform_device *pdev)
   1820{
   1821	int ret;
   1822	struct device_node *np;
   1823	struct axienet_local *lp;
   1824	struct net_device *ndev;
   1825	struct resource *ethres;
   1826	u8 mac_addr[ETH_ALEN];
   1827	int addr_width = 32;
   1828	u32 value;
   1829
   1830	ndev = alloc_etherdev(sizeof(*lp));
   1831	if (!ndev)
   1832		return -ENOMEM;
   1833
   1834	platform_set_drvdata(pdev, ndev);
   1835
   1836	SET_NETDEV_DEV(ndev, &pdev->dev);
   1837	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
   1838	ndev->features = NETIF_F_SG;
   1839	ndev->netdev_ops = &axienet_netdev_ops;
   1840	ndev->ethtool_ops = &axienet_ethtool_ops;
   1841
   1842	/* MTU range: 64 - 9000 */
   1843	ndev->min_mtu = 64;
   1844	ndev->max_mtu = XAE_JUMBO_MTU;
   1845
   1846	lp = netdev_priv(ndev);
   1847	lp->ndev = ndev;
   1848	lp->dev = &pdev->dev;
   1849	lp->options = XAE_OPTION_DEFAULTS;
   1850	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
   1851	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
   1852
   1853	netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll, NAPI_POLL_WEIGHT);
   1854	netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll, NAPI_POLL_WEIGHT);
   1855
   1856	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
   1857	if (!lp->axi_clk) {
   1858		/* For backward compatibility, if named AXI clock is not present,
   1859		 * treat the first clock specified as the AXI clock.
   1860		 */
   1861		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
   1862	}
   1863	if (IS_ERR(lp->axi_clk)) {
   1864		ret = PTR_ERR(lp->axi_clk);
   1865		goto free_netdev;
   1866	}
   1867	ret = clk_prepare_enable(lp->axi_clk);
   1868	if (ret) {
   1869		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
   1870		goto free_netdev;
   1871	}
   1872
   1873	lp->misc_clks[0].id = "axis_clk";
   1874	lp->misc_clks[1].id = "ref_clk";
   1875	lp->misc_clks[2].id = "mgt_clk";
   1876
   1877	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
   1878	if (ret)
   1879		goto cleanup_clk;
   1880
   1881	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
   1882	if (ret)
   1883		goto cleanup_clk;
   1884
   1885	/* Map device registers */
   1886	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
   1887	if (IS_ERR(lp->regs)) {
   1888		ret = PTR_ERR(lp->regs);
   1889		goto cleanup_clk;
   1890	}
   1891	lp->regs_start = ethres->start;
   1892
   1893	/* Setup checksum offload, but default to off if not specified */
   1894	lp->features = 0;
   1895
   1896	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
   1897	if (!ret) {
   1898		switch (value) {
   1899		case 1:
   1900			lp->csum_offload_on_tx_path =
   1901				XAE_FEATURE_PARTIAL_TX_CSUM;
   1902			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
   1903			/* Can checksum TCP/UDP over IPv4. */
   1904			ndev->features |= NETIF_F_IP_CSUM;
   1905			break;
   1906		case 2:
   1907			lp->csum_offload_on_tx_path =
   1908				XAE_FEATURE_FULL_TX_CSUM;
   1909			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
   1910			/* Can checksum TCP/UDP over IPv4. */
   1911			ndev->features |= NETIF_F_IP_CSUM;
   1912			break;
   1913		default:
   1914			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
   1915		}
   1916	}
   1917	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
   1918	if (!ret) {
   1919		switch (value) {
   1920		case 1:
   1921			lp->csum_offload_on_rx_path =
   1922				XAE_FEATURE_PARTIAL_RX_CSUM;
   1923			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
   1924			break;
   1925		case 2:
   1926			lp->csum_offload_on_rx_path =
   1927				XAE_FEATURE_FULL_RX_CSUM;
   1928			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
   1929			break;
   1930		default:
   1931			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
   1932		}
   1933	}
    1934	/* To support jumbo frames, the Axi Ethernet hardware must be
    1935	 * configured with sufficiently large Rx/Tx memory. Read the amount
    1936	 * of Rx memory allocated in the hardware from the device tree and
    1937	 * set the driver state accordingly, so that the jumbo option can
    1938	 * be enabled when possible.
    1939	 */
   1940	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
   1941
   1942	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
   1943						   "xlnx,switch-x-sgmii");
   1944
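	/* Illustrative device-tree fragment for the properties parsed above
	 * (node name and values are placeholders, not taken from a real
	 * design):
	 *
	 *	ethernet@40c00000 {
	 *		...
	 *		xlnx,txcsum = <0x2>;	// full Tx checksum offload
	 *		xlnx,rxcsum = <0x2>;	// full Rx checksum offload
	 *		xlnx,rxmem = <0x8000>;	// Rx buffer memory in the core
	 *	};
	 */
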
    1945	/* Start with the proprietary (and broken) phy_type property */
   1946	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
   1947	if (!ret) {
   1948		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
   1949		switch (value) {
   1950		case XAE_PHY_TYPE_MII:
   1951			lp->phy_mode = PHY_INTERFACE_MODE_MII;
   1952			break;
   1953		case XAE_PHY_TYPE_GMII:
   1954			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
   1955			break;
   1956		case XAE_PHY_TYPE_RGMII_2_0:
   1957			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
   1958			break;
   1959		case XAE_PHY_TYPE_SGMII:
   1960			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
   1961			break;
   1962		case XAE_PHY_TYPE_1000BASE_X:
   1963			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
   1964			break;
   1965		default:
   1966			ret = -EINVAL;
   1967			goto cleanup_clk;
   1968		}
   1969	} else {
   1970		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
   1971		if (ret)
   1972			goto cleanup_clk;
   1973	}
   1974	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
   1975	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
   1976		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
   1977		ret = -EINVAL;
   1978		goto cleanup_clk;
   1979	}
   1980
   1981	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
   1982	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
   1983	if (np) {
   1984		struct resource dmares;
   1985
   1986		ret = of_address_to_resource(np, 0, &dmares);
   1987		if (ret) {
   1988			dev_err(&pdev->dev,
   1989				"unable to get DMA resource\n");
   1990			of_node_put(np);
   1991			goto cleanup_clk;
   1992		}
   1993		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
   1994						     &dmares);
   1995		lp->rx_irq = irq_of_parse_and_map(np, 1);
   1996		lp->tx_irq = irq_of_parse_and_map(np, 0);
   1997		of_node_put(np);
   1998		lp->eth_irq = platform_get_irq_optional(pdev, 0);
   1999	} else {
   2000		/* Check for these resources directly on the Ethernet node. */
   2001		lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
   2002		lp->rx_irq = platform_get_irq(pdev, 1);
   2003		lp->tx_irq = platform_get_irq(pdev, 0);
   2004		lp->eth_irq = platform_get_irq_optional(pdev, 2);
   2005	}
   2006	if (IS_ERR(lp->dma_regs)) {
   2007		dev_err(&pdev->dev, "could not map DMA regs\n");
   2008		ret = PTR_ERR(lp->dma_regs);
   2009		goto cleanup_clk;
   2010	}
   2011	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
   2012		dev_err(&pdev->dev, "could not determine irqs\n");
   2013		ret = -ENOMEM;
   2014		goto cleanup_clk;
   2015	}
   2016
   2017	/* Autodetect the need for 64-bit DMA pointers.
   2018	 * When the IP is configured for a bus width bigger than 32 bits,
   2019	 * writing the MSB registers is mandatory, even if they are all 0.
   2020	 * We can detect this case by writing all 1's to one such register
    2021	 * and seeing if the value sticks: when the IP is configured for
    2022	 * 32 bits only, those registers are RES0.
   2023	 * Those MSB registers were introduced in IP v7.1, which we check first.
   2024	 */
   2025	if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
   2026		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
   2027
   2028		iowrite32(0x0, desc);
   2029		if (ioread32(desc) == 0) {	/* sanity check */
   2030			iowrite32(0xffffffff, desc);
   2031			if (ioread32(desc) > 0) {
   2032				lp->features |= XAE_FEATURE_DMA_64BIT;
   2033				addr_width = 64;
   2034				dev_info(&pdev->dev,
   2035					 "autodetected 64-bit DMA range\n");
   2036			}
   2037			iowrite32(0x0, desc);
   2038		}
   2039	}
   2040	if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
    2041		dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
   2042		ret = -EINVAL;
   2043		goto cleanup_clk;
   2044	}
   2045
   2046	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
   2047	if (ret) {
   2048		dev_err(&pdev->dev, "No suitable DMA available\n");
   2049		goto cleanup_clk;
   2050	}
   2051
   2052	/* Check for Ethernet core IRQ (optional) */
   2053	if (lp->eth_irq <= 0)
   2054		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
   2055
   2056	/* Retrieve the MAC address */
   2057	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
   2058	if (!ret) {
   2059		axienet_set_mac_address(ndev, mac_addr);
   2060	} else {
   2061		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
   2062			 ret);
   2063		axienet_set_mac_address(ndev, NULL);
   2064	}
   2065
   2066	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
   2067	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
   2068	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
   2069	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
   2070
   2071	/* Reset core now that clocks are enabled, prior to accessing MDIO */
   2072	ret = __axienet_device_reset(lp);
   2073	if (ret)
   2074		goto cleanup_clk;
   2075
   2076	ret = axienet_mdio_setup(lp);
   2077	if (ret)
   2078		dev_warn(&pdev->dev,
   2079			 "error registering MDIO bus: %d\n", ret);
   2080
   2081	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
   2082	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
   2083		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
   2084		if (!np) {
   2085			/* Deprecated: Always use "pcs-handle" for pcs_phy.
   2086			 * Falling back to "phy-handle" here is only for
   2087			 * backward compatibility with old device trees.
   2088			 */
   2089			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
   2090		}
   2091		if (!np) {
   2092			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
   2093			ret = -EINVAL;
   2094			goto cleanup_mdio;
   2095		}
   2096		lp->pcs_phy = of_mdio_find_device(np);
   2097		if (!lp->pcs_phy) {
   2098			ret = -EPROBE_DEFER;
   2099			of_node_put(np);
   2100			goto cleanup_mdio;
   2101		}
   2102		of_node_put(np);
   2103		lp->pcs.ops = &axienet_pcs_ops;
   2104		lp->pcs.poll = true;
   2105	}
   2106
   2107	lp->phylink_config.dev = &ndev->dev;
   2108	lp->phylink_config.type = PHYLINK_NETDEV;
   2109	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
   2110		MAC_10FD | MAC_100FD | MAC_1000FD;
   2111
   2112	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
   2113	if (lp->switch_x_sgmii) {
   2114		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
   2115			  lp->phylink_config.supported_interfaces);
   2116		__set_bit(PHY_INTERFACE_MODE_SGMII,
   2117			  lp->phylink_config.supported_interfaces);
   2118	}
   2119
   2120	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
   2121				     lp->phy_mode,
   2122				     &axienet_phylink_ops);
   2123	if (IS_ERR(lp->phylink)) {
   2124		ret = PTR_ERR(lp->phylink);
   2125		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
   2126		goto cleanup_mdio;
   2127	}
   2128
   2129	ret = register_netdev(lp->ndev);
   2130	if (ret) {
   2131		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
   2132		goto cleanup_phylink;
   2133	}
   2134
   2135	return 0;
   2136
   2137cleanup_phylink:
   2138	phylink_destroy(lp->phylink);
   2139
   2140cleanup_mdio:
   2141	if (lp->pcs_phy)
   2142		put_device(&lp->pcs_phy->dev);
   2143	if (lp->mii_bus)
   2144		axienet_mdio_teardown(lp);
   2145cleanup_clk:
   2146	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
   2147	clk_disable_unprepare(lp->axi_clk);
   2148
   2149free_netdev:
   2150	free_netdev(ndev);
   2151
   2152	return ret;
   2153}
   2154
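/**
 * axienet_remove - Axi Ethernet remove function.
 * @pdev:	Pointer to platform device structure.
 *
 * Undoes what axienet_probe() set up: unregisters the network device,
 * destroys the phylink instance, drops the PCS mdio device reference,
 * tears down the MDIO bus, disables the clocks and frees the net_device.
 *
 * Return: 0 always.
 */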
   2155static int axienet_remove(struct platform_device *pdev)
   2156{
   2157	struct net_device *ndev = platform_get_drvdata(pdev);
   2158	struct axienet_local *lp = netdev_priv(ndev);
   2159
   2160	unregister_netdev(ndev);
   2161
   2162	if (lp->phylink)
   2163		phylink_destroy(lp->phylink);
   2164
   2165	if (lp->pcs_phy)
   2166		put_device(&lp->pcs_phy->dev);
   2167
   2168	axienet_mdio_teardown(lp);
   2169
   2170	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
   2171	clk_disable_unprepare(lp->axi_clk);
   2172
   2173	free_netdev(ndev);
   2174
   2175	return 0;
   2176}
   2177
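/**
 * axienet_shutdown - Axi Ethernet shutdown function.
 * @pdev:	Pointer to platform device structure.
 *
 * Detaches the network device and, if the interface was running, closes it
 * under the rtnl lock so the hardware is quiesced at system shutdown.
 */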
   2178static void axienet_shutdown(struct platform_device *pdev)
   2179{
   2180	struct net_device *ndev = platform_get_drvdata(pdev);
   2181
   2182	rtnl_lock();
   2183	netif_device_detach(ndev);
   2184
   2185	if (netif_running(ndev))
   2186		dev_close(ndev);
   2187
   2188	rtnl_unlock();
   2189}
   2190
   2191static struct platform_driver axienet_driver = {
   2192	.probe = axienet_probe,
   2193	.remove = axienet_remove,
   2194	.shutdown = axienet_shutdown,
   2195	.driver = {
   2196		 .name = "xilinx_axienet",
   2197		 .of_match_table = axienet_of_match,
   2198	},
   2199};
   2200
   2201module_platform_driver(axienet_driver);
   2202
   2203MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
   2204MODULE_AUTHOR("Xilinx");
   2205MODULE_LICENSE("GPL");