cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

fs_enet-main.c (26256B)


      1/*
      2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
      3 *
      4 * Copyright (c) 2003 Intracom S.A.
      5 *  by Pantelis Antoniou <panto@intracom.gr>
      6 *
      7 * 2005 (c) MontaVista Software, Inc.
      8 * Vitaly Bordug <vbordug@ru.mvista.com>
      9 *
     10 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
     11 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
     12 *
     13 * This file is licensed under the terms of the GNU General Public License
     14 * version 2. This program is licensed "as is" without any warranty of any
     15 * kind, whether express or implied.
     16 */
     17
     18#include <linux/module.h>
     19#include <linux/kernel.h>
     20#include <linux/types.h>
     21#include <linux/string.h>
     22#include <linux/ptrace.h>
     23#include <linux/errno.h>
     24#include <linux/ioport.h>
     25#include <linux/slab.h>
     26#include <linux/interrupt.h>
     27#include <linux/delay.h>
     28#include <linux/netdevice.h>
     29#include <linux/etherdevice.h>
     30#include <linux/skbuff.h>
     31#include <linux/spinlock.h>
     32#include <linux/mii.h>
     33#include <linux/ethtool.h>
     34#include <linux/bitops.h>
     35#include <linux/fs.h>
     36#include <linux/platform_device.h>
     37#include <linux/phy.h>
     38#include <linux/of.h>
     39#include <linux/of_mdio.h>
     40#include <linux/of_platform.h>
     41#include <linux/of_gpio.h>
     42#include <linux/of_net.h>
     43#include <linux/pgtable.h>
     44
     45#include <linux/vmalloc.h>
     46#include <asm/irq.h>
     47#include <linux/uaccess.h>
     48
     49#include "fs_enet.h"
     50
     51/*************************************************/
     52
     53MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
     54MODULE_DESCRIPTION("Freescale Ethernet Driver");
     55MODULE_LICENSE("GPL");
     56
     57static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
     58module_param(fs_enet_debug, int, 0);
     59MODULE_PARM_DESC(fs_enet_debug,
     60		 "Freescale bitmapped debugging message enable value");
     61
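/*
 * Editor's sketch (not part of the original file): a bitmapped debug
 * module parameter such as fs_enet_debug is conventionally folded into
 * a per-device msg_enable mask with netif_msg_init() from
 * <linux/netdevice.h>.  Where exactly this driver does that is not
 * visible in this file, so the helper below (and its name) is purely
 * illustrative; FS_ENET_DEF_MSG_ENABLE is assumed to come from fs_enet.h.
 */
static void fs_enet_example_init_msglevel(struct fs_enet_private *fep)
{
	/* -1 (all bits set) selects the driver default mask */
	fep->msg_enable = netif_msg_init(fs_enet_debug, FS_ENET_DEF_MSG_ENABLE);
}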
     62#define RX_RING_SIZE	32
     63#define TX_RING_SIZE	64
     64
     65#ifdef CONFIG_NET_POLL_CONTROLLER
     66static void fs_enet_netpoll(struct net_device *dev);
     67#endif
     68
     69static void fs_set_multicast_list(struct net_device *dev)
     70{
     71	struct fs_enet_private *fep = netdev_priv(dev);
     72
     73	(*fep->ops->set_multicast_list)(dev);
     74}
     75
     76static void skb_align(struct sk_buff *skb, int align)
     77{
     78	int off = ((unsigned long)skb->data) & (align - 1);
     79
     80	if (off)
     81		skb_reserve(skb, align - off);
     82}
     83
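/*
 * Worked example (editor's addition): skb_align() reserves "align - off"
 * bytes so that skb->data ends up on an "align"-byte boundary.  The small
 * stand-alone program below replays the same arithmetic on a raw address
 * for an ENET_RX_ALIGN-style alignment (16 bytes is only an assumed,
 * illustrative value); it is not meant to be built with the driver.
 */
#if 0	/* user-space illustration only */
#include <stdio.h>

int main(void)
{
	unsigned long data = 0x1003;		/* pretend skb->data */
	unsigned int align = 16;		/* e.g. a 16-byte RX alignment */
	unsigned int off = data & (align - 1);	/* 0x1003 & 0xf == 0x3 */

	if (off)
		data += align - off;		/* what skb_reserve() does */

	printf("aligned address: 0x%lx\n", data);	/* prints 0x1010 */
	return 0;
}
#endif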
     84/* NAPI function */
     85static int fs_enet_napi(struct napi_struct *napi, int budget)
     86{
     87	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
     88	struct net_device *dev = fep->ndev;
     89	const struct fs_platform_info *fpi = fep->fpi;
     90	cbd_t __iomem *bdp;
     91	struct sk_buff *skb, *skbn;
     92	int received = 0;
     93	u16 pkt_len, sc;
     94	int curidx;
     95	int dirtyidx, do_wake, do_restart;
     96	int tx_left = TX_RING_SIZE;
     97
     98	spin_lock(&fep->tx_lock);
     99	bdp = fep->dirty_tx;
    100
     101	/* clear status bits for NAPI */
    102	(*fep->ops->napi_clear_event)(dev);
    103
    104	do_wake = do_restart = 0;
    105	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0 && tx_left) {
    106		dirtyidx = bdp - fep->tx_bd_base;
    107
    108		if (fep->tx_free == fep->tx_ring)
    109			break;
    110
    111		skb = fep->tx_skbuff[dirtyidx];
    112
    113		/*
    114		 * Check for errors.
    115		 */
    116		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
    117			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
    118
    119			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
    120				dev->stats.tx_heartbeat_errors++;
    121			if (sc & BD_ENET_TX_LC)	/* Late collision */
    122				dev->stats.tx_window_errors++;
    123			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
    124				dev->stats.tx_aborted_errors++;
    125			if (sc & BD_ENET_TX_UN)	/* Underrun */
    126				dev->stats.tx_fifo_errors++;
    127			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
    128				dev->stats.tx_carrier_errors++;
    129
    130			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
    131				dev->stats.tx_errors++;
    132				do_restart = 1;
    133			}
    134		} else
    135			dev->stats.tx_packets++;
    136
    137		if (sc & BD_ENET_TX_READY) {
    138			dev_warn(fep->dev,
    139				 "HEY! Enet xmit interrupt and TX_READY.\n");
    140		}
    141
    142		/*
    143		 * Deferred means some collisions occurred during transmit,
    144		 * but we eventually sent the packet OK.
    145		 */
    146		if (sc & BD_ENET_TX_DEF)
    147			dev->stats.collisions++;
    148
    149		/* unmap */
    150		if (fep->mapped_as_page[dirtyidx])
    151			dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
    152				       CBDR_DATLEN(bdp), DMA_TO_DEVICE);
    153		else
    154			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
    155					 CBDR_DATLEN(bdp), DMA_TO_DEVICE);
    156
    157		/*
    158		 * Free the sk buffer associated with this last transmit.
    159		 */
    160		if (skb) {
    161			dev_kfree_skb(skb);
    162			fep->tx_skbuff[dirtyidx] = NULL;
    163		}
    164
    165		/*
    166		 * Update pointer to next buffer descriptor to be transmitted.
    167		 */
    168		if ((sc & BD_ENET_TX_WRAP) == 0)
    169			bdp++;
    170		else
    171			bdp = fep->tx_bd_base;
    172
    173		/*
    174		 * Since we have freed up a buffer, the ring is no longer
    175		 * full.
    176		 */
    177		if (++fep->tx_free == MAX_SKB_FRAGS)
    178			do_wake = 1;
    179		tx_left--;
    180	}
    181
    182	fep->dirty_tx = bdp;
    183
    184	if (do_restart)
    185		(*fep->ops->tx_restart)(dev);
    186
    187	spin_unlock(&fep->tx_lock);
    188
    189	if (do_wake)
    190		netif_wake_queue(dev);
    191
    192	/*
    193	 * First, grab all of the stats for the incoming packet.
    194	 * These get messed up if we get called due to a busy condition.
    195	 */
    196	bdp = fep->cur_rx;
    197
    198	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0 &&
    199	       received < budget) {
    200		curidx = bdp - fep->rx_bd_base;
    201
    202		/*
    203		 * Since we have allocated space to hold a complete frame,
    204		 * the last indicator should be set.
    205		 */
    206		if ((sc & BD_ENET_RX_LAST) == 0)
    207			dev_warn(fep->dev, "rcv is not +last\n");
    208
    209		/*
    210		 * Check for errors.
    211		 */
    212		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
    213			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
    214			dev->stats.rx_errors++;
    215			/* Frame too long or too short. */
    216			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
    217				dev->stats.rx_length_errors++;
    218			/* Frame alignment */
    219			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
    220				dev->stats.rx_frame_errors++;
    221			/* CRC Error */
    222			if (sc & BD_ENET_RX_CR)
    223				dev->stats.rx_crc_errors++;
    224			/* FIFO overrun */
    225			if (sc & BD_ENET_RX_OV)
    226				dev->stats.rx_crc_errors++;
    227
    228			skbn = fep->rx_skbuff[curidx];
    229		} else {
    230			skb = fep->rx_skbuff[curidx];
    231
    232			/*
    233			 * Process the incoming frame.
    234			 */
    235			dev->stats.rx_packets++;
    236			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
    237			dev->stats.rx_bytes += pkt_len + 4;
    238
    239			if (pkt_len <= fpi->rx_copybreak) {
    240				/* +2 to make IP header L1 cache aligned */
    241				skbn = netdev_alloc_skb(dev, pkt_len + 2);
    242				if (skbn != NULL) {
    243					skb_reserve(skbn, 2);	/* align IP header */
    244					skb_copy_from_linear_data(skb,
    245						      skbn->data, pkt_len);
    246					swap(skb, skbn);
    247					dma_sync_single_for_cpu(fep->dev,
    248						CBDR_BUFADDR(bdp),
    249						L1_CACHE_ALIGN(pkt_len),
    250						DMA_FROM_DEVICE);
    251				}
    252			} else {
    253				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
    254
    255				if (skbn) {
    256					dma_addr_t dma;
    257
    258					skb_align(skbn, ENET_RX_ALIGN);
    259
    260					dma_unmap_single(fep->dev,
    261						CBDR_BUFADDR(bdp),
    262						L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
    263						DMA_FROM_DEVICE);
    264
    265					dma = dma_map_single(fep->dev,
    266						skbn->data,
    267						L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
    268						DMA_FROM_DEVICE);
    269					CBDW_BUFADDR(bdp, dma);
    270				}
    271			}
    272
    273			if (skbn != NULL) {
    274				skb_put(skb, pkt_len);	/* Make room */
    275				skb->protocol = eth_type_trans(skb, dev);
    276				received++;
    277				netif_receive_skb(skb);
    278			} else {
    279				dev->stats.rx_dropped++;
    280				skbn = skb;
    281			}
    282		}
    283
    284		fep->rx_skbuff[curidx] = skbn;
    285		CBDW_DATLEN(bdp, 0);
    286		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
    287
    288		/*
    289		 * Update BD pointer to next entry.
    290		 */
    291		if ((sc & BD_ENET_RX_WRAP) == 0)
    292			bdp++;
    293		else
    294			bdp = fep->rx_bd_base;
    295
    296		(*fep->ops->rx_bd_done)(dev);
    297	}
    298
    299	fep->cur_rx = bdp;
    300
    301	if (received < budget && tx_left) {
    302		/* done */
    303		napi_complete_done(napi, received);
    304		(*fep->ops->napi_enable)(dev);
    305
    306		return received;
    307	}
    308
    309	return budget;
    310}
    311
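/*
 * Editor's sketch of the NAPI contract that fs_enet_napi() implements:
 * a poll callback may consume at most "budget" packets; only when it
 * used less than the budget (and, for this driver, finished the TX
 * cleanup) does it call napi_complete_done() and re-enable the device
 * interrupts, otherwise it returns "budget" so the core keeps polling.
 * The function and names below are hypothetical, not driver code.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... clean the TX ring, receive up to "budget" frames ... */

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable the RX/TX interrupt sources here */
	}

	return work_done;
}

/* Registered from probe in the same way as fs_enet_napi(), e.g.:
 *	netif_napi_add_weight(ndev, &fep->napi, example_napi_poll,
 *			      fpi->napi_weight);
 */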
    312/*
    313 * The interrupt handler.
    314 * This is called from the MPC core interrupt.
    315 */
    316static irqreturn_t
    317fs_enet_interrupt(int irq, void *dev_id)
    318{
    319	struct net_device *dev = dev_id;
    320	struct fs_enet_private *fep;
    321	const struct fs_platform_info *fpi;
    322	u32 int_events;
    323	u32 int_clr_events;
    324	int nr, napi_ok;
    325	int handled;
    326
    327	fep = netdev_priv(dev);
    328	fpi = fep->fpi;
    329
    330	nr = 0;
    331	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
    332		nr++;
    333
    334		int_clr_events = int_events;
    335		int_clr_events &= ~fep->ev_napi;
    336
    337		(*fep->ops->clear_int_events)(dev, int_clr_events);
    338
    339		if (int_events & fep->ev_err)
    340			(*fep->ops->ev_error)(dev, int_events);
    341
    342		if (int_events & fep->ev) {
    343			napi_ok = napi_schedule_prep(&fep->napi);
    344
    345			(*fep->ops->napi_disable)(dev);
    346			(*fep->ops->clear_int_events)(dev, fep->ev_napi);
    347
    348			/* NOTE: it is possible for FCCs in NAPI mode    */
    349			/* to submit a spurious interrupt while in poll  */
    350			if (napi_ok)
    351				__napi_schedule(&fep->napi);
    352		}
    353
    354	}
    355
    356	handled = nr > 0;
    357	return IRQ_RETVAL(handled);
    358}
    359
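/*
 * Editor's sketch (hypothetical, simplified): fs_enet_interrupt() uses
 * the usual "mask the NAPI-handled events, then schedule the poller"
 * handshake.  napi_schedule_prep() atomically marks the NAPI instance
 * as scheduled and returns false if it already was, which is what
 * protects against the spurious FCC interrupts mentioned above;
 * __napi_schedule() then queues it for softirq polling.
 */
static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep = netdev_priv(dev);

	if (napi_schedule_prep(&fep->napi)) {
		/* mask/ack the NAPI-handled interrupt sources first ... */
		__napi_schedule(&fep->napi);
	}

	return IRQ_HANDLED;
}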
    360void fs_init_bds(struct net_device *dev)
    361{
    362	struct fs_enet_private *fep = netdev_priv(dev);
    363	cbd_t __iomem *bdp;
    364	struct sk_buff *skb;
    365	int i;
    366
    367	fs_cleanup_bds(dev);
    368
    369	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
    370	fep->tx_free = fep->tx_ring;
    371	fep->cur_rx = fep->rx_bd_base;
    372
    373	/*
    374	 * Initialize the receive buffer descriptors.
    375	 */
    376	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
    377		skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
    378		if (skb == NULL)
    379			break;
    380
    381		skb_align(skb, ENET_RX_ALIGN);
    382		fep->rx_skbuff[i] = skb;
    383		CBDW_BUFADDR(bdp,
    384			dma_map_single(fep->dev, skb->data,
    385				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
    386				DMA_FROM_DEVICE));
    387		CBDW_DATLEN(bdp, 0);	/* zero */
    388		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
    389			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
    390	}
    391	/*
     392	 * if we failed, fill up the remainder
    393	 */
    394	for (; i < fep->rx_ring; i++, bdp++) {
    395		fep->rx_skbuff[i] = NULL;
    396		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
    397	}
    398
    399	/*
    400	 * ...and the same for transmit.
    401	 */
    402	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
    403		fep->tx_skbuff[i] = NULL;
    404		CBDW_BUFADDR(bdp, 0);
    405		CBDW_DATLEN(bdp, 0);
    406		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
    407	}
    408}
    409
    410void fs_cleanup_bds(struct net_device *dev)
    411{
    412	struct fs_enet_private *fep = netdev_priv(dev);
    413	struct sk_buff *skb;
    414	cbd_t __iomem *bdp;
    415	int i;
    416
    417	/*
    418	 * Reset SKB transmit buffers.
    419	 */
    420	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
    421		if ((skb = fep->tx_skbuff[i]) == NULL)
    422			continue;
    423
    424		/* unmap */
    425		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
    426				skb->len, DMA_TO_DEVICE);
    427
    428		fep->tx_skbuff[i] = NULL;
    429		dev_kfree_skb(skb);
    430	}
    431
    432	/*
    433	 * Reset SKB receive buffers
    434	 */
    435	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
    436		if ((skb = fep->rx_skbuff[i]) == NULL)
    437			continue;
    438
    439		/* unmap */
    440		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
    441			L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
    442			DMA_FROM_DEVICE);
    443
    444		fep->rx_skbuff[i] = NULL;
    445
    446		dev_kfree_skb(skb);
    447	}
    448}
    449
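/*
 * Editor's sketch: fs_init_bds() maps each RX buffer for the device with
 * dma_map_single(..., DMA_FROM_DEVICE), and fs_cleanup_bds() (as well as
 * the refill path in fs_enet_napi()) unmaps it with the same length.
 * The canonical form of such a mapping also checks dma_mapping_error();
 * whether that matters on the platforms this driver targets is not
 * decided here, so treat the helper below (and its name) as an
 * illustrative assumption rather than driver code.
 */
static int example_map_rx_skb(struct fs_enet_private *fep,
			      struct sk_buff *skb, cbd_t __iomem *bdp)
{
	dma_addr_t dma;

	dma = dma_map_single(fep->dev, skb->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), DMA_FROM_DEVICE);
	if (dma_mapping_error(fep->dev, dma))
		return -ENOMEM;

	CBDW_BUFADDR(bdp, dma);
	CBDW_DATLEN(bdp, 0);
	return 0;
}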
    450/**********************************************************************************/
    451
    452#ifdef CONFIG_FS_ENET_MPC5121_FEC
    453/*
     454 * The MPC5121 FEC requires 4-byte alignment for the TX data buffer!
    455 */
    456static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
    457					       struct sk_buff *skb)
    458{
    459	struct sk_buff *new_skb;
    460
    461	if (skb_linearize(skb))
    462		return NULL;
    463
    464	/* Alloc new skb */
    465	new_skb = netdev_alloc_skb(dev, skb->len + 4);
    466	if (!new_skb)
    467		return NULL;
    468
    469	/* Make sure new skb is properly aligned */
    470	skb_align(new_skb, 4);
    471
    472	/* Copy data to new skb ... */
    473	skb_copy_from_linear_data(skb, new_skb->data, skb->len);
    474	skb_put(new_skb, skb->len);
    475
    476	/* ... and free an old one */
    477	dev_kfree_skb_any(skb);
    478
    479	return new_skb;
    480}
    481#endif
    482
    483static netdev_tx_t
    484fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
    485{
    486	struct fs_enet_private *fep = netdev_priv(dev);
    487	cbd_t __iomem *bdp;
    488	int curidx;
    489	u16 sc;
    490	int nr_frags;
    491	skb_frag_t *frag;
    492	int len;
    493#ifdef CONFIG_FS_ENET_MPC5121_FEC
    494	int is_aligned = 1;
    495	int i;
    496
    497	if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
    498		is_aligned = 0;
    499	} else {
    500		nr_frags = skb_shinfo(skb)->nr_frags;
    501		frag = skb_shinfo(skb)->frags;
    502		for (i = 0; i < nr_frags; i++, frag++) {
    503			if (!IS_ALIGNED(skb_frag_off(frag), 4)) {
    504				is_aligned = 0;
    505				break;
    506			}
    507		}
    508	}
    509
    510	if (!is_aligned) {
    511		skb = tx_skb_align_workaround(dev, skb);
    512		if (!skb) {
    513			/*
     514			 * We could not transmit this packet due to a memory
     515			 * allocation error in tx_skb_align_workaround(). Hopefully
     516			 * the original skb is still valid, so try to transmit it later.
    517			 */
    518			return NETDEV_TX_BUSY;
    519		}
    520	}
    521#endif
    522
    523	spin_lock(&fep->tx_lock);
    524
    525	/*
    526	 * Fill in a Tx ring entry
    527	 */
    528	bdp = fep->cur_tx;
    529
    530	nr_frags = skb_shinfo(skb)->nr_frags;
    531	if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
    532		netif_stop_queue(dev);
    533		spin_unlock(&fep->tx_lock);
    534
    535		/*
    536		 * Ooops.  All transmit buffers are full.  Bail out.
    537		 * This should not happen, since the tx queue should be stopped.
    538		 */
     539		dev_warn(fep->dev, "tx queue full!\n");
    540		return NETDEV_TX_BUSY;
    541	}
    542
    543	curidx = bdp - fep->tx_bd_base;
    544
    545	len = skb->len;
    546	dev->stats.tx_bytes += len;
    547	if (nr_frags)
    548		len -= skb->data_len;
    549	fep->tx_free -= nr_frags + 1;
    550	/*
    551	 * Push the data cache so the CPM does not get stale memory data.
    552	 */
    553	CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
    554				skb->data, len, DMA_TO_DEVICE));
    555	CBDW_DATLEN(bdp, len);
    556
    557	fep->mapped_as_page[curidx] = 0;
    558	frag = skb_shinfo(skb)->frags;
    559	while (nr_frags) {
    560		CBDC_SC(bdp,
    561			BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
    562			BD_ENET_TX_TC);
    563		CBDS_SC(bdp, BD_ENET_TX_READY);
    564
    565		if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) {
    566			bdp++;
    567			curidx++;
    568		} else {
    569			bdp = fep->tx_bd_base;
    570			curidx = 0;
    571		}
    572
    573		len = skb_frag_size(frag);
    574		CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len,
    575						   DMA_TO_DEVICE));
    576		CBDW_DATLEN(bdp, len);
    577
    578		fep->tx_skbuff[curidx] = NULL;
    579		fep->mapped_as_page[curidx] = 1;
    580
    581		frag++;
    582		nr_frags--;
    583	}
    584
    585	/* Trigger transmission start */
    586	sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
    587	     BD_ENET_TX_LAST | BD_ENET_TX_TC;
    588
     589	/* Note that while the FEC does not have this bit, it
     590	 * marks it as available for software use, so we can
     591	 * reuse it here. */
    592	if (skb->len <= 60)
    593		sc |= BD_ENET_TX_PAD;
    594	CBDC_SC(bdp, BD_ENET_TX_STATS);
    595	CBDS_SC(bdp, sc);
    596
    597	/* Save skb pointer. */
    598	fep->tx_skbuff[curidx] = skb;
    599
    600	/* If this was the last BD in the ring, start at the beginning again. */
    601	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
    602		bdp++;
    603	else
    604		bdp = fep->tx_bd_base;
    605	fep->cur_tx = bdp;
    606
    607	if (fep->tx_free < MAX_SKB_FRAGS)
    608		netif_stop_queue(dev);
    609
    610	skb_tx_timestamp(skb);
    611
    612	(*fep->ops->tx_kickstart)(dev);
    613
    614	spin_unlock(&fep->tx_lock);
    615
    616	return NETDEV_TX_OK;
    617}
    618
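/*
 * Editor's sketch of the ndo_start_xmit contract that
 * fs_enet_start_xmit() follows: once the skb has been queued on the
 * hardware ring the function returns NETDEV_TX_OK and the driver owns
 * the skb (it is freed later in the NAPI TX-cleanup loop); the queue is
 * stopped pre-emptively when fewer than MAX_SKB_FRAGS descriptors remain
 * free, and NETDEV_TX_BUSY is reserved for the "ring unexpectedly full"
 * case.  The skeleton below is hypothetical, not driver code.
 */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (fep->tx_free <= skb_shinfo(skb)->nr_frags) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;		/* should be rare */
	}

	/* ... map skb->data and its fragments, fill BDs, set TX_READY ... */

	if (fep->tx_free < MAX_SKB_FRAGS)
		netif_stop_queue(dev);		/* woken again by TX cleanup */

	return NETDEV_TX_OK;
}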
    619static void fs_timeout_work(struct work_struct *work)
    620{
    621	struct fs_enet_private *fep = container_of(work, struct fs_enet_private,
    622						   timeout_work);
    623	struct net_device *dev = fep->ndev;
    624	unsigned long flags;
    625	int wake = 0;
    626
    627	dev->stats.tx_errors++;
    628
    629	spin_lock_irqsave(&fep->lock, flags);
    630
    631	if (dev->flags & IFF_UP) {
    632		phy_stop(dev->phydev);
    633		(*fep->ops->stop)(dev);
    634		(*fep->ops->restart)(dev);
    635	}
    636
    637	phy_start(dev->phydev);
    638	wake = fep->tx_free >= MAX_SKB_FRAGS &&
    639	       !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
    640	spin_unlock_irqrestore(&fep->lock, flags);
    641
    642	if (wake)
    643		netif_wake_queue(dev);
    644}
    645
    646static void fs_timeout(struct net_device *dev, unsigned int txqueue)
    647{
    648	struct fs_enet_private *fep = netdev_priv(dev);
    649
    650	schedule_work(&fep->timeout_work);
    651}
    652
    653/*-----------------------------------------------------------------------------
    654 *  generic link-change handler - should be sufficient for most cases
    655 *-----------------------------------------------------------------------------*/
    656static void generic_adjust_link(struct  net_device *dev)
    657{
    658	struct fs_enet_private *fep = netdev_priv(dev);
    659	struct phy_device *phydev = dev->phydev;
    660	int new_state = 0;
    661
    662	if (phydev->link) {
    663		/* adjust to duplex mode */
    664		if (phydev->duplex != fep->oldduplex) {
    665			new_state = 1;
    666			fep->oldduplex = phydev->duplex;
    667		}
    668
    669		if (phydev->speed != fep->oldspeed) {
    670			new_state = 1;
    671			fep->oldspeed = phydev->speed;
    672		}
    673
    674		if (!fep->oldlink) {
    675			new_state = 1;
    676			fep->oldlink = 1;
    677		}
    678
    679		if (new_state)
    680			fep->ops->restart(dev);
    681	} else if (fep->oldlink) {
    682		new_state = 1;
    683		fep->oldlink = 0;
    684		fep->oldspeed = 0;
    685		fep->oldduplex = -1;
    686	}
    687
    688	if (new_state && netif_msg_link(fep))
    689		phy_print_status(phydev);
    690}
    691
    692
    693static void fs_adjust_link(struct net_device *dev)
    694{
    695	struct fs_enet_private *fep = netdev_priv(dev);
    696	unsigned long flags;
    697
    698	spin_lock_irqsave(&fep->lock, flags);
    699
    700	if(fep->ops->adjust_link)
    701		fep->ops->adjust_link(dev);
    702	else
    703		generic_adjust_link(dev);
    704
    705	spin_unlock_irqrestore(&fep->lock, flags);
    706}
    707
    708static int fs_init_phy(struct net_device *dev)
    709{
    710	struct fs_enet_private *fep = netdev_priv(dev);
    711	struct phy_device *phydev;
    712	phy_interface_t iface;
    713
    714	fep->oldlink = 0;
    715	fep->oldspeed = 0;
    716	fep->oldduplex = -1;
    717
    718	iface = fep->fpi->use_rmii ?
    719		PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII;
    720
    721	phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
    722				iface);
    723	if (!phydev) {
    724		dev_err(&dev->dev, "Could not attach to PHY\n");
    725		return -ENODEV;
    726	}
    727
    728	return 0;
    729}
    730
    731static int fs_enet_open(struct net_device *dev)
    732{
    733	struct fs_enet_private *fep = netdev_priv(dev);
    734	int r;
    735	int err;
    736
     737	/* Initialize fep->cur_rx, etc. */
     738	/* Not doing this will cause a crash in fs_enet_napi. */
    739	fs_init_bds(fep->ndev);
    740
    741	napi_enable(&fep->napi);
    742
    743	/* Install our interrupt handler. */
    744	r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
    745			"fs_enet-mac", dev);
    746	if (r != 0) {
    747		dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
    748		napi_disable(&fep->napi);
    749		return -EINVAL;
    750	}
    751
    752	err = fs_init_phy(dev);
    753	if (err) {
    754		free_irq(fep->interrupt, dev);
    755		napi_disable(&fep->napi);
    756		return err;
    757	}
    758	phy_start(dev->phydev);
    759
    760	netif_start_queue(dev);
    761
    762	return 0;
    763}
    764
    765static int fs_enet_close(struct net_device *dev)
    766{
    767	struct fs_enet_private *fep = netdev_priv(dev);
    768	unsigned long flags;
    769
    770	netif_stop_queue(dev);
    771	netif_carrier_off(dev);
    772	napi_disable(&fep->napi);
    773	cancel_work_sync(&fep->timeout_work);
    774	phy_stop(dev->phydev);
    775
    776	spin_lock_irqsave(&fep->lock, flags);
    777	spin_lock(&fep->tx_lock);
    778	(*fep->ops->stop)(dev);
    779	spin_unlock(&fep->tx_lock);
    780	spin_unlock_irqrestore(&fep->lock, flags);
    781
    782	/* release any irqs */
    783	phy_disconnect(dev->phydev);
    784	free_irq(fep->interrupt, dev);
    785
    786	return 0;
    787}
    788
    789/*************************************************************************/
    790
    791static void fs_get_drvinfo(struct net_device *dev,
    792			    struct ethtool_drvinfo *info)
    793{
    794	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
    795}
    796
    797static int fs_get_regs_len(struct net_device *dev)
    798{
    799	struct fs_enet_private *fep = netdev_priv(dev);
    800
    801	return (*fep->ops->get_regs_len)(dev);
    802}
    803
    804static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
    805			 void *p)
    806{
    807	struct fs_enet_private *fep = netdev_priv(dev);
    808	unsigned long flags;
    809	int r, len;
    810
    811	len = regs->len;
    812
    813	spin_lock_irqsave(&fep->lock, flags);
    814	r = (*fep->ops->get_regs)(dev, p, &len);
    815	spin_unlock_irqrestore(&fep->lock, flags);
    816
    817	if (r == 0)
    818		regs->version = 0;
    819}
    820
    821static u32 fs_get_msglevel(struct net_device *dev)
    822{
    823	struct fs_enet_private *fep = netdev_priv(dev);
    824	return fep->msg_enable;
    825}
    826
    827static void fs_set_msglevel(struct net_device *dev, u32 value)
    828{
    829	struct fs_enet_private *fep = netdev_priv(dev);
    830	fep->msg_enable = value;
    831}
    832
    833static int fs_get_tunable(struct net_device *dev,
    834			  const struct ethtool_tunable *tuna, void *data)
    835{
    836	struct fs_enet_private *fep = netdev_priv(dev);
    837	struct fs_platform_info *fpi = fep->fpi;
    838	int ret = 0;
    839
    840	switch (tuna->id) {
    841	case ETHTOOL_RX_COPYBREAK:
    842		*(u32 *)data = fpi->rx_copybreak;
    843		break;
    844	default:
    845		ret = -EINVAL;
    846		break;
    847	}
    848
    849	return ret;
    850}
    851
    852static int fs_set_tunable(struct net_device *dev,
    853			  const struct ethtool_tunable *tuna, const void *data)
    854{
    855	struct fs_enet_private *fep = netdev_priv(dev);
    856	struct fs_platform_info *fpi = fep->fpi;
    857	int ret = 0;
    858
    859	switch (tuna->id) {
    860	case ETHTOOL_RX_COPYBREAK:
    861		fpi->rx_copybreak = *(u32 *)data;
    862		break;
    863	default:
    864		ret = -EINVAL;
    865		break;
    866	}
    867
    868	return ret;
    869}
    870
    871static const struct ethtool_ops fs_ethtool_ops = {
    872	.get_drvinfo = fs_get_drvinfo,
    873	.get_regs_len = fs_get_regs_len,
    874	.nway_reset = phy_ethtool_nway_reset,
    875	.get_link = ethtool_op_get_link,
    876	.get_msglevel = fs_get_msglevel,
    877	.set_msglevel = fs_set_msglevel,
    878	.get_regs = fs_get_regs,
    879	.get_ts_info = ethtool_op_get_ts_info,
    880	.get_link_ksettings = phy_ethtool_get_link_ksettings,
    881	.set_link_ksettings = phy_ethtool_set_link_ksettings,
    882	.get_tunable = fs_get_tunable,
    883	.set_tunable = fs_set_tunable,
    884};
    885
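/*
 * Editor's addition: fs_get_tunable()/fs_set_tunable() above expose
 * rx_copybreak through the generic ETHTOOL_GTUNABLE/ETHTOOL_STUNABLE
 * interface.  The stand-alone user-space sketch below reads the value
 * back over the legacy SIOCETHTOOL ioctl; the interface name "eth0" is
 * a placeholder and the snippet is not part of the driver.
 */
#if 0	/* user-space illustration only */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_tunable *req;
	struct ifreq ifr = { 0 };
	__u32 value = 0;
	int fd;

	req = calloc(1, sizeof(*req) + sizeof(value));
	if (!req)
		return 1;

	req->cmd	= ETHTOOL_GTUNABLE;
	req->id		= ETHTOOL_RX_COPYBREAK;
	req->type_id	= ETHTOOL_TUNABLE_U32;
	req->len	= sizeof(value);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder */
	ifr.ifr_data = (void *)req;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GTUNABLE");
	} else {
		memcpy(&value, req->data, sizeof(value));
		printf("rx-copybreak: %u\n", value);
	}

	if (fd >= 0)
		close(fd);
	free(req);
	return 0;
}
#endif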
    886extern int fs_mii_connect(struct net_device *dev);
    887extern void fs_mii_disconnect(struct net_device *dev);
    888
    889/**************************************************************************************/
    890
    891#ifdef CONFIG_FS_ENET_HAS_FEC
    892#define IS_FEC(match) ((match)->data == &fs_fec_ops)
    893#else
    894#define IS_FEC(match) 0
    895#endif
    896
    897static const struct net_device_ops fs_enet_netdev_ops = {
    898	.ndo_open		= fs_enet_open,
    899	.ndo_stop		= fs_enet_close,
    900	.ndo_start_xmit		= fs_enet_start_xmit,
    901	.ndo_tx_timeout		= fs_timeout,
    902	.ndo_set_rx_mode	= fs_set_multicast_list,
    903	.ndo_eth_ioctl		= phy_do_ioctl_running,
    904	.ndo_validate_addr	= eth_validate_addr,
    905	.ndo_set_mac_address	= eth_mac_addr,
    906#ifdef CONFIG_NET_POLL_CONTROLLER
    907	.ndo_poll_controller	= fs_enet_netpoll,
    908#endif
    909};
    910
    911static const struct of_device_id fs_enet_match[];
    912static int fs_enet_probe(struct platform_device *ofdev)
    913{
    914	const struct of_device_id *match;
    915	struct net_device *ndev;
    916	struct fs_enet_private *fep;
    917	struct fs_platform_info *fpi;
    918	const u32 *data;
    919	struct clk *clk;
    920	int err;
    921	const char *phy_connection_type;
    922	int privsize, len, ret = -ENODEV;
    923
    924	match = of_match_device(fs_enet_match, &ofdev->dev);
    925	if (!match)
    926		return -EINVAL;
    927
    928	fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
    929	if (!fpi)
    930		return -ENOMEM;
    931
    932	if (!IS_FEC(match)) {
    933		data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
    934		if (!data || len != 4)
    935			goto out_free_fpi;
    936
    937		fpi->cp_command = *data;
    938	}
    939
    940	fpi->rx_ring = RX_RING_SIZE;
    941	fpi->tx_ring = TX_RING_SIZE;
    942	fpi->rx_copybreak = 240;
    943	fpi->napi_weight = 17;
    944	fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
    945	if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
    946		err = of_phy_register_fixed_link(ofdev->dev.of_node);
    947		if (err)
    948			goto out_free_fpi;
    949
    950		/* In the case of a fixed PHY, the DT node associated
     951		 * with the PHY is the Ethernet MAC DT node.
    952		 */
    953		fpi->phy_node = of_node_get(ofdev->dev.of_node);
    954	}
    955
    956	if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
    957		phy_connection_type = of_get_property(ofdev->dev.of_node,
    958						"phy-connection-type", NULL);
    959		if (phy_connection_type && !strcmp("rmii", phy_connection_type))
    960			fpi->use_rmii = 1;
    961	}
    962
     963	/* Make the clock lookup non-fatal (the driver is shared among
     964	 * platforms), but require the enable to succeed when a clock was
     965	 * specified/found, and keep a reference to it on success.
     966	 */
    967	clk = devm_clk_get(&ofdev->dev, "per");
    968	if (!IS_ERR(clk)) {
    969		ret = clk_prepare_enable(clk);
    970		if (ret)
    971			goto out_deregister_fixed_link;
    972
    973		fpi->clk_per = clk;
    974	}
    975
    976	privsize = sizeof(*fep) +
    977	           sizeof(struct sk_buff **) *
    978		     (fpi->rx_ring + fpi->tx_ring) +
    979		   sizeof(char) * fpi->tx_ring;
    980
    981	ndev = alloc_etherdev(privsize);
    982	if (!ndev) {
    983		ret = -ENOMEM;
    984		goto out_put;
    985	}
    986
    987	SET_NETDEV_DEV(ndev, &ofdev->dev);
    988	platform_set_drvdata(ofdev, ndev);
    989
    990	fep = netdev_priv(ndev);
    991	fep->dev = &ofdev->dev;
    992	fep->ndev = ndev;
    993	fep->fpi = fpi;
    994	fep->ops = match->data;
    995
    996	ret = fep->ops->setup_data(ndev);
    997	if (ret)
    998		goto out_free_dev;
    999
   1000	fep->rx_skbuff = (struct sk_buff **)&fep[1];
   1001	fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
   1002	fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring +
   1003				       fpi->tx_ring);
   1004
   1005	spin_lock_init(&fep->lock);
   1006	spin_lock_init(&fep->tx_lock);
   1007
   1008	of_get_ethdev_address(ofdev->dev.of_node, ndev);
   1009
   1010	ret = fep->ops->allocate_bd(ndev);
   1011	if (ret)
   1012		goto out_cleanup_data;
   1013
   1014	fep->rx_bd_base = fep->ring_base;
   1015	fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;
   1016
   1017	fep->tx_ring = fpi->tx_ring;
   1018	fep->rx_ring = fpi->rx_ring;
   1019
   1020	ndev->netdev_ops = &fs_enet_netdev_ops;
   1021	ndev->watchdog_timeo = 2 * HZ;
   1022	INIT_WORK(&fep->timeout_work, fs_timeout_work);
   1023	netif_napi_add_weight(ndev, &fep->napi, fs_enet_napi,
   1024			      fpi->napi_weight);
   1025
   1026	ndev->ethtool_ops = &fs_ethtool_ops;
   1027
   1028	netif_carrier_off(ndev);
   1029
   1030	ndev->features |= NETIF_F_SG;
   1031
   1032	ret = register_netdev(ndev);
   1033	if (ret)
   1034		goto out_free_bd;
   1035
   1036	pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);
   1037
   1038	return 0;
   1039
   1040out_free_bd:
   1041	fep->ops->free_bd(ndev);
   1042out_cleanup_data:
   1043	fep->ops->cleanup_data(ndev);
   1044out_free_dev:
   1045	free_netdev(ndev);
   1046out_put:
   1047	clk_disable_unprepare(fpi->clk_per);
   1048out_deregister_fixed_link:
   1049	of_node_put(fpi->phy_node);
   1050	if (of_phy_is_fixed_link(ofdev->dev.of_node))
   1051		of_phy_deregister_fixed_link(ofdev->dev.of_node);
   1052out_free_fpi:
   1053	kfree(fpi);
   1054	return ret;
   1055}
   1056
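/*
 * Editor's note: fs_enet_probe() above uses the conventional kernel
 * goto-unwind ladder: every acquisition that can fail jumps to a label
 * that releases only what was acquired before it, so the labels at the
 * bottom mirror the setup sequence in reverse order.  A minimal
 * hypothetical skeleton of the same shape follows; the example_*
 * functions do not exist, they only stand in for acquire/release pairs.
 */
static int example_probe_shape(struct platform_device *pdev)
{
	int ret;

	ret = example_acquire_first(pdev);	/* hypothetical */
	if (ret)
		return ret;

	ret = example_acquire_second(pdev);	/* hypothetical */
	if (ret)
		goto out_release_first;

	return 0;

out_release_first:
	example_release_first(pdev);		/* hypothetical */
	return ret;
}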
   1057static int fs_enet_remove(struct platform_device *ofdev)
   1058{
   1059	struct net_device *ndev = platform_get_drvdata(ofdev);
   1060	struct fs_enet_private *fep = netdev_priv(ndev);
   1061
   1062	unregister_netdev(ndev);
   1063
   1064	fep->ops->free_bd(ndev);
   1065	fep->ops->cleanup_data(ndev);
   1066	dev_set_drvdata(fep->dev, NULL);
   1067	of_node_put(fep->fpi->phy_node);
   1068	clk_disable_unprepare(fep->fpi->clk_per);
   1069	if (of_phy_is_fixed_link(ofdev->dev.of_node))
   1070		of_phy_deregister_fixed_link(ofdev->dev.of_node);
   1071	free_netdev(ndev);
   1072	return 0;
   1073}
   1074
   1075static const struct of_device_id fs_enet_match[] = {
   1076#ifdef CONFIG_FS_ENET_HAS_SCC
   1077	{
   1078		.compatible = "fsl,cpm1-scc-enet",
   1079		.data = (void *)&fs_scc_ops,
   1080	},
   1081	{
   1082		.compatible = "fsl,cpm2-scc-enet",
   1083		.data = (void *)&fs_scc_ops,
   1084	},
   1085#endif
   1086#ifdef CONFIG_FS_ENET_HAS_FCC
   1087	{
   1088		.compatible = "fsl,cpm2-fcc-enet",
   1089		.data = (void *)&fs_fcc_ops,
   1090	},
   1091#endif
   1092#ifdef CONFIG_FS_ENET_HAS_FEC
   1093#ifdef CONFIG_FS_ENET_MPC5121_FEC
   1094	{
   1095		.compatible = "fsl,mpc5121-fec",
   1096		.data = (void *)&fs_fec_ops,
   1097	},
   1098	{
   1099		.compatible = "fsl,mpc5125-fec",
   1100		.data = (void *)&fs_fec_ops,
   1101	},
   1102#else
   1103	{
   1104		.compatible = "fsl,pq1-fec-enet",
   1105		.data = (void *)&fs_fec_ops,
   1106	},
   1107#endif
   1108#endif
   1109	{}
   1110};
   1111MODULE_DEVICE_TABLE(of, fs_enet_match);
   1112
   1113static struct platform_driver fs_enet_driver = {
   1114	.driver = {
   1115		.name = "fs_enet",
   1116		.of_match_table = fs_enet_match,
   1117	},
   1118	.probe = fs_enet_probe,
   1119	.remove = fs_enet_remove,
   1120};
   1121
   1122#ifdef CONFIG_NET_POLL_CONTROLLER
   1123static void fs_enet_netpoll(struct net_device *dev)
   1124{
    1125	disable_irq(dev->irq);
    1126	fs_enet_interrupt(dev->irq, dev);
    1127	enable_irq(dev->irq);
   1128}
   1129#endif
   1130
   1131module_platform_driver(fs_enet_driver);
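/*
 * Editor's note: module_platform_driver() above expands to the usual
 * module_init()/module_exit() boilerplate, roughly as shown below (the
 * generated function names follow the driver variable, i.e.
 * fs_enet_driver_init/_exit).  This expansion is shown for illustration
 * only; it is not additional driver code.
 */
#if 0	/* approximate expansion of module_platform_driver() above */
static int __init fs_enet_driver_init(void)
{
	return platform_driver_register(&fs_enet_driver);
}
module_init(fs_enet_driver_init);

static void __exit fs_enet_driver_exit(void)
{
	platform_driver_unregister(&fs_enet_driver);
}
module_exit(fs_enet_driver_exit);
#endif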