cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pxa168_eth.c (41388B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * PXA168 ethernet driver.
      4 * Most of the code is derived from mv643xx ethernet driver.
      5 *
      6 * Copyright (C) 2010 Marvell International Ltd.
      7 *		Sachin Sanap <ssanap@marvell.com>
      8 *		Zhangfei Gao <zgao6@marvell.com>
      9 *		Philip Rakity <prakity@marvell.com>
     10 *		Mark Brown <markb@marvell.com>
     11 */
     12
     13#include <linux/bitops.h>
     14#include <linux/clk.h>
     15#include <linux/delay.h>
     16#include <linux/dma-mapping.h>
     17#include <linux/etherdevice.h>
     18#include <linux/ethtool.h>
     19#include <linux/in.h>
     20#include <linux/interrupt.h>
     21#include <linux/io.h>
     22#include <linux/ip.h>
     23#include <linux/kernel.h>
     24#include <linux/module.h>
     25#include <linux/of.h>
     26#include <linux/of_net.h>
     27#include <linux/phy.h>
     28#include <linux/platform_device.h>
     29#include <linux/pxa168_eth.h>
     30#include <linux/tcp.h>
     31#include <linux/types.h>
     32#include <linux/udp.h>
     33#include <linux/workqueue.h>
     34#include <linux/pgtable.h>
     35
     36#include <asm/cacheflush.h>
     37
     38#define DRIVER_NAME	"pxa168-eth"
     39#define DRIVER_VERSION	"0.3"
     40
     41/*
     42 * Registers
     43 */
     44
     45#define PHY_ADDRESS		0x0000
     46#define SMI			0x0010
     47#define PORT_CONFIG		0x0400
     48#define PORT_CONFIG_EXT		0x0408
     49#define PORT_COMMAND		0x0410
     50#define PORT_STATUS		0x0418
     51#define HTPR			0x0428
     52#define MAC_ADDR_LOW		0x0430
     53#define MAC_ADDR_HIGH		0x0438
     54#define SDMA_CONFIG		0x0440
     55#define SDMA_CMD		0x0448
     56#define INT_CAUSE		0x0450
     57#define INT_W_CLEAR		0x0454
     58#define INT_MASK		0x0458
     59#define ETH_F_RX_DESC_0		0x0480
     60#define ETH_C_RX_DESC_0		0x04A0
     61#define ETH_C_TX_DESC_1		0x04E4
     62
     63/* smi register */
     64#define SMI_BUSY		(1 << 28)	/* 0 - Write, 1 - Read  */
     65#define SMI_R_VALID		(1 << 27)	/* 0 - Write, 1 - Read  */
     66#define SMI_OP_W		(0 << 26)	/* Write operation      */
     67#define SMI_OP_R		(1 << 26)	/* Read operation */
     68
     69#define PHY_WAIT_ITERATIONS	10
     70
     71#define PXA168_ETH_PHY_ADDR_DEFAULT	0
     72/* RX & TX descriptor command */
     73#define BUF_OWNED_BY_DMA	(1 << 31)
     74
     75/* RX descriptor status */
     76#define RX_EN_INT		(1 << 23)
     77#define RX_FIRST_DESC		(1 << 17)
     78#define RX_LAST_DESC		(1 << 16)
     79#define RX_ERROR		(1 << 15)
     80
     81/* TX descriptor command */
     82#define TX_EN_INT		(1 << 23)
     83#define TX_GEN_CRC		(1 << 22)
     84#define TX_ZERO_PADDING		(1 << 18)
     85#define TX_FIRST_DESC		(1 << 17)
     86#define TX_LAST_DESC		(1 << 16)
     87#define TX_ERROR		(1 << 15)
     88
     89/* SDMA_CMD */
     90#define SDMA_CMD_AT		(1 << 31)
     91#define SDMA_CMD_TXDL		(1 << 24)
     92#define SDMA_CMD_TXDH		(1 << 23)
     93#define SDMA_CMD_AR		(1 << 15)
     94#define SDMA_CMD_ERD		(1 << 7)
     95
     96/* Bit definitions of the Port Config Reg */
     97#define PCR_DUPLEX_FULL		(1 << 15)
     98#define PCR_HS			(1 << 12)
     99#define PCR_EN			(1 << 7)
    100#define PCR_PM			(1 << 0)
    101
    102/* Bit definitions of the Port Config Extend Reg */
    103#define PCXR_2BSM		(1 << 28)
    104#define PCXR_DSCP_EN		(1 << 21)
    105#define PCXR_RMII_EN		(1 << 20)
    106#define PCXR_AN_SPEED_DIS	(1 << 19)
    107#define PCXR_SPEED_100		(1 << 18)
    108#define PCXR_MFL_1518		(0 << 14)
    109#define PCXR_MFL_1536		(1 << 14)
    110#define PCXR_MFL_2048		(2 << 14)
    111#define PCXR_MFL_64K		(3 << 14)
    112#define PCXR_FLOWCTL_DIS	(1 << 12)
    113#define PCXR_FLP		(1 << 11)
    114#define PCXR_AN_FLOWCTL_DIS	(1 << 10)
    115#define PCXR_AN_DUPLEX_DIS	(1 << 9)
    116#define PCXR_PRIO_TX_OFF	3
    117#define PCXR_TX_HIGH_PRI	(7 << PCXR_PRIO_TX_OFF)
    118
    119/* Bit definitions of the SDMA Config Reg */
    120#define SDCR_BSZ_OFF		12
    121#define SDCR_BSZ8		(3 << SDCR_BSZ_OFF)
    122#define SDCR_BSZ4		(2 << SDCR_BSZ_OFF)
    123#define SDCR_BSZ2		(1 << SDCR_BSZ_OFF)
    124#define SDCR_BSZ1		(0 << SDCR_BSZ_OFF)
    125#define SDCR_BLMR		(1 << 6)
    126#define SDCR_BLMT		(1 << 7)
    127#define SDCR_RIFB		(1 << 9)
    128#define SDCR_RC_OFF		2
    129#define SDCR_RC_MAX_RETRANS	(0xf << SDCR_RC_OFF)
    130
    131/*
    132 * Bit definitions of the Interrupt Cause Reg
     133 * and Interrupt MASK Reg are the same
    134 */
    135#define ICR_RXBUF		(1 << 0)
    136#define ICR_TXBUF_H		(1 << 2)
    137#define ICR_TXBUF_L		(1 << 3)
    138#define ICR_TXEND_H		(1 << 6)
    139#define ICR_TXEND_L		(1 << 7)
    140#define ICR_RXERR		(1 << 8)
    141#define ICR_TXERR_H		(1 << 10)
    142#define ICR_TXERR_L		(1 << 11)
    143#define ICR_TX_UDR		(1 << 13)
    144#define ICR_MII_CH		(1 << 28)
    145
    146#define ALL_INTS (ICR_TXBUF_H  | ICR_TXBUF_L  | ICR_TX_UDR |\
    147				ICR_TXERR_H  | ICR_TXERR_L |\
    148				ICR_TXEND_H  | ICR_TXEND_L |\
    149				ICR_RXBUF | ICR_RXERR  | ICR_MII_CH)
    150
    151#define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */
    152
    153#define NUM_RX_DESCS		64
    154#define NUM_TX_DESCS		64
    155
    156#define HASH_ADD		0
    157#define HASH_DELETE		1
    158#define HASH_ADDR_TABLE_SIZE	0x4000	/* 16K (1/2K address - PCR_HS == 1) */
    159#define HOP_NUMBER		12
    160
    161/* Bit definitions for Port status */
    162#define PORT_SPEED_100		(1 << 0)
    163#define FULL_DUPLEX		(1 << 1)
    164#define FLOW_CONTROL_DISABLED	(1 << 2)
    165#define LINK_UP			(1 << 3)
    166
    167/* Bit definitions for work to be done */
    168#define WORK_TX_DONE		(1 << 1)
    169
    170/*
    171 * Misc definitions.
    172 */
    173#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
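/*
 * Worked example (illustration only, not from the original source): with a
 * typical PAGE_SIZE of 4096, NET_SKB_PAD of 64 and SMP_CACHE_BYTES of 32,
 * SKB_DMA_REALIGN = (4096 - 64) % 32 = 0, so no extra reserve is needed.
 * If NET_SKB_PAD were smaller than a cache line, the macro would yield the
 * padding required to re-align skb->data (see rxq_refill() and
 * pxa168_eth_recalc_skb_size() below).
 */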
    174
    175struct rx_desc {
    176	u32 cmd_sts;		/* Descriptor command status            */
    177	u16 byte_cnt;		/* Descriptor buffer byte count         */
    178	u16 buf_size;		/* Buffer size                          */
    179	u32 buf_ptr;		/* Descriptor buffer pointer            */
    180	u32 next_desc_ptr;	/* Next descriptor pointer              */
    181};
    182
    183struct tx_desc {
    184	u32 cmd_sts;		/* Command/status field                 */
    185	u16 reserved;
    186	u16 byte_cnt;		/* buffer byte count                    */
    187	u32 buf_ptr;		/* pointer to buffer for this descriptor */
    188	u32 next_desc_ptr;	/* Pointer to next descriptor           */
    189};
    190
    191struct pxa168_eth_private {
    192	struct platform_device *pdev;
    193	int port_num;		/* User Ethernet port number    */
    194	int phy_addr;
    195	int phy_speed;
    196	int phy_duplex;
    197	phy_interface_t phy_intf;
    198
    199	int rx_resource_err;	/* Rx ring resource error flag */
    200
    201	/* Next available and first returning Rx resource */
    202	int rx_curr_desc_q, rx_used_desc_q;
    203
    204	/* Next available and first returning Tx resource */
    205	int tx_curr_desc_q, tx_used_desc_q;
    206
    207	struct rx_desc *p_rx_desc_area;
    208	dma_addr_t rx_desc_dma;
    209	int rx_desc_area_size;
    210	struct sk_buff **rx_skb;
    211
    212	struct tx_desc *p_tx_desc_area;
    213	dma_addr_t tx_desc_dma;
    214	int tx_desc_area_size;
    215	struct sk_buff **tx_skb;
    216
    217	struct work_struct tx_timeout_task;
    218
    219	struct net_device *dev;
    220	struct napi_struct napi;
    221	u8 work_todo;
    222	int skb_size;
    223
    224	/* Size of Tx Ring per queue */
    225	int tx_ring_size;
    226	/* Number of tx descriptors in use */
    227	int tx_desc_count;
    228	/* Size of Rx Ring per queue */
    229	int rx_ring_size;
    230	/* Number of rx descriptors in use */
    231	int rx_desc_count;
    232
    233	/*
     234	 * Used in case the RX ring is empty, which can occur when the
     235	 * system does not have resources (skbs)
    236	 */
    237	struct timer_list timeout;
    238	struct mii_bus *smi_bus;
    239
    240	/* clock */
    241	struct clk *clk;
    242	struct pxa168_eth_platform_data *pd;
    243	/*
    244	 * Ethernet controller base address.
    245	 */
    246	void __iomem *base;
    247
    248	/* Pointer to the hardware address filter table */
    249	void *htpr;
    250	dma_addr_t htpr_dma;
    251};
    252
    253struct addr_table_entry {
    254	__le32 lo;
    255	__le32 hi;
    256};
    257
    258/* Bit fields of a Hash Table Entry */
    259enum hash_table_entry {
    260	HASH_ENTRY_VALID = 1,
    261	SKIP = 2,
    262	HASH_ENTRY_RECEIVE_DISCARD = 4,
    263	HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
    264};
    265
    266static int pxa168_init_hw(struct pxa168_eth_private *pep);
    267static int pxa168_init_phy(struct net_device *dev);
    268static void eth_port_reset(struct net_device *dev);
    269static void eth_port_start(struct net_device *dev);
    270static int pxa168_eth_open(struct net_device *dev);
    271static int pxa168_eth_stop(struct net_device *dev);
    272
    273static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
    274{
    275	return readl_relaxed(pep->base + offset);
    276}
    277
    278static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
    279{
    280	writel_relaxed(data, pep->base + offset);
    281}
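/*
 * Illustration only (not part of the upstream driver): registers are
 * typically updated with a read-modify-write through these helpers. A
 * minimal sketch of the pattern, mirroring what eth_port_start() does to
 * enable the port; the helper name is hypothetical:
 */
static inline void port_enable_sketch(struct pxa168_eth_private *pep)
{
	u32 val;

	val = rdl(pep, PORT_CONFIG);	/* read the current config */
	val |= PCR_EN;			/* set the port-enable bit */
	wrl(pep, PORT_CONFIG, val);	/* write the config back */
}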
    282
    283static void abort_dma(struct pxa168_eth_private *pep)
    284{
    285	int delay;
    286	int max_retries = 40;
    287
    288	do {
    289		wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
    290		udelay(100);
    291
    292		delay = 10;
    293		while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
    294		       && delay-- > 0) {
    295			udelay(10);
    296		}
    297	} while (max_retries-- > 0 && delay <= 0);
    298
    299	if (max_retries <= 0)
    300		netdev_err(pep->dev, "%s : DMA Stuck\n", __func__);
    301}
    302
    303static void rxq_refill(struct net_device *dev)
    304{
    305	struct pxa168_eth_private *pep = netdev_priv(dev);
    306	struct sk_buff *skb;
    307	struct rx_desc *p_used_rx_desc;
    308	int used_rx_desc;
    309
    310	while (pep->rx_desc_count < pep->rx_ring_size) {
    311		int size;
    312
    313		skb = netdev_alloc_skb(dev, pep->skb_size);
    314		if (!skb)
    315			break;
    316		if (SKB_DMA_REALIGN)
    317			skb_reserve(skb, SKB_DMA_REALIGN);
    318		pep->rx_desc_count++;
    319		/* Get 'used' Rx descriptor */
    320		used_rx_desc = pep->rx_used_desc_q;
    321		p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
    322		size = skb_end_pointer(skb) - skb->data;
    323		p_used_rx_desc->buf_ptr = dma_map_single(&pep->pdev->dev,
    324							 skb->data,
    325							 size,
    326							 DMA_FROM_DEVICE);
    327		p_used_rx_desc->buf_size = size;
    328		pep->rx_skb[used_rx_desc] = skb;
    329
    330		/* Return the descriptor to DMA ownership */
    331		dma_wmb();
    332		p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
    333		dma_wmb();
    334
    335		/* Move the used descriptor pointer to the next descriptor */
    336		pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
    337
    338		/* Any Rx return cancels the Rx resource error status */
    339		pep->rx_resource_err = 0;
    340
    341		skb_reserve(skb, ETH_HW_IP_ALIGN);
    342	}
    343
    344	/*
     345	 * If the RX ring has no SKBs, set a timer to try allocating
    346	 * again at a later time.
    347	 */
    348	if (pep->rx_desc_count == 0) {
    349		pep->timeout.expires = jiffies + (HZ / 10);
    350		add_timer(&pep->timeout);
    351	}
    352}
    353
    354static inline void rxq_refill_timer_wrapper(struct timer_list *t)
    355{
    356	struct pxa168_eth_private *pep = from_timer(pep, t, timeout);
    357	napi_schedule(&pep->napi);
    358}
    359
    360static inline u8 flip_8_bits(u8 x)
    361{
    362	return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
    363	    | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
    364	    | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
    365	    | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
    366}
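/*
 * Worked example (illustration only): flip_8_bits() reverses the bit order
 * within each nibble of a byte. For x = 0x12:
 *   low nibble  0x2 (0010b) -> 0x4 (0100b)
 *   high nibble 0x1 (0001b) -> 0x8 (1000b)
 * so flip_8_bits(0x12) == 0x84.
 */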
    367
    368static void nibble_swap_every_byte(unsigned char *mac_addr)
    369{
    370	int i;
    371	for (i = 0; i < ETH_ALEN; i++) {
    372		mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
    373				((mac_addr[i] & 0xf0) >> 4);
    374	}
    375}
    376
    377static void inverse_every_nibble(unsigned char *mac_addr)
    378{
    379	int i;
    380	for (i = 0; i < ETH_ALEN; i++)
    381		mac_addr[i] = flip_8_bits(mac_addr[i]);
    382}
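/*
 * Note (illustration only): hash_function() below applies
 * nibble_swap_every_byte() followed by inverse_every_nibble(), which
 * together reverse the full bit order of each byte. Worked example:
 * 0x01 -> nibble swap -> 0x10 -> nibble-wise bit reversal -> 0x80.
 */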
    383
    384/*
    385 * ----------------------------------------------------------------------------
     386 * This function calculates the hash of a MAC address.
     387 * Inputs
     388 * mac_addr_orig    - MAC address.
     389 * Outputs
     390 * return the calculated hash table index.
    391 */
    392static u32 hash_function(const unsigned char *mac_addr_orig)
    393{
    394	u32 hash_result;
    395	u32 addr0;
    396	u32 addr1;
    397	u32 addr2;
    398	u32 addr3;
    399	unsigned char mac_addr[ETH_ALEN];
    400
     401	/* Make a copy of the MAC address since we are going to perform bit
    402	 * operations on it
    403	 */
    404	memcpy(mac_addr, mac_addr_orig, ETH_ALEN);
    405
    406	nibble_swap_every_byte(mac_addr);
    407	inverse_every_nibble(mac_addr);
    408
    409	addr0 = (mac_addr[5] >> 2) & 0x3f;
    410	addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
    411	addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
    412	addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);
    413
    414	hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
    415	hash_result = hash_result & 0x07ff;
    416	return hash_result;
    417}
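/*
 * Illustration only: the masked 11-bit result (0..0x7ff) selects one of the
 * 2048 8-byte entries in the 1/2kB hash table. A hypothetical lookup sketch,
 * using the same pointer arithmetic as add_del_hash_entry() below:
 */
static inline struct addr_table_entry *
hash_entry_sketch(struct pxa168_eth_private *pep, const unsigned char *mac)
{
	struct addr_table_entry *start = pep->htpr;

	return start + hash_function(mac);	/* index 0..0x7ff */
}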
    418
    419/*
    420 * ----------------------------------------------------------------------------
     421 * This function adds or deletes an entry in the address table.
     422 * Inputs
     423 * pep - ETHERNET port private data.
     424 * mac_addr - MAC address.
     425 * skip - if 1, skip this address. Used when deleting an entry which is
     426 *	  part of a chain in the hash table. We can't just delete the entry,
     427 *	  since that would break the chain; the table needs to be
     428 *	  defragmented from time to time.
    429 * rd   - 0 Discard packet upon match.
    430 *	- 1 Receive packet upon match.
    431 * Outputs
    432 * address table entry is added/deleted.
    433 * 0 if success.
    434 * -ENOSPC if table full
    435 */
    436static int add_del_hash_entry(struct pxa168_eth_private *pep,
    437			      const unsigned char *mac_addr,
    438			      u32 rd, u32 skip, int del)
    439{
    440	struct addr_table_entry *entry, *start;
    441	u32 new_high;
    442	u32 new_low;
    443	u32 i;
    444
    445	new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
    446	    | (((mac_addr[1] >> 0) & 0xf) << 11)
    447	    | (((mac_addr[0] >> 4) & 0xf) << 7)
    448	    | (((mac_addr[0] >> 0) & 0xf) << 3)
    449	    | (((mac_addr[3] >> 4) & 0x1) << 31)
    450	    | (((mac_addr[3] >> 0) & 0xf) << 27)
    451	    | (((mac_addr[2] >> 4) & 0xf) << 23)
    452	    | (((mac_addr[2] >> 0) & 0xf) << 19)
    453	    | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
    454	    | HASH_ENTRY_VALID;
    455
    456	new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
    457	    | (((mac_addr[5] >> 0) & 0xf) << 11)
    458	    | (((mac_addr[4] >> 4) & 0xf) << 7)
    459	    | (((mac_addr[4] >> 0) & 0xf) << 3)
    460	    | (((mac_addr[3] >> 5) & 0x7) << 0);
    461
    462	/*
    463	 * Pick the appropriate table, start scanning for free/reusable
    464	 * entries at the index obtained by hashing the specified MAC address
    465	 */
    466	start = pep->htpr;
    467	entry = start + hash_function(mac_addr);
    468	for (i = 0; i < HOP_NUMBER; i++) {
    469		if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
    470			break;
    471		} else {
    472			/* if same address put in same position */
    473			if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
    474				(new_low & 0xfffffff8)) &&
    475				(le32_to_cpu(entry->hi) == new_high)) {
    476				break;
    477			}
    478		}
    479		if (entry == start + 0x7ff)
    480			entry = start;
    481		else
    482			entry++;
    483	}
    484
    485	if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
    486	    (le32_to_cpu(entry->hi) != new_high) && del)
    487		return 0;
    488
    489	if (i == HOP_NUMBER) {
    490		if (!del) {
    491			netdev_info(pep->dev,
    492				    "%s: table section is full, need to "
    493				    "move to 16kB implementation?\n",
    494				    __FILE__);
    495			return -ENOSPC;
    496		} else
    497			return 0;
    498	}
    499
    500	/*
    501	 * Update the selected entry
    502	 */
    503	if (del) {
    504		entry->hi = 0;
    505		entry->lo = 0;
    506	} else {
    507		entry->hi = cpu_to_le32(new_high);
    508		entry->lo = cpu_to_le32(new_low);
    509	}
    510
    511	return 0;
    512}
    513
    514/*
    515 * ----------------------------------------------------------------------------
     516 *  Create an address table entry from the MAC address info
     517 *  found in the specified net_device struct
    518 *
    519 *  Input : pointer to ethernet interface network device structure
    520 *  Output : N/A
    521 */
    522static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
    523					  unsigned char *oaddr,
    524					  const unsigned char *addr)
    525{
    526	/* Delete old entry */
    527	if (oaddr)
    528		add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
    529	/* Add new entry */
    530	add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
    531}
    532
    533static int init_hash_table(struct pxa168_eth_private *pep)
    534{
    535	/*
    536	 * Hardware expects CPU to build a hash table based on a predefined
    537	 * hash function and populate it based on hardware address. The
     538	 * location of the hash table is identified by a 32-bit pointer
     539	 * stored in the HTPR internal register. Two possible sizes exist for
     540	 * the hash table: 8kB (256kB of DRAM required (4 x 64 kB banks)) and
     541	 * 1/2kB (16kB of DRAM required (4 x 4 kB banks)). We currently only
     542	 * support the 1/2kB table.
    543	 */
    544	/* TODO: Add support for 8kB hash table and alternative hash
     545	 * function. The driver can dynamically switch to them if the 1/2kB hash
    546	 * table is full.
    547	 */
    548	if (!pep->htpr) {
    549		pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
    550					       HASH_ADDR_TABLE_SIZE,
    551					       &pep->htpr_dma, GFP_KERNEL);
    552		if (!pep->htpr)
    553			return -ENOMEM;
    554	} else {
    555		memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
    556	}
    557	wrl(pep, HTPR, pep->htpr_dma);
    558	return 0;
    559}
    560
    561static void pxa168_eth_set_rx_mode(struct net_device *dev)
    562{
    563	struct pxa168_eth_private *pep = netdev_priv(dev);
    564	struct netdev_hw_addr *ha;
    565	u32 val;
    566
    567	val = rdl(pep, PORT_CONFIG);
    568	if (dev->flags & IFF_PROMISC)
    569		val |= PCR_PM;
    570	else
    571		val &= ~PCR_PM;
    572	wrl(pep, PORT_CONFIG, val);
    573
    574	/*
     575	 * Remove the old list of MAC addresses and add dev->dev_addr
     576	 * and the multicast addresses.
    577	 */
    578	memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
    579	update_hash_table_mac_address(pep, NULL, dev->dev_addr);
    580
    581	netdev_for_each_mc_addr(ha, dev)
    582		update_hash_table_mac_address(pep, NULL, ha->addr);
    583}
    584
    585static void pxa168_eth_get_mac_address(struct net_device *dev,
    586				       unsigned char *addr)
    587{
    588	struct pxa168_eth_private *pep = netdev_priv(dev);
    589	unsigned int mac_h = rdl(pep, MAC_ADDR_HIGH);
    590	unsigned int mac_l = rdl(pep, MAC_ADDR_LOW);
    591
    592	addr[0] = (mac_h >> 24) & 0xff;
    593	addr[1] = (mac_h >> 16) & 0xff;
    594	addr[2] = (mac_h >> 8) & 0xff;
    595	addr[3] = mac_h & 0xff;
    596	addr[4] = (mac_l >> 8) & 0xff;
    597	addr[5] = mac_l & 0xff;
    598}
    599
    600static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
    601{
    602	struct sockaddr *sa = addr;
    603	struct pxa168_eth_private *pep = netdev_priv(dev);
    604	unsigned char oldMac[ETH_ALEN];
    605	u32 mac_h, mac_l;
    606
    607	if (!is_valid_ether_addr(sa->sa_data))
    608		return -EADDRNOTAVAIL;
    609	memcpy(oldMac, dev->dev_addr, ETH_ALEN);
    610	eth_hw_addr_set(dev, sa->sa_data);
    611
    612	mac_h = dev->dev_addr[0] << 24;
    613	mac_h |= dev->dev_addr[1] << 16;
    614	mac_h |= dev->dev_addr[2] << 8;
    615	mac_h |= dev->dev_addr[3];
    616	mac_l = dev->dev_addr[4] << 8;
    617	mac_l |= dev->dev_addr[5];
    618	wrl(pep, MAC_ADDR_HIGH, mac_h);
    619	wrl(pep, MAC_ADDR_LOW, mac_l);
    620
    621	netif_addr_lock_bh(dev);
    622	update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
    623	netif_addr_unlock_bh(dev);
    624	return 0;
    625}
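/*
 * Worked example (illustration only): for a hypothetical address
 * 00:11:22:33:44:55, the registers are packed as
 *   MAC_ADDR_HIGH = 0x00112233  (bytes 0-3)
 *   MAC_ADDR_LOW  = 0x00004455  (bytes 4-5)
 * matching how pxa168_eth_get_mac_address() unpacks them above.
 */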
    626
    627static void eth_port_start(struct net_device *dev)
    628{
    629	unsigned int val = 0;
    630	struct pxa168_eth_private *pep = netdev_priv(dev);
    631	int tx_curr_desc, rx_curr_desc;
    632
    633	phy_start(dev->phydev);
    634
    635	/* Assignment of Tx CTRP of given queue */
    636	tx_curr_desc = pep->tx_curr_desc_q;
    637	wrl(pep, ETH_C_TX_DESC_1,
    638	    (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
    639
    640	/* Assignment of Rx CRDP of given queue */
    641	rx_curr_desc = pep->rx_curr_desc_q;
    642	wrl(pep, ETH_C_RX_DESC_0,
    643	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
    644
    645	wrl(pep, ETH_F_RX_DESC_0,
    646	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
    647
    648	/* Clear all interrupts */
    649	wrl(pep, INT_CAUSE, 0);
    650
    651	/* Enable all interrupts for receive, transmit and error. */
    652	wrl(pep, INT_MASK, ALL_INTS);
    653
    654	val = rdl(pep, PORT_CONFIG);
    655	val |= PCR_EN;
    656	wrl(pep, PORT_CONFIG, val);
    657
    658	/* Start RX DMA engine */
    659	val = rdl(pep, SDMA_CMD);
    660	val |= SDMA_CMD_ERD;
    661	wrl(pep, SDMA_CMD, val);
    662}
    663
    664static void eth_port_reset(struct net_device *dev)
    665{
    666	struct pxa168_eth_private *pep = netdev_priv(dev);
    667	unsigned int val = 0;
    668
    669	/* Stop all interrupts for receive, transmit and error. */
    670	wrl(pep, INT_MASK, 0);
    671
    672	/* Clear all interrupts */
    673	wrl(pep, INT_CAUSE, 0);
    674
    675	/* Stop RX DMA */
    676	val = rdl(pep, SDMA_CMD);
    677	val &= ~SDMA_CMD_ERD;	/* abort dma command */
    678
    679	/* Abort any transmit and receive operations and put DMA
    680	 * in idle state.
    681	 */
    682	abort_dma(pep);
    683
    684	/* Disable port */
    685	val = rdl(pep, PORT_CONFIG);
    686	val &= ~PCR_EN;
    687	wrl(pep, PORT_CONFIG, val);
    688
    689	phy_stop(dev->phydev);
    690}
    691
    692/*
    693 * txq_reclaim - Free the tx desc data for completed descriptors
    694 * If force is non-zero, frees uncompleted descriptors as well
    695 */
    696static int txq_reclaim(struct net_device *dev, int force)
    697{
    698	struct pxa168_eth_private *pep = netdev_priv(dev);
    699	struct tx_desc *desc;
    700	u32 cmd_sts;
    701	struct sk_buff *skb;
    702	int tx_index;
    703	dma_addr_t addr;
    704	int count;
    705	int released = 0;
    706
    707	netif_tx_lock(dev);
    708
    709	pep->work_todo &= ~WORK_TX_DONE;
    710	while (pep->tx_desc_count > 0) {
    711		tx_index = pep->tx_used_desc_q;
    712		desc = &pep->p_tx_desc_area[tx_index];
    713		cmd_sts = desc->cmd_sts;
    714		if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
    715			if (released > 0) {
    716				goto txq_reclaim_end;
    717			} else {
    718				released = -1;
    719				goto txq_reclaim_end;
    720			}
    721		}
    722		pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
    723		pep->tx_desc_count--;
    724		addr = desc->buf_ptr;
    725		count = desc->byte_cnt;
    726		skb = pep->tx_skb[tx_index];
    727		if (skb)
    728			pep->tx_skb[tx_index] = NULL;
    729
    730		if (cmd_sts & TX_ERROR) {
    731			if (net_ratelimit())
    732				netdev_err(dev, "Error in TX\n");
    733			dev->stats.tx_errors++;
    734		}
    735		dma_unmap_single(&pep->pdev->dev, addr, count, DMA_TO_DEVICE);
    736		if (skb)
    737			dev_kfree_skb_irq(skb);
    738		released++;
    739	}
    740txq_reclaim_end:
    741	netif_tx_unlock(dev);
    742	return released;
    743}
    744
    745static void pxa168_eth_tx_timeout(struct net_device *dev, unsigned int txqueue)
    746{
    747	struct pxa168_eth_private *pep = netdev_priv(dev);
    748
    749	netdev_info(dev, "TX timeout  desc_count %d\n", pep->tx_desc_count);
    750
    751	schedule_work(&pep->tx_timeout_task);
    752}
    753
    754static void pxa168_eth_tx_timeout_task(struct work_struct *work)
    755{
    756	struct pxa168_eth_private *pep = container_of(work,
    757						 struct pxa168_eth_private,
    758						 tx_timeout_task);
    759	struct net_device *dev = pep->dev;
    760	pxa168_eth_stop(dev);
    761	pxa168_eth_open(dev);
    762}
    763
    764static int rxq_process(struct net_device *dev, int budget)
    765{
    766	struct pxa168_eth_private *pep = netdev_priv(dev);
    767	struct net_device_stats *stats = &dev->stats;
    768	unsigned int received_packets = 0;
    769	struct sk_buff *skb;
    770
    771	while (budget-- > 0) {
    772		int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
    773		struct rx_desc *rx_desc;
    774		unsigned int cmd_sts;
    775
    776		/* Do not process Rx ring in case of Rx ring resource error */
    777		if (pep->rx_resource_err)
    778			break;
    779		rx_curr_desc = pep->rx_curr_desc_q;
    780		rx_used_desc = pep->rx_used_desc_q;
    781		rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
    782		cmd_sts = rx_desc->cmd_sts;
    783		dma_rmb();
    784		if (cmd_sts & (BUF_OWNED_BY_DMA))
    785			break;
    786		skb = pep->rx_skb[rx_curr_desc];
    787		pep->rx_skb[rx_curr_desc] = NULL;
    788
    789		rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
    790		pep->rx_curr_desc_q = rx_next_curr_desc;
    791
    792		/* Rx descriptors exhausted. */
    793		/* Set the Rx ring resource error flag */
    794		if (rx_next_curr_desc == rx_used_desc)
    795			pep->rx_resource_err = 1;
    796		pep->rx_desc_count--;
    797		dma_unmap_single(&pep->pdev->dev, rx_desc->buf_ptr,
    798				 rx_desc->buf_size,
    799				 DMA_FROM_DEVICE);
    800		received_packets++;
    801		/*
    802		 * Update statistics.
     803		 * Note: the byte count includes the 4-byte CRC.
    804		 */
    805		stats->rx_packets++;
    806		stats->rx_bytes += rx_desc->byte_cnt;
    807		/*
     808		 * If a packet was received without the first/last bits set, or
     809		 * with the error summary bit set, the packet needs to be dropped.
    810		 */
    811		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
    812		     (RX_FIRST_DESC | RX_LAST_DESC))
    813		    || (cmd_sts & RX_ERROR)) {
    814
    815			stats->rx_dropped++;
    816			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
    817			    (RX_FIRST_DESC | RX_LAST_DESC)) {
    818				if (net_ratelimit())
    819					netdev_err(dev,
    820						   "Rx pkt on multiple desc\n");
    821			}
    822			if (cmd_sts & RX_ERROR)
    823				stats->rx_errors++;
    824			dev_kfree_skb_irq(skb);
    825		} else {
    826			/*
    827			 * The -4 is for the CRC in the trailer of the
    828			 * received packet
    829			 */
    830			skb_put(skb, rx_desc->byte_cnt - 4);
    831			skb->protocol = eth_type_trans(skb, dev);
    832			netif_receive_skb(skb);
    833		}
    834	}
    835	/* Fill RX ring with skb's */
    836	rxq_refill(dev);
    837	return received_packets;
    838}
    839
    840static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
    841				     struct net_device *dev)
    842{
    843	u32 icr;
    844	int ret = 0;
    845
    846	icr = rdl(pep, INT_CAUSE);
    847	if (icr == 0)
    848		return IRQ_NONE;
    849
    850	wrl(pep, INT_CAUSE, ~icr);
    851	if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
    852		pep->work_todo |= WORK_TX_DONE;
    853		ret = 1;
    854	}
    855	if (icr & ICR_RXBUF)
    856		ret = 1;
    857	return ret;
    858}
    859
    860static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
    861{
    862	struct net_device *dev = (struct net_device *)dev_id;
    863	struct pxa168_eth_private *pep = netdev_priv(dev);
    864
    865	if (unlikely(!pxa168_eth_collect_events(pep, dev)))
    866		return IRQ_NONE;
    867	/* Disable interrupts */
    868	wrl(pep, INT_MASK, 0);
    869	napi_schedule(&pep->napi);
    870	return IRQ_HANDLED;
    871}
    872
    873static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
    874{
    875	int skb_size;
    876
    877	/*
    878	 * Reserve 2+14 bytes for an ethernet header (the hardware
    879	 * automatically prepends 2 bytes of dummy data to each
    880	 * received packet), 16 bytes for up to four VLAN tags, and
    881	 * 4 bytes for the trailing FCS -- 36 bytes total.
    882	 */
    883	skb_size = pep->dev->mtu + 36;
    884
    885	/*
    886	 * Make sure that the skb size is a multiple of 8 bytes, as
    887	 * the lower three bits of the receive descriptor's buffer
    888	 * size field are ignored by the hardware.
    889	 */
    890	pep->skb_size = (skb_size + 7) & ~7;
    891
    892	/*
    893	 * If NET_SKB_PAD is smaller than a cache line,
    894	 * netdev_alloc_skb() will cause skb->data to be misaligned
    895	 * to a cache line boundary.  If this is the case, include
    896	 * some extra space to allow re-aligning the data area.
    897	 */
    898	pep->skb_size += SKB_DMA_REALIGN;
    899
    900}
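/*
 * Worked example (illustration only, assuming SKB_DMA_REALIGN is 0): for
 * the default MTU of 1500, skb_size = 1500 + 36 = 1536, which is already a
 * multiple of 8, so pep->skb_size = 1536 and set_port_config_ext() below
 * selects PCXR_MFL_1536 as the maximum frame length.
 */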
    901
    902static int set_port_config_ext(struct pxa168_eth_private *pep)
    903{
    904	int skb_size;
    905
    906	pxa168_eth_recalc_skb_size(pep);
    907	if  (pep->skb_size <= 1518)
    908		skb_size = PCXR_MFL_1518;
    909	else if (pep->skb_size <= 1536)
    910		skb_size = PCXR_MFL_1536;
    911	else if (pep->skb_size <= 2048)
    912		skb_size = PCXR_MFL_2048;
    913	else
    914		skb_size = PCXR_MFL_64K;
    915
    916	/* Extended Port Configuration */
    917	wrl(pep, PORT_CONFIG_EXT,
    918	    PCXR_AN_SPEED_DIS |		 /* Disable HW AN */
    919	    PCXR_AN_DUPLEX_DIS |
    920	    PCXR_AN_FLOWCTL_DIS |
    921	    PCXR_2BSM |			 /* Two byte prefix aligns IP hdr */
    922	    PCXR_DSCP_EN |		 /* Enable DSCP in IP */
    923	    skb_size | PCXR_FLP |	 /* do not force link pass */
    924	    PCXR_TX_HIGH_PRI);		 /* Transmit - high priority queue */
    925
    926	return 0;
    927}
    928
    929static void pxa168_eth_adjust_link(struct net_device *dev)
    930{
    931	struct pxa168_eth_private *pep = netdev_priv(dev);
    932	struct phy_device *phy = dev->phydev;
    933	u32 cfg, cfg_o = rdl(pep, PORT_CONFIG);
    934	u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT);
    935
    936	cfg = cfg_o & ~PCR_DUPLEX_FULL;
    937	cfgext = cfgext_o & ~(PCXR_SPEED_100 | PCXR_FLOWCTL_DIS | PCXR_RMII_EN);
    938
    939	if (phy->interface == PHY_INTERFACE_MODE_RMII)
    940		cfgext |= PCXR_RMII_EN;
    941	if (phy->speed == SPEED_100)
    942		cfgext |= PCXR_SPEED_100;
    943	if (phy->duplex)
    944		cfg |= PCR_DUPLEX_FULL;
    945	if (!phy->pause)
    946		cfgext |= PCXR_FLOWCTL_DIS;
    947
     948	/* Bail out if nothing has changed */
    949	if (cfg == cfg_o && cfgext == cfgext_o)
    950		return;
    951
    952	wrl(pep, PORT_CONFIG, cfg);
    953	wrl(pep, PORT_CONFIG_EXT, cfgext);
    954
    955	phy_print_status(phy);
    956}
    957
    958static int pxa168_init_phy(struct net_device *dev)
    959{
    960	struct pxa168_eth_private *pep = netdev_priv(dev);
    961	struct ethtool_link_ksettings cmd;
    962	struct phy_device *phy = NULL;
    963	int err;
    964
    965	if (dev->phydev)
    966		return 0;
    967
    968	phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
    969	if (IS_ERR(phy))
    970		return PTR_ERR(phy);
    971
    972	err = phy_connect_direct(dev, phy, pxa168_eth_adjust_link,
    973				 pep->phy_intf);
    974	if (err)
    975		return err;
    976
    977	cmd.base.phy_address = pep->phy_addr;
    978	cmd.base.speed = pep->phy_speed;
    979	cmd.base.duplex = pep->phy_duplex;
    980	linkmode_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES);
    981	cmd.base.autoneg = AUTONEG_ENABLE;
    982
    983	if (cmd.base.speed != 0)
    984		cmd.base.autoneg = AUTONEG_DISABLE;
    985
    986	return phy_ethtool_set_link_ksettings(dev, &cmd);
    987}
    988
    989static int pxa168_init_hw(struct pxa168_eth_private *pep)
    990{
    991	int err = 0;
    992
    993	/* Disable interrupts */
    994	wrl(pep, INT_MASK, 0);
    995	wrl(pep, INT_CAUSE, 0);
    996	/* Write to ICR to clear interrupts. */
    997	wrl(pep, INT_W_CLEAR, 0);
    998	/* Abort any transmit and receive operations and put DMA
    999	 * in idle state.
   1000	 */
   1001	abort_dma(pep);
   1002	/* Initialize address hash table */
   1003	err = init_hash_table(pep);
   1004	if (err)
   1005		return err;
   1006	/* SDMA configuration */
   1007	wrl(pep, SDMA_CONFIG, SDCR_BSZ8 |	/* Burst size = 32 bytes */
   1008	    SDCR_RIFB |				/* Rx interrupt on frame */
   1009	    SDCR_BLMT |				/* Little endian transmit */
   1010	    SDCR_BLMR |				/* Little endian receive */
   1011	    SDCR_RC_MAX_RETRANS);		/* Max retransmit count */
   1012	/* Port Configuration */
   1013	wrl(pep, PORT_CONFIG, PCR_HS);		/* Hash size is 1/2kb */
   1014	set_port_config_ext(pep);
   1015
   1016	return err;
   1017}
   1018
   1019static int rxq_init(struct net_device *dev)
   1020{
   1021	struct pxa168_eth_private *pep = netdev_priv(dev);
   1022	struct rx_desc *p_rx_desc;
   1023	int size = 0, i = 0;
   1024	int rx_desc_num = pep->rx_ring_size;
   1025
   1026	/* Allocate RX skb rings */
   1027	pep->rx_skb = kcalloc(rx_desc_num, sizeof(*pep->rx_skb), GFP_KERNEL);
   1028	if (!pep->rx_skb)
   1029		return -ENOMEM;
   1030
   1031	/* Allocate RX ring */
   1032	pep->rx_desc_count = 0;
   1033	size = pep->rx_ring_size * sizeof(struct rx_desc);
   1034	pep->rx_desc_area_size = size;
   1035	pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
   1036						 &pep->rx_desc_dma,
   1037						 GFP_KERNEL);
   1038	if (!pep->p_rx_desc_area)
   1039		goto out;
   1040
   1041	/* initialize the next_desc_ptr links in the Rx descriptors ring */
   1042	p_rx_desc = pep->p_rx_desc_area;
   1043	for (i = 0; i < rx_desc_num; i++) {
   1044		p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
   1045		    ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
   1046	}
   1047	/* Save Rx desc pointer to driver struct. */
   1048	pep->rx_curr_desc_q = 0;
   1049	pep->rx_used_desc_q = 0;
   1050	pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
   1051	return 0;
   1052out:
   1053	kfree(pep->rx_skb);
   1054	return -ENOMEM;
   1055}
   1056
   1057static void rxq_deinit(struct net_device *dev)
   1058{
   1059	struct pxa168_eth_private *pep = netdev_priv(dev);
   1060	int curr;
   1061
   1062	/* Free preallocated skb's on RX rings */
   1063	for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
   1064		if (pep->rx_skb[curr]) {
   1065			dev_kfree_skb(pep->rx_skb[curr]);
   1066			pep->rx_desc_count--;
   1067		}
   1068	}
   1069	if (pep->rx_desc_count)
   1070		netdev_err(dev, "Error in freeing Rx Ring. %d skb's still\n",
   1071			   pep->rx_desc_count);
   1072	/* Free RX ring */
   1073	if (pep->p_rx_desc_area)
   1074		dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
   1075				  pep->p_rx_desc_area, pep->rx_desc_dma);
   1076	kfree(pep->rx_skb);
   1077}
   1078
   1079static int txq_init(struct net_device *dev)
   1080{
   1081	struct pxa168_eth_private *pep = netdev_priv(dev);
   1082	struct tx_desc *p_tx_desc;
   1083	int size = 0, i = 0;
   1084	int tx_desc_num = pep->tx_ring_size;
   1085
   1086	pep->tx_skb = kcalloc(tx_desc_num, sizeof(*pep->tx_skb), GFP_KERNEL);
   1087	if (!pep->tx_skb)
   1088		return -ENOMEM;
   1089
   1090	/* Allocate TX ring */
   1091	pep->tx_desc_count = 0;
   1092	size = pep->tx_ring_size * sizeof(struct tx_desc);
   1093	pep->tx_desc_area_size = size;
   1094	pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
   1095						 &pep->tx_desc_dma,
   1096						 GFP_KERNEL);
   1097	if (!pep->p_tx_desc_area)
   1098		goto out;
   1099	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
   1100	p_tx_desc = pep->p_tx_desc_area;
   1101	for (i = 0; i < tx_desc_num; i++) {
   1102		p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
   1103		    ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
   1104	}
   1105	pep->tx_curr_desc_q = 0;
   1106	pep->tx_used_desc_q = 0;
   1107	pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
   1108	return 0;
   1109out:
   1110	kfree(pep->tx_skb);
   1111	return -ENOMEM;
   1112}
   1113
   1114static void txq_deinit(struct net_device *dev)
   1115{
   1116	struct pxa168_eth_private *pep = netdev_priv(dev);
   1117
   1118	/* Free outstanding skb's on TX ring */
   1119	txq_reclaim(dev, 1);
   1120	BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
   1121	/* Free TX ring */
   1122	if (pep->p_tx_desc_area)
   1123		dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
   1124				  pep->p_tx_desc_area, pep->tx_desc_dma);
   1125	kfree(pep->tx_skb);
   1126}
   1127
   1128static int pxa168_eth_open(struct net_device *dev)
   1129{
   1130	struct pxa168_eth_private *pep = netdev_priv(dev);
   1131	int err;
   1132
   1133	err = pxa168_init_phy(dev);
   1134	if (err)
   1135		return err;
   1136
   1137	err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev);
   1138	if (err) {
   1139		dev_err(&dev->dev, "can't assign irq\n");
   1140		return -EAGAIN;
   1141	}
   1142	pep->rx_resource_err = 0;
   1143	err = rxq_init(dev);
   1144	if (err != 0)
   1145		goto out_free_irq;
   1146	err = txq_init(dev);
   1147	if (err != 0)
   1148		goto out_free_rx_skb;
   1149	pep->rx_used_desc_q = 0;
   1150	pep->rx_curr_desc_q = 0;
   1151
   1152	/* Fill RX ring with skb's */
   1153	rxq_refill(dev);
   1154	pep->rx_used_desc_q = 0;
   1155	pep->rx_curr_desc_q = 0;
   1156	netif_carrier_off(dev);
   1157	napi_enable(&pep->napi);
   1158	eth_port_start(dev);
   1159	return 0;
   1160out_free_rx_skb:
   1161	rxq_deinit(dev);
   1162out_free_irq:
   1163	free_irq(dev->irq, dev);
   1164	return err;
   1165}
   1166
   1167static int pxa168_eth_stop(struct net_device *dev)
   1168{
   1169	struct pxa168_eth_private *pep = netdev_priv(dev);
   1170	eth_port_reset(dev);
   1171
   1172	/* Disable interrupts */
   1173	wrl(pep, INT_MASK, 0);
   1174	wrl(pep, INT_CAUSE, 0);
   1175	/* Write to ICR to clear interrupts. */
   1176	wrl(pep, INT_W_CLEAR, 0);
   1177	napi_disable(&pep->napi);
   1178	del_timer_sync(&pep->timeout);
   1179	netif_carrier_off(dev);
   1180	free_irq(dev->irq, dev);
   1181	rxq_deinit(dev);
   1182	txq_deinit(dev);
   1183
   1184	return 0;
   1185}
   1186
   1187static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
   1188{
   1189	struct pxa168_eth_private *pep = netdev_priv(dev);
   1190
   1191	dev->mtu = mtu;
   1192	set_port_config_ext(pep);
   1193
   1194	if (!netif_running(dev))
   1195		return 0;
   1196
   1197	/*
    1198	 * Stop and then re-open the interface. This will allocate RX
    1199	 * skbs sized for the new MTU.
    1200	 * There is a risk that the open will not succeed if memory
    1201	 * is exhausted.
   1202	 */
   1203	pxa168_eth_stop(dev);
   1204	if (pxa168_eth_open(dev)) {
   1205		dev_err(&dev->dev,
   1206			"fatal error on re-opening device after MTU change\n");
   1207	}
   1208
   1209	return 0;
   1210}
   1211
   1212static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
   1213{
   1214	int tx_desc_curr;
   1215
   1216	tx_desc_curr = pep->tx_curr_desc_q;
   1217	pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
   1218	BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
   1219	pep->tx_desc_count++;
   1220
   1221	return tx_desc_curr;
   1222}
   1223
   1224static int pxa168_rx_poll(struct napi_struct *napi, int budget)
   1225{
   1226	struct pxa168_eth_private *pep =
   1227	    container_of(napi, struct pxa168_eth_private, napi);
   1228	struct net_device *dev = pep->dev;
   1229	int work_done = 0;
   1230
   1231	/*
    1232	 * We call txq_reclaim every time, since with NAPI interrupts are
    1233	 * disabled and we would otherwise miss the TX_DONE interrupt, which
    1234	 * is not updated in the interrupt status register.
   1235	 */
   1236	txq_reclaim(dev, 0);
   1237	if (netif_queue_stopped(dev)
   1238	    && pep->tx_ring_size - pep->tx_desc_count > 1) {
   1239		netif_wake_queue(dev);
   1240	}
   1241	work_done = rxq_process(dev, budget);
   1242	if (work_done < budget) {
   1243		napi_complete_done(napi, work_done);
   1244		wrl(pep, INT_MASK, ALL_INTS);
   1245	}
   1246
   1247	return work_done;
   1248}
   1249
   1250static netdev_tx_t
   1251pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
   1252{
   1253	struct pxa168_eth_private *pep = netdev_priv(dev);
   1254	struct net_device_stats *stats = &dev->stats;
   1255	struct tx_desc *desc;
   1256	int tx_index;
   1257	int length;
   1258
   1259	tx_index = eth_alloc_tx_desc_index(pep);
   1260	desc = &pep->p_tx_desc_area[tx_index];
   1261	length = skb->len;
   1262	pep->tx_skb[tx_index] = skb;
   1263	desc->byte_cnt = length;
   1264	desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, length,
   1265					DMA_TO_DEVICE);
   1266
   1267	skb_tx_timestamp(skb);
   1268
   1269	dma_wmb();
   1270	desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
   1271			TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
   1272	wmb();
   1273	wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
   1274
   1275	stats->tx_bytes += length;
   1276	stats->tx_packets++;
   1277	netif_trans_update(dev);
   1278	if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
   1279		/* We handled the current skb, but now we are out of space.*/
   1280		netif_stop_queue(dev);
   1281	}
   1282
   1283	return NETDEV_TX_OK;
   1284}
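/*
 * Note (illustration only): each skb is sent with a single descriptor, so
 * TX_FIRST_DESC and TX_LAST_DESC are set together. Ownership is handed to
 * the DMA engine by writing BUF_OWNED_BY_DMA after the dma_wmb() barrier,
 * and the descriptor is reclaimed in txq_reclaim() once the engine clears
 * the bit.
 */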
   1285
   1286static int smi_wait_ready(struct pxa168_eth_private *pep)
   1287{
   1288	int i = 0;
   1289
   1290	/* wait for the SMI register to become available */
   1291	for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
   1292		if (i == PHY_WAIT_ITERATIONS)
   1293			return -ETIMEDOUT;
   1294		msleep(10);
   1295	}
   1296
   1297	return 0;
   1298}
   1299
   1300static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
   1301{
   1302	struct pxa168_eth_private *pep = bus->priv;
   1303	int i = 0;
   1304	int val;
   1305
   1306	if (smi_wait_ready(pep)) {
   1307		netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
   1308		return -ETIMEDOUT;
   1309	}
   1310	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
   1311	/* now wait for the data to be valid */
   1312	for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
   1313		if (i == PHY_WAIT_ITERATIONS) {
   1314			netdev_warn(pep->dev,
   1315				    "pxa168_eth: SMI bus read not valid\n");
   1316			return -ENODEV;
   1317		}
   1318		msleep(10);
   1319	}
   1320
   1321	return val & 0xffff;
   1322}
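/*
 * Worked example (illustration only): the SMI command word packs the PHY
 * address into bits 16-20 and the register number into bits 21-25, plus
 * the operation bit. Reading register 1 (BMSR) of the PHY at address 0
 * writes (0 << 16) | (1 << 21) | SMI_OP_R == 0x04200000 to the SMI
 * register.
 */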
   1323
   1324static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
   1325			    u16 value)
   1326{
   1327	struct pxa168_eth_private *pep = bus->priv;
   1328
   1329	if (smi_wait_ready(pep)) {
   1330		netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
   1331		return -ETIMEDOUT;
   1332	}
   1333
   1334	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
   1335	    SMI_OP_W | (value & 0xffff));
   1336
   1337	if (smi_wait_ready(pep)) {
   1338		netdev_err(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
   1339		return -ETIMEDOUT;
   1340	}
   1341
   1342	return 0;
   1343}
   1344
   1345#ifdef CONFIG_NET_POLL_CONTROLLER
   1346static void pxa168_eth_netpoll(struct net_device *dev)
   1347{
   1348	disable_irq(dev->irq);
   1349	pxa168_eth_int_handler(dev->irq, dev);
   1350	enable_irq(dev->irq);
   1351}
   1352#endif
   1353
   1354static void pxa168_get_drvinfo(struct net_device *dev,
   1355			       struct ethtool_drvinfo *info)
   1356{
   1357	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
   1358	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
   1359	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
   1360	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
   1361}
   1362
   1363static const struct ethtool_ops pxa168_ethtool_ops = {
   1364	.get_drvinfo	= pxa168_get_drvinfo,
   1365	.nway_reset	= phy_ethtool_nway_reset,
   1366	.get_link	= ethtool_op_get_link,
   1367	.get_ts_info	= ethtool_op_get_ts_info,
   1368	.get_link_ksettings = phy_ethtool_get_link_ksettings,
   1369	.set_link_ksettings = phy_ethtool_set_link_ksettings,
   1370};
   1371
   1372static const struct net_device_ops pxa168_eth_netdev_ops = {
   1373	.ndo_open		= pxa168_eth_open,
   1374	.ndo_stop		= pxa168_eth_stop,
   1375	.ndo_start_xmit		= pxa168_eth_start_xmit,
   1376	.ndo_set_rx_mode	= pxa168_eth_set_rx_mode,
   1377	.ndo_set_mac_address	= pxa168_eth_set_mac_address,
   1378	.ndo_validate_addr	= eth_validate_addr,
   1379	.ndo_eth_ioctl		= phy_do_ioctl,
   1380	.ndo_change_mtu		= pxa168_eth_change_mtu,
   1381	.ndo_tx_timeout		= pxa168_eth_tx_timeout,
   1382#ifdef CONFIG_NET_POLL_CONTROLLER
   1383	.ndo_poll_controller    = pxa168_eth_netpoll,
   1384#endif
   1385};
   1386
   1387static int pxa168_eth_probe(struct platform_device *pdev)
   1388{
   1389	struct pxa168_eth_private *pep = NULL;
   1390	struct net_device *dev = NULL;
   1391	struct clk *clk;
   1392	struct device_node *np;
   1393	int err;
   1394
   1395	printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
   1396
   1397	clk = devm_clk_get(&pdev->dev, NULL);
   1398	if (IS_ERR(clk)) {
   1399		dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n");
   1400		return -ENODEV;
   1401	}
   1402	clk_prepare_enable(clk);
   1403
   1404	dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
   1405	if (!dev) {
   1406		err = -ENOMEM;
   1407		goto err_clk;
   1408	}
   1409
   1410	platform_set_drvdata(pdev, dev);
   1411	pep = netdev_priv(dev);
   1412	pep->dev = dev;
   1413	pep->clk = clk;
   1414
   1415	pep->base = devm_platform_ioremap_resource(pdev, 0);
   1416	if (IS_ERR(pep->base)) {
   1417		err = PTR_ERR(pep->base);
   1418		goto err_netdev;
   1419	}
   1420
   1421	err = platform_get_irq(pdev, 0);
   1422	if (err == -EPROBE_DEFER)
   1423		goto err_netdev;
    1424	dev->irq = err;
    1425	BUG_ON(dev->irq < 0);
   1426	dev->netdev_ops = &pxa168_eth_netdev_ops;
   1427	dev->watchdog_timeo = 2 * HZ;
   1428	dev->base_addr = 0;
   1429	dev->ethtool_ops = &pxa168_ethtool_ops;
   1430
   1431	/* MTU range: 68 - 9500 */
   1432	dev->min_mtu = ETH_MIN_MTU;
   1433	dev->max_mtu = 9500;
   1434
   1435	INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
   1436
   1437	err = of_get_ethdev_address(pdev->dev.of_node, dev);
   1438	if (err) {
   1439		u8 addr[ETH_ALEN];
   1440
   1441		/* try reading the mac address, if set by the bootloader */
   1442		pxa168_eth_get_mac_address(dev, addr);
   1443		if (is_valid_ether_addr(addr)) {
   1444			eth_hw_addr_set(dev, addr);
   1445		} else {
   1446			dev_info(&pdev->dev, "Using random mac address\n");
   1447			eth_hw_addr_random(dev);
   1448		}
   1449	}
   1450
   1451	pep->rx_ring_size = NUM_RX_DESCS;
   1452	pep->tx_ring_size = NUM_TX_DESCS;
   1453
   1454	pep->pd = dev_get_platdata(&pdev->dev);
   1455	if (pep->pd) {
   1456		if (pep->pd->rx_queue_size)
   1457			pep->rx_ring_size = pep->pd->rx_queue_size;
   1458
   1459		if (pep->pd->tx_queue_size)
   1460			pep->tx_ring_size = pep->pd->tx_queue_size;
   1461
   1462		pep->port_num = pep->pd->port_number;
   1463		pep->phy_addr = pep->pd->phy_addr;
   1464		pep->phy_speed = pep->pd->speed;
   1465		pep->phy_duplex = pep->pd->duplex;
   1466		pep->phy_intf = pep->pd->intf;
   1467
   1468		if (pep->pd->init)
   1469			pep->pd->init();
   1470	} else if (pdev->dev.of_node) {
   1471		of_property_read_u32(pdev->dev.of_node, "port-id",
   1472				     &pep->port_num);
   1473
   1474		np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
   1475		if (!np) {
   1476			dev_err(&pdev->dev, "missing phy-handle\n");
   1477			err = -EINVAL;
   1478			goto err_netdev;
   1479		}
   1480		of_property_read_u32(np, "reg", &pep->phy_addr);
   1481		of_node_put(np);
   1482		err = of_get_phy_mode(pdev->dev.of_node, &pep->phy_intf);
   1483		if (err && err != -ENODEV)
   1484			goto err_netdev;
   1485	}
   1486
   1487	/* Hardware supports only 3 ports */
   1488	BUG_ON(pep->port_num > 2);
   1489	netif_napi_add_weight(dev, &pep->napi, pxa168_rx_poll,
   1490			      pep->rx_ring_size);
   1491
   1492	memset(&pep->timeout, 0, sizeof(struct timer_list));
   1493	timer_setup(&pep->timeout, rxq_refill_timer_wrapper, 0);
   1494
   1495	pep->smi_bus = mdiobus_alloc();
   1496	if (!pep->smi_bus) {
   1497		err = -ENOMEM;
   1498		goto err_netdev;
   1499	}
   1500	pep->smi_bus->priv = pep;
   1501	pep->smi_bus->name = "pxa168_eth smi";
   1502	pep->smi_bus->read = pxa168_smi_read;
   1503	pep->smi_bus->write = pxa168_smi_write;
   1504	snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
   1505		pdev->name, pdev->id);
   1506	pep->smi_bus->parent = &pdev->dev;
   1507	pep->smi_bus->phy_mask = 0xffffffff;
   1508	err = mdiobus_register(pep->smi_bus);
   1509	if (err)
   1510		goto err_free_mdio;
   1511
   1512	pep->pdev = pdev;
   1513	SET_NETDEV_DEV(dev, &pdev->dev);
   1514	pxa168_init_hw(pep);
   1515	err = register_netdev(dev);
   1516	if (err)
   1517		goto err_mdiobus;
   1518	return 0;
   1519
   1520err_mdiobus:
   1521	mdiobus_unregister(pep->smi_bus);
   1522err_free_mdio:
   1523	mdiobus_free(pep->smi_bus);
   1524err_netdev:
   1525	free_netdev(dev);
   1526err_clk:
   1527	clk_disable_unprepare(clk);
   1528	return err;
   1529}
   1530
   1531static int pxa168_eth_remove(struct platform_device *pdev)
   1532{
   1533	struct net_device *dev = platform_get_drvdata(pdev);
   1534	struct pxa168_eth_private *pep = netdev_priv(dev);
   1535
   1536	cancel_work_sync(&pep->tx_timeout_task);
   1537	if (pep->htpr) {
   1538		dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
   1539				  pep->htpr, pep->htpr_dma);
   1540		pep->htpr = NULL;
   1541	}
   1542	if (dev->phydev)
   1543		phy_disconnect(dev->phydev);
   1544
   1545	clk_disable_unprepare(pep->clk);
   1546	mdiobus_unregister(pep->smi_bus);
   1547	mdiobus_free(pep->smi_bus);
   1548	unregister_netdev(dev);
   1549	free_netdev(dev);
   1550	return 0;
   1551}
   1552
   1553static void pxa168_eth_shutdown(struct platform_device *pdev)
   1554{
   1555	struct net_device *dev = platform_get_drvdata(pdev);
   1556	eth_port_reset(dev);
   1557}
   1558
   1559#ifdef CONFIG_PM
   1560static int pxa168_eth_resume(struct platform_device *pdev)
   1561{
   1562	return -ENOSYS;
   1563}
   1564
   1565static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
   1566{
   1567	return -ENOSYS;
   1568}
   1569
   1570#else
   1571#define pxa168_eth_resume NULL
   1572#define pxa168_eth_suspend NULL
   1573#endif
   1574
   1575static const struct of_device_id pxa168_eth_of_match[] = {
   1576	{ .compatible = "marvell,pxa168-eth" },
   1577	{ },
   1578};
   1579MODULE_DEVICE_TABLE(of, pxa168_eth_of_match);
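/*
 * Illustration only: a hypothetical device-tree node that would bind to
 * this driver (labels, addresses and values are made up, not taken from a
 * real DTS; see the probe path above for the properties it reads):
 *
 *	eth0: ethernet@f7b90000 {
 *		compatible = "marvell,pxa168-eth";
 *		port-id = <0>;
 *		phy-handle = <&ethphy0>;
 *		phy-mode = "rmii";
 *	};
 */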
   1580
   1581static struct platform_driver pxa168_eth_driver = {
   1582	.probe = pxa168_eth_probe,
   1583	.remove = pxa168_eth_remove,
   1584	.shutdown = pxa168_eth_shutdown,
   1585	.resume = pxa168_eth_resume,
   1586	.suspend = pxa168_eth_suspend,
   1587	.driver = {
   1588		.name		= DRIVER_NAME,
   1589		.of_match_table	= of_match_ptr(pxa168_eth_of_match),
   1590	},
   1591};
   1592
   1593module_platform_driver(pxa168_eth_driver);
   1594
   1595MODULE_LICENSE("GPL");
   1596MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
   1597MODULE_ALIAS("platform:pxa168_eth");