cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mv643xx_eth.c (80568B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
      4 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
      5 *
      6 * Based on the 64360 driver from:
      7 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
      8 *		      Rabeeh Khoury <rabeeh@marvell.com>
      9 *
     10 * Copyright (C) 2003 PMC-Sierra, Inc.,
     11 *	written by Manish Lachwani
     12 *
     13 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
     14 *
     15 * Copyright (C) 2004-2006 MontaVista Software, Inc.
     16 *			   Dale Farnsworth <dale@farnsworth.org>
     17 *
     18 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
     19 *				     <sjhill@realitydiluted.com>
     20 *
     21 * Copyright (C) 2007-2008 Marvell Semiconductor
     22 *			   Lennert Buytenhek <buytenh@marvell.com>
     23 *
     24 * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de>
     25 */
     26
     27#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
     28
     29#include <linux/init.h>
     30#include <linux/dma-mapping.h>
     31#include <linux/in.h>
     32#include <linux/ip.h>
     33#include <net/tso.h>
     34#include <linux/tcp.h>
     35#include <linux/udp.h>
     36#include <linux/etherdevice.h>
     37#include <linux/delay.h>
     38#include <linux/ethtool.h>
     39#include <linux/platform_device.h>
     40#include <linux/module.h>
     41#include <linux/kernel.h>
     42#include <linux/spinlock.h>
     43#include <linux/workqueue.h>
     44#include <linux/phy.h>
     45#include <linux/mv643xx_eth.h>
     46#include <linux/io.h>
     47#include <linux/interrupt.h>
     48#include <linux/types.h>
     49#include <linux/slab.h>
     50#include <linux/clk.h>
     51#include <linux/of.h>
     52#include <linux/of_irq.h>
     53#include <linux/of_net.h>
     54#include <linux/of_mdio.h>
     55
     56static char mv643xx_eth_driver_name[] = "mv643xx_eth";
     57static char mv643xx_eth_driver_version[] = "1.4";
     58
     59
     60/*
     61 * Registers shared between all ports.
     62 */
     63#define PHY_ADDR			0x0000
     64#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
     65#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
     66#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
     67#define WINDOW_BAR_ENABLE		0x0290
     68#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))
     69
     70/*
     71 * Main per-port registers.  These live at offset 0x0400 for
     72 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
     73 */
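/*
 * Illustrative arithmetic: the per-port register base is therefore
 * shared base + 0x0400 + (port << 10); e.g. port 2 maps to
 * 0x0400 + 0x0800 = 0x0c00, matching the offsets listed above.
 */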
     74#define PORT_CONFIG			0x0000
     75#define  UNICAST_PROMISCUOUS_MODE	0x00000001
     76#define PORT_CONFIG_EXT			0x0004
     77#define MAC_ADDR_LOW			0x0014
     78#define MAC_ADDR_HIGH			0x0018
     79#define SDMA_CONFIG			0x001c
     80#define  TX_BURST_SIZE_16_64BIT		0x01000000
     81#define  TX_BURST_SIZE_4_64BIT		0x00800000
     82#define  BLM_TX_NO_SWAP			0x00000020
     83#define  BLM_RX_NO_SWAP			0x00000010
     84#define  RX_BURST_SIZE_16_64BIT		0x00000008
     85#define  RX_BURST_SIZE_4_64BIT		0x00000004
     86#define PORT_SERIAL_CONTROL		0x003c
     87#define  SET_MII_SPEED_TO_100		0x01000000
     88#define  SET_GMII_SPEED_TO_1000		0x00800000
     89#define  SET_FULL_DUPLEX_MODE		0x00200000
     90#define  MAX_RX_PACKET_9700BYTE		0x000a0000
     91#define  DISABLE_AUTO_NEG_SPEED_GMII	0x00002000
     92#define  DO_NOT_FORCE_LINK_FAIL		0x00000400
     93#define  SERIAL_PORT_CONTROL_RESERVED	0x00000200
     94#define  DISABLE_AUTO_NEG_FOR_FLOW_CTRL	0x00000008
     95#define  DISABLE_AUTO_NEG_FOR_DUPLEX	0x00000004
     96#define  FORCE_LINK_PASS		0x00000002
     97#define  SERIAL_PORT_ENABLE		0x00000001
     98#define PORT_STATUS			0x0044
     99#define  TX_FIFO_EMPTY			0x00000400
    100#define  TX_IN_PROGRESS			0x00000080
    101#define  PORT_SPEED_MASK		0x00000030
    102#define  PORT_SPEED_1000		0x00000010
    103#define  PORT_SPEED_100			0x00000020
    104#define  PORT_SPEED_10			0x00000000
    105#define  FLOW_CONTROL_ENABLED		0x00000008
    106#define  FULL_DUPLEX			0x00000004
    107#define  LINK_UP			0x00000002
    108#define TXQ_COMMAND			0x0048
    109#define TXQ_FIX_PRIO_CONF		0x004c
    110#define PORT_SERIAL_CONTROL1		0x004c
    111#define  CLK125_BYPASS_EN		0x00000010
    112#define TX_BW_RATE			0x0050
    113#define TX_BW_MTU			0x0058
    114#define TX_BW_BURST			0x005c
    115#define INT_CAUSE			0x0060
    116#define  INT_TX_END			0x07f80000
    117#define  INT_TX_END_0			0x00080000
    118#define  INT_RX				0x000003fc
    119#define  INT_RX_0			0x00000004
    120#define  INT_EXT			0x00000002
    121#define INT_CAUSE_EXT			0x0064
    122#define  INT_EXT_LINK_PHY		0x00110000
    123#define  INT_EXT_TX			0x000000ff
    124#define INT_MASK			0x0068
    125#define INT_MASK_EXT			0x006c
    126#define TX_FIFO_URGENT_THRESHOLD	0x0074
    127#define RX_DISCARD_FRAME_CNT		0x0084
    128#define RX_OVERRUN_FRAME_CNT		0x0088
    129#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
    130#define TX_BW_RATE_MOVED		0x00e0
    131#define TX_BW_MTU_MOVED			0x00e8
    132#define TX_BW_BURST_MOVED		0x00ec
    133#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
    134#define RXQ_COMMAND			0x0280
    135#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
    136#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
    137#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
    138#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))
    139
    140/*
    141 * Misc per-port registers.
    142 */
    143#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
    144#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
    145#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
    146#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))
    147
    148
    149/*
    150 * SDMA configuration register default value.
    151 */
    152#if defined(__BIG_ENDIAN)
    153#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
    154		(RX_BURST_SIZE_4_64BIT	|	\
    155		 TX_BURST_SIZE_4_64BIT)
    156#elif defined(__LITTLE_ENDIAN)
    157#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
    158		(RX_BURST_SIZE_4_64BIT	|	\
    159		 BLM_RX_NO_SWAP		|	\
    160		 BLM_TX_NO_SWAP		|	\
    161		 TX_BURST_SIZE_4_64BIT)
    162#else
    163#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
    164#endif
    165
    166
    167/*
    168 * Misc definitions.
    169 */
    170#define DEFAULT_RX_QUEUE_SIZE	128
    171#define DEFAULT_TX_QUEUE_SIZE	512
    172#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
    173
    174/* Max number of allowed TCP segments for software TSO */
    175#define MV643XX_MAX_TSO_SEGS 100
    176#define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
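/*
 * Worked example (assuming the common MAX_SKB_FRAGS of 17 with 4 KiB
 * pages): MV643XX_MAX_SKB_DESCS = 100 * 2 + 17 = 217.  This value,
 * times two, is also used as the lower clamp on the TX ring size in
 * mv643xx_eth_set_ringparam() below.
 */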
    177
    178#define IS_TSO_HEADER(txq, addr) \
    179	((addr >= txq->tso_hdrs_dma) && \
    180	 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
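/*
 * TSO headers are carved out of the preallocated per-queue tso_hdrs
 * area rather than mapped per packet, so txq_reclaim() uses this test
 * to skip dma_unmap for descriptors whose buffer lies in that region.
 */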
    181
    182#define DESC_DMA_MAP_SINGLE 0
    183#define DESC_DMA_MAP_PAGE 1
    184
    185/*
    186 * RX/TX descriptors.
    187 */
    188#if defined(__BIG_ENDIAN)
    189struct rx_desc {
    190	u16 byte_cnt;		/* Descriptor buffer byte count		*/
    191	u16 buf_size;		/* Buffer size				*/
    192	u32 cmd_sts;		/* Descriptor command status		*/
    193	u32 next_desc_ptr;	/* Next descriptor pointer		*/
    194	u32 buf_ptr;		/* Descriptor buffer pointer		*/
    195};
    196
    197struct tx_desc {
    198	u16 byte_cnt;		/* buffer byte count			*/
    199	u16 l4i_chk;		/* CPU provided TCP checksum		*/
    200	u32 cmd_sts;		/* Command/status field			*/
    201	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
    202	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
    203};
    204#elif defined(__LITTLE_ENDIAN)
    205struct rx_desc {
    206	u32 cmd_sts;		/* Descriptor command status		*/
    207	u16 buf_size;		/* Buffer size				*/
    208	u16 byte_cnt;		/* Descriptor buffer byte count		*/
    209	u32 buf_ptr;		/* Descriptor buffer pointer		*/
    210	u32 next_desc_ptr;	/* Next descriptor pointer		*/
    211};
    212
    213struct tx_desc {
    214	u32 cmd_sts;		/* Command/status field			*/
    215	u16 l4i_chk;		/* CPU provided TCP checksum		*/
    216	u16 byte_cnt;		/* buffer byte count			*/
    217	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
    218	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
    219};
    220#else
    221#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
    222#endif
    223
    224/* RX & TX descriptor command */
    225#define BUFFER_OWNED_BY_DMA		0x80000000
    226
    227/* RX & TX descriptor status */
    228#define ERROR_SUMMARY			0x00000001
    229
    230/* RX descriptor status */
    231#define LAYER_4_CHECKSUM_OK		0x40000000
    232#define RX_ENABLE_INTERRUPT		0x20000000
    233#define RX_FIRST_DESC			0x08000000
    234#define RX_LAST_DESC			0x04000000
    235#define RX_IP_HDR_OK			0x02000000
    236#define RX_PKT_IS_IPV4			0x01000000
    237#define RX_PKT_IS_ETHERNETV2		0x00800000
    238#define RX_PKT_LAYER4_TYPE_MASK		0x00600000
    239#define RX_PKT_LAYER4_TYPE_TCP_IPV4	0x00000000
    240#define RX_PKT_IS_VLAN_TAGGED		0x00080000
    241
    242/* TX descriptor command */
    243#define TX_ENABLE_INTERRUPT		0x00800000
    244#define GEN_CRC				0x00400000
    245#define TX_FIRST_DESC			0x00200000
    246#define TX_LAST_DESC			0x00100000
    247#define ZERO_PADDING			0x00080000
    248#define GEN_IP_V4_CHECKSUM		0x00040000
    249#define GEN_TCP_UDP_CHECKSUM		0x00020000
    250#define UDP_FRAME			0x00010000
    251#define MAC_HDR_EXTRA_4_BYTES		0x00008000
    252#define GEN_TCP_UDP_CHK_FULL		0x00000400
    253#define MAC_HDR_EXTRA_8_BYTES		0x00000200
    254
    255#define TX_IHL_SHIFT			11
    256
    257
    258/* global *******************************************************************/
    259struct mv643xx_eth_shared_private {
    260	/*
    261	 * Ethernet controller base address.
    262	 */
    263	void __iomem *base;
    264
    265	/*
    266	 * Per-port MBUS window access register value.
    267	 */
    268	u32 win_protect;
    269
    270	/*
    271	 * Hardware-specific parameters.
    272	 */
    273	int extended_rx_coal_limit;
    274	int tx_bw_control;
    275	int tx_csum_limit;
    276	struct clk *clk;
    277};
    278
    279#define TX_BW_CONTROL_ABSENT		0
    280#define TX_BW_CONTROL_OLD_LAYOUT	1
    281#define TX_BW_CONTROL_NEW_LAYOUT	2
    282
    283static int mv643xx_eth_open(struct net_device *dev);
    284static int mv643xx_eth_stop(struct net_device *dev);
    285
    286
    287/* per-port *****************************************************************/
    288struct mib_counters {
    289	u64 good_octets_received;
    290	u32 bad_octets_received;
    291	u32 internal_mac_transmit_err;
    292	u32 good_frames_received;
    293	u32 bad_frames_received;
    294	u32 broadcast_frames_received;
    295	u32 multicast_frames_received;
    296	u32 frames_64_octets;
    297	u32 frames_65_to_127_octets;
    298	u32 frames_128_to_255_octets;
    299	u32 frames_256_to_511_octets;
    300	u32 frames_512_to_1023_octets;
    301	u32 frames_1024_to_max_octets;
    302	u64 good_octets_sent;
    303	u32 good_frames_sent;
    304	u32 excessive_collision;
    305	u32 multicast_frames_sent;
    306	u32 broadcast_frames_sent;
    307	u32 unrec_mac_control_received;
    308	u32 fc_sent;
    309	u32 good_fc_received;
    310	u32 bad_fc_received;
    311	u32 undersize_received;
    312	u32 fragments_received;
    313	u32 oversize_received;
    314	u32 jabber_received;
    315	u32 mac_receive_error;
    316	u32 bad_crc_event;
    317	u32 collision;
    318	u32 late_collision;
    319	/* Non MIB hardware counters */
    320	u32 rx_discard;
    321	u32 rx_overrun;
    322};
    323
    324struct rx_queue {
    325	int index;
    326
    327	int rx_ring_size;
    328
    329	int rx_desc_count;
    330	int rx_curr_desc;
    331	int rx_used_desc;
    332
    333	struct rx_desc *rx_desc_area;
    334	dma_addr_t rx_desc_dma;
    335	int rx_desc_area_size;
    336	struct sk_buff **rx_skb;
    337};
    338
    339struct tx_queue {
    340	int index;
    341
    342	int tx_ring_size;
    343
    344	int tx_desc_count;
    345	int tx_curr_desc;
    346	int tx_used_desc;
    347
    348	int tx_stop_threshold;
    349	int tx_wake_threshold;
    350
    351	char *tso_hdrs;
    352	dma_addr_t tso_hdrs_dma;
    353
    354	struct tx_desc *tx_desc_area;
    355	char *tx_desc_mapping; /* array to track the type of the dma mapping */
    356	dma_addr_t tx_desc_dma;
    357	int tx_desc_area_size;
    358
    359	struct sk_buff_head tx_skb;
    360
    361	unsigned long tx_packets;
    362	unsigned long tx_bytes;
    363	unsigned long tx_dropped;
    364};
    365
    366struct mv643xx_eth_private {
    367	struct mv643xx_eth_shared_private *shared;
    368	void __iomem *base;
    369	int port_num;
    370
    371	struct net_device *dev;
    372
    373	struct timer_list mib_counters_timer;
    374	spinlock_t mib_counters_lock;
    375	struct mib_counters mib_counters;
    376
    377	struct work_struct tx_timeout_task;
    378
    379	struct napi_struct napi;
    380	u32 int_mask;
    381	u8 oom;
    382	u8 work_link;
    383	u8 work_tx;
    384	u8 work_tx_end;
    385	u8 work_rx;
    386	u8 work_rx_refill;
    387
    388	int skb_size;
    389
    390	/*
    391	 * RX state.
    392	 */
    393	int rx_ring_size;
    394	unsigned long rx_desc_sram_addr;
    395	int rx_desc_sram_size;
    396	int rxq_count;
    397	struct timer_list rx_oom;
    398	struct rx_queue rxq[8];
    399
    400	/*
    401	 * TX state.
    402	 */
    403	int tx_ring_size;
    404	unsigned long tx_desc_sram_addr;
    405	int tx_desc_sram_size;
    406	int txq_count;
    407	struct tx_queue txq[8];
    408
    409	/*
    410	 * Hardware-specific parameters.
    411	 */
    412	struct clk *clk;
    413	unsigned int t_clk;
    414};
    415
    416
    417/* port register accessors **************************************************/
    418static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
    419{
    420	return readl(mp->shared->base + offset);
    421}
    422
    423static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
    424{
    425	return readl(mp->base + offset);
    426}
    427
    428static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
    429{
    430	writel(data, mp->shared->base + offset);
    431}
    432
    433static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
    434{
    435	writel(data, mp->base + offset);
    436}
    437
    438
    439/* rxq/txq helper functions *************************************************/
    440static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
    441{
    442	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
    443}
    444
    445static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
    446{
    447	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
    448}
    449
    450static void rxq_enable(struct rx_queue *rxq)
    451{
    452	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
    453	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
    454}
    455
    456static void rxq_disable(struct rx_queue *rxq)
    457{
    458	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
    459	u8 mask = 1 << rxq->index;
    460
    461	wrlp(mp, RXQ_COMMAND, mask << 8);
    462	while (rdlp(mp, RXQ_COMMAND) & mask)
    463		udelay(10);
    464}
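/*
 * Note on the queue command registers: the low byte of RXQ_COMMAND (and
 * of TXQ_COMMAND below) holds the per-queue enable bits, while writing
 * the same mask shifted into the high byte requests a disable; the loop
 * above and the one in txq_disable() then poll until the enable bit
 * drops.
 */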
    465
    466static void txq_reset_hw_ptr(struct tx_queue *txq)
    467{
    468	struct mv643xx_eth_private *mp = txq_to_mp(txq);
    469	u32 addr;
    470
    471	addr = (u32)txq->tx_desc_dma;
    472	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
    473	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
    474}
    475
    476static void txq_enable(struct tx_queue *txq)
    477{
    478	struct mv643xx_eth_private *mp = txq_to_mp(txq);
    479	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
    480}
    481
    482static void txq_disable(struct tx_queue *txq)
    483{
    484	struct mv643xx_eth_private *mp = txq_to_mp(txq);
    485	u8 mask = 1 << txq->index;
    486
    487	wrlp(mp, TXQ_COMMAND, mask << 8);
    488	while (rdlp(mp, TXQ_COMMAND) & mask)
    489		udelay(10);
    490}
    491
    492static void txq_maybe_wake(struct tx_queue *txq)
    493{
    494	struct mv643xx_eth_private *mp = txq_to_mp(txq);
    495	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
    496
    497	if (netif_tx_queue_stopped(nq)) {
    498		__netif_tx_lock(nq, smp_processor_id());
    499		if (txq->tx_desc_count <= txq->tx_wake_threshold)
    500			netif_tx_wake_queue(nq);
    501		__netif_tx_unlock(nq);
    502	}
    503}
    504
    505static int rxq_process(struct rx_queue *rxq, int budget)
    506{
    507	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
    508	struct net_device_stats *stats = &mp->dev->stats;
    509	int rx;
    510
    511	rx = 0;
    512	while (rx < budget && rxq->rx_desc_count) {
    513		struct rx_desc *rx_desc;
    514		unsigned int cmd_sts;
    515		struct sk_buff *skb;
    516		u16 byte_cnt;
    517
    518		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
    519
    520		cmd_sts = rx_desc->cmd_sts;
    521		if (cmd_sts & BUFFER_OWNED_BY_DMA)
    522			break;
    523		rmb();
    524
    525		skb = rxq->rx_skb[rxq->rx_curr_desc];
    526		rxq->rx_skb[rxq->rx_curr_desc] = NULL;
    527
    528		rxq->rx_curr_desc++;
    529		if (rxq->rx_curr_desc == rxq->rx_ring_size)
    530			rxq->rx_curr_desc = 0;
    531
    532		dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
    533				 rx_desc->buf_size, DMA_FROM_DEVICE);
    534		rxq->rx_desc_count--;
    535		rx++;
    536
    537		mp->work_rx_refill |= 1 << rxq->index;
    538
    539		byte_cnt = rx_desc->byte_cnt;
    540
    541		/*
    542		 * Update statistics.
    543		 *
    544		 * Note that the descriptor byte count includes 2 dummy
    545		 * bytes automatically inserted by the hardware at the
    546		 * start of the packet (which we don't count), and a 4
    547		 * byte CRC at the end of the packet (which we do count).
    548		 */
    549		stats->rx_packets++;
    550		stats->rx_bytes += byte_cnt - 2;
    551
    552		/*
    553		 * In case we received a packet without first / last bits
    554		 * on, or the error summary bit is set, the packet needs
    555		 * to be dropped.
    556		 */
    557		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
    558			!= (RX_FIRST_DESC | RX_LAST_DESC))
    559			goto err;
    560
    561		/*
    562		 * The -4 is for the CRC in the trailer of the
    563		 * received packet
    564		 */
    565		skb_put(skb, byte_cnt - 2 - 4);
    566
    567		if (cmd_sts & LAYER_4_CHECKSUM_OK)
    568			skb->ip_summed = CHECKSUM_UNNECESSARY;
    569		skb->protocol = eth_type_trans(skb, mp->dev);
    570
    571		napi_gro_receive(&mp->napi, skb);
    572
    573		continue;
    574
    575err:
    576		stats->rx_dropped++;
    577
    578		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
    579			(RX_FIRST_DESC | RX_LAST_DESC)) {
    580			if (net_ratelimit())
    581				netdev_err(mp->dev,
    582					   "received packet spanning multiple descriptors\n");
    583		}
    584
    585		if (cmd_sts & ERROR_SUMMARY)
    586			stats->rx_errors++;
    587
    588		dev_kfree_skb(skb);
    589	}
    590
    591	if (rx < budget)
    592		mp->work_rx &= ~(1 << rxq->index);
    593
    594	return rx;
    595}
    596
    597static int rxq_refill(struct rx_queue *rxq, int budget)
    598{
    599	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
    600	int refilled;
    601
    602	refilled = 0;
    603	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
    604		struct sk_buff *skb;
    605		int rx;
    606		struct rx_desc *rx_desc;
    607		int size;
    608
    609		skb = netdev_alloc_skb(mp->dev, mp->skb_size);
    610
    611		if (skb == NULL) {
    612			mp->oom = 1;
    613			goto oom;
    614		}
    615
    616		if (SKB_DMA_REALIGN)
    617			skb_reserve(skb, SKB_DMA_REALIGN);
    618
    619		refilled++;
    620		rxq->rx_desc_count++;
    621
    622		rx = rxq->rx_used_desc++;
    623		if (rxq->rx_used_desc == rxq->rx_ring_size)
    624			rxq->rx_used_desc = 0;
    625
    626		rx_desc = rxq->rx_desc_area + rx;
    627
    628		size = skb_end_pointer(skb) - skb->data;
    629		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
    630						  skb->data, size,
    631						  DMA_FROM_DEVICE);
    632		rx_desc->buf_size = size;
    633		rxq->rx_skb[rx] = skb;
    634		wmb();
    635		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
    636		wmb();
    637
    638		/*
    639		 * The hardware automatically prepends 2 bytes of
    640		 * dummy data to each received packet, so that the
    641		 * IP header ends up 16-byte aligned.
    642		 */
    643		skb_reserve(skb, 2);
    644	}
    645
    646	if (refilled < budget)
    647		mp->work_rx_refill &= ~(1 << rxq->index);
    648
    649oom:
    650	return refilled;
    651}
    652
    653
    654/* tx ***********************************************************************/
    655static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
    656{
    657	int frag;
    658
    659	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
    660		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
    661
    662		if (skb_frag_size(fragp) <= 8 && skb_frag_off(fragp) & 7)
    663			return 1;
    664	}
    665
    666	return 0;
    667}
    668
    669static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
    670		       u16 *l4i_chk, u32 *command, int length)
    671{
    672	int ret;
    673	u32 cmd = 0;
    674
    675	if (skb->ip_summed == CHECKSUM_PARTIAL) {
    676		int hdr_len;
    677		int tag_bytes;
    678
    679		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
    680		       skb->protocol != htons(ETH_P_8021Q));
    681
    682		hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
    683		tag_bytes = hdr_len - ETH_HLEN;
    684
    685		if (length - hdr_len > mp->shared->tx_csum_limit ||
    686		    unlikely(tag_bytes & ~12)) {
    687			ret = skb_checksum_help(skb);
    688			if (!ret)
    689				goto no_csum;
    690			return ret;
    691		}
    692
    693		if (tag_bytes & 4)
    694			cmd |= MAC_HDR_EXTRA_4_BYTES;
    695		if (tag_bytes & 8)
    696			cmd |= MAC_HDR_EXTRA_8_BYTES;
    697
    698		cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
    699			   GEN_IP_V4_CHECKSUM   |
    700			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;
    701
    702		/* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
    703		 * it seems we don't need to pass the initial checksum.
    704		 */
    705		switch (ip_hdr(skb)->protocol) {
    706		case IPPROTO_UDP:
    707			cmd |= UDP_FRAME;
    708			*l4i_chk = 0;
    709			break;
    710		case IPPROTO_TCP:
    711			*l4i_chk = 0;
    712			break;
    713		default:
    714			WARN(1, "protocol not supported");
    715		}
    716	} else {
    717no_csum:
    718		/* Errata BTS #50, IHL must be 5 if no HW checksum */
    719		cmd |= 5 << TX_IHL_SHIFT;
    720	}
    721	*command = cmd;
    722	return 0;
    723}
    724
    725static inline int
    726txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
    727		 struct sk_buff *skb, char *data, int length,
    728		 bool last_tcp, bool is_last)
    729{
    730	int tx_index;
    731	u32 cmd_sts;
    732	struct tx_desc *desc;
    733
    734	tx_index = txq->tx_curr_desc++;
    735	if (txq->tx_curr_desc == txq->tx_ring_size)
    736		txq->tx_curr_desc = 0;
    737	desc = &txq->tx_desc_area[tx_index];
    738	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
    739
    740	desc->l4i_chk = 0;
    741	desc->byte_cnt = length;
    742
    743	if (length <= 8 && (uintptr_t)data & 0x7) {
    744		/* Copy unaligned small data fragment to TSO header data area */
    745		memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE,
    746		       data, length);
    747		desc->buf_ptr = txq->tso_hdrs_dma
    748			+ tx_index * TSO_HEADER_SIZE;
    749	} else {
    750		/* Alignment is okay, map buffer and hand off to hardware */
    751		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
    752		desc->buf_ptr = dma_map_single(dev->dev.parent, data,
    753			length, DMA_TO_DEVICE);
    754		if (unlikely(dma_mapping_error(dev->dev.parent,
    755					       desc->buf_ptr))) {
    756			WARN(1, "dma_map_single failed!\n");
    757			return -ENOMEM;
    758		}
    759	}
    760
    761	cmd_sts = BUFFER_OWNED_BY_DMA;
    762	if (last_tcp) {
    763		/* last descriptor in the TCP packet */
    764		cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
    765		/* last descriptor in SKB */
    766		if (is_last)
    767			cmd_sts |= TX_ENABLE_INTERRUPT;
    768	}
    769	desc->cmd_sts = cmd_sts;
    770	return 0;
    771}
    772
    773static inline void
    774txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
    775		u32 *first_cmd_sts, bool first_desc)
    776{
    777	struct mv643xx_eth_private *mp = txq_to_mp(txq);
    778	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
    779	int tx_index;
    780	struct tx_desc *desc;
    781	int ret;
    782	u32 cmd_csum = 0;
    783	u16 l4i_chk = 0;
    784	u32 cmd_sts;
    785
    786	tx_index = txq->tx_curr_desc;
    787	desc = &txq->tx_desc_area[tx_index];
    788
    789	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
    790	if (ret)
    791		WARN(1, "failed to prepare checksum!");
    792
    793	/* Should we set this? Can't use the value from skb_tx_csum()
    794	 * as it's not the correct initial L4 checksum to use.
    795	 */
    796	desc->l4i_chk = 0;
    797
    798	desc->byte_cnt = hdr_len;
    799	desc->buf_ptr = txq->tso_hdrs_dma +
    800			txq->tx_curr_desc * TSO_HEADER_SIZE;
    801	cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA  | TX_FIRST_DESC |
    802				   GEN_CRC;
    803
    804	/* Defer updating the first command descriptor until all
    805	 * following descriptors have been written.
    806	 */
    807	if (first_desc)
    808		*first_cmd_sts = cmd_sts;
    809	else
    810		desc->cmd_sts = cmd_sts;
    811
    812	txq->tx_curr_desc++;
    813	if (txq->tx_curr_desc == txq->tx_ring_size)
    814		txq->tx_curr_desc = 0;
    815}
    816
    817static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
    818			  struct net_device *dev)
    819{
    820	struct mv643xx_eth_private *mp = txq_to_mp(txq);
    821	int hdr_len, total_len, data_left, ret;
    822	int desc_count = 0;
    823	struct tso_t tso;
    824	struct tx_desc *first_tx_desc;
    825	u32 first_cmd_sts = 0;
    826
    827	/* Count needed descriptors */
    828	if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
    829		netdev_dbg(dev, "not enough descriptors for TSO!\n");
    830		return -EBUSY;
    831	}
    832
    833	first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];
    834
    835	/* Initialize the TSO handler, and prepare the first payload */
    836	hdr_len = tso_start(skb, &tso);
    837
    838	total_len = skb->len - hdr_len;
    839	while (total_len > 0) {
    840		bool first_desc = (desc_count == 0);
    841		char *hdr;
    842
    843		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
    844		total_len -= data_left;
    845		desc_count++;
    846
    847		/* prepare packet headers: MAC + IP + TCP */
    848		hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
    849		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
    850		txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
    851				first_desc);
    852
    853		while (data_left > 0) {
    854			int size;
    855			desc_count++;
    856
    857			size = min_t(int, tso.size, data_left);
    858			ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
    859					       size == data_left,
    860					       total_len == 0);
    861			if (ret)
    862				goto err_release;
    863			data_left -= size;
    864			tso_build_data(skb, &tso, size);
    865		}
    866	}
    867
    868	__skb_queue_tail(&txq->tx_skb, skb);
    869	skb_tx_timestamp(skb);
    870
    871	/* ensure all other descriptors are written before first cmd_sts */
    872	wmb();
    873	first_tx_desc->cmd_sts = first_cmd_sts;
    874
    875	/* clear TX_END status */
    876	mp->work_tx_end &= ~(1 << txq->index);
    877
    878	/* ensure all descriptors are written before poking hardware */
    879	wmb();
    880	txq_enable(txq);
    881	txq->tx_desc_count += desc_count;
    882	return 0;
    883err_release:
    884	/* TODO: Release all used data descriptors; header descriptors must not
    885	 * be DMA-unmapped.
    886	 */
    887	return ret;
    888}
    889
    890static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
    891{
    892	struct mv643xx_eth_private *mp = txq_to_mp(txq);
    893	int nr_frags = skb_shinfo(skb)->nr_frags;
    894	int frag;
    895
    896	for (frag = 0; frag < nr_frags; frag++) {
    897		skb_frag_t *this_frag;
    898		int tx_index;
    899		struct tx_desc *desc;
    900
    901		this_frag = &skb_shinfo(skb)->frags[frag];
    902		tx_index = txq->tx_curr_desc++;
    903		if (txq->tx_curr_desc == txq->tx_ring_size)
    904			txq->tx_curr_desc = 0;
    905		desc = &txq->tx_desc_area[tx_index];
    906		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
    907
    908		/*
    909		 * The last fragment will generate an interrupt
    910		 * which will free the skb on TX completion.
    911		 */
    912		if (frag == nr_frags - 1) {
    913			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
    914					ZERO_PADDING | TX_LAST_DESC |
    915					TX_ENABLE_INTERRUPT;
    916		} else {
    917			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
    918		}
    919
    920		desc->l4i_chk = 0;
    921		desc->byte_cnt = skb_frag_size(this_frag);
    922		desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
    923						 this_frag, 0, desc->byte_cnt,
    924						 DMA_TO_DEVICE);
    925	}
    926}
    927
    928static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
    929			  struct net_device *dev)
    930{
    931	struct mv643xx_eth_private *mp = txq_to_mp(txq);
    932	int nr_frags = skb_shinfo(skb)->nr_frags;
    933	int tx_index;
    934	struct tx_desc *desc;
    935	u32 cmd_sts;
    936	u16 l4i_chk;
    937	int length, ret;
    938
    939	cmd_sts = 0;
    940	l4i_chk = 0;
    941
    942	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
    943		if (net_ratelimit())
    944			netdev_err(dev, "tx queue full?!\n");
    945		return -EBUSY;
    946	}
    947
    948	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
    949	if (ret)
    950		return ret;
    951	cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
    952
    953	tx_index = txq->tx_curr_desc++;
    954	if (txq->tx_curr_desc == txq->tx_ring_size)
    955		txq->tx_curr_desc = 0;
    956	desc = &txq->tx_desc_area[tx_index];
    957	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
    958
    959	if (nr_frags) {
    960		txq_submit_frag_skb(txq, skb);
    961		length = skb_headlen(skb);
    962	} else {
    963		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
    964		length = skb->len;
    965	}
    966
    967	desc->l4i_chk = l4i_chk;
    968	desc->byte_cnt = length;
    969	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
    970				       length, DMA_TO_DEVICE);
    971
    972	__skb_queue_tail(&txq->tx_skb, skb);
    973
    974	skb_tx_timestamp(skb);
    975
    976	/* ensure all other descriptors are written before first cmd_sts */
    977	wmb();
    978	desc->cmd_sts = cmd_sts;
    979
    980	/* clear TX_END status */
    981	mp->work_tx_end &= ~(1 << txq->index);
    982
    983	/* ensure all descriptors are written before poking hardware */
    984	wmb();
    985	txq_enable(txq);
    986
    987	txq->tx_desc_count += nr_frags + 1;
    988
    989	return 0;
    990}
    991
    992static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
    993{
    994	struct mv643xx_eth_private *mp = netdev_priv(dev);
    995	int length, queue, ret;
    996	struct tx_queue *txq;
    997	struct netdev_queue *nq;
    998
    999	queue = skb_get_queue_mapping(skb);
   1000	txq = mp->txq + queue;
   1001	nq = netdev_get_tx_queue(dev, queue);
   1002
   1003	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
   1004		netdev_printk(KERN_DEBUG, dev,
   1005			      "failed to linearize skb with tiny unaligned fragment\n");
   1006		return NETDEV_TX_BUSY;
   1007	}
   1008
   1009	length = skb->len;
   1010
   1011	if (skb_is_gso(skb))
   1012		ret = txq_submit_tso(txq, skb, dev);
   1013	else
   1014		ret = txq_submit_skb(txq, skb, dev);
   1015	if (!ret) {
   1016		txq->tx_bytes += length;
   1017		txq->tx_packets++;
   1018
   1019		if (txq->tx_desc_count >= txq->tx_stop_threshold)
   1020			netif_tx_stop_queue(nq);
   1021	} else {
   1022		txq->tx_dropped++;
   1023		dev_kfree_skb_any(skb);
   1024	}
   1025
   1026	return NETDEV_TX_OK;
   1027}
   1028
   1029
   1030/* tx napi ******************************************************************/
   1031static void txq_kick(struct tx_queue *txq)
   1032{
   1033	struct mv643xx_eth_private *mp = txq_to_mp(txq);
   1034	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
   1035	u32 hw_desc_ptr;
   1036	u32 expected_ptr;
   1037
   1038	__netif_tx_lock(nq, smp_processor_id());
   1039
   1040	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
   1041		goto out;
   1042
   1043	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
   1044	expected_ptr = (u32)txq->tx_desc_dma +
   1045				txq->tx_curr_desc * sizeof(struct tx_desc);
   1046
   1047	if (hw_desc_ptr != expected_ptr)
   1048		txq_enable(txq);
   1049
   1050out:
   1051	__netif_tx_unlock(nq);
   1052
   1053	mp->work_tx_end &= ~(1 << txq->index);
   1054}
   1055
   1056static int txq_reclaim(struct tx_queue *txq, int budget, int force)
   1057{
   1058	struct mv643xx_eth_private *mp = txq_to_mp(txq);
   1059	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
   1060	int reclaimed;
   1061
   1062	__netif_tx_lock_bh(nq);
   1063
   1064	reclaimed = 0;
   1065	while (reclaimed < budget && txq->tx_desc_count > 0) {
   1066		int tx_index;
   1067		struct tx_desc *desc;
   1068		u32 cmd_sts;
   1069		char desc_dma_map;
   1070
   1071		tx_index = txq->tx_used_desc;
   1072		desc = &txq->tx_desc_area[tx_index];
   1073		desc_dma_map = txq->tx_desc_mapping[tx_index];
   1074
   1075		cmd_sts = desc->cmd_sts;
   1076
   1077		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
   1078			if (!force)
   1079				break;
   1080			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
   1081		}
   1082
   1083		txq->tx_used_desc = tx_index + 1;
   1084		if (txq->tx_used_desc == txq->tx_ring_size)
   1085			txq->tx_used_desc = 0;
   1086
   1087		reclaimed++;
   1088		txq->tx_desc_count--;
   1089
   1090		if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
   1091
   1092			if (desc_dma_map == DESC_DMA_MAP_PAGE)
   1093				dma_unmap_page(mp->dev->dev.parent,
   1094					       desc->buf_ptr,
   1095					       desc->byte_cnt,
   1096					       DMA_TO_DEVICE);
   1097			else
   1098				dma_unmap_single(mp->dev->dev.parent,
   1099						 desc->buf_ptr,
   1100						 desc->byte_cnt,
   1101						 DMA_TO_DEVICE);
   1102		}
   1103
   1104		if (cmd_sts & TX_ENABLE_INTERRUPT) {
   1105			struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
   1106
   1107			if (!WARN_ON(!skb))
   1108				dev_consume_skb_any(skb);
   1109		}
   1110
   1111		if (cmd_sts & ERROR_SUMMARY) {
   1112			netdev_info(mp->dev, "tx error\n");
   1113			mp->dev->stats.tx_errors++;
   1114		}
   1115
   1116	}
   1117
   1118	__netif_tx_unlock_bh(nq);
   1119
   1120	if (reclaimed < budget)
   1121		mp->work_tx &= ~(1 << txq->index);
   1122
   1123	return reclaimed;
   1124}
   1125
   1126
   1127/* tx rate control **********************************************************/
   1128/*
   1129 * Set total maximum TX rate (shared by all TX queues for this port)
   1130 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
   1131 */
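/*
 * Worked example (assuming t_clk = 133 MHz): for rate = 1 Gb/s,
 * token_rate = ((1000000000 / 1000) * 64) / (133000000 / 1000) = 481,
 * which stays below the 1023 cap applied below.
 */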
   1132static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
   1133{
   1134	int token_rate;
   1135	int mtu;
   1136	int bucket_size;
   1137
   1138	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
   1139	if (token_rate > 1023)
   1140		token_rate = 1023;
   1141
   1142	mtu = (mp->dev->mtu + 255) >> 8;
   1143	if (mtu > 63)
   1144		mtu = 63;
   1145
   1146	bucket_size = (burst + 255) >> 8;
   1147	if (bucket_size > 65535)
   1148		bucket_size = 65535;
   1149
   1150	switch (mp->shared->tx_bw_control) {
   1151	case TX_BW_CONTROL_OLD_LAYOUT:
   1152		wrlp(mp, TX_BW_RATE, token_rate);
   1153		wrlp(mp, TX_BW_MTU, mtu);
   1154		wrlp(mp, TX_BW_BURST, bucket_size);
   1155		break;
   1156	case TX_BW_CONTROL_NEW_LAYOUT:
   1157		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
   1158		wrlp(mp, TX_BW_MTU_MOVED, mtu);
   1159		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
   1160		break;
   1161	}
   1162}
   1163
   1164static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
   1165{
   1166	struct mv643xx_eth_private *mp = txq_to_mp(txq);
   1167	int token_rate;
   1168	int bucket_size;
   1169
   1170	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
   1171	if (token_rate > 1023)
   1172		token_rate = 1023;
   1173
   1174	bucket_size = (burst + 255) >> 8;
   1175	if (bucket_size > 65535)
   1176		bucket_size = 65535;
   1177
   1178	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
   1179	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
   1180}
   1181
   1182static void txq_set_fixed_prio_mode(struct tx_queue *txq)
   1183{
   1184	struct mv643xx_eth_private *mp = txq_to_mp(txq);
   1185	int off;
   1186	u32 val;
   1187
   1188	/*
   1189	 * Turn on fixed priority mode.
   1190	 */
   1191	off = 0;
   1192	switch (mp->shared->tx_bw_control) {
   1193	case TX_BW_CONTROL_OLD_LAYOUT:
   1194		off = TXQ_FIX_PRIO_CONF;
   1195		break;
   1196	case TX_BW_CONTROL_NEW_LAYOUT:
   1197		off = TXQ_FIX_PRIO_CONF_MOVED;
   1198		break;
   1199	}
   1200
   1201	if (off) {
   1202		val = rdlp(mp, off);
   1203		val |= 1 << txq->index;
   1204		wrlp(mp, off, val);
   1205	}
   1206}
   1207
   1208
   1209/* mii management interface *************************************************/
   1210static void mv643xx_eth_adjust_link(struct net_device *dev)
   1211{
   1212	struct mv643xx_eth_private *mp = netdev_priv(dev);
   1213	u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
   1214	u32 autoneg_disable = FORCE_LINK_PASS |
   1215	             DISABLE_AUTO_NEG_SPEED_GMII |
   1216		     DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
   1217		     DISABLE_AUTO_NEG_FOR_DUPLEX;
   1218
   1219	if (dev->phydev->autoneg == AUTONEG_ENABLE) {
   1220		/* enable auto negotiation */
   1221		pscr &= ~autoneg_disable;
   1222		goto out_write;
   1223	}
   1224
   1225	pscr |= autoneg_disable;
   1226
   1227	if (dev->phydev->speed == SPEED_1000) {
   1228		/* force gigabit, half duplex not supported */
   1229		pscr |= SET_GMII_SPEED_TO_1000;
   1230		pscr |= SET_FULL_DUPLEX_MODE;
   1231		goto out_write;
   1232	}
   1233
   1234	pscr &= ~SET_GMII_SPEED_TO_1000;
   1235
   1236	if (dev->phydev->speed == SPEED_100)
   1237		pscr |= SET_MII_SPEED_TO_100;
   1238	else
   1239		pscr &= ~SET_MII_SPEED_TO_100;
   1240
   1241	if (dev->phydev->duplex == DUPLEX_FULL)
   1242		pscr |= SET_FULL_DUPLEX_MODE;
   1243	else
   1244		pscr &= ~SET_FULL_DUPLEX_MODE;
   1245
   1246out_write:
   1247	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
   1248}
   1249
   1250/* statistics ***************************************************************/
   1251static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
   1252{
   1253	struct mv643xx_eth_private *mp = netdev_priv(dev);
   1254	struct net_device_stats *stats = &dev->stats;
   1255	unsigned long tx_packets = 0;
   1256	unsigned long tx_bytes = 0;
   1257	unsigned long tx_dropped = 0;
   1258	int i;
   1259
   1260	for (i = 0; i < mp->txq_count; i++) {
   1261		struct tx_queue *txq = mp->txq + i;
   1262
   1263		tx_packets += txq->tx_packets;
   1264		tx_bytes += txq->tx_bytes;
   1265		tx_dropped += txq->tx_dropped;
   1266	}
   1267
   1268	stats->tx_packets = tx_packets;
   1269	stats->tx_bytes = tx_bytes;
   1270	stats->tx_dropped = tx_dropped;
   1271
   1272	return stats;
   1273}
   1274
   1275static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
   1276{
   1277	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
   1278}
   1279
   1280static void mib_counters_clear(struct mv643xx_eth_private *mp)
   1281{
   1282	int i;
   1283
   1284	for (i = 0; i < 0x80; i += 4)
   1285		mib_read(mp, i);
   1286
   1287	/* Clear non MIB hw counters also */
   1288	rdlp(mp, RX_DISCARD_FRAME_CNT);
   1289	rdlp(mp, RX_OVERRUN_FRAME_CNT);
   1290}
   1291
   1292static void mib_counters_update(struct mv643xx_eth_private *mp)
   1293{
   1294	struct mib_counters *p = &mp->mib_counters;
   1295
   1296	spin_lock_bh(&mp->mib_counters_lock);
   1297	p->good_octets_received += mib_read(mp, 0x00);
   1298	p->bad_octets_received += mib_read(mp, 0x08);
   1299	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
   1300	p->good_frames_received += mib_read(mp, 0x10);
   1301	p->bad_frames_received += mib_read(mp, 0x14);
   1302	p->broadcast_frames_received += mib_read(mp, 0x18);
   1303	p->multicast_frames_received += mib_read(mp, 0x1c);
   1304	p->frames_64_octets += mib_read(mp, 0x20);
   1305	p->frames_65_to_127_octets += mib_read(mp, 0x24);
   1306	p->frames_128_to_255_octets += mib_read(mp, 0x28);
   1307	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
   1308	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
   1309	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
   1310	p->good_octets_sent += mib_read(mp, 0x38);
   1311	p->good_frames_sent += mib_read(mp, 0x40);
   1312	p->excessive_collision += mib_read(mp, 0x44);
   1313	p->multicast_frames_sent += mib_read(mp, 0x48);
   1314	p->broadcast_frames_sent += mib_read(mp, 0x4c);
   1315	p->unrec_mac_control_received += mib_read(mp, 0x50);
   1316	p->fc_sent += mib_read(mp, 0x54);
   1317	p->good_fc_received += mib_read(mp, 0x58);
   1318	p->bad_fc_received += mib_read(mp, 0x5c);
   1319	p->undersize_received += mib_read(mp, 0x60);
   1320	p->fragments_received += mib_read(mp, 0x64);
   1321	p->oversize_received += mib_read(mp, 0x68);
   1322	p->jabber_received += mib_read(mp, 0x6c);
   1323	p->mac_receive_error += mib_read(mp, 0x70);
   1324	p->bad_crc_event += mib_read(mp, 0x74);
   1325	p->collision += mib_read(mp, 0x78);
   1326	p->late_collision += mib_read(mp, 0x7c);
   1327	/* Non MIB hardware counters */
   1328	p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
   1329	p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
   1330	spin_unlock_bh(&mp->mib_counters_lock);
   1331}
   1332
   1333static void mib_counters_timer_wrapper(struct timer_list *t)
   1334{
   1335	struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer);
   1336	mib_counters_update(mp);
   1337	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
   1338}
   1339
   1340
   1341/* interrupt coalescing *****************************************************/
   1342/*
   1343 * Hardware coalescing parameters are set in units of 64 t_clk
   1344 * cycles.  I.e.:
   1345 *
   1346 *	coal_delay_in_usec = 64000000 * register_value / t_clk_rate
   1347 *
   1348 *	register_value = coal_delay_in_usec * t_clk_rate / 64000000
   1349 *
   1350 * In the ->set*() methods, we round the computed register value
   1351 * to the nearest integer.
   1352 */
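/*
 * Worked example (assuming t_clk = 133 MHz): a requested delay of
 * 100 usec becomes register_value = (100 * 133000000 + 31999999) /
 * 64000000 = 208, and reading that back gives (208 * 64000000 +
 * t_clk / 2) / 133000000 = 100 usec again.
 */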
   1353static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
   1354{
   1355	u32 val = rdlp(mp, SDMA_CONFIG);
   1356	u64 temp;
   1357
   1358	if (mp->shared->extended_rx_coal_limit)
   1359		temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
   1360	else
   1361		temp = (val & 0x003fff00) >> 8;
   1362
   1363	temp *= 64000000;
   1364	temp += mp->t_clk / 2;
   1365	do_div(temp, mp->t_clk);
   1366
   1367	return (unsigned int)temp;
   1368}
   1369
   1370static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
   1371{
   1372	u64 temp;
   1373	u32 val;
   1374
   1375	temp = (u64)usec * mp->t_clk;
   1376	temp += 31999999;
   1377	do_div(temp, 64000000);
   1378
   1379	val = rdlp(mp, SDMA_CONFIG);
   1380	if (mp->shared->extended_rx_coal_limit) {
   1381		if (temp > 0xffff)
   1382			temp = 0xffff;
   1383		val &= ~0x023fff80;
   1384		val |= (temp & 0x8000) << 10;
   1385		val |= (temp & 0x7fff) << 7;
   1386	} else {
   1387		if (temp > 0x3fff)
   1388			temp = 0x3fff;
   1389		val &= ~0x003fff00;
   1390		val |= (temp & 0x3fff) << 8;
   1391	}
   1392	wrlp(mp, SDMA_CONFIG, val);
   1393}
   1394
   1395static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
   1396{
   1397	u64 temp;
   1398
   1399	temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
   1400	temp *= 64000000;
   1401	temp += mp->t_clk / 2;
   1402	do_div(temp, mp->t_clk);
   1403
   1404	return (unsigned int)temp;
   1405}
   1406
   1407static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
   1408{
   1409	u64 temp;
   1410
   1411	temp = (u64)usec * mp->t_clk;
   1412	temp += 31999999;
   1413	do_div(temp, 64000000);
   1414
   1415	if (temp > 0x3fff)
   1416		temp = 0x3fff;
   1417
   1418	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
   1419}
   1420
   1421
   1422/* ethtool ******************************************************************/
   1423struct mv643xx_eth_stats {
   1424	char stat_string[ETH_GSTRING_LEN];
   1425	int sizeof_stat;
   1426	int netdev_off;
   1427	int mp_off;
   1428};
   1429
   1430#define SSTAT(m)						\
   1431	{ #m, sizeof_field(struct net_device_stats, m),		\
   1432	  offsetof(struct net_device, stats.m), -1 }
   1433
   1434#define MIBSTAT(m)						\
   1435	{ #m, sizeof_field(struct mib_counters, m),		\
   1436	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
   1437
   1438static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
   1439	SSTAT(rx_packets),
   1440	SSTAT(tx_packets),
   1441	SSTAT(rx_bytes),
   1442	SSTAT(tx_bytes),
   1443	SSTAT(rx_errors),
   1444	SSTAT(tx_errors),
   1445	SSTAT(rx_dropped),
   1446	SSTAT(tx_dropped),
   1447	MIBSTAT(good_octets_received),
   1448	MIBSTAT(bad_octets_received),
   1449	MIBSTAT(internal_mac_transmit_err),
   1450	MIBSTAT(good_frames_received),
   1451	MIBSTAT(bad_frames_received),
   1452	MIBSTAT(broadcast_frames_received),
   1453	MIBSTAT(multicast_frames_received),
   1454	MIBSTAT(frames_64_octets),
   1455	MIBSTAT(frames_65_to_127_octets),
   1456	MIBSTAT(frames_128_to_255_octets),
   1457	MIBSTAT(frames_256_to_511_octets),
   1458	MIBSTAT(frames_512_to_1023_octets),
   1459	MIBSTAT(frames_1024_to_max_octets),
   1460	MIBSTAT(good_octets_sent),
   1461	MIBSTAT(good_frames_sent),
   1462	MIBSTAT(excessive_collision),
   1463	MIBSTAT(multicast_frames_sent),
   1464	MIBSTAT(broadcast_frames_sent),
   1465	MIBSTAT(unrec_mac_control_received),
   1466	MIBSTAT(fc_sent),
   1467	MIBSTAT(good_fc_received),
   1468	MIBSTAT(bad_fc_received),
   1469	MIBSTAT(undersize_received),
   1470	MIBSTAT(fragments_received),
   1471	MIBSTAT(oversize_received),
   1472	MIBSTAT(jabber_received),
   1473	MIBSTAT(mac_receive_error),
   1474	MIBSTAT(bad_crc_event),
   1475	MIBSTAT(collision),
   1476	MIBSTAT(late_collision),
   1477	MIBSTAT(rx_discard),
   1478	MIBSTAT(rx_overrun),
   1479};
   1480
   1481static int
   1482mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
   1483				   struct ethtool_link_ksettings *cmd)
   1484{
   1485	struct net_device *dev = mp->dev;
   1486
   1487	phy_ethtool_ksettings_get(dev->phydev, cmd);
   1488
   1489	/*
   1490	 * The MAC does not support 1000baseT_Half.
   1491	 */
   1492	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
   1493			   cmd->link_modes.supported);
   1494	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
   1495			   cmd->link_modes.advertising);
   1496
   1497	return 0;
   1498}
   1499
   1500static int
   1501mv643xx_eth_get_link_ksettings_phyless(struct mv643xx_eth_private *mp,
   1502				       struct ethtool_link_ksettings *cmd)
   1503{
   1504	u32 port_status;
   1505	u32 supported, advertising;
   1506
   1507	port_status = rdlp(mp, PORT_STATUS);
   1508
   1509	supported = SUPPORTED_MII;
   1510	advertising = ADVERTISED_MII;
   1511	switch (port_status & PORT_SPEED_MASK) {
   1512	case PORT_SPEED_10:
   1513		cmd->base.speed = SPEED_10;
   1514		break;
   1515	case PORT_SPEED_100:
   1516		cmd->base.speed = SPEED_100;
   1517		break;
   1518	case PORT_SPEED_1000:
   1519		cmd->base.speed = SPEED_1000;
   1520		break;
   1521	default:
   1522		cmd->base.speed = -1;
   1523		break;
   1524	}
   1525	cmd->base.duplex = (port_status & FULL_DUPLEX) ?
   1526		DUPLEX_FULL : DUPLEX_HALF;
   1527	cmd->base.port = PORT_MII;
   1528	cmd->base.phy_address = 0;
   1529	cmd->base.autoneg = AUTONEG_DISABLE;
   1530
   1531	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
   1532						supported);
   1533	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
   1534						advertising);
   1535
   1536	return 0;
   1537}
   1538
   1539static void
   1540mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
   1541{
   1542	wol->supported = 0;
   1543	wol->wolopts = 0;
   1544	if (dev->phydev)
   1545		phy_ethtool_get_wol(dev->phydev, wol);
   1546}
   1547
   1548static int
   1549mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
   1550{
   1551	int err;
   1552
   1553	if (!dev->phydev)
   1554		return -EOPNOTSUPP;
   1555
   1556	err = phy_ethtool_set_wol(dev->phydev, wol);
   1557	/* Given that mv643xx_eth works without the marvell-specific PHY driver,
   1558	 * this debugging hint is useful to have.
   1559	 */
   1560	if (err == -EOPNOTSUPP)
   1561		netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n");
   1562	return err;
   1563}
   1564
   1565static int
   1566mv643xx_eth_get_link_ksettings(struct net_device *dev,
   1567			       struct ethtool_link_ksettings *cmd)
   1568{
   1569	struct mv643xx_eth_private *mp = netdev_priv(dev);
   1570
   1571	if (dev->phydev)
   1572		return mv643xx_eth_get_link_ksettings_phy(mp, cmd);
   1573	else
   1574		return mv643xx_eth_get_link_ksettings_phyless(mp, cmd);
   1575}
   1576
   1577static int
   1578mv643xx_eth_set_link_ksettings(struct net_device *dev,
   1579			       const struct ethtool_link_ksettings *cmd)
   1580{
   1581	struct ethtool_link_ksettings c = *cmd;
   1582	u32 advertising;
   1583	int ret;
   1584
   1585	if (!dev->phydev)
   1586		return -EINVAL;
   1587
   1588	/*
   1589	 * The MAC does not support 1000baseT_Half.
   1590	 */
   1591	ethtool_convert_link_mode_to_legacy_u32(&advertising,
   1592						c.link_modes.advertising);
   1593	advertising &= ~ADVERTISED_1000baseT_Half;
   1594	ethtool_convert_legacy_u32_to_link_mode(c.link_modes.advertising,
   1595						advertising);
   1596
   1597	ret = phy_ethtool_ksettings_set(dev->phydev, &c);
   1598	if (!ret)
   1599		mv643xx_eth_adjust_link(dev);
   1600	return ret;
   1601}
   1602
   1603static void mv643xx_eth_get_drvinfo(struct net_device *dev,
   1604				    struct ethtool_drvinfo *drvinfo)
   1605{
   1606	strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
   1607		sizeof(drvinfo->driver));
   1608	strlcpy(drvinfo->version, mv643xx_eth_driver_version,
   1609		sizeof(drvinfo->version));
   1610	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
   1611	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
   1612}
   1613
   1614static int mv643xx_eth_get_coalesce(struct net_device *dev,
   1615				    struct ethtool_coalesce *ec,
   1616				    struct kernel_ethtool_coalesce *kernel_coal,
   1617				    struct netlink_ext_ack *extack)
   1618{
   1619	struct mv643xx_eth_private *mp = netdev_priv(dev);
   1620
   1621	ec->rx_coalesce_usecs = get_rx_coal(mp);
   1622	ec->tx_coalesce_usecs = get_tx_coal(mp);
   1623
   1624	return 0;
   1625}
   1626
   1627static int mv643xx_eth_set_coalesce(struct net_device *dev,
   1628				    struct ethtool_coalesce *ec,
   1629				    struct kernel_ethtool_coalesce *kernel_coal,
   1630				    struct netlink_ext_ack *extack)
   1631{
   1632	struct mv643xx_eth_private *mp = netdev_priv(dev);
   1633
   1634	set_rx_coal(mp, ec->rx_coalesce_usecs);
   1635	set_tx_coal(mp, ec->tx_coalesce_usecs);
   1636
   1637	return 0;
   1638}
   1639
   1640static void
   1641mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
   1642			  struct kernel_ethtool_ringparam *kernel_er,
   1643			  struct netlink_ext_ack *extack)
   1644{
   1645	struct mv643xx_eth_private *mp = netdev_priv(dev);
   1646
   1647	er->rx_max_pending = 4096;
   1648	er->tx_max_pending = 4096;
   1649
   1650	er->rx_pending = mp->rx_ring_size;
   1651	er->tx_pending = mp->tx_ring_size;
   1652}
   1653
   1654static int
   1655mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
   1656			  struct kernel_ethtool_ringparam *kernel_er,
   1657			  struct netlink_ext_ack *extack)
   1658{
   1659	struct mv643xx_eth_private *mp = netdev_priv(dev);
   1660
   1661	if (er->rx_mini_pending || er->rx_jumbo_pending)
   1662		return -EINVAL;
   1663
   1664	mp->rx_ring_size = min(er->rx_pending, 4096U);
   1665	mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
   1666				   MV643XX_MAX_SKB_DESCS * 2, 4096);
   1667	if (mp->tx_ring_size != er->tx_pending)
   1668		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
   1669			    mp->tx_ring_size, er->tx_pending);
   1670
   1671	if (netif_running(dev)) {
   1672		mv643xx_eth_stop(dev);
   1673		if (mv643xx_eth_open(dev)) {
   1674			netdev_err(dev,
   1675				   "fatal error on re-opening device after ring param change\n");
   1676			return -ENOMEM;
   1677		}
   1678	}
   1679
   1680	return 0;
   1681}
   1682
   1683
   1684static int
   1685mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
   1686{
   1687	struct mv643xx_eth_private *mp = netdev_priv(dev);
   1688	bool rx_csum = features & NETIF_F_RXCSUM;
   1689
   1690	wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
   1691
   1692	return 0;
   1693}
   1694
   1695static void mv643xx_eth_get_strings(struct net_device *dev,
   1696				    uint32_t stringset, uint8_t *data)
   1697{
   1698	int i;
   1699
   1700	if (stringset == ETH_SS_STATS) {
   1701		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
   1702			memcpy(data + i * ETH_GSTRING_LEN,
   1703				mv643xx_eth_stats[i].stat_string,
   1704				ETH_GSTRING_LEN);
   1705		}
   1706	}
   1707}
   1708
   1709static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
   1710					  struct ethtool_stats *stats,
   1711					  uint64_t *data)
   1712{
   1713	struct mv643xx_eth_private *mp = netdev_priv(dev);
   1714	int i;
   1715
   1716	mv643xx_eth_get_stats(dev);
   1717	mib_counters_update(mp);
   1718
   1719	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
   1720		const struct mv643xx_eth_stats *stat;
   1721		void *p;
   1722
   1723		stat = mv643xx_eth_stats + i;
   1724
   1725		if (stat->netdev_off >= 0)
   1726			p = ((void *)mp->dev) + stat->netdev_off;
   1727		else
   1728			p = ((void *)mp) + stat->mp_off;
   1729
   1730		data[i] = (stat->sizeof_stat == 8) ?
   1731				*(uint64_t *)p : *(uint32_t *)p;
   1732	}
   1733}
   1734
   1735static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
   1736{
   1737	if (sset == ETH_SS_STATS)
   1738		return ARRAY_SIZE(mv643xx_eth_stats);
   1739
   1740	return -EOPNOTSUPP;
   1741}
   1742
   1743static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
   1744	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
   1745	.get_drvinfo		= mv643xx_eth_get_drvinfo,
   1746	.nway_reset		= phy_ethtool_nway_reset,
   1747	.get_link		= ethtool_op_get_link,
   1748	.get_coalesce		= mv643xx_eth_get_coalesce,
   1749	.set_coalesce		= mv643xx_eth_set_coalesce,
   1750	.get_ringparam		= mv643xx_eth_get_ringparam,
   1751	.set_ringparam		= mv643xx_eth_set_ringparam,
   1752	.get_strings		= mv643xx_eth_get_strings,
   1753	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
   1754	.get_sset_count		= mv643xx_eth_get_sset_count,
   1755	.get_ts_info		= ethtool_op_get_ts_info,
   1756	.get_wol                = mv643xx_eth_get_wol,
   1757	.set_wol                = mv643xx_eth_set_wol,
   1758	.get_link_ksettings	= mv643xx_eth_get_link_ksettings,
   1759	.set_link_ksettings	= mv643xx_eth_set_link_ksettings,
   1760};
   1761
   1762
   1763/* address handling *********************************************************/
   1764static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
   1765{
   1766	unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
   1767	unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);
   1768
   1769	addr[0] = (mac_h >> 24) & 0xff;
   1770	addr[1] = (mac_h >> 16) & 0xff;
   1771	addr[2] = (mac_h >> 8) & 0xff;
   1772	addr[3] = mac_h & 0xff;
   1773	addr[4] = (mac_l >> 8) & 0xff;
   1774	addr[5] = mac_l & 0xff;
   1775}
   1776
   1777static void uc_addr_set(struct mv643xx_eth_private *mp, const u8 *addr)
   1778{
   1779	wrlp(mp, MAC_ADDR_HIGH,
   1780		(addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
   1781	wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
   1782}
   1783
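       /*
        * Compute the low-nibble match mask for the hardware unicast filter.
        * Filtering is only done on the low four bits of the last address
        * byte, so every secondary unicast address must match dev_addr in
        * its first 44 bits; otherwise return 0, which makes the caller
        * fall back to unicast promiscuous mode.
        */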
   1784static u32 uc_addr_filter_mask(struct net_device *dev)
   1785{
   1786	struct netdev_hw_addr *ha;
   1787	u32 nibbles;
   1788
   1789	if (dev->flags & IFF_PROMISC)
   1790		return 0;
   1791
   1792	nibbles = 1 << (dev->dev_addr[5] & 0x0f);
   1793	netdev_for_each_uc_addr(ha, dev) {
   1794		if (memcmp(dev->dev_addr, ha->addr, 5))
   1795			return 0;
   1796		if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
   1797			return 0;
   1798
   1799		nibbles |= 1 << (ha->addr[5] & 0x0f);
   1800	}
   1801
   1802	return nibbles;
   1803}
   1804
   1805static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
   1806{
   1807	struct mv643xx_eth_private *mp = netdev_priv(dev);
   1808	u32 port_config;
   1809	u32 nibbles;
   1810	int i;
   1811
   1812	uc_addr_set(mp, dev->dev_addr);
   1813
   1814	port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
   1815
   1816	nibbles = uc_addr_filter_mask(dev);
   1817	if (!nibbles) {
   1818		port_config |= UNICAST_PROMISCUOUS_MODE;
   1819		nibbles = 0xffff;
   1820	}
   1821
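       	/*
       	 * The unicast filter table has 16 single-byte entries, one per
       	 * value of the low address nibble, packed four to a 32-bit
       	 * register; bit 0 of each byte marks that nibble as accepted.
       	 */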
   1822	for (i = 0; i < 16; i += 4) {
   1823		int off = UNICAST_TABLE(mp->port_num) + i;
   1824		u32 v;
   1825
   1826		v = 0;
   1827		if (nibbles & 1)
   1828			v |= 0x00000001;
   1829		if (nibbles & 2)
   1830			v |= 0x00000100;
   1831		if (nibbles & 4)
   1832			v |= 0x00010000;
   1833		if (nibbles & 8)
   1834			v |= 0x01000000;
   1835		nibbles >>= 4;
   1836
   1837		wrl(mp, off, v);
   1838	}
   1839
   1840	wrlp(mp, PORT_CONFIG, port_config);
   1841}
   1842
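       /*
        * 8-bit CRC over the six address bytes (polynomial 0x107, i.e.
        * x^8 + x^2 + x + 1), used as the index into the 256-entry
        * "other" multicast hash table.
        */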
   1843static int addr_crc(unsigned char *addr)
   1844{
   1845	int crc = 0;
   1846	int i;
   1847
   1848	for (i = 0; i < 6; i++) {
   1849		int j;
   1850
   1851		crc = (crc ^ addr[i]) << 8;
   1852		for (j = 7; j >= 0; j--) {
   1853			if (crc & (0x100 << j))
   1854				crc ^= 0x107 << j;
   1855		}
   1856	}
   1857
   1858	return crc;
   1859}
   1860
   1861static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
   1862{
   1863	struct mv643xx_eth_private *mp = netdev_priv(dev);
   1864	u32 *mc_spec;
   1865	u32 *mc_other;
   1866	struct netdev_hw_addr *ha;
   1867	int i;
   1868
   1869	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
   1870		goto promiscuous;
   1871
   1872	/* Allocate both mc_spec and mc_other tables */
   1873	mc_spec = kcalloc(128, sizeof(u32), GFP_ATOMIC);
   1874	if (!mc_spec)
   1875		goto promiscuous;
   1876	mc_other = &mc_spec[64];
   1877
   1878	netdev_for_each_mc_addr(ha, dev) {
   1879		u8 *a = ha->addr;
   1880		u32 *table;
   1881		u8 entry;
   1882
   1883		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
   1884			table = mc_spec;
   1885			entry = a[5];
   1886		} else {
   1887			table = mc_other;
   1888			entry = addr_crc(a);
   1889		}
   1890
   1891		table[entry >> 2] |= 1 << (8 * (entry & 3));
   1892	}
   1893
   1894	for (i = 0; i < 64; i++) {
   1895		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
   1896		    mc_spec[i]);
   1897		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
   1898		    mc_other[i]);
   1899	}
   1900
   1901	kfree(mc_spec);
   1902	return;
   1903
   1904promiscuous:
   1905	for (i = 0; i < 64; i++) {
   1906		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
   1907		    0x01010101u);
   1908		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
   1909		    0x01010101u);
   1910	}
   1911}
   1912
   1913static void mv643xx_eth_set_rx_mode(struct net_device *dev)
   1914{
   1915	mv643xx_eth_program_unicast_filter(dev);
   1916	mv643xx_eth_program_multicast_filter(dev);
   1917}
   1918
   1919static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
   1920{
   1921	struct sockaddr *sa = addr;
   1922
   1923	if (!is_valid_ether_addr(sa->sa_data))
   1924		return -EADDRNOTAVAIL;
   1925
   1926	eth_hw_addr_set(dev, sa->sa_data);
   1927
   1928	netif_addr_lock_bh(dev);
   1929	mv643xx_eth_program_unicast_filter(dev);
   1930	netif_addr_unlock_bh(dev);
   1931
   1932	return 0;
   1933}
   1934
   1935
   1936/* rx/tx queue initialisation ***********************************************/
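       /*
        * Descriptor rings: queue 0 can place its ring in on-chip SRAM when
        * the platform provided a large enough region; all other rings (and
        * queue 0 without SRAM) use coherent DMA memory. The descriptors are
        * chained into a circle via their next_desc_ptr fields.
        */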
   1937static int rxq_init(struct mv643xx_eth_private *mp, int index)
   1938{
   1939	struct rx_queue *rxq = mp->rxq + index;
   1940	struct rx_desc *rx_desc;
   1941	int size;
   1942	int i;
   1943
   1944	rxq->index = index;
   1945
   1946	rxq->rx_ring_size = mp->rx_ring_size;
   1947
   1948	rxq->rx_desc_count = 0;
   1949	rxq->rx_curr_desc = 0;
   1950	rxq->rx_used_desc = 0;
   1951
   1952	size = rxq->rx_ring_size * sizeof(struct rx_desc);
   1953
   1954	if (index == 0 && size <= mp->rx_desc_sram_size) {
   1955		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
   1956						mp->rx_desc_sram_size);
   1957		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
   1958	} else {
   1959		rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
   1960						       size, &rxq->rx_desc_dma,
   1961						       GFP_KERNEL);
   1962	}
   1963
   1964	if (rxq->rx_desc_area == NULL) {
   1965		netdev_err(mp->dev,
   1966			   "can't allocate rx ring (%d bytes)\n", size);
   1967		goto out;
   1968	}
   1969	memset(rxq->rx_desc_area, 0, size);
   1970
   1971	rxq->rx_desc_area_size = size;
   1972	rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
   1973				    GFP_KERNEL);
   1974	if (rxq->rx_skb == NULL)
   1975		goto out_free;
   1976
   1977	rx_desc = rxq->rx_desc_area;
   1978	for (i = 0; i < rxq->rx_ring_size; i++) {
   1979		int nexti;
   1980
   1981		nexti = i + 1;
   1982		if (nexti == rxq->rx_ring_size)
   1983			nexti = 0;
   1984
   1985		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
   1986					nexti * sizeof(struct rx_desc);
   1987	}
   1988
   1989	return 0;
   1990
   1991
   1992out_free:
   1993	if (index == 0 && size <= mp->rx_desc_sram_size)
   1994		iounmap(rxq->rx_desc_area);
   1995	else
   1996		dma_free_coherent(mp->dev->dev.parent, size,
   1997				  rxq->rx_desc_area,
   1998				  rxq->rx_desc_dma);
   1999
   2000out:
   2001	return -ENOMEM;
   2002}
   2003
   2004static void rxq_deinit(struct rx_queue *rxq)
   2005{
   2006	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
   2007	int i;
   2008
   2009	rxq_disable(rxq);
   2010
   2011	for (i = 0; i < rxq->rx_ring_size; i++) {
   2012		if (rxq->rx_skb[i]) {
   2013			dev_consume_skb_any(rxq->rx_skb[i]);
   2014			rxq->rx_desc_count--;
   2015		}
   2016	}
   2017
   2018	if (rxq->rx_desc_count) {
   2019		netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
   2020			   rxq->rx_desc_count);
   2021	}
   2022
   2023	if (rxq->index == 0 &&
   2024	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
   2025		iounmap(rxq->rx_desc_area);
   2026	else
   2027		dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
   2028				  rxq->rx_desc_area, rxq->rx_desc_dma);
   2029
   2030	kfree(rxq->rx_skb);
   2031}
   2032
   2033static int txq_init(struct mv643xx_eth_private *mp, int index)
   2034{
   2035	struct tx_queue *txq = mp->txq + index;
   2036	struct tx_desc *tx_desc;
   2037	int size;
   2038	int ret;
   2039	int i;
   2040
   2041	txq->index = index;
   2042
   2043	txq->tx_ring_size = mp->tx_ring_size;
   2044
   2045	/* A queue must always have room for at least one skb.
   2046	 * Therefore, stop the queue when the number of free descriptors
   2047	 * drops to the maximum number of descriptors a single skb may use.
   2048	 */
   2049	txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
   2050	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
   2051
   2052	txq->tx_desc_count = 0;
   2053	txq->tx_curr_desc = 0;
   2054	txq->tx_used_desc = 0;
   2055
   2056	size = txq->tx_ring_size * sizeof(struct tx_desc);
   2057
   2058	if (index == 0 && size <= mp->tx_desc_sram_size) {
   2059		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
   2060						mp->tx_desc_sram_size);
   2061		txq->tx_desc_dma = mp->tx_desc_sram_addr;
   2062	} else {
   2063		txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
   2064						       size, &txq->tx_desc_dma,
   2065						       GFP_KERNEL);
   2066	}
   2067
   2068	if (txq->tx_desc_area == NULL) {
   2069		netdev_err(mp->dev,
   2070			   "can't allocate tx ring (%d bytes)\n", size);
   2071		return -ENOMEM;
   2072	}
   2073	memset(txq->tx_desc_area, 0, size);
   2074
   2075	txq->tx_desc_area_size = size;
   2076
   2077	tx_desc = txq->tx_desc_area;
   2078	for (i = 0; i < txq->tx_ring_size; i++) {
   2079		struct tx_desc *txd = tx_desc + i;
   2080		int nexti;
   2081
   2082		nexti = i + 1;
   2083		if (nexti == txq->tx_ring_size)
   2084			nexti = 0;
   2085
   2086		txd->cmd_sts = 0;
   2087		txd->next_desc_ptr = txq->tx_desc_dma +
   2088					nexti * sizeof(struct tx_desc);
   2089	}
   2090
   2091	txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
   2092				       GFP_KERNEL);
   2093	if (!txq->tx_desc_mapping) {
   2094		ret = -ENOMEM;
   2095		goto err_free_desc_area;
   2096	}
   2097
   2098	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
   2099	txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
   2100					   txq->tx_ring_size * TSO_HEADER_SIZE,
   2101					   &txq->tso_hdrs_dma, GFP_KERNEL);
   2102	if (txq->tso_hdrs == NULL) {
   2103		ret = -ENOMEM;
   2104		goto err_free_desc_mapping;
   2105	}
   2106	skb_queue_head_init(&txq->tx_skb);
   2107
   2108	return 0;
   2109
   2110err_free_desc_mapping:
   2111	kfree(txq->tx_desc_mapping);
   2112err_free_desc_area:
   2113	if (index == 0 && size <= mp->tx_desc_sram_size)
   2114		iounmap(txq->tx_desc_area);
   2115	else
   2116		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
   2117				  txq->tx_desc_area, txq->tx_desc_dma);
   2118	return ret;
   2119}
   2120
   2121static void txq_deinit(struct tx_queue *txq)
   2122{
   2123	struct mv643xx_eth_private *mp = txq_to_mp(txq);
   2124
   2125	txq_disable(txq);
   2126	txq_reclaim(txq, txq->tx_ring_size, 1);
   2127
   2128	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
   2129
   2130	if (txq->index == 0 &&
   2131	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
   2132		iounmap(txq->tx_desc_area);
   2133	else
   2134		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
   2135				  txq->tx_desc_area, txq->tx_desc_dma);
   2136	kfree(txq->tx_desc_mapping);
   2137
   2138	if (txq->tso_hdrs)
   2139		dma_free_coherent(mp->dev->dev.parent,
   2140				  txq->tx_ring_size * TSO_HEADER_SIZE,
   2141				  txq->tso_hdrs, txq->tso_hdrs_dma);
   2142}
   2143
   2144
   2145/* netdev ops and related ***************************************************/
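       /*
        * Read and acknowledge the port's interrupt cause registers and fold
        * them into the per-queue work bitmaps (work_rx, work_tx, work_tx_end,
        * work_link) consumed by the NAPI poll loop. TX-end work is only
        * recorded for queues whose enable bit in TXQ_COMMAND has already
        * cleared. Returns 1 if any new work was found.
        */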
   2146static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
   2147{
   2148	u32 int_cause;
   2149	u32 int_cause_ext;
   2150
   2151	int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
   2152	if (int_cause == 0)
   2153		return 0;
   2154
   2155	int_cause_ext = 0;
   2156	if (int_cause & INT_EXT) {
   2157		int_cause &= ~INT_EXT;
   2158		int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
   2159	}
   2160
   2161	if (int_cause) {
   2162		wrlp(mp, INT_CAUSE, ~int_cause);
   2163		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
   2164				~(rdlp(mp, TXQ_COMMAND) & 0xff);
   2165		mp->work_rx |= (int_cause & INT_RX) >> 2;
   2166	}
   2167
   2168	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
   2169	if (int_cause_ext) {
   2170		wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
   2171		if (int_cause_ext & INT_EXT_LINK_PHY)
   2172			mp->work_link = 1;
   2173		mp->work_tx |= int_cause_ext & INT_EXT_TX;
   2174	}
   2175
   2176	return 1;
   2177}
   2178
   2179static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
   2180{
   2181	struct net_device *dev = (struct net_device *)dev_id;
   2182	struct mv643xx_eth_private *mp = netdev_priv(dev);
   2183
   2184	if (unlikely(!mv643xx_eth_collect_events(mp)))
   2185		return IRQ_NONE;
   2186
   2187	wrlp(mp, INT_MASK, 0);
   2188	napi_schedule(&mp->napi);
   2189
   2190	return IRQ_HANDLED;
   2191}
   2192
   2193static void handle_link_event(struct mv643xx_eth_private *mp)
   2194{
   2195	struct net_device *dev = mp->dev;
   2196	u32 port_status;
   2197	int speed;
   2198	int duplex;
   2199	int fc;
   2200
   2201	port_status = rdlp(mp, PORT_STATUS);
   2202	if (!(port_status & LINK_UP)) {
   2203		if (netif_carrier_ok(dev)) {
   2204			int i;
   2205
   2206			netdev_info(dev, "link down\n");
   2207
   2208			netif_carrier_off(dev);
   2209
   2210			for (i = 0; i < mp->txq_count; i++) {
   2211				struct tx_queue *txq = mp->txq + i;
   2212
   2213				txq_reclaim(txq, txq->tx_ring_size, 1);
   2214				txq_reset_hw_ptr(txq);
   2215			}
   2216		}
   2217		return;
   2218	}
   2219
   2220	switch (port_status & PORT_SPEED_MASK) {
   2221	case PORT_SPEED_10:
   2222		speed = 10;
   2223		break;
   2224	case PORT_SPEED_100:
   2225		speed = 100;
   2226		break;
   2227	case PORT_SPEED_1000:
   2228		speed = 1000;
   2229		break;
   2230	default:
   2231		speed = -1;
   2232		break;
   2233	}
   2234	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
   2235	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
   2236
   2237	netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
   2238		    speed, duplex ? "full" : "half", fc ? "en" : "dis");
   2239
   2240	if (!netif_carrier_ok(dev))
   2241		netif_carrier_on(dev);
   2242}
   2243
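       /*
        * NAPI poll handler. Link events are handled first; remaining work is
        * drained starting from the highest-numbered pending queue (fls), at
        * most 16 descriptors at a time so one queue cannot monopolise the
        * budget. Interrupts are re-enabled once all work fits in the budget.
        */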
   2244static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
   2245{
   2246	struct mv643xx_eth_private *mp;
   2247	int work_done;
   2248
   2249	mp = container_of(napi, struct mv643xx_eth_private, napi);
   2250
   2251	if (unlikely(mp->oom)) {
   2252		mp->oom = 0;
   2253		del_timer(&mp->rx_oom);
   2254	}
   2255
   2256	work_done = 0;
   2257	while (work_done < budget) {
   2258		u8 queue_mask;
   2259		int queue;
   2260		int work_tbd;
   2261
   2262		if (mp->work_link) {
   2263			mp->work_link = 0;
   2264			handle_link_event(mp);
   2265			work_done++;
   2266			continue;
   2267		}
   2268
   2269		queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
   2270		if (likely(!mp->oom))
   2271			queue_mask |= mp->work_rx_refill;
   2272
   2273		if (!queue_mask) {
   2274			if (mv643xx_eth_collect_events(mp))
   2275				continue;
   2276			break;
   2277		}
   2278
   2279		queue = fls(queue_mask) - 1;
   2280		queue_mask = 1 << queue;
   2281
   2282		work_tbd = budget - work_done;
   2283		if (work_tbd > 16)
   2284			work_tbd = 16;
   2285
   2286		if (mp->work_tx_end & queue_mask) {
   2287			txq_kick(mp->txq + queue);
   2288		} else if (mp->work_tx & queue_mask) {
   2289			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
   2290			txq_maybe_wake(mp->txq + queue);
   2291		} else if (mp->work_rx & queue_mask) {
   2292			work_done += rxq_process(mp->rxq + queue, work_tbd);
   2293		} else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
   2294			work_done += rxq_refill(mp->rxq + queue, work_tbd);
   2295		} else {
   2296			BUG();
   2297		}
   2298	}
   2299
   2300	if (work_done < budget) {
   2301		if (mp->oom)
   2302			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
   2303		napi_complete_done(napi, work_done);
   2304		wrlp(mp, INT_MASK, mp->int_mask);
   2305	}
   2306
   2307	return work_done;
   2308}
   2309
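       /*
        * rx_oom timer callback: while RX buffer allocation is failing
        * (mp->oom), the poll loop re-arms this timer (HZ/10); firing it
        * simply reschedules NAPI so the refill is retried.
        */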
   2310static inline void oom_timer_wrapper(struct timer_list *t)
   2311{
   2312	struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);
   2313
   2314	napi_schedule(&mp->napi);
   2315}
   2316
   2317static void port_start(struct mv643xx_eth_private *mp)
   2318{
   2319	struct net_device *dev = mp->dev;
   2320	u32 pscr;
   2321	int i;
   2322
   2323	/*
   2324	 * Perform PHY reset, if there is a PHY.
   2325	 */
   2326	if (dev->phydev) {
   2327		struct ethtool_link_ksettings cmd;
   2328
   2329		mv643xx_eth_get_link_ksettings(dev, &cmd);
   2330		phy_init_hw(dev->phydev);
   2331		mv643xx_eth_set_link_ksettings(
   2332			dev, (const struct ethtool_link_ksettings *)&cmd);
   2333		phy_start(dev->phydev);
   2334	}
   2335
   2336	/*
   2337	 * Configure basic link parameters.
   2338	 */
   2339	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
   2340
   2341	pscr |= SERIAL_PORT_ENABLE;
   2342	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
   2343
   2344	pscr |= DO_NOT_FORCE_LINK_FAIL;
   2345	if (!dev->phydev)
   2346		pscr |= FORCE_LINK_PASS;
   2347	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
   2348
   2349	/*
   2350	 * Configure TX path and queues.
   2351	 */
   2352	tx_set_rate(mp, 1000000000, 16777216);
   2353	for (i = 0; i < mp->txq_count; i++) {
   2354		struct tx_queue *txq = mp->txq + i;
   2355
   2356		txq_reset_hw_ptr(txq);
   2357		txq_set_rate(txq, 1000000000, 16777216);
   2358		txq_set_fixed_prio_mode(txq);
   2359	}
   2360
   2361	/*
   2362	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
   2363	 * frames to RX queue #0, and include the pseudo-header when
   2364	 * calculating receive checksums.
   2365	 */
   2366	mv643xx_eth_set_features(mp->dev, mp->dev->features);
   2367
   2368	/*
   2369	 * Treat BPDUs as normal multicasts, and disable partition mode.
   2370	 */
   2371	wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
   2372
   2373	/*
   2374	 * Add configured unicast addresses to address filter table.
   2375	 */
   2376	mv643xx_eth_program_unicast_filter(mp->dev);
   2377
   2378	/*
   2379	 * Enable the receive queues.
   2380	 */
   2381	for (i = 0; i < mp->rxq_count; i++) {
   2382		struct rx_queue *rxq = mp->rxq + i;
   2383		u32 addr;
   2384
   2385		addr = (u32)rxq->rx_desc_dma;
   2386		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
   2387		wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);
   2388
   2389		rxq_enable(rxq);
   2390	}
   2391}
   2392
   2393static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
   2394{
   2395	int skb_size;
   2396
   2397	/*
   2398	 * Reserve 2+14 bytes for an ethernet header (the hardware
   2399	 * automatically prepends 2 bytes of dummy data to each
   2400	 * received packet), 16 bytes for up to four VLAN tags, and
   2401	 * 4 bytes for the trailing FCS -- 36 bytes total.
   2402	 */
   2403	skb_size = mp->dev->mtu + 36;
   2404
   2405	/*
   2406	 * Make sure that the skb size is a multiple of 8 bytes, as
   2407	 * the lower three bits of the receive descriptor's buffer
   2408	 * size field are ignored by the hardware.
   2409	 */
   2410	mp->skb_size = (skb_size + 7) & ~7;
   2411
   2412	/*
   2413	 * If NET_SKB_PAD is smaller than a cache line,
   2414	 * netdev_alloc_skb() will cause skb->data to be misaligned
   2415	 * to a cache line boundary.  If this is the case, include
   2416	 * some extra space to allow re-aligning the data area.
   2417	 */
   2418	mp->skb_size += SKB_DMA_REALIGN;
   2419}
   2420
   2421static int mv643xx_eth_open(struct net_device *dev)
   2422{
   2423	struct mv643xx_eth_private *mp = netdev_priv(dev);
   2424	int err;
   2425	int i;
   2426
   2427	wrlp(mp, INT_CAUSE, 0);
   2428	wrlp(mp, INT_CAUSE_EXT, 0);
   2429	rdlp(mp, INT_CAUSE_EXT);
   2430
   2431	err = request_irq(dev->irq, mv643xx_eth_irq,
   2432			  IRQF_SHARED, dev->name, dev);
   2433	if (err) {
   2434		netdev_err(dev, "can't assign irq\n");
   2435		return -EAGAIN;
   2436	}
   2437
   2438	mv643xx_eth_recalc_skb_size(mp);
   2439
   2440	napi_enable(&mp->napi);
   2441
   2442	mp->int_mask = INT_EXT;
   2443
   2444	for (i = 0; i < mp->rxq_count; i++) {
   2445		err = rxq_init(mp, i);
   2446		if (err) {
   2447			while (--i >= 0)
   2448				rxq_deinit(mp->rxq + i);
   2449			goto out;
   2450		}
   2451
   2452		rxq_refill(mp->rxq + i, INT_MAX);
   2453		mp->int_mask |= INT_RX_0 << i;
   2454	}
   2455
   2456	if (mp->oom) {
   2457		mp->rx_oom.expires = jiffies + (HZ / 10);
   2458		add_timer(&mp->rx_oom);
   2459	}
   2460
   2461	for (i = 0; i < mp->txq_count; i++) {
   2462		err = txq_init(mp, i);
   2463		if (err) {
   2464			while (--i >= 0)
   2465				txq_deinit(mp->txq + i);
   2466			goto out_free;
   2467		}
   2468		mp->int_mask |= INT_TX_END_0 << i;
   2469	}
   2470
   2471	add_timer(&mp->mib_counters_timer);
   2472	port_start(mp);
   2473
   2474	wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
   2475	wrlp(mp, INT_MASK, mp->int_mask);
   2476
   2477	return 0;
   2478
   2479
   2480out_free:
   2481	for (i = 0; i < mp->rxq_count; i++)
   2482		rxq_deinit(mp->rxq + i);
   2483out:
   2484	free_irq(dev->irq, dev);
   2485
   2486	return err;
   2487}
   2488
   2489static void port_reset(struct mv643xx_eth_private *mp)
   2490{
   2491	unsigned int data;
   2492	int i;
   2493
   2494	for (i = 0; i < mp->rxq_count; i++)
   2495		rxq_disable(mp->rxq + i);
   2496	for (i = 0; i < mp->txq_count; i++)
   2497		txq_disable(mp->txq + i);
   2498
   2499	while (1) {
   2500		u32 ps = rdlp(mp, PORT_STATUS);
   2501
   2502		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
   2503			break;
   2504		udelay(10);
   2505	}
   2506
   2507	/* Clear the enable and force-link bits in the Port Serial Control register */
   2508	data = rdlp(mp, PORT_SERIAL_CONTROL);
   2509	data &= ~(SERIAL_PORT_ENABLE		|
   2510		  DO_NOT_FORCE_LINK_FAIL	|
   2511		  FORCE_LINK_PASS);
   2512	wrlp(mp, PORT_SERIAL_CONTROL, data);
   2513}
   2514
   2515static int mv643xx_eth_stop(struct net_device *dev)
   2516{
   2517	struct mv643xx_eth_private *mp = netdev_priv(dev);
   2518	int i;
   2519
   2520	wrlp(mp, INT_MASK_EXT, 0x00000000);
   2521	wrlp(mp, INT_MASK, 0x00000000);
   2522	rdlp(mp, INT_MASK);
   2523
   2524	napi_disable(&mp->napi);
   2525
   2526	del_timer_sync(&mp->rx_oom);
   2527
   2528	netif_carrier_off(dev);
   2529	if (dev->phydev)
   2530		phy_stop(dev->phydev);
   2531	free_irq(dev->irq, dev);
   2532
   2533	port_reset(mp);
   2534	mv643xx_eth_get_stats(dev);
   2535	mib_counters_update(mp);
   2536	del_timer_sync(&mp->mib_counters_timer);
   2537
   2538	for (i = 0; i < mp->rxq_count; i++)
   2539		rxq_deinit(mp->rxq + i);
   2540	for (i = 0; i < mp->txq_count; i++)
   2541		txq_deinit(mp->txq + i);
   2542
   2543	return 0;
   2544}
   2545
   2546static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
   2547{
   2548	int ret;
   2549
   2550	if (!dev->phydev)
   2551		return -ENOTSUPP;
   2552
   2553	ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
   2554	if (!ret)
   2555		mv643xx_eth_adjust_link(dev);
   2556	return ret;
   2557}
   2558
   2559static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
   2560{
   2561	struct mv643xx_eth_private *mp = netdev_priv(dev);
   2562
   2563	dev->mtu = new_mtu;
   2564	mv643xx_eth_recalc_skb_size(mp);
   2565	tx_set_rate(mp, 1000000000, 16777216);
   2566
   2567	if (!netif_running(dev))
   2568		return 0;
   2569
   2570	/*
   2571	 * Stop and then re-open the interface so that RX skbs are
   2572	 * reallocated for the new MTU.
   2573	 * Note that the re-open can fail if the required memory
   2574	 * cannot be allocated.
   2575	 */
   2576	mv643xx_eth_stop(dev);
   2577	if (mv643xx_eth_open(dev)) {
   2578		netdev_err(dev,
   2579			   "fatal error on re-opening device after MTU change\n");
   2580	}
   2581
   2582	return 0;
   2583}
   2584
   2585static void tx_timeout_task(struct work_struct *ugly)
   2586{
   2587	struct mv643xx_eth_private *mp;
   2588
   2589	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
   2590	if (netif_running(mp->dev)) {
   2591		netif_tx_stop_all_queues(mp->dev);
   2592		port_reset(mp);
   2593		port_start(mp);
   2594		netif_tx_wake_all_queues(mp->dev);
   2595	}
   2596}
   2597
   2598static void mv643xx_eth_tx_timeout(struct net_device *dev, unsigned int txqueue)
   2599{
   2600	struct mv643xx_eth_private *mp = netdev_priv(dev);
   2601
   2602	netdev_info(dev, "tx timeout\n");
   2603
   2604	schedule_work(&mp->tx_timeout_task);
   2605}
   2606
   2607#ifdef CONFIG_NET_POLL_CONTROLLER
   2608static void mv643xx_eth_netpoll(struct net_device *dev)
   2609{
   2610	struct mv643xx_eth_private *mp = netdev_priv(dev);
   2611
   2612	wrlp(mp, INT_MASK, 0x00000000);
   2613	rdlp(mp, INT_MASK);
   2614
   2615	mv643xx_eth_irq(dev->irq, dev);
   2616
   2617	wrlp(mp, INT_MASK, mp->int_mask);
   2618}
   2619#endif
   2620
   2621
   2622/* platform glue ************************************************************/
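       /*
        * Program the six address-decoding windows from the mbus DRAM layout
        * so the Ethernet DMA can reach every DRAM chip-select; unused
        * windows are cleared, and the computed protection bits are saved in
        * msp->win_protect for later per-port WINDOW_PROTECT programming.
        */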
   2623static void
   2624mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
   2625			      const struct mbus_dram_target_info *dram)
   2626{
   2627	void __iomem *base = msp->base;
   2628	u32 win_enable;
   2629	u32 win_protect;
   2630	int i;
   2631
   2632	for (i = 0; i < 6; i++) {
   2633		writel(0, base + WINDOW_BASE(i));
   2634		writel(0, base + WINDOW_SIZE(i));
   2635		if (i < 4)
   2636			writel(0, base + WINDOW_REMAP_HIGH(i));
   2637	}
   2638
   2639	win_enable = 0x3f;
   2640	win_protect = 0;
   2641
   2642	for (i = 0; i < dram->num_cs; i++) {
   2643		const struct mbus_dram_window *cs = dram->cs + i;
   2644
   2645		writel((cs->base & 0xffff0000) |
   2646			(cs->mbus_attr << 8) |
   2647			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
   2648		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
   2649
   2650		win_enable &= ~(1 << i);
   2651		win_protect |= 3 << (2 * i);
   2652	}
   2653
   2654	writel(win_enable, base + WINDOW_BAR_ENABLE);
   2655	msp->win_protect = win_protect;
   2656}
   2657
   2658static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
   2659{
   2660	/*
   2661	 * Check whether we have a 14-bit coal limit field in bits
   2662	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
   2663	 * SDMA config register.
   2664	 */
   2665	writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
   2666	if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
   2667		msp->extended_rx_coal_limit = 1;
   2668	else
   2669		msp->extended_rx_coal_limit = 0;
   2670
   2671	/*
   2672	 * Check whether the MAC supports TX rate control, and if
   2673	 * yes, whether its associated registers are in the old or
   2674	 * the new place.
   2675	 */
   2676	writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
   2677	if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
   2678		msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
   2679	} else {
   2680		writel(7, msp->base + 0x0400 + TX_BW_RATE);
   2681		if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
   2682			msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
   2683		else
   2684			msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
   2685	}
   2686}
   2687
   2688#if defined(CONFIG_OF)
   2689static const struct of_device_id mv643xx_eth_shared_ids[] = {
   2690	{ .compatible = "marvell,orion-eth", },
   2691	{ .compatible = "marvell,kirkwood-eth", },
   2692	{ }
   2693};
   2694MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
   2695#endif
   2696
   2697#ifdef CONFIG_OF_IRQ
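       /* Read an optional "marvell,<name>" u32 DT property, leaving _v
        * untouched when the property is absent.
        */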
   2698#define mv643xx_eth_property(_np, _name, _v)				\
   2699	do {								\
   2700		u32 tmp;						\
   2701		if (!of_property_read_u32(_np, "marvell," _name, &tmp))	\
   2702			_v = tmp;					\
   2703	} while (0)
   2704
   2705static struct platform_device *port_platdev[3];
   2706
   2707static void mv643xx_eth_shared_of_remove(void)
   2708{
   2709	int n;
   2710
   2711	for (n = 0; n < 3; n++) {
   2712		platform_device_del(port_platdev[n]);
   2713		port_platdev[n] = NULL;
   2714	}
   2715}
   2716
   2717static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
   2718					  struct device_node *pnp)
   2719{
   2720	struct platform_device *ppdev;
   2721	struct mv643xx_eth_platform_data ppd;
   2722	struct resource res;
   2723	int ret;
   2724	int dev_num = 0;
   2725
   2726	memset(&ppd, 0, sizeof(ppd));
   2727	ppd.shared = pdev;
   2728
   2729	memset(&res, 0, sizeof(res));
   2730	if (of_irq_to_resource(pnp, 0, &res) <= 0) {
   2731		dev_err(&pdev->dev, "missing interrupt on %pOFn\n", pnp);
   2732		return -EINVAL;
   2733	}
   2734
   2735	if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
   2736		dev_err(&pdev->dev, "missing reg property on %pOFn\n", pnp);
   2737		return -EINVAL;
   2738	}
   2739
   2740	if (ppd.port_number >= 3) {
   2741		dev_err(&pdev->dev, "invalid reg property on %pOFn\n", pnp);
   2742		return -EINVAL;
   2743	}
   2744
   2745	while (dev_num < 3 && port_platdev[dev_num])
   2746		dev_num++;
   2747
   2748	if (dev_num == 3) {
   2749		dev_err(&pdev->dev, "too many ports registered\n");
   2750		return -EINVAL;
   2751	}
   2752
   2753	ret = of_get_mac_address(pnp, ppd.mac_addr);
   2754	if (ret == -EPROBE_DEFER)
   2755		return ret;
   2756
   2757	mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
   2758	mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
   2759	mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size);
   2760	mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size);
   2761	mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr);
   2762	mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size);
   2763
   2764	ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);
   2765	if (!ppd.phy_node) {
   2766		ppd.phy_addr = MV643XX_ETH_PHY_NONE;
   2767		of_property_read_u32(pnp, "speed", &ppd.speed);
   2768		of_property_read_u32(pnp, "duplex", &ppd.duplex);
   2769	}
   2770
   2771	ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
   2772	if (!ppdev)
   2773		return -ENOMEM;
   2774	ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
   2775	ppdev->dev.of_node = pnp;
   2776
   2777	ret = platform_device_add_resources(ppdev, &res, 1);
   2778	if (ret)
   2779		goto port_err;
   2780
   2781	ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd));
   2782	if (ret)
   2783		goto port_err;
   2784
   2785	ret = platform_device_add(ppdev);
   2786	if (ret)
   2787		goto port_err;
   2788
   2789	port_platdev[dev_num] = ppdev;
   2790
   2791	return 0;
   2792
   2793port_err:
   2794	platform_device_put(ppdev);
   2795	return ret;
   2796}
   2797
   2798static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
   2799{
   2800	struct mv643xx_eth_shared_platform_data *pd;
   2801	struct device_node *pnp, *np = pdev->dev.of_node;
   2802	int ret;
   2803
   2804	/* bail out if not registered from DT */
   2805	if (!np)
   2806		return 0;
   2807
   2808	pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
   2809	if (!pd)
   2810		return -ENOMEM;
   2811	pdev->dev.platform_data = pd;
   2812
   2813	mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);
   2814
   2815	for_each_available_child_of_node(np, pnp) {
   2816		ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
   2817		if (ret) {
   2818			of_node_put(pnp);
   2819			mv643xx_eth_shared_of_remove();
   2820			return ret;
   2821		}
   2822	}
   2823	return 0;
   2824}
   2825
   2826#else
   2827static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
   2828{
   2829	return 0;
   2830}
   2831
   2832static inline void mv643xx_eth_shared_of_remove(void)
   2833{
   2834}
   2835#endif
   2836
   2837static int mv643xx_eth_shared_probe(struct platform_device *pdev)
   2838{
   2839	static int mv643xx_eth_version_printed;
   2840	struct mv643xx_eth_shared_platform_data *pd;
   2841	struct mv643xx_eth_shared_private *msp;
   2842	const struct mbus_dram_target_info *dram;
   2843	struct resource *res;
   2844	int ret;
   2845
   2846	if (!mv643xx_eth_version_printed++)
   2847		pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
   2848			  mv643xx_eth_driver_version);
   2849
   2850	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   2851	if (res == NULL)
   2852		return -EINVAL;
   2853
   2854	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
   2855	if (msp == NULL)
   2856		return -ENOMEM;
   2857	platform_set_drvdata(pdev, msp);
   2858
   2859	msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
   2860	if (msp->base == NULL)
   2861		return -ENOMEM;
   2862
   2863	msp->clk = devm_clk_get(&pdev->dev, NULL);
   2864	if (!IS_ERR(msp->clk))
   2865		clk_prepare_enable(msp->clk);
   2866
   2867	/*
   2868	 * (Re-)program MBUS remapping windows if we are asked to.
   2869	 */
   2870	dram = mv_mbus_dram_info();
   2871	if (dram)
   2872		mv643xx_eth_conf_mbus_windows(msp, dram);
   2873
   2874	ret = mv643xx_eth_shared_of_probe(pdev);
   2875	if (ret)
   2876		goto err_put_clk;
   2877	pd = dev_get_platdata(&pdev->dev);
   2878
   2879	msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
   2880					pd->tx_csum_limit : 9 * 1024;
   2881	infer_hw_params(msp);
   2882
   2883	return 0;
   2884
   2885err_put_clk:
   2886	if (!IS_ERR(msp->clk))
   2887		clk_disable_unprepare(msp->clk);
   2888	return ret;
   2889}
   2890
   2891static int mv643xx_eth_shared_remove(struct platform_device *pdev)
   2892{
   2893	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
   2894
   2895	mv643xx_eth_shared_of_remove();
   2896	if (!IS_ERR(msp->clk))
   2897		clk_disable_unprepare(msp->clk);
   2898	return 0;
   2899}
   2900
   2901static struct platform_driver mv643xx_eth_shared_driver = {
   2902	.probe		= mv643xx_eth_shared_probe,
   2903	.remove		= mv643xx_eth_shared_remove,
   2904	.driver = {
   2905		.name	= MV643XX_ETH_SHARED_NAME,
   2906		.of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
   2907	},
   2908};
   2909
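       /*
        * The shared PHY_ADDR register holds one 5-bit PHY address per port;
        * these helpers update/read the field belonging to this port.
        */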
   2910static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
   2911{
   2912	int addr_shift = 5 * mp->port_num;
   2913	u32 data;
   2914
   2915	data = rdl(mp, PHY_ADDR);
   2916	data &= ~(0x1f << addr_shift);
   2917	data |= (phy_addr & 0x1f) << addr_shift;
   2918	wrl(mp, PHY_ADDR, data);
   2919}
   2920
   2921static int phy_addr_get(struct mv643xx_eth_private *mp)
   2922{
   2923	unsigned int data;
   2924
   2925	data = rdl(mp, PHY_ADDR);
   2926
   2927	return (data >> (5 * mp->port_num)) & 0x1f;
   2928}
   2929
   2930static void set_params(struct mv643xx_eth_private *mp,
   2931		       struct mv643xx_eth_platform_data *pd)
   2932{
   2933	struct net_device *dev = mp->dev;
   2934	unsigned int tx_ring_size;
   2935
   2936	if (is_valid_ether_addr(pd->mac_addr)) {
   2937		eth_hw_addr_set(dev, pd->mac_addr);
   2938	} else {
   2939		u8 addr[ETH_ALEN];
   2940
   2941		uc_addr_get(mp, addr);
   2942		eth_hw_addr_set(dev, addr);
   2943	}
   2944
   2945	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
   2946	if (pd->rx_queue_size)
   2947		mp->rx_ring_size = pd->rx_queue_size;
   2948	mp->rx_desc_sram_addr = pd->rx_sram_addr;
   2949	mp->rx_desc_sram_size = pd->rx_sram_size;
   2950
   2951	mp->rxq_count = pd->rx_queue_count ? : 1;
   2952
   2953	tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
   2954	if (pd->tx_queue_size)
   2955		tx_ring_size = pd->tx_queue_size;
   2956
   2957	mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
   2958				   MV643XX_MAX_SKB_DESCS * 2, 4096);
   2959	if (mp->tx_ring_size != tx_ring_size)
   2960		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
   2961			    mp->tx_ring_size, tx_ring_size);
   2962
   2963	mp->tx_desc_sram_addr = pd->tx_sram_addr;
   2964	mp->tx_desc_sram_size = pd->tx_sram_size;
   2965
   2966	mp->txq_count = pd->tx_queue_count ? : 1;
   2967}
   2968
   2969static int get_phy_mode(struct mv643xx_eth_private *mp)
   2970{
   2971	struct device *dev = mp->dev->dev.parent;
   2972	phy_interface_t iface;
   2973	int err;
   2974
   2975	if (dev->of_node)
   2976		err = of_get_phy_mode(dev->of_node, &iface);
   2977
   2978	/* Fall back to the historical default when no mode is specified.
   2979	 * The interface state could also be read from/written to PSC1.
   2980	 */
   2981	if (!dev->of_node || err)
   2982		iface = PHY_INTERFACE_MODE_GMII;
   2983	return iface;
   2984}
   2985
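       /*
        * Connect to the port's PHY via the "orion-mdio-mii" bus. With the
        * default platform PHY address, all 32 MDIO addresses are probed
        * starting from the value currently latched in PHY_ADDR; otherwise
        * only the given address is tried. The first PHY that connects has
        * its address written back with phy_addr_set().
        */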
   2986static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
   2987				   int phy_addr)
   2988{
   2989	struct phy_device *phydev;
   2990	int start;
   2991	int num;
   2992	int i;
   2993	char phy_id[MII_BUS_ID_SIZE + 3];
   2994
   2995	if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
   2996		start = phy_addr_get(mp) & 0x1f;
   2997		num = 32;
   2998	} else {
   2999		start = phy_addr & 0x1f;
   3000		num = 1;
   3001	}
   3002
   3003	/* Attempt to connect to the PHY using orion-mdio */
   3004	phydev = ERR_PTR(-ENODEV);
   3005	for (i = 0; i < num; i++) {
   3006		int addr = (start + i) & 0x1f;
   3007
   3008		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
   3009				"orion-mdio-mii", addr);
   3010
   3011		phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
   3012				     get_phy_mode(mp));
   3013		if (!IS_ERR(phydev)) {
   3014			phy_addr_set(mp, addr);
   3015			break;
   3016		}
   3017	}
   3018
   3019	return phydev;
   3020}
   3021
   3022static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
   3023{
   3024	struct net_device *dev = mp->dev;
   3025	struct phy_device *phy = dev->phydev;
   3026
   3027	if (speed == 0) {
   3028		phy->autoneg = AUTONEG_ENABLE;
   3029		phy->speed = 0;
   3030		phy->duplex = 0;
   3031		linkmode_copy(phy->advertising, phy->supported);
   3032		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
   3033				 phy->advertising);
   3034	} else {
   3035		phy->autoneg = AUTONEG_DISABLE;
   3036		linkmode_zero(phy->advertising);
   3037		phy->speed = speed;
   3038		phy->duplex = duplex;
   3039	}
   3040	phy_start_aneg(phy);
   3041}
   3042
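       /*
        * Set up the port serial control register: when no PHY is attached,
        * autonegotiation is disabled and speed, duplex and flow control are
        * forced from the platform data; with a PHY, those bits are left for
        * the PHY state machine and adjust_link callback to manage.
        */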
   3043static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
   3044{
   3045	struct net_device *dev = mp->dev;
   3046	u32 pscr;
   3047
   3048	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
   3049	if (pscr & SERIAL_PORT_ENABLE) {
   3050		pscr &= ~SERIAL_PORT_ENABLE;
   3051		wrlp(mp, PORT_SERIAL_CONTROL, pscr);
   3052	}
   3053
   3054	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
   3055	if (!dev->phydev) {
   3056		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
   3057		if (speed == SPEED_1000)
   3058			pscr |= SET_GMII_SPEED_TO_1000;
   3059		else if (speed == SPEED_100)
   3060			pscr |= SET_MII_SPEED_TO_100;
   3061
   3062		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;
   3063
   3064		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
   3065		if (duplex == DUPLEX_FULL)
   3066			pscr |= SET_FULL_DUPLEX_MODE;
   3067	}
   3068
   3069	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
   3070}
   3071
   3072static const struct net_device_ops mv643xx_eth_netdev_ops = {
   3073	.ndo_open		= mv643xx_eth_open,
   3074	.ndo_stop		= mv643xx_eth_stop,
   3075	.ndo_start_xmit		= mv643xx_eth_xmit,
   3076	.ndo_set_rx_mode	= mv643xx_eth_set_rx_mode,
   3077	.ndo_set_mac_address	= mv643xx_eth_set_mac_address,
   3078	.ndo_validate_addr	= eth_validate_addr,
   3079	.ndo_eth_ioctl		= mv643xx_eth_ioctl,
   3080	.ndo_change_mtu		= mv643xx_eth_change_mtu,
   3081	.ndo_set_features	= mv643xx_eth_set_features,
   3082	.ndo_tx_timeout		= mv643xx_eth_tx_timeout,
   3083	.ndo_get_stats		= mv643xx_eth_get_stats,
   3084#ifdef CONFIG_NET_POLL_CONTROLLER
   3085	.ndo_poll_controller	= mv643xx_eth_netpoll,
   3086#endif
   3087};
   3088
   3089static int mv643xx_eth_probe(struct platform_device *pdev)
   3090{
   3091	struct mv643xx_eth_platform_data *pd;
   3092	struct mv643xx_eth_private *mp;
   3093	struct net_device *dev;
   3094	struct phy_device *phydev = NULL;
   3095	int err, irq;
   3096
   3097	pd = dev_get_platdata(&pdev->dev);
   3098	if (pd == NULL) {
   3099		dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
   3100		return -ENODEV;
   3101	}
   3102
   3103	if (pd->shared == NULL) {
   3104		dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
   3105		return -ENODEV;
   3106	}
   3107
   3108	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
   3109	if (!dev)
   3110		return -ENOMEM;
   3111
   3112	SET_NETDEV_DEV(dev, &pdev->dev);
   3113	mp = netdev_priv(dev);
   3114	platform_set_drvdata(pdev, mp);
   3115
   3116	mp->shared = platform_get_drvdata(pd->shared);
   3117	mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
   3118	mp->port_num = pd->port_number;
   3119
   3120	mp->dev = dev;
   3121
   3122	/* Kirkwood resets some registers on gated clocks. In particular,
   3123	 * CLK125_BYPASS_EN must be cleared here; that bit is not available
   3124	 * on all of the other SoCs/system controllers using this driver.
   3125	 */
   3126	if (of_device_is_compatible(pdev->dev.of_node,
   3127				    "marvell,kirkwood-eth-port"))
   3128		wrlp(mp, PORT_SERIAL_CONTROL1,
   3129		     rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN);
   3130
   3131	/*
   3132	 * Start with a default rate, and if there is a clock, allow
   3133	 * it to override the default.
   3134	 */
   3135	mp->t_clk = 133000000;
   3136	mp->clk = devm_clk_get(&pdev->dev, NULL);
   3137	if (!IS_ERR(mp->clk)) {
   3138		clk_prepare_enable(mp->clk);
   3139		mp->t_clk = clk_get_rate(mp->clk);
   3140	} else if (!IS_ERR(mp->shared->clk)) {
   3141		mp->t_clk = clk_get_rate(mp->shared->clk);
   3142	}
   3143
   3144	set_params(mp, pd);
   3145	netif_set_real_num_tx_queues(dev, mp->txq_count);
   3146	netif_set_real_num_rx_queues(dev, mp->rxq_count);
   3147
   3148	err = 0;
   3149	if (pd->phy_node) {
   3150		phydev = of_phy_connect(mp->dev, pd->phy_node,
   3151					mv643xx_eth_adjust_link, 0,
   3152					get_phy_mode(mp));
   3153		if (!phydev)
   3154			err = -ENODEV;
   3155		else
   3156			phy_addr_set(mp, phydev->mdio.addr);
   3157	} else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
   3158		phydev = phy_scan(mp, pd->phy_addr);
   3159
   3160		if (IS_ERR(phydev))
   3161			err = PTR_ERR(phydev);
   3162		else
   3163			phy_init(mp, pd->speed, pd->duplex);
   3164	}
   3165	if (err == -ENODEV) {
   3166		err = -EPROBE_DEFER;
   3167		goto out;
   3168	}
   3169	if (err)
   3170		goto out;
   3171
   3172	dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
   3173
   3174	init_pscr(mp, pd->speed, pd->duplex);
   3175
   3176
   3177	mib_counters_clear(mp);
   3178
   3179	timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0);
   3180	mp->mib_counters_timer.expires = jiffies + 30 * HZ;
   3181
   3182	spin_lock_init(&mp->mib_counters_lock);
   3183
   3184	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
   3185
   3186	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
   3187
   3188	timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
   3189
   3190
   3191	irq = platform_get_irq(pdev, 0);
   3192	if (WARN_ON(irq < 0)) {
   3193		err = irq;
   3194		goto out;
   3195	}
   3196	dev->irq = irq;
   3197
   3198	dev->netdev_ops = &mv643xx_eth_netdev_ops;
   3199
   3200	dev->watchdog_timeo = 2 * HZ;
   3201	dev->base_addr = 0;
   3202
   3203	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
   3204	dev->vlan_features = dev->features;
   3205
   3206	dev->features |= NETIF_F_RXCSUM;
   3207	dev->hw_features = dev->features;
   3208
   3209	dev->priv_flags |= IFF_UNICAST_FLT;
   3210	netif_set_tso_max_segs(dev, MV643XX_MAX_TSO_SEGS);
   3211
   3212	/* MTU range: 64 - 9500 */
   3213	dev->min_mtu = 64;
   3214	dev->max_mtu = 9500;
   3215
   3216	if (mp->shared->win_protect)
   3217		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
   3218
   3219	netif_carrier_off(dev);
   3220
   3221	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
   3222
   3223	set_rx_coal(mp, 250);
   3224	set_tx_coal(mp, 0);
   3225
   3226	err = register_netdev(dev);
   3227	if (err)
   3228		goto out;
   3229
   3230	netdev_notice(dev, "port %d with MAC address %pM\n",
   3231		      mp->port_num, dev->dev_addr);
   3232
   3233	if (mp->tx_desc_sram_size > 0)
   3234		netdev_notice(dev, "configured with sram\n");
   3235
   3236	return 0;
   3237
   3238out:
   3239	if (!IS_ERR(mp->clk))
   3240		clk_disable_unprepare(mp->clk);
   3241	free_netdev(dev);
   3242
   3243	return err;
   3244}
   3245
   3246static int mv643xx_eth_remove(struct platform_device *pdev)
   3247{
   3248	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
   3249	struct net_device *dev = mp->dev;
   3250
   3251	unregister_netdev(mp->dev);
   3252	if (dev->phydev)
   3253		phy_disconnect(dev->phydev);
   3254	cancel_work_sync(&mp->tx_timeout_task);
   3255
   3256	if (!IS_ERR(mp->clk))
   3257		clk_disable_unprepare(mp->clk);
   3258
   3259	free_netdev(mp->dev);
   3260
   3261	return 0;
   3262}
   3263
   3264static void mv643xx_eth_shutdown(struct platform_device *pdev)
   3265{
   3266	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
   3267
   3268	/* Mask all interrupts on ethernet port */
   3269	wrlp(mp, INT_MASK, 0);
   3270	rdlp(mp, INT_MASK);
   3271
   3272	if (netif_running(mp->dev))
   3273		port_reset(mp);
   3274}
   3275
   3276static struct platform_driver mv643xx_eth_driver = {
   3277	.probe		= mv643xx_eth_probe,
   3278	.remove		= mv643xx_eth_remove,
   3279	.shutdown	= mv643xx_eth_shutdown,
   3280	.driver = {
   3281		.name	= MV643XX_ETH_NAME,
   3282	},
   3283};
   3284
   3285static struct platform_driver * const drivers[] = {
   3286	&mv643xx_eth_shared_driver,
   3287	&mv643xx_eth_driver,
   3288};
   3289
   3290static int __init mv643xx_eth_init_module(void)
   3291{
   3292	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
   3293}
   3294module_init(mv643xx_eth_init_module);
   3295
   3296static void __exit mv643xx_eth_cleanup_module(void)
   3297{
   3298	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
   3299}
   3300module_exit(mv643xx_eth_cleanup_module);
   3301
   3302MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
   3303	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
   3304MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
   3305MODULE_LICENSE("GPL");
   3306MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
   3307MODULE_ALIAS("platform:" MV643XX_ETH_NAME);