cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ixp4xx_eth.c (40003B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx Ethernet driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * Ethernet port config (0x00 is not present on IXP42X):
 *
 * logical port		0x00		0x10		0x20
 * NPE			0 (NPE-A)	1 (NPE-B)	2 (NPE-C)
 * physical PortId	2		0		1
 * TX queue		23		24		25
 * RX-free queue	26		27		28
 * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
 *
 * Queue entries:
 * bits 0 -> 1	- NPE ID (RX and TX-done)
 * bits 0 -> 2	- priority (TX, per 802.1D)
 * bits 3 -> 4	- port ID (user-set?)
 * bits 5 -> 31	- physical descriptor address
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>
#include <linux/soc/ixp4xx/cpu.h>
#include <linux/types.h>

#define IXP4XX_ETH_NPEA		0x00
#define IXP4XX_ETH_NPEB		0x10
#define IXP4XX_ETH_NPEC		0x20

#include "ixp46x_ts.h"

#define DEBUG_DESC		0
#define DEBUG_RX		0
#define DEBUG_TX		0
#define DEBUG_PKT_BYTES		0
#define DEBUG_MDIO		0
#define DEBUG_CLOSE		0

#define DRV_NAME		"ixp4xx_eth"

#define MAX_NPES		3

#define RX_DESCS		64 /* also length of all RX queues */
#define TX_DESCS		16 /* also length of all TX queues */
#define TXDONE_QUEUE_LEN	64 /* dwords */

#define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define REGS_SIZE		0x1000
#define MAX_MRU			1536 /* 0x600 */
#define RX_BUFF_SIZE		ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)
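/*
 * With the usual NET_IP_ALIGN of 2, RX_BUFF_SIZE works out to
 * ALIGN(2 + 1536, 4) = 1540 bytes: room for a maximum-MRU frame plus
 * the 2-byte shift that keeps the IP header 32-bit aligned.
 */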

#define NAPI_WEIGHT		16
#define MDIO_INTERVAL		(3 * HZ)
#define MAX_MDIO_RETRIES	100 /* microseconds, typically 30 cycles */
#define MAX_CLOSE_WAIT		1000 /* microseconds, typically 2-3 cycles */

#define NPE_ID(port_id)		((port_id) >> 4)
#define PHYSICAL_ID(port_id)	((NPE_ID(port_id) + 2) % 3)
#define TX_QUEUE(port_id)	(NPE_ID(port_id) + 23)
#define RXFREE_QUEUE(port_id)	(NPE_ID(port_id) + 26)
#define TXDONE_QUEUE		31
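/*
 * Example, following the table at the top of the file: logical port 0x10
 * (NPE-B) gives NPE_ID = 1, PHYSICAL_ID = 0, TX_QUEUE = 24 and
 * RXFREE_QUEUE = 27.
 */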

#define PTP_SLAVE_MODE		1
#define PTP_MASTER_MODE		2
#define PORT2CHANNEL(p)		NPE_ID(p->id)

/* TX Control Registers */
#define TX_CNTRL0_TX_EN		0x01
#define TX_CNTRL0_HALFDUPLEX	0x02
#define TX_CNTRL0_RETRY		0x04
#define TX_CNTRL0_PAD_EN	0x08
#define TX_CNTRL0_APPEND_FCS	0x10
#define TX_CNTRL0_2DEFER	0x20
#define TX_CNTRL0_RMII		0x40 /* reduced MII */
#define TX_CNTRL1_RETRIES	0x0F /* 4 bits */

/* RX Control Registers */
#define RX_CNTRL0_RX_EN		0x01
#define RX_CNTRL0_PADSTRIP_EN	0x02
#define RX_CNTRL0_SEND_FCS	0x04
#define RX_CNTRL0_PAUSE_EN	0x08
#define RX_CNTRL0_LOOP_EN	0x10
#define RX_CNTRL0_ADDR_FLTR_EN	0x20
#define RX_CNTRL0_RX_RUNT_EN	0x40
#define RX_CNTRL0_BCAST_DIS	0x80
#define RX_CNTRL1_DEFER_EN	0x01

/* Core Control Register */
#define CORE_RESET		0x01
#define CORE_RX_FIFO_FLUSH	0x02
#define CORE_TX_FIFO_FLUSH	0x04
#define CORE_SEND_JAM		0x08
#define CORE_MDC_EN		0x10 /* MDIO using NPE-B ETH-0 only */

#define DEFAULT_TX_CNTRL0	(TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY |	\
				 TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
				 TX_CNTRL0_2DEFER)
#define DEFAULT_RX_CNTRL0	RX_CNTRL0_RX_EN
#define DEFAULT_CORE_CNTRL	CORE_MDC_EN


/* NPE message codes */
#define NPE_GETSTATUS			0x00
#define NPE_EDB_SETPORTADDRESS		0x01
#define NPE_EDB_GETMACADDRESSDATABASE	0x02
#define NPE_EDB_SETMACADDRESSSDATABASE	0x03
#define NPE_GETSTATS			0x04
#define NPE_RESETSTATS			0x05
#define NPE_SETMAXFRAMELENGTHS		0x06
#define NPE_VLAN_SETRXTAGMODE		0x07
#define NPE_VLAN_SETDEFAULTRXVID	0x08
#define NPE_VLAN_SETPORTVLANTABLEENTRY	0x09
#define NPE_VLAN_SETPORTVLANTABLERANGE	0x0A
#define NPE_VLAN_SETRXQOSENTRY		0x0B
#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
#define NPE_STP_SETBLOCKINGSTATE	0x0D
#define NPE_FW_SETFIREWALLMODE		0x0E
#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
#define NPE_PC_SETAPMACTABLE		0x11
#define NPE_SETLOOPBACK_MODE		0x12
#define NPE_PC_SETBSSIDTABLE		0x13
#define NPE_ADDRESS_FILTER_CONFIG	0x14
#define NPE_APPENDFCSCONFIG		0x15
#define NPE_NOTIFY_MAC_RECOVERY_DONE	0x16
#define NPE_MAC_RECOVERY_START		0x17


#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_consume_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif
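/*
 * On big-endian ARM the RX/TX buffers are full sk_buffs handed to the NPE
 * zero-copy; on little-endian builds they are plain kmalloc() bounce
 * buffers, since the frame data has to be byte-swapped 32 bits at a time
 * anyway (see memcpy_swab32() below).
 */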

/* Information about built-in Ethernet MAC interfaces */
struct eth_plat_info {
	u8 phy;		/* MII PHY ID, 0 - 31 */
	u8 rxq;		/* configurable, currently 0 - 31 only */
	u8 txreadyq;
	u8 hwaddr[6];
	u8 npe;		/* NPE instance used by this interface */
	bool has_mdio;	/* If this instance has an MDIO bus */
};

struct eth_regs {
	u32 tx_control[2], __res1[2];		/* 000 */
	u32 rx_control[2], __res2[2];		/* 010 */
	u32 random_seed, __res3[3];		/* 020 */
	u32 partial_empty_threshold, __res4;	/* 030 */
	u32 partial_full_threshold, __res5;	/* 038 */
	u32 tx_start_bytes, __res6[3];		/* 040 */
	u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
	u32 tx_2part_deferral[2], __res8[2];	/* 060 */
	u32 slot_time, __res9[3];		/* 070 */
	u32 mdio_command[4];			/* 080 */
	u32 mdio_status[4];			/* 090 */
	u32 mcast_mask[6], __res10[2];		/* 0A0 */
	u32 mcast_addr[6], __res11[2];		/* 0C0 */
	u32 int_clock_threshold, __res12[3];	/* 0E0 */
	u32 hw_addr[6], __res13[61];		/* 0F0 */
	u32 core_control;			/* 1FC */
};

struct port {
	struct eth_regs __iomem *regs;
	struct ixp46x_ts_regs __iomem *timesync_regs;
	int phc_index;
	struct npe *npe;
	struct net_device *netdev;
	struct napi_struct napi;
	struct eth_plat_info *plat;
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	dma_addr_t desc_tab_phys;
	int id;			/* logical port ID */
	int speed, duplex;
	u8 firmware[4];
	int hwts_tx_en;
	int hwts_rx_en;
};

/* NPE message structure */
struct msg {
#ifdef __ARMEB__
	u8 cmd, eth_id, byte2, byte3;
	u8 byte4, byte5, byte6, byte7;
#else
	u8 byte3, byte2, eth_id, cmd;
	u8 byte7, byte6, byte5, byte4;
#endif
};

/* Ethernet packet descriptor */
struct desc {
	u32 next;		/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;		/* buffer length */
	u16 pkt_len;		/* packet length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 dest_id;
	u8 src_id;
	u16 flags;
	u8 qos;
	u8 padlen;
	u16 vlan_tci;
#else
	u16 pkt_len;		/* packet length */
	u16 buf_len;		/* buffer length */
	u32 data;		/* pointer to data buffer in RAM */
	u16 flags;
	u8 src_id;
	u8 dest_id;
	u16 vlan_tci;
	u8 padlen;
	u8 qos;
#endif

#ifdef __ARMEB__
	u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
	u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
	u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
#else
	u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
	u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
	u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
#endif
};
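/*
 * The NPE views descriptors as big-endian 32-bit words. On little-endian
 * builds every u8/u16 field is therefore declared in byte-swapped order
 * within its 32-bit word, so the struct layout seen by the CPU matches
 * what the NPE reads and writes.
 */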


#define rx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])

#ifndef __ARMEB__
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	int i;
	for (i = 0; i < cnt; i++)
		dest[i] = swab32(src[i]);
}
#endif

static DEFINE_SPINLOCK(mdio_lock);
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
static struct mii_bus *mdio_bus;
static struct device_node *mdio_bus_np;
static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;

static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
	u8 *data = skb->data;
	unsigned int offset;
	u16 *hi, *id;
	u32 lo;

	if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
		return 0;

	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;

	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
		return 0;

	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	memcpy(&lo, &hi[1], sizeof(lo));

	return (uid_hi == ntohs(*hi) &&
		uid_lo == ntohl(lo) &&
		seqid  == ntohs(*id));
}

static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ixp46x_ts_regs *regs;
	u64 ns;
	u32 ch, hi, lo, val;
	u16 uid, seq;

	if (!port->hwts_rx_en)
		return;

	ch = PORT2CHANNEL(port);

	regs = port->timesync_regs;

	val = __raw_readl(&regs->channel[ch].ch_event);

	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = __raw_readl(&regs->channel[ch].src_uuid_lo);
	hi = __raw_readl(&regs->channel[ch].src_uuid_hi);

	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	lo = __raw_readl(&regs->channel[ch].rx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].rx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	__raw_writel(RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}

static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct ixp46x_ts_regs *regs;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 ch, cnt, hi, lo, val;

	shtx = skb_shinfo(skb);
	if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en))
		shtx->tx_flags |= SKBTX_IN_PROGRESS;
	else
		return;

	ch = PORT2CHANNEL(port);

	regs = port->timesync_regs;

	/*
	 * This really stinks, but we have to poll for the Tx time stamp.
	 * Usually, the time stamp is ready after 4 to 6 microseconds.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = __raw_readl(&regs->channel[ch].ch_event);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	lo = __raw_readl(&regs->channel[ch].tx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].tx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}

static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	struct ixp46x_ts_regs *regs;
	struct port *port = netdev_priv(netdev);
	int ret;
	int ch;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index);
	if (ret)
		return ret;

	ch = PORT2CHANNEL(port);
	regs = port->timesync_regs;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		port->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		port->hwts_rx_en = PTP_SLAVE_MODE;
		__raw_writel(0, &regs->channel[ch].ch_control);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		port->hwts_rx_en = PTP_MASTER_MODE;
		__raw_writel(MASTER_MODE, &regs->channel[ch].ch_control);
		break;
	default:
		return -ERANGE;
	}

	port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;

	/* Clear out any old time stamps. */
	__raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
		     &regs->channel[ch].ch_event);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	struct port *port = netdev_priv(netdev);

	cfg.flags = 0;
	cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;

	switch (port->hwts_rx_en) {
	case 0:
		cfg.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case PTP_SLAVE_MODE:
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		break;
	case PTP_MASTER_MODE:
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		break;
	default:
		WARN_ON_ONCE(1);
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
			   int write, u16 cmd)
{
	int cycles = 0;

	if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
		printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
		return -1;
	}

	if (write) {
		__raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
		__raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
	}
	__raw_writel(((phy_id << 5) | location) & 0xFF,
		     &mdio_regs->mdio_command[2]);
	__raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
		     &mdio_regs->mdio_command[3]);

	while ((cycles < MAX_MDIO_RETRIES) &&
	       (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
		udelay(1);
		cycles++;
	}

	if (cycles == MAX_MDIO_RETRIES) {
		printk(KERN_ERR "%s #%i: MII write failed\n", bus->name,
		       phy_id);
		return -1;
	}

#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
	       phy_id, write ? "write" : "read", cycles);
#endif

	if (write)
		return 0;

	if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
#if DEBUG_MDIO
		printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
		       phy_id);
#endif
		return 0xFFFF; /* don't return error */
	}

	return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
		((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
}

static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
	       phy_id, location, ret);
#endif
	return ret;
}

static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
			     u16 val)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n",
	       bus->name, phy_id, location, val, ret);
#endif
	return ret;
}

static int ixp4xx_mdio_register(struct eth_regs __iomem *regs)
{
	int err;

	if (!(mdio_bus = mdiobus_alloc()))
		return -ENOMEM;

	mdio_regs = regs;
	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
	mdio_bus->name = "IXP4xx MII Bus";
	mdio_bus->read = &ixp4xx_mdio_read;
	mdio_bus->write = &ixp4xx_mdio_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0");

	err = of_mdiobus_register(mdio_bus, mdio_bus_np);
	if (err)
		mdiobus_free(mdio_bus);
	return err;
}

static void ixp4xx_mdio_remove(void)
{
	mdiobus_unregister(mdio_bus);
	mdiobus_free(mdio_bus);
}


static void ixp4xx_adjust_link(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	if (!phydev->link) {
		if (port->speed) {
			port->speed = 0;
			printk(KERN_INFO "%s: link down\n", dev->name);
		}
		return;
	}

	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
		return;

	port->speed = phydev->speed;
	port->duplex = phydev->duplex;

	if (port->duplex)
		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);
	else
		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);

	netdev_info(dev, "%s: link up, speed %u Mb/s, %s duplex\n",
		    dev->name, port->speed, port->duplex ? "full" : "half");
}


static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	netdev_dbg(dev, "%s(%i) ", func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		printk("%s%02X",
		       ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
		       data[i]);
	}
	printk("\n");
#endif
}


static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
	       " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->dest_id, desc->src_id, desc->flags,
	       desc->qos, desc->padlen, desc->vlan_tci,
	       desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
	       desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
	       desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
	       desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
#endif
}

static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	phys &= ~0x1F; /* mask out non-address bits */
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);
	return n_desc;
}

static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);
	qmgr_put_entry(queue, phys);
	/* Don't check for queue overflow here, we've allocated sufficient
	   length and queues >= 32 don't support this check anyway. */
}


static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}
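/*
 * The little-endian branch mirrors the mapping done in eth_xmit(): there
 * the bounce buffer is mapped from a 32-bit aligned address with the
 * length rounded up to a multiple of 4, so the unmap reconstructs the
 * same base address and size from desc->data.
 */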


static void eth_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = netdev_priv(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(port->plat->rxq);
	napi_schedule(&port->napi);
}

static int eth_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
	int received = 0;

#if DEBUG_RX
	netdev_dbg(dev, "eth_poll\n");
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			netdev_dbg(dev, "eth_poll napi_complete\n");
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_below_low_watermark(rxq) &&
			    napi_reschedule(napi)) { /* not empty again */
#if DEBUG_RX
				netdev_dbg(dev, "eth_poll napi_reschedule succeeded\n");
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			netdev_dbg(dev, "eth_poll all done\n");
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);

#ifdef __ARMEB__
		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
			phys = dma_map_single(&dev->dev, skb->data,
					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(&dev->dev, phys)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
#else
		skb = netdev_alloc_skb(dev,
				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif

		if (!skb) {
			dev->stats.rx_dropped++;
			/* put the desc back on RX-ready queue */
			desc->buf_len = MAX_MRU;
			desc->pkt_len = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
					RX_BUFF_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "eth_poll", skb->data, skb->len);

		ixp_rx_timestamp(port, skb);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys + NET_IP_ALIGN;
#endif
		desc->buf_len = MAX_MRU;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	netdev_dbg(dev, "eth_poll(): end, not all work done\n");
#endif
	return received;		/* not all work done */
}


static void eth_txdone_irq(void *unused)
{
	u32 phys;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
#endif
	while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
		u32 npe_id, n_desc;
		struct port *port;
		struct desc *desc;
		int start;

		npe_id = phys & 3;
		BUG_ON(npe_id >= MAX_NPES);
		port = npe_port_tab[npe_id];
		BUG_ON(!port);
		phys &= ~0x1F; /* mask out non-address bits */
		n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
		BUG_ON(n_desc >= TX_DESCS);
		desc = tx_desc_ptr(port, n_desc);
		debug_desc(phys, desc);

		if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
			port->netdev->stats.tx_packets++;
			port->netdev->stats.tx_bytes += desc->pkt_len;

			dma_unmap_tx(port, desc);
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
			       port->netdev->name, port->tx_buff_tab[n_desc]);
#endif
			free_buffer_irq(port->tx_buff_tab[n_desc]);
			port->tx_buff_tab[n_desc] = NULL;
		}

		start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq, phys, desc);
		if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
			       port->netdev->name);
#endif
			netif_wake_queue(port->netdev);
		}
	}
}

static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	netdev_dbg(dev, "eth_xmit\n");
#endif

	if (unlikely(skb->len > MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "eth_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (uintptr_t)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
		dev_kfree_skb(skb);
#ifndef __ARMEB__
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	/* NPE firmware pads short frames with zeros internally */
	wmb();
	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);

	if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
		netdev_dbg(dev, "eth_xmit queue full\n");
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		/* really empty in fact */
		if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
			netdev_dbg(dev, "eth_xmit ready again\n");
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	netdev_dbg(dev, "eth_xmit end\n");
#endif

	ixp_tx_timestamp(port, skb);
	skb_tx_timestamp(skb);

#ifndef __ARMEB__
	dev_kfree_skb(skb);
#endif
	return NETDEV_TX_OK;
}


static void eth_set_mcast_list(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 diffs[ETH_ALEN], *addr;
	int i;
	static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };

	if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
		for (i = 0; i < ETH_ALEN; i++) {
			__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
			__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
		}
		__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
			&port->regs->rx_control[0]);
		return;
	}

	if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	eth_zero_addr(diffs);

	addr = NULL;
	netdev_for_each_mc_addr(ha, dev) {
		if (!addr)
			addr = ha->addr; /* first MAC address */
		for (i = 0; i < ETH_ALEN; i++)
			diffs[i] |= addr[i] ^ ha->addr[i];
	}

	for (i = 0; i < ETH_ALEN; i++) {
		__raw_writel(addr[i], &port->regs->mcast_addr[i]);
		__raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
	}

	__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
		     &port->regs->rx_control[0]);
}


static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (cpu_is_ixp46x()) {
		if (cmd == SIOCSHWTSTAMP)
			return hwtstamp_set(dev, req);
		if (cmd == SIOCGHWTSTAMP)
			return hwtstamp_get(dev, req);
	}

	return phy_mii_ioctl(dev->phydev, req, cmd);
}

/* ethtool support */

static void ixp4xx_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct port *port = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
		 port->firmware[0], port->firmware[1],
		 port->firmware[2], port->firmware[3]);
	strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
}

static int ixp4xx_get_ts_info(struct net_device *dev,
			      struct ethtool_ts_info *info)
{
	struct port *port = netdev_priv(dev);

	if (port->phc_index < 0)
		ixp46x_ptp_find(&port->timesync_regs, &port->phc_index);

	info->phc_index = port->phc_index;

	if (info->phc_index < 0) {
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE |
			SOF_TIMESTAMPING_RX_SOFTWARE |
			SOF_TIMESTAMPING_SOFTWARE;
		return 0;
	}
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
	return 0;
}

static const struct ethtool_ops ixp4xx_ethtool_ops = {
	.get_drvinfo = ixp4xx_get_drvinfo,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ixp4xx_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};


static int request_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	/* TX-done queue handles skbs sent out by the NPEs */
	if (!ports_open) {
		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
					 "%s:TX-done", DRV_NAME);
		if (err)
			goto rel_txready;
	}
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(TX_QUEUE(port->id));
rel_rx:
	qmgr_release_queue(port->plat->rxq);
rel_rxfree:
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}

static void release_queues(struct port *port)
{
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	qmgr_release_queue(port->plat->rxq);
	qmgr_release_queue(TX_QUEUE(port->id));
	qmgr_release_queue(port->plat->txreadyq);

	if (!ports_open)
		qmgr_release_queue(TXDONE_QUEUE);
}

static int init_queues(struct port *port)
{
	int i;

	if (!ports_open) {
		dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
					   POOL_ALLOC_SIZE, 32, 0);
		if (!dma_pool)
			return -ENOMEM;
	}

	port->desc_tab = dma_pool_zalloc(dma_pool, GFP_KERNEL, &port->desc_tab_phys);
	if (!port->desc_tab)
		return -ENOMEM;
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff; /* skb or kmalloc()ated memory */
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = MAX_MRU;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_BUFF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		desc->data += NET_IP_ALIGN;
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}

static void destroy_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data - NET_IP_ALIGN,
						 RX_BUFF_SIZE, DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}

static int eth_open(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct npe *npe = port->npe;
	struct msg msg;
	int i, err;

	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
		if (err)
			return err;

		if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
			netdev_err(dev, "%s not responding\n", npe_name(npe));
			return -EIO;
		}
		port->firmware[0] = msg.byte4;
		port->firmware[1] = msg.byte5;
		port->firmware[2] = msg.byte6;
		port->firmware[3] = msg.byte7;
	}

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
	msg.eth_id = port->id;
	msg.byte5 = port->plat->rxq | 0x80;
	msg.byte7 = port->plat->rxq << 4;
	for (i = 0; i < 8; i++) {
		msg.byte3 = i;
		if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
			return -EIO;
	}

	msg.cmd = NPE_EDB_SETPORTADDRESS;
	msg.eth_id = PHYSICAL_ID(port->id);
	msg.byte2 = dev->dev_addr[0];
	msg.byte3 = dev->dev_addr[1];
	msg.byte4 = dev->dev_addr[2];
	msg.byte5 = dev->dev_addr[3];
	msg.byte6 = dev->dev_addr[4];
	msg.byte7 = dev->dev_addr[5];
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
		return -EIO;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_FW_SETFIREWALLMODE;
	msg.eth_id = port->id;
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
		return -EIO;

	if ((err = request_queues(port)) != 0)
		return err;

	if ((err = init_queues(port)) != 0) {
		destroy_queues(port);
		release_queues(port);
		return err;
	}

	port->speed = 0;	/* force "link up" message */
	phy_start(dev->phydev);

	for (i = 0; i < ETH_ALEN; i++)
		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
	__raw_writel(0x08, &port->regs->random_seed);
	__raw_writel(0x12, &port->regs->partial_empty_threshold);
	__raw_writel(0x30, &port->regs->partial_full_threshold);
	__raw_writel(0x08, &port->regs->tx_start_bytes);
	__raw_writel(0x15, &port->regs->tx_deferral);
	__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
	__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
	__raw_writel(0x80, &port->regs->slot_time);
	__raw_writel(0x01, &port->regs->int_clock_threshold);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(RXFREE_QUEUE(port->id),
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
	__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
	__raw_writel(0, &port->regs->rx_control[1]);
	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);

	napi_enable(&port->napi);
	eth_set_mcast_list(dev);
	netif_start_queue(dev);

	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
		     eth_rx_irq, dev);
	if (!ports_open) {
		qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
			     eth_txdone_irq, NULL);
		qmgr_enable_irq(TXDONE_QUEUE);
	}
	ports_open++;
	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;
}

static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct msg msg;
	int buffs = RX_DESCS; /* allocated RX buffers */
	int i;

	ports_open--;
	qmgr_disable_irq(port->plat->rxq);
	napi_disable(&port->napi);
	netif_stop_queue(dev);

	while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
		buffs--;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_SETLOOPBACK_MODE;
	msg.eth_id = port->id;
	msg.byte3 = 1;
	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
		netdev_crit(dev, "unable to enable loopback\n");

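	/*
	 * Drain strategy: with the port looped back internally, 1-byte
	 * dummy frames pushed onto the TX queue come straight back on RX,
	 * which flushes out the RX buffers the NPE is still holding so
	 * they can be reclaimed below.
	 */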
	i = 0;
	do {			/* drain RX buffers */
		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
			buffs--;
		if (!buffs)
			break;
		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
			/* we have to inject some packet */
			struct desc *desc;
			u32 phys;
			int n = queue_get_desc(port->plat->txreadyq, port, 1);
			BUG_ON(n < 0);
			desc = tx_desc_ptr(port, n);
			phys = tx_desc_phys(port, n);
			desc->buf_len = desc->pkt_len = 1;
			wmb();
			queue_put_desc(TX_QUEUE(port->id), phys, desc);
		}
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		netdev_crit(dev, "unable to drain RX queue, %i buffer(s)"
			    " left in NPE\n", buffs);
#if DEBUG_CLOSE
	if (!buffs)
		netdev_dbg(dev, "draining RX queue took %i cycles\n", i);
#endif

	buffs = TX_DESCS;
	while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		netdev_crit(dev, "unable to drain TX queue, %i buffer(s) "
			    "left in NPE\n", buffs);
#if DEBUG_CLOSE
	if (!buffs)
		netdev_dbg(dev, "draining TX queues took %i cycles\n", i);
#endif

	msg.byte3 = 0;
	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
		netdev_crit(dev, "unable to disable loopback\n");

	phy_stop(dev->phydev);

	if (!ports_open)
		qmgr_disable_irq(TXDONE_QUEUE);
	destroy_queues(port);
	release_queues(port);
	return 0;
}

static const struct net_device_ops ixp4xx_netdev_ops = {
	.ndo_open = eth_open,
	.ndo_stop = eth_close,
	.ndo_start_xmit = eth_xmit,
	.ndo_set_rx_mode = eth_set_mcast_list,
	.ndo_eth_ioctl = eth_ioctl,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args queue_spec;
	struct of_phandle_args npe_spec;
	struct device_node *mdio_np;
	struct eth_plat_info *plat;
	int ret;

	plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return NULL;

	ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle", 1, 0,
					       &npe_spec);
	if (ret) {
		dev_err(dev, "no NPE engine specified\n");
		return NULL;
	}
	/* NPE ID 0x00, 0x10, 0x20... */
	plat->npe = (npe_spec.args[0] << 4);

	/* Check if this device has an MDIO bus */
	mdio_np = of_get_child_by_name(np, "mdio");
	if (mdio_np) {
		plat->has_mdio = true;
		mdio_bus_np = mdio_np;
		/* DO NOT put the mdio_np, it will be used */
	}

	/* Get the rx queue as a resource from queue manager */
	ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
					       &queue_spec);
	if (ret) {
		dev_err(dev, "no rx queue phandle\n");
		return NULL;
	}
	plat->rxq = queue_spec.args[0];

	/* Get the txready queue as resource from queue manager */
	ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
					       &queue_spec);
	if (ret) {
		dev_err(dev, "no txready queue phandle\n");
		return NULL;
	}
	plat->txreadyq = queue_spec.args[0];

	return plat;
}

static int ixp4xx_eth_probe(struct platform_device *pdev)
{
	struct phy_device *phydev = NULL;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct eth_plat_info *plat;
	struct net_device *ndev;
	struct port *port;
	int err;

	plat = ixp4xx_of_get_platdata(dev);
	if (!plat)
		return -ENODEV;

	if (!(ndev = devm_alloc_etherdev(dev, sizeof(struct port))))
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, dev);
	port = netdev_priv(ndev);
	port->netdev = ndev;
	port->id = plat->npe;
	port->phc_index = -1;

	/* Get the port resource and remap */
	port->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(port->regs))
		return PTR_ERR(port->regs);

	/* Register the MDIO bus if we have it */
	if (plat->has_mdio) {
		err = ixp4xx_mdio_register(port->regs);
		if (err) {
			dev_err(dev, "failed to register MDIO bus\n");
			return err;
		}
	}
	/* If the instance with the MDIO bus has not yet appeared,
	 * defer probing until it gets probed.
	 */
	if (!mdio_bus)
		return -EPROBE_DEFER;

	ndev->netdev_ops = &ixp4xx_netdev_ops;
	ndev->ethtool_ops = &ixp4xx_ethtool_ops;
	ndev->tx_queue_len = 100;
	/* Inherit the DMA masks from the platform device */
	ndev->dev.dma_mask = dev->dma_mask;
	ndev->dev.coherent_dma_mask = dev->coherent_dma_mask;

	netif_napi_add_weight(ndev, &port->napi, eth_poll, NAPI_WEIGHT);

	if (!(port->npe = npe_request(NPE_ID(port->id))))
		return -EIO;

	port->plat = plat;
	npe_port_tab[NPE_ID(port->id)] = port;
	eth_hw_addr_set(ndev, plat->hwaddr);

	platform_set_drvdata(pdev, ndev);

	__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
		     &port->regs->core_control);
	udelay(50);
	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
	udelay(50);

	phydev = of_phy_get_and_connect(ndev, np, ixp4xx_adjust_link);
	if (!phydev) {
		err = -ENODEV;
		dev_err(dev, "no phydev\n");
		goto err_free_mem;
	}

	phydev->irq = PHY_POLL;

	if ((err = register_netdev(ndev)))
		goto err_phy_dis;

	netdev_info(ndev, "%s: MII PHY %i on %s\n", ndev->name, plat->phy,
		    npe_name(port->npe));

	return 0;

err_phy_dis:
	phy_disconnect(phydev);
err_free_mem:
	npe_port_tab[NPE_ID(port->id)] = NULL;
	npe_release(port->npe);
	return err;
}

static int ixp4xx_eth_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct phy_device *phydev = ndev->phydev;
	struct port *port = netdev_priv(ndev);

	unregister_netdev(ndev);
	phy_disconnect(phydev);
	ixp4xx_mdio_remove();
	npe_port_tab[NPE_ID(port->id)] = NULL;
	npe_release(port->npe);
	return 0;
}

static const struct of_device_id ixp4xx_eth_of_match[] = {
	{
		.compatible = "intel,ixp4xx-ethernet",
	},
	{ },
};

static struct platform_driver ixp4xx_eth_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(ixp4xx_eth_of_match),
	},
	.probe		= ixp4xx_eth_probe,
	.remove		= ixp4xx_eth_remove,
};
module_platform_driver(ixp4xx_eth_driver);

MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_eth");