cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

octeon_mgmt.c (41746B)


      1/*
      2 * This file is subject to the terms and conditions of the GNU General Public
      3 * License.  See the file "COPYING" in the main directory of this archive
      4 * for more details.
      5 *
      6 * Copyright (C) 2009-2012 Cavium, Inc
      7 */
      8
      9#include <linux/platform_device.h>
     10#include <linux/dma-mapping.h>
     11#include <linux/etherdevice.h>
     12#include <linux/capability.h>
     13#include <linux/net_tstamp.h>
     14#include <linux/interrupt.h>
     15#include <linux/netdevice.h>
     16#include <linux/spinlock.h>
     17#include <linux/if_vlan.h>
     18#include <linux/of_mdio.h>
     19#include <linux/module.h>
     20#include <linux/of_net.h>
     21#include <linux/init.h>
     22#include <linux/slab.h>
     23#include <linux/phy.h>
     24#include <linux/io.h>
     25
     26#include <asm/octeon/octeon.h>
     27#include <asm/octeon/cvmx-mixx-defs.h>
     28#include <asm/octeon/cvmx-agl-defs.h>
     29
     30#define DRV_NAME "octeon_mgmt"
     31#define DRV_DESCRIPTION \
     32	"Cavium Networks Octeon MII (management) port Network Driver"
     33
     34#define OCTEON_MGMT_NAPI_WEIGHT 16
     35
     36/* Ring sizes that are powers of two allow for more efficient modulo
      37 * operations.
     38 */
     39#define OCTEON_MGMT_RX_RING_SIZE 512
     40#define OCTEON_MGMT_TX_RING_SIZE 128
     41
     42/* Allow 8 bytes for vlan and FCS. */
     43#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
     44
     45union mgmt_port_ring_entry {
     46	u64 d64;
     47	struct {
     48#define RING_ENTRY_CODE_DONE 0xf
     49#define RING_ENTRY_CODE_MORE 0x10
     50#ifdef __BIG_ENDIAN_BITFIELD
     51		u64 reserved_62_63:2;
     52		/* Length of the buffer/packet in bytes */
     53		u64 len:14;
     54		/* For TX, signals that the packet should be timestamped */
     55		u64 tstamp:1;
     56		/* The RX error code */
     57		u64 code:7;
     58		/* Physical address of the buffer */
     59		u64 addr:40;
     60#else
     61		u64 addr:40;
     62		u64 code:7;
     63		u64 tstamp:1;
     64		u64 len:14;
     65		u64 reserved_62_63:2;
     66#endif
     67	} s;
     68};
     69
     70#define MIX_ORING1	0x0
     71#define MIX_ORING2	0x8
     72#define MIX_IRING1	0x10
     73#define MIX_IRING2	0x18
     74#define MIX_CTL		0x20
     75#define MIX_IRHWM	0x28
     76#define MIX_IRCNT	0x30
     77#define MIX_ORHWM	0x38
     78#define MIX_ORCNT	0x40
     79#define MIX_ISR		0x48
     80#define MIX_INTENA	0x50
     81#define MIX_REMCNT	0x58
     82#define MIX_BIST	0x78
     83
     84#define AGL_GMX_PRT_CFG			0x10
     85#define AGL_GMX_RX_FRM_CTL		0x18
     86#define AGL_GMX_RX_FRM_MAX		0x30
     87#define AGL_GMX_RX_JABBER		0x38
     88#define AGL_GMX_RX_STATS_CTL		0x50
     89
     90#define AGL_GMX_RX_STATS_PKTS_DRP	0xb0
     91#define AGL_GMX_RX_STATS_OCTS_DRP	0xb8
     92#define AGL_GMX_RX_STATS_PKTS_BAD	0xc0
     93
     94#define AGL_GMX_RX_ADR_CTL		0x100
     95#define AGL_GMX_RX_ADR_CAM_EN		0x108
     96#define AGL_GMX_RX_ADR_CAM0		0x180
     97#define AGL_GMX_RX_ADR_CAM1		0x188
     98#define AGL_GMX_RX_ADR_CAM2		0x190
     99#define AGL_GMX_RX_ADR_CAM3		0x198
    100#define AGL_GMX_RX_ADR_CAM4		0x1a0
    101#define AGL_GMX_RX_ADR_CAM5		0x1a8
    102
    103#define AGL_GMX_TX_CLK			0x208
    104#define AGL_GMX_TX_STATS_CTL		0x268
    105#define AGL_GMX_TX_CTL			0x270
    106#define AGL_GMX_TX_STAT0		0x280
    107#define AGL_GMX_TX_STAT1		0x288
    108#define AGL_GMX_TX_STAT2		0x290
    109#define AGL_GMX_TX_STAT3		0x298
    110#define AGL_GMX_TX_STAT4		0x2a0
    111#define AGL_GMX_TX_STAT5		0x2a8
    112#define AGL_GMX_TX_STAT6		0x2b0
    113#define AGL_GMX_TX_STAT7		0x2b8
    114#define AGL_GMX_TX_STAT8		0x2c0
    115#define AGL_GMX_TX_STAT9		0x2c8
    116
    117struct octeon_mgmt {
    118	struct net_device *netdev;
    119	u64 mix;
    120	u64 agl;
    121	u64 agl_prt_ctl;
    122	int port;
    123	int irq;
    124	bool has_rx_tstamp;
    125	u64 *tx_ring;
    126	dma_addr_t tx_ring_handle;
    127	unsigned int tx_next;
    128	unsigned int tx_next_clean;
    129	unsigned int tx_current_fill;
    130	/* The tx_list lock also protects the ring related variables */
    131	struct sk_buff_head tx_list;
    132
    133	/* RX variables only touched in napi_poll.  No locking necessary. */
    134	u64 *rx_ring;
    135	dma_addr_t rx_ring_handle;
    136	unsigned int rx_next;
    137	unsigned int rx_next_fill;
    138	unsigned int rx_current_fill;
    139	struct sk_buff_head rx_list;
    140
    141	spinlock_t lock;
    142	unsigned int last_duplex;
    143	unsigned int last_link;
    144	unsigned int last_speed;
    145	struct device *dev;
    146	struct napi_struct napi;
    147	struct tasklet_struct tx_clean_tasklet;
    148	struct device_node *phy_np;
    149	resource_size_t mix_phys;
    150	resource_size_t mix_size;
    151	resource_size_t agl_phys;
    152	resource_size_t agl_size;
    153	resource_size_t agl_prt_ctl_phys;
    154	resource_size_t agl_prt_ctl_size;
    155};
    156
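        /* Helpers to mask and unmask the MIX RX (ithena) and TX (othena)
         * threshold interrupts; both take p->lock. */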
    157static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
    158{
    159	union cvmx_mixx_intena mix_intena;
    160	unsigned long flags;
    161
    162	spin_lock_irqsave(&p->lock, flags);
    163	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
    164	mix_intena.s.ithena = enable ? 1 : 0;
    165	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
    166	spin_unlock_irqrestore(&p->lock, flags);
    167}
    168
    169static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
    170{
    171	union cvmx_mixx_intena mix_intena;
    172	unsigned long flags;
    173
    174	spin_lock_irqsave(&p->lock, flags);
    175	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
    176	mix_intena.s.othena = enable ? 1 : 0;
    177	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
    178	spin_unlock_irqrestore(&p->lock, flags);
    179}
    180
    181static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
    182{
    183	octeon_mgmt_set_rx_irq(p, 1);
    184}
    185
    186static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
    187{
    188	octeon_mgmt_set_rx_irq(p, 0);
    189}
    190
    191static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
    192{
    193	octeon_mgmt_set_tx_irq(p, 1);
    194}
    195
    196static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
    197{
    198	octeon_mgmt_set_tx_irq(p, 0);
    199}
    200
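        /* Keep 8 entries of slack so the rings are never filled completely. */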
    201static unsigned int ring_max_fill(unsigned int ring_size)
    202{
    203	return ring_size - 8;
    204}
    205
    206static unsigned int ring_size_to_bytes(unsigned int ring_size)
    207{
    208	return ring_size * sizeof(union mgmt_port_ring_entry);
    209}
    210
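        /* Refill the RX ring with newly allocated skbs, mapping each buffer for
         * DMA and ringing the MIX_IRING2 doorbell for every entry added. */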
    211static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
    212{
    213	struct octeon_mgmt *p = netdev_priv(netdev);
    214
    215	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
    216		unsigned int size;
    217		union mgmt_port_ring_entry re;
    218		struct sk_buff *skb;
    219
    220		/* CN56XX pass 1 needs 8 bytes of padding.  */
    221		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;
    222
    223		skb = netdev_alloc_skb(netdev, size);
    224		if (!skb)
    225			break;
    226		skb_reserve(skb, NET_IP_ALIGN);
    227		__skb_queue_tail(&p->rx_list, skb);
    228
    229		re.d64 = 0;
    230		re.s.len = size;
    231		re.s.addr = dma_map_single(p->dev, skb->data,
    232					   size,
    233					   DMA_FROM_DEVICE);
    234
    235		/* Put it in the ring.  */
    236		p->rx_ring[p->rx_next_fill] = re.d64;
     237		/* Make sure filling the ring is not reordered with ringing
     238		 * the bell.
    239		 */
    240		wmb();
    241
    242		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
    243					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
    244					   DMA_BIDIRECTIONAL);
    245		p->rx_next_fill =
    246			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
    247		p->rx_current_fill++;
    248		/* Ring the bell.  */
    249		cvmx_write_csr(p->mix + MIX_IRING2, 1);
    250	}
    251}
    252
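        /* Reclaim completed TX entries: unmap each buffer, deliver a hardware TX
         * timestamp if one was requested, free the skb and wake the queue if it
         * was stopped. */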
    253static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
    254{
    255	union cvmx_mixx_orcnt mix_orcnt;
    256	union mgmt_port_ring_entry re;
    257	struct sk_buff *skb;
    258	int cleaned = 0;
    259	unsigned long flags;
    260
    261	mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
    262	while (mix_orcnt.s.orcnt) {
    263		spin_lock_irqsave(&p->tx_list.lock, flags);
    264
    265		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
    266
    267		if (mix_orcnt.s.orcnt == 0) {
    268			spin_unlock_irqrestore(&p->tx_list.lock, flags);
    269			break;
    270		}
    271
    272		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
    273					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
    274					DMA_BIDIRECTIONAL);
    275
    276		re.d64 = p->tx_ring[p->tx_next_clean];
    277		p->tx_next_clean =
    278			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
    279		skb = __skb_dequeue(&p->tx_list);
    280
    281		mix_orcnt.u64 = 0;
    282		mix_orcnt.s.orcnt = 1;
    283
    284		/* Acknowledge to hardware that we have the buffer.  */
    285		cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
    286		p->tx_current_fill--;
    287
    288		spin_unlock_irqrestore(&p->tx_list.lock, flags);
    289
    290		dma_unmap_single(p->dev, re.s.addr, re.s.len,
    291				 DMA_TO_DEVICE);
    292
    293		/* Read the hardware TX timestamp if one was recorded */
    294		if (unlikely(re.s.tstamp)) {
    295			struct skb_shared_hwtstamps ts;
    296			u64 ns;
    297
    298			memset(&ts, 0, sizeof(ts));
    299			/* Read the timestamp */
    300			ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
    301			/* Remove the timestamp from the FIFO */
    302			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
    303			/* Tell the kernel about the timestamp */
    304			ts.hwtstamp = ns_to_ktime(ns);
    305			skb_tstamp_tx(skb, &ts);
    306		}
    307
    308		dev_kfree_skb_any(skb);
    309		cleaned++;
    310
    311		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
    312	}
    313
    314	if (cleaned && netif_queue_stopped(p->netdev))
    315		netif_wake_queue(p->netdev);
    316}
    317
    318static void octeon_mgmt_clean_tx_tasklet(struct tasklet_struct *t)
    319{
    320	struct octeon_mgmt *p = from_tasklet(p, t, tx_clean_tasklet);
    321	octeon_mgmt_clean_tx_buffers(p);
    322	octeon_mgmt_enable_tx_irq(p);
    323}
    324
    325static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
    326{
    327	struct octeon_mgmt *p = netdev_priv(netdev);
    328	unsigned long flags;
    329	u64 drop, bad;
    330
    331	/* These reads also clear the count registers.  */
    332	drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
    333	bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);
    334
    335	if (drop || bad) {
    336		/* Do an atomic update. */
    337		spin_lock_irqsave(&p->lock, flags);
    338		netdev->stats.rx_errors += bad;
    339		netdev->stats.rx_dropped += drop;
    340		spin_unlock_irqrestore(&p->lock, flags);
    341	}
    342}
    343
    344static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
    345{
    346	struct octeon_mgmt *p = netdev_priv(netdev);
    347	unsigned long flags;
    348
    349	union cvmx_agl_gmx_txx_stat0 s0;
    350	union cvmx_agl_gmx_txx_stat1 s1;
    351
    352	/* These reads also clear the count registers.  */
    353	s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
    354	s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);
    355
    356	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
    357		/* Do an atomic update. */
    358		spin_lock_irqsave(&p->lock, flags);
    359		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
    360		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
    361		spin_unlock_irqrestore(&p->lock, flags);
    362	}
    363}
    364
    365/*
    366 * Dequeue a receive skb and its corresponding ring entry.  The ring
    367 * entry is returned, *pskb is updated to point to the skb.
    368 */
    369static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
    370					 struct sk_buff **pskb)
    371{
    372	union mgmt_port_ring_entry re;
    373
    374	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
    375				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
    376				DMA_BIDIRECTIONAL);
    377
    378	re.d64 = p->rx_ring[p->rx_next];
    379	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
    380	p->rx_current_fill--;
    381	*pskb = __skb_dequeue(&p->rx_list);
    382
    383	dma_unmap_single(p->dev, re.s.addr,
    384			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
    385			 DMA_FROM_DEVICE);
    386
    387	return re.d64;
    388}
    389
    390
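        /* Handle a single RX ring entry: deliver good packets (RING_ENTRY_CODE_DONE),
         * reassemble split packets (RING_ENTRY_CODE_MORE), drop anything else and
         * acknowledge the entry via MIX_IRCNT.  Returns 0 if a packet was delivered. */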
    391static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
    392{
    393	struct net_device *netdev = p->netdev;
    394	union cvmx_mixx_ircnt mix_ircnt;
    395	union mgmt_port_ring_entry re;
    396	struct sk_buff *skb;
    397	struct sk_buff *skb2;
    398	struct sk_buff *skb_new;
    399	union mgmt_port_ring_entry re2;
    400	int rc = 1;
    401
    402
    403	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
    404	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
    405		/* A good packet, send it up. */
    406		skb_put(skb, re.s.len);
    407good:
    408		/* Process the RX timestamp if it was recorded */
    409		if (p->has_rx_tstamp) {
    410			/* The first 8 bytes are the timestamp */
    411			u64 ns = *(u64 *)skb->data;
    412			struct skb_shared_hwtstamps *ts;
    413			ts = skb_hwtstamps(skb);
    414			ts->hwtstamp = ns_to_ktime(ns);
    415			__skb_pull(skb, 8);
    416		}
    417		skb->protocol = eth_type_trans(skb, netdev);
    418		netdev->stats.rx_packets++;
    419		netdev->stats.rx_bytes += skb->len;
    420		netif_receive_skb(skb);
    421		rc = 0;
    422	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
    423		/* Packet split across skbs.  This can happen if we
    424		 * increase the MTU.  Buffers that are already in the
    425		 * rx ring can then end up being too small.  As the rx
    426		 * ring is refilled, buffers sized for the new MTU
    427		 * will be used and we should go back to the normal
    428		 * non-split case.
    429		 */
    430		skb_put(skb, re.s.len);
    431		do {
    432			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
    433			if (re2.s.code != RING_ENTRY_CODE_MORE
    434				&& re2.s.code != RING_ENTRY_CODE_DONE)
    435				goto split_error;
    436			skb_put(skb2,  re2.s.len);
    437			skb_new = skb_copy_expand(skb, 0, skb2->len,
    438						  GFP_ATOMIC);
    439			if (!skb_new)
    440				goto split_error;
    441			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
    442					  skb2->len))
    443				goto split_error;
    444			skb_put(skb_new, skb2->len);
    445			dev_kfree_skb_any(skb);
    446			dev_kfree_skb_any(skb2);
    447			skb = skb_new;
    448		} while (re2.s.code == RING_ENTRY_CODE_MORE);
    449		goto good;
    450	} else {
    451		/* Some other error, discard it. */
    452		dev_kfree_skb_any(skb);
    453		/* Error statistics are accumulated in
    454		 * octeon_mgmt_update_rx_stats.
    455		 */
    456	}
    457	goto done;
    458split_error:
    459	/* Discard the whole mess. */
    460	dev_kfree_skb_any(skb);
    461	dev_kfree_skb_any(skb2);
    462	while (re2.s.code == RING_ENTRY_CODE_MORE) {
    463		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
    464		dev_kfree_skb_any(skb2);
    465	}
    466	netdev->stats.rx_errors++;
    467
    468done:
    469	/* Tell the hardware we processed a packet.  */
    470	mix_ircnt.u64 = 0;
    471	mix_ircnt.s.ircnt = 1;
    472	cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
    473	return rc;
    474}
    475
    476static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
    477{
    478	unsigned int work_done = 0;
    479	union cvmx_mixx_ircnt mix_ircnt;
    480	int rc;
    481
    482	mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
    483	while (work_done < budget && mix_ircnt.s.ircnt) {
    484
    485		rc = octeon_mgmt_receive_one(p);
    486		if (!rc)
    487			work_done++;
    488
    489		/* Check for more packets. */
    490		mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
    491	}
    492
    493	octeon_mgmt_rx_fill_ring(p->netdev);
    494
    495	return work_done;
    496}
    497
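        /* NAPI poll handler: receive up to budget packets, then complete NAPI and
         * re-enable the RX interrupt once no more work is pending. */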
    498static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
    499{
    500	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
    501	struct net_device *netdev = p->netdev;
    502	unsigned int work_done = 0;
    503
    504	work_done = octeon_mgmt_receive_packets(p, budget);
    505
    506	if (work_done < budget) {
    507		/* We stopped because no more packets were available. */
    508		napi_complete_done(napi, work_done);
    509		octeon_mgmt_enable_rx_irq(p);
    510	}
    511	octeon_mgmt_update_rx_stats(netdev);
    512
    513	return work_done;
    514}
    515
    516/* Reset the hardware to clean state.  */
    517static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
    518{
    519	union cvmx_mixx_ctl mix_ctl;
    520	union cvmx_mixx_bist mix_bist;
    521	union cvmx_agl_gmx_bist agl_gmx_bist;
    522
    523	mix_ctl.u64 = 0;
    524	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
    525	do {
    526		mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
    527	} while (mix_ctl.s.busy);
    528	mix_ctl.s.reset = 1;
    529	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
    530	cvmx_read_csr(p->mix + MIX_CTL);
    531	octeon_io_clk_delay(64);
    532
    533	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
    534	if (mix_bist.u64)
    535		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
    536			(unsigned long long)mix_bist.u64);
    537
    538	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
    539	if (agl_gmx_bist.u64)
    540		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
    541			 (unsigned long long)agl_gmx_bist.u64);
    542}
    543
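        /* Accumulator used to build up the destination MAC address CAM contents
         * and enable mask. */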
    544struct octeon_mgmt_cam_state {
    545	u64 cam[6];
    546	u64 cam_mask;
    547	int cam_index;
    548};
    549
    550static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
    551				      const unsigned char *addr)
    552{
    553	int i;
    554
    555	for (i = 0; i < 6; i++)
    556		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
    557	cs->cam_mask |= (1ULL << cs->cam_index);
    558	cs->cam_index++;
    559}
    560
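        /* Program the address-filter CAM and the multicast/promiscuous modes from
         * the netdev flags and address lists; packet I/O is briefly disabled while
         * the registers are updated. */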
    561static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
    562{
    563	struct octeon_mgmt *p = netdev_priv(netdev);
    564	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
    565	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
    566	unsigned long flags;
    567	unsigned int prev_packet_enable;
    568	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
    569	unsigned int multicast_mode = 1; /* 1 - Reject all multicast.  */
    570	struct octeon_mgmt_cam_state cam_state;
    571	struct netdev_hw_addr *ha;
    572	int available_cam_entries;
    573
    574	memset(&cam_state, 0, sizeof(cam_state));
    575
    576	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
    577		cam_mode = 0;
    578		available_cam_entries = 8;
    579	} else {
     580		/* One CAM entry is used for the primary address, leaving seven
    581		 * for the secondary addresses.
    582		 */
    583		available_cam_entries = 7 - netdev->uc.count;
    584	}
    585
    586	if (netdev->flags & IFF_MULTICAST) {
    587		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
    588		    netdev_mc_count(netdev) > available_cam_entries)
    589			multicast_mode = 2; /* 2 - Accept all multicast.  */
    590		else
    591			multicast_mode = 0; /* 0 - Use CAM.  */
    592	}
    593
    594	if (cam_mode == 1) {
    595		/* Add primary address. */
    596		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
    597		netdev_for_each_uc_addr(ha, netdev)
    598			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
    599	}
    600	if (multicast_mode == 0) {
    601		netdev_for_each_mc_addr(ha, netdev)
    602			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
    603	}
    604
    605	spin_lock_irqsave(&p->lock, flags);
    606
    607	/* Disable packet I/O. */
    608	agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
    609	prev_packet_enable = agl_gmx_prtx.s.en;
    610	agl_gmx_prtx.s.en = 0;
    611	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
    612
    613	adr_ctl.u64 = 0;
    614	adr_ctl.s.cam_mode = cam_mode;
    615	adr_ctl.s.mcst = multicast_mode;
    616	adr_ctl.s.bcst = 1;     /* Allow broadcast */
    617
    618	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);
    619
    620	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
    621	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
    622	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
    623	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
    624	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
    625	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
    626	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);
    627
    628	/* Restore packet I/O. */
    629	agl_gmx_prtx.s.en = prev_packet_enable;
    630	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
    631
    632	spin_unlock_irqrestore(&p->lock, flags);
    633}
    634
    635static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
    636{
    637	int r = eth_mac_addr(netdev, addr);
    638
    639	if (r)
    640		return r;
    641
    642	octeon_mgmt_set_rx_filtering(netdev);
    643
    644	return 0;
    645}
    646
    647static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
    648{
    649	struct octeon_mgmt *p = netdev_priv(netdev);
    650	int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;
    651
    652	netdev->mtu = new_mtu;
    653
    654	/* HW lifts the limit if the frame is VLAN tagged
     655	 * (+4 bytes per tag, up to two tags)
    656	 */
    657	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
    658	/* Set the hardware to truncate packets larger than the MTU. The jabber
    659	 * register must be set to a multiple of 8 bytes, so round up. JABBER is
    660	 * an unconditional limit, so we need to account for two possible VLAN
    661	 * tags.
    662	 */
    663	cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
    664		       (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8);
    665
    666	return 0;
    667}
    668
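        /* MIX interrupt handler: clear the pending bits, then defer RX work to NAPI
         * and TX cleanup to the tasklet with the corresponding interrupt masked. */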
    669static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
    670{
    671	struct net_device *netdev = dev_id;
    672	struct octeon_mgmt *p = netdev_priv(netdev);
    673	union cvmx_mixx_isr mixx_isr;
    674
    675	mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);
    676
    677	/* Clear any pending interrupts */
    678	cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
    679	cvmx_read_csr(p->mix + MIX_ISR);
    680
    681	if (mixx_isr.s.irthresh) {
    682		octeon_mgmt_disable_rx_irq(p);
    683		napi_schedule(&p->napi);
    684	}
    685	if (mixx_isr.s.orthresh) {
    686		octeon_mgmt_disable_tx_irq(p);
    687		tasklet_schedule(&p->tx_clean_tasklet);
    688	}
    689
    690	return IRQ_HANDLED;
    691}
    692
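        /* SIOCSHWTSTAMP: enable the CN6XXX PTP clock if necessary, validate the TX
         * setting and switch RX hardware timestamping on or off. */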
    693static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
    694				      struct ifreq *rq, int cmd)
    695{
    696	struct octeon_mgmt *p = netdev_priv(netdev);
    697	struct hwtstamp_config config;
    698	union cvmx_mio_ptp_clock_cfg ptp;
    699	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
    700	bool have_hw_timestamps = false;
    701
    702	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
    703		return -EFAULT;
    704
     705	/* Check whether the hardware supports timestamps */
    706	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
    707		/* Get the current state of the PTP clock */
    708		ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
    709		if (!ptp.s.ext_clk_en) {
    710			/* The clock has not been configured to use an
    711			 * external source.  Program it to use the main clock
    712			 * reference.
    713			 */
    714			u64 clock_comp = (NSEC_PER_SEC << 32) /	octeon_get_io_clock_rate();
    715			if (!ptp.s.ptp_en)
    716				cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
    717			netdev_info(netdev,
    718				    "PTP Clock using sclk reference @ %lldHz\n",
    719				    (NSEC_PER_SEC << 32) / clock_comp);
    720		} else {
    721			/* The clock is already programmed to use a GPIO */
    722			u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
    723			netdev_info(netdev,
    724				    "PTP Clock using GPIO%d @ %lld Hz\n",
    725				    ptp.s.ext_clk_in, (NSEC_PER_SEC << 32) / clock_comp);
    726		}
    727
    728		/* Enable the clock if it wasn't done already */
    729		if (!ptp.s.ptp_en) {
    730			ptp.s.ptp_en = 1;
    731			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
    732		}
    733		have_hw_timestamps = true;
    734	}
    735
    736	if (!have_hw_timestamps)
    737		return -EINVAL;
    738
    739	switch (config.tx_type) {
    740	case HWTSTAMP_TX_OFF:
    741	case HWTSTAMP_TX_ON:
    742		break;
    743	default:
    744		return -ERANGE;
    745	}
    746
    747	switch (config.rx_filter) {
    748	case HWTSTAMP_FILTER_NONE:
    749		p->has_rx_tstamp = false;
    750		rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
    751		rxx_frm_ctl.s.ptp_mode = 0;
    752		cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
    753		break;
    754	case HWTSTAMP_FILTER_ALL:
    755	case HWTSTAMP_FILTER_SOME:
    756	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
    757	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
    758	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
    759	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
    760	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
    761	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
    762	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
    763	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
    764	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
    765	case HWTSTAMP_FILTER_PTP_V2_EVENT:
    766	case HWTSTAMP_FILTER_PTP_V2_SYNC:
    767	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
    768	case HWTSTAMP_FILTER_NTP_ALL:
    769		p->has_rx_tstamp = have_hw_timestamps;
    770		config.rx_filter = HWTSTAMP_FILTER_ALL;
    771		if (p->has_rx_tstamp) {
    772			rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
    773			rxx_frm_ctl.s.ptp_mode = 1;
    774			cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
    775		}
    776		break;
    777	default:
    778		return -ERANGE;
    779	}
    780
    781	if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
    782		return -EFAULT;
    783
    784	return 0;
    785}
    786
    787static int octeon_mgmt_ioctl(struct net_device *netdev,
    788			     struct ifreq *rq, int cmd)
    789{
    790	switch (cmd) {
    791	case SIOCSHWTSTAMP:
    792		return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
    793	default:
    794		return phy_do_ioctl(netdev, rq, cmd);
    795	}
    796}
    797
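        /* Disable packet I/O on the port; on CN6XXX also poll briefly for the
         * TX/RX paths to go idle. */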
    798static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
    799{
    800	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
    801
    802	/* Disable GMX before we make any changes. */
    803	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
    804	prtx_cfg.s.en = 0;
    805	prtx_cfg.s.tx_en = 0;
    806	prtx_cfg.s.rx_en = 0;
    807	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
    808
    809	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
    810		int i;
    811		for (i = 0; i < 10; i++) {
    812			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
    813			if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
    814				break;
    815			mdelay(1);
    816			i++;
    817		}
    818	}
    819}
    820
    821static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
    822{
    823	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
    824
    825	/* Restore the GMX enable state only if link is set */
    826	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
    827	prtx_cfg.s.tx_en = 1;
    828	prtx_cfg.s.rx_en = 1;
    829	prtx_cfg.s.en = 1;
    830	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
    831}
    832
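        /* Program the port configuration (duplex, speed, slot time, burst) and, on
         * CN6XXX, the TX clock divider to match the current PHY state. */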
    833static void octeon_mgmt_update_link(struct octeon_mgmt *p)
    834{
    835	struct net_device *ndev = p->netdev;
    836	struct phy_device *phydev = ndev->phydev;
    837	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
    838
    839	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
    840
    841	if (!phydev->link)
    842		prtx_cfg.s.duplex = 1;
    843	else
    844		prtx_cfg.s.duplex = phydev->duplex;
    845
    846	switch (phydev->speed) {
    847	case 10:
    848		prtx_cfg.s.speed = 0;
    849		prtx_cfg.s.slottime = 0;
    850
    851		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
    852			prtx_cfg.s.burst = 1;
    853			prtx_cfg.s.speed_msb = 1;
    854		}
    855		break;
    856	case 100:
    857		prtx_cfg.s.speed = 0;
    858		prtx_cfg.s.slottime = 0;
    859
    860		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
    861			prtx_cfg.s.burst = 1;
    862			prtx_cfg.s.speed_msb = 0;
    863		}
    864		break;
    865	case 1000:
     866		/* 1000 Mbps is only supported on 6XXX chips */
    867		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
    868			prtx_cfg.s.speed = 1;
    869			prtx_cfg.s.speed_msb = 0;
    870			/* Only matters for half-duplex */
    871			prtx_cfg.s.slottime = 1;
    872			prtx_cfg.s.burst = phydev->duplex;
    873		}
    874		break;
    875	case 0:  /* No link */
    876	default:
    877		break;
    878	}
    879
    880	/* Write the new GMX setting with the port still disabled. */
    881	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
    882
    883	/* Read GMX CFG again to make sure the config is completed. */
    884	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
    885
    886	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
    887		union cvmx_agl_gmx_txx_clk agl_clk;
    888		union cvmx_agl_prtx_ctl prtx_ctl;
    889
    890		prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
    891		agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
    892		/* MII (both speeds) and RGMII 1000 speed. */
    893		agl_clk.s.clk_cnt = 1;
    894		if (prtx_ctl.s.mode == 0) { /* RGMII mode */
    895			if (phydev->speed == 10)
    896				agl_clk.s.clk_cnt = 50;
    897			else if (phydev->speed == 100)
    898				agl_clk.s.clk_cnt = 5;
    899		}
    900		cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
    901	}
    902}
    903
    904static void octeon_mgmt_adjust_link(struct net_device *netdev)
    905{
    906	struct octeon_mgmt *p = netdev_priv(netdev);
    907	struct phy_device *phydev = netdev->phydev;
    908	unsigned long flags;
    909	int link_changed = 0;
    910
    911	if (!phydev)
    912		return;
    913
    914	spin_lock_irqsave(&p->lock, flags);
    915
    916
    917	if (!phydev->link && p->last_link)
    918		link_changed = -1;
    919
    920	if (phydev->link &&
    921	    (p->last_duplex != phydev->duplex ||
    922	     p->last_link != phydev->link ||
    923	     p->last_speed != phydev->speed)) {
    924		octeon_mgmt_disable_link(p);
    925		link_changed = 1;
    926		octeon_mgmt_update_link(p);
    927		octeon_mgmt_enable_link(p);
    928	}
    929
    930	p->last_link = phydev->link;
    931	p->last_speed = phydev->speed;
    932	p->last_duplex = phydev->duplex;
    933
    934	spin_unlock_irqrestore(&p->lock, flags);
    935
    936	if (link_changed != 0) {
    937		if (link_changed > 0)
    938			netdev_info(netdev, "Link is up - %d/%s\n",
    939				    phydev->speed, phydev->duplex == DUPLEX_FULL ? "Full" : "Half");
    940		else
    941			netdev_info(netdev, "Link is down\n");
    942	}
    943}
    944
    945static int octeon_mgmt_init_phy(struct net_device *netdev)
    946{
    947	struct octeon_mgmt *p = netdev_priv(netdev);
    948	struct phy_device *phydev = NULL;
    949
    950	if (octeon_is_simulation() || p->phy_np == NULL) {
    951		/* No PHYs in the simulator. */
    952		netif_carrier_on(netdev);
    953		return 0;
    954	}
    955
    956	phydev = of_phy_connect(netdev, p->phy_np,
    957				octeon_mgmt_adjust_link, 0,
    958				PHY_INTERFACE_MODE_MII);
    959
    960	if (!phydev)
    961		return -EPROBE_DEFER;
    962
    963	return 0;
    964}
    965
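        /* ndo_open: allocate and map the TX/RX rings, bring the MIX/AGL hardware
         * out of reset and configure it, connect the PHY, request the IRQ and
         * enable interrupts and NAPI. */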
    966static int octeon_mgmt_open(struct net_device *netdev)
    967{
    968	struct octeon_mgmt *p = netdev_priv(netdev);
    969	union cvmx_mixx_ctl mix_ctl;
    970	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
    971	union cvmx_mixx_oring1 oring1;
    972	union cvmx_mixx_iring1 iring1;
    973	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
    974	union cvmx_mixx_irhwm mix_irhwm;
    975	union cvmx_mixx_orhwm mix_orhwm;
    976	union cvmx_mixx_intena mix_intena;
    977	struct sockaddr sa;
    978
    979	/* Allocate ring buffers.  */
    980	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
    981			     GFP_KERNEL);
    982	if (!p->tx_ring)
    983		return -ENOMEM;
    984	p->tx_ring_handle =
    985		dma_map_single(p->dev, p->tx_ring,
    986			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
    987			       DMA_BIDIRECTIONAL);
    988	p->tx_next = 0;
    989	p->tx_next_clean = 0;
    990	p->tx_current_fill = 0;
    991
    992
    993	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
    994			     GFP_KERNEL);
    995	if (!p->rx_ring)
    996		goto err_nomem;
    997	p->rx_ring_handle =
    998		dma_map_single(p->dev, p->rx_ring,
    999			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
   1000			       DMA_BIDIRECTIONAL);
   1001
   1002	p->rx_next = 0;
   1003	p->rx_next_fill = 0;
   1004	p->rx_current_fill = 0;
   1005
   1006	octeon_mgmt_reset_hw(p);
   1007
   1008	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
   1009
   1010	/* Bring it out of reset if needed. */
   1011	if (mix_ctl.s.reset) {
   1012		mix_ctl.s.reset = 0;
   1013		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
   1014		do {
   1015			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
   1016		} while (mix_ctl.s.reset);
   1017	}
   1018
   1019	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
   1020		agl_gmx_inf_mode.u64 = 0;
   1021		agl_gmx_inf_mode.s.en = 1;
   1022		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
   1023	}
   1024	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
   1025		|| OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
   1026		/* Force compensation values, as they are not
   1027		 * determined properly by HW
   1028		 */
   1029		union cvmx_agl_gmx_drv_ctl drv_ctl;
   1030
   1031		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
   1032		if (p->port) {
   1033			drv_ctl.s.byp_en1 = 1;
   1034			drv_ctl.s.nctl1 = 6;
   1035			drv_ctl.s.pctl1 = 6;
   1036		} else {
   1037			drv_ctl.s.byp_en = 1;
   1038			drv_ctl.s.nctl = 6;
   1039			drv_ctl.s.pctl = 6;
   1040		}
   1041		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
   1042	}
   1043
   1044	oring1.u64 = 0;
   1045	oring1.s.obase = p->tx_ring_handle >> 3;
   1046	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
   1047	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);
   1048
   1049	iring1.u64 = 0;
   1050	iring1.s.ibase = p->rx_ring_handle >> 3;
   1051	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
   1052	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);
   1053
   1054	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
   1055	octeon_mgmt_set_mac_address(netdev, &sa);
   1056
   1057	octeon_mgmt_change_mtu(netdev, netdev->mtu);
   1058
   1059	/* Enable the port HW. Packets are not allowed until
   1060	 * cvmx_mgmt_port_enable() is called.
   1061	 */
   1062	mix_ctl.u64 = 0;
   1063	mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
   1064	mix_ctl.s.en = 1;           /* Enable the port */
   1065	mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
   1066	/* MII CB-request FIFO programmable high watermark */
   1067	mix_ctl.s.mrq_hwm = 1;
   1068#ifdef __LITTLE_ENDIAN
   1069	mix_ctl.s.lendian = 1;
   1070#endif
   1071	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
   1072
   1073	/* Read the PHY to find the mode of the interface. */
   1074	if (octeon_mgmt_init_phy(netdev)) {
   1075		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
   1076		goto err_noirq;
   1077	}
   1078
   1079	/* Set the mode of the interface, RGMII/MII. */
   1080	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
   1081		union cvmx_agl_prtx_ctl agl_prtx_ctl;
   1082		int rgmii_mode =
   1083			(linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
   1084					   netdev->phydev->supported) |
   1085			 linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
   1086					   netdev->phydev->supported)) != 0;
   1087
   1088		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
   1089		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
   1090		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);
   1091
    1092		/* MII clock counts are based on the 125 MHz
    1093		 * reference, which has an 8 ns period, so our delays
   1094		 * need to be multiplied by this factor.
   1095		 */
   1096#define NS_PER_PHY_CLK 8
   1097
   1098		/* Take the DLL and clock tree out of reset */
   1099		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
   1100		agl_prtx_ctl.s.clkrst = 0;
   1101		if (rgmii_mode) {
   1102			agl_prtx_ctl.s.dllrst = 0;
   1103			agl_prtx_ctl.s.clktx_byp = 0;
   1104		}
   1105		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);
   1106		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */
   1107
   1108		/* Wait for the DLL to lock. External 125 MHz
   1109		 * reference clock must be stable at this point.
   1110		 */
   1111		ndelay(256 * NS_PER_PHY_CLK);
   1112
   1113		/* Enable the interface */
   1114		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
   1115		agl_prtx_ctl.s.enable = 1;
   1116		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
   1117
   1118		/* Read the value back to force the previous write */
   1119		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
   1120
   1121		/* Enable the compensation controller */
   1122		agl_prtx_ctl.s.comp = 1;
   1123		agl_prtx_ctl.s.drv_byp = 0;
   1124		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);
   1125		/* Force write out before wait. */
   1126		cvmx_read_csr(p->agl_prt_ctl);
   1127
    1128		/* Wait for the compensation state to lock. */
   1129		ndelay(1040 * NS_PER_PHY_CLK);
   1130
   1131		/* Default Interframe Gaps are too small.  Recommended
    1132		 * workaround is:
   1133		 *
   1134		 * AGL_GMX_TX_IFG[IFG1]=14
   1135		 * AGL_GMX_TX_IFG[IFG2]=10
   1136		 */
   1137		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
   1138	}
   1139
   1140	octeon_mgmt_rx_fill_ring(netdev);
   1141
   1142	/* Clear statistics. */
   1143	/* Clear on read. */
   1144	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
   1145	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
   1146	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);
   1147
   1148	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
   1149	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
   1150	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);
   1151
   1152	/* Clear any pending interrupts */
   1153	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));
   1154
   1155	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
   1156			netdev)) {
   1157		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
   1158		goto err_noirq;
   1159	}
   1160
   1161	/* Interrupt every single RX packet */
   1162	mix_irhwm.u64 = 0;
   1163	mix_irhwm.s.irhwm = 0;
   1164	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);
   1165
   1166	/* Interrupt when we have 1 or more packets to clean.  */
   1167	mix_orhwm.u64 = 0;
   1168	mix_orhwm.s.orhwm = 0;
   1169	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);
   1170
   1171	/* Enable receive and transmit interrupts */
   1172	mix_intena.u64 = 0;
   1173	mix_intena.s.ithena = 1;
   1174	mix_intena.s.othena = 1;
   1175	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
   1176
   1177	/* Enable packet I/O. */
   1178
   1179	rxx_frm_ctl.u64 = 0;
   1180	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
   1181	rxx_frm_ctl.s.pre_align = 1;
   1182	/* When set, disables the length check for non-min sized pkts
   1183	 * with padding in the client data.
   1184	 */
   1185	rxx_frm_ctl.s.pad_len = 1;
   1186	/* When set, disables the length check for VLAN pkts */
   1187	rxx_frm_ctl.s.vlan_len = 1;
    1188	/* When set, PREAMBLE checking is less strict */
   1189	rxx_frm_ctl.s.pre_free = 1;
   1190	/* Control Pause Frames can match station SMAC */
   1191	rxx_frm_ctl.s.ctl_smac = 0;
    1192	/* Control Pause Frames can match globally assigned Multicast address */
   1193	rxx_frm_ctl.s.ctl_mcst = 1;
   1194	/* Forward pause information to TX block */
   1195	rxx_frm_ctl.s.ctl_bck = 1;
   1196	/* Drop Control Pause Frames */
   1197	rxx_frm_ctl.s.ctl_drp = 1;
   1198	/* Strip off the preamble */
   1199	rxx_frm_ctl.s.pre_strp = 1;
   1200	/* This port is configured to send PREAMBLE+SFD to begin every
   1201	 * frame.  GMX checks that the PREAMBLE is sent correctly.
   1202	 */
   1203	rxx_frm_ctl.s.pre_chk = 1;
   1204	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
   1205
   1206	/* Configure the port duplex, speed and enables */
   1207	octeon_mgmt_disable_link(p);
   1208	if (netdev->phydev)
   1209		octeon_mgmt_update_link(p);
   1210	octeon_mgmt_enable_link(p);
   1211
   1212	p->last_link = 0;
   1213	p->last_speed = 0;
    1214	/* The PHY is not present in the simulator. The carrier was enabled
    1215	 * while initializing the PHY for the simulator, so leave it enabled.
   1216	 */
   1217	if (netdev->phydev) {
   1218		netif_carrier_off(netdev);
   1219		phy_start(netdev->phydev);
   1220	}
   1221
   1222	netif_wake_queue(netdev);
   1223	napi_enable(&p->napi);
   1224
   1225	return 0;
   1226err_noirq:
   1227	octeon_mgmt_reset_hw(p);
   1228	dma_unmap_single(p->dev, p->rx_ring_handle,
   1229			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
   1230			 DMA_BIDIRECTIONAL);
   1231	kfree(p->rx_ring);
   1232err_nomem:
   1233	dma_unmap_single(p->dev, p->tx_ring_handle,
   1234			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
   1235			 DMA_BIDIRECTIONAL);
   1236	kfree(p->tx_ring);
   1237	return -ENOMEM;
   1238}
   1239
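        /* ndo_stop: quiesce NAPI and the TX queue, detach the PHY, reset the
         * hardware, free the IRQ and release the rings and any queued skbs. */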
   1240static int octeon_mgmt_stop(struct net_device *netdev)
   1241{
   1242	struct octeon_mgmt *p = netdev_priv(netdev);
   1243
   1244	napi_disable(&p->napi);
   1245	netif_stop_queue(netdev);
   1246
   1247	if (netdev->phydev) {
   1248		phy_stop(netdev->phydev);
   1249		phy_disconnect(netdev->phydev);
   1250	}
   1251
   1252	netif_carrier_off(netdev);
   1253
   1254	octeon_mgmt_reset_hw(p);
   1255
   1256	free_irq(p->irq, netdev);
   1257
   1258	/* dma_unmap is a nop on Octeon, so just free everything.  */
   1259	skb_queue_purge(&p->tx_list);
   1260	skb_queue_purge(&p->rx_list);
   1261
   1262	dma_unmap_single(p->dev, p->rx_ring_handle,
   1263			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
   1264			 DMA_BIDIRECTIONAL);
   1265	kfree(p->rx_ring);
   1266
   1267	dma_unmap_single(p->dev, p->tx_ring_handle,
   1268			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
   1269			 DMA_BIDIRECTIONAL);
   1270	kfree(p->tx_ring);
   1271
   1272	return 0;
   1273}
   1274
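        /* ndo_start_xmit: map the skb, place it on the TX ring and ring the
         * MIX_ORING2 doorbell; returns NETDEV_TX_BUSY when the ring is full. */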
   1275static netdev_tx_t
   1276octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
   1277{
   1278	struct octeon_mgmt *p = netdev_priv(netdev);
   1279	union mgmt_port_ring_entry re;
   1280	unsigned long flags;
   1281	netdev_tx_t rv = NETDEV_TX_BUSY;
   1282
   1283	re.d64 = 0;
   1284	re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
   1285	re.s.len = skb->len;
   1286	re.s.addr = dma_map_single(p->dev, skb->data,
   1287				   skb->len,
   1288				   DMA_TO_DEVICE);
   1289
   1290	spin_lock_irqsave(&p->tx_list.lock, flags);
   1291
   1292	if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
   1293		spin_unlock_irqrestore(&p->tx_list.lock, flags);
   1294		netif_stop_queue(netdev);
   1295		spin_lock_irqsave(&p->tx_list.lock, flags);
   1296	}
   1297
   1298	if (unlikely(p->tx_current_fill >=
   1299		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
   1300		spin_unlock_irqrestore(&p->tx_list.lock, flags);
   1301		dma_unmap_single(p->dev, re.s.addr, re.s.len,
   1302				 DMA_TO_DEVICE);
   1303		goto out;
   1304	}
   1305
   1306	__skb_queue_tail(&p->tx_list, skb);
   1307
   1308	/* Put it in the ring.  */
   1309	p->tx_ring[p->tx_next] = re.d64;
   1310	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
   1311	p->tx_current_fill++;
   1312
   1313	spin_unlock_irqrestore(&p->tx_list.lock, flags);
   1314
   1315	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
   1316				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
   1317				   DMA_BIDIRECTIONAL);
   1318
   1319	netdev->stats.tx_packets++;
   1320	netdev->stats.tx_bytes += skb->len;
   1321
   1322	/* Ring the bell.  */
   1323	cvmx_write_csr(p->mix + MIX_ORING2, 1);
   1324
   1325	netif_trans_update(netdev);
   1326	rv = NETDEV_TX_OK;
   1327out:
   1328	octeon_mgmt_update_tx_stats(netdev);
   1329	return rv;
   1330}
   1331
   1332#ifdef CONFIG_NET_POLL_CONTROLLER
   1333static void octeon_mgmt_poll_controller(struct net_device *netdev)
   1334{
   1335	struct octeon_mgmt *p = netdev_priv(netdev);
   1336
   1337	octeon_mgmt_receive_packets(p, 16);
   1338	octeon_mgmt_update_rx_stats(netdev);
   1339}
   1340#endif
   1341
   1342static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
   1343				    struct ethtool_drvinfo *info)
   1344{
   1345	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
   1346}
   1347
   1348static int octeon_mgmt_nway_reset(struct net_device *dev)
   1349{
   1350	if (!capable(CAP_NET_ADMIN))
   1351		return -EPERM;
   1352
   1353	if (dev->phydev)
   1354		return phy_start_aneg(dev->phydev);
   1355
   1356	return -EOPNOTSUPP;
   1357}
   1358
   1359static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
   1360	.get_drvinfo = octeon_mgmt_get_drvinfo,
   1361	.nway_reset = octeon_mgmt_nway_reset,
   1362	.get_link = ethtool_op_get_link,
   1363	.get_link_ksettings = phy_ethtool_get_link_ksettings,
   1364	.set_link_ksettings = phy_ethtool_set_link_ksettings,
   1365};
   1366
   1367static const struct net_device_ops octeon_mgmt_ops = {
   1368	.ndo_open =			octeon_mgmt_open,
   1369	.ndo_stop =			octeon_mgmt_stop,
   1370	.ndo_start_xmit =		octeon_mgmt_xmit,
   1371	.ndo_set_rx_mode =		octeon_mgmt_set_rx_filtering,
   1372	.ndo_set_mac_address =		octeon_mgmt_set_mac_address,
   1373	.ndo_eth_ioctl =			octeon_mgmt_ioctl,
   1374	.ndo_change_mtu =		octeon_mgmt_change_mtu,
   1375#ifdef CONFIG_NET_POLL_CONTROLLER
   1376	.ndo_poll_controller =		octeon_mgmt_poll_controller,
   1377#endif
   1378};
   1379
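        /* Probe: map the MIX, AGL and AGL_PRT_CTL register regions described in
         * the device tree, read the MAC address and PHY handle, and register the
         * net_device. */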
   1380static int octeon_mgmt_probe(struct platform_device *pdev)
   1381{
   1382	struct net_device *netdev;
   1383	struct octeon_mgmt *p;
   1384	const __be32 *data;
   1385	struct resource *res_mix;
   1386	struct resource *res_agl;
   1387	struct resource *res_agl_prt_ctl;
   1388	int len;
   1389	int result;
   1390
   1391	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
   1392	if (netdev == NULL)
   1393		return -ENOMEM;
   1394
   1395	SET_NETDEV_DEV(netdev, &pdev->dev);
   1396
   1397	platform_set_drvdata(pdev, netdev);
   1398	p = netdev_priv(netdev);
   1399	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
   1400		       OCTEON_MGMT_NAPI_WEIGHT);
   1401
   1402	p->netdev = netdev;
   1403	p->dev = &pdev->dev;
   1404	p->has_rx_tstamp = false;
   1405
   1406	data = of_get_property(pdev->dev.of_node, "cell-index", &len);
   1407	if (data && len == sizeof(*data)) {
   1408		p->port = be32_to_cpup(data);
   1409	} else {
   1410		dev_err(&pdev->dev, "no 'cell-index' property\n");
   1411		result = -ENXIO;
   1412		goto err;
   1413	}
   1414
   1415	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
   1416
   1417	result = platform_get_irq(pdev, 0);
   1418	if (result < 0)
   1419		goto err;
   1420
   1421	p->irq = result;
   1422
   1423	res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   1424	if (res_mix == NULL) {
   1425		dev_err(&pdev->dev, "no 'reg' resource\n");
   1426		result = -ENXIO;
   1427		goto err;
   1428	}
   1429
   1430	res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
   1431	if (res_agl == NULL) {
   1432		dev_err(&pdev->dev, "no 'reg' resource\n");
   1433		result = -ENXIO;
   1434		goto err;
   1435	}
   1436
   1437	res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
   1438	if (res_agl_prt_ctl == NULL) {
   1439		dev_err(&pdev->dev, "no 'reg' resource\n");
   1440		result = -ENXIO;
   1441		goto err;
   1442	}
   1443
   1444	p->mix_phys = res_mix->start;
   1445	p->mix_size = resource_size(res_mix);
   1446	p->agl_phys = res_agl->start;
   1447	p->agl_size = resource_size(res_agl);
   1448	p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
   1449	p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);
   1450
   1451
   1452	if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
   1453				     res_mix->name)) {
   1454		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
   1455			res_mix->name);
   1456		result = -ENXIO;
   1457		goto err;
   1458	}
   1459
   1460	if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
   1461				     res_agl->name)) {
   1462		result = -ENXIO;
   1463		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
   1464			res_agl->name);
   1465		goto err;
   1466	}
   1467
   1468	if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
   1469				     p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
   1470		result = -ENXIO;
   1471		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
   1472			res_agl_prt_ctl->name);
   1473		goto err;
   1474	}
   1475
   1476	p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
   1477	p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
   1478	p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
   1479					   p->agl_prt_ctl_size);
   1480	if (!p->mix || !p->agl || !p->agl_prt_ctl) {
   1481		dev_err(&pdev->dev, "failed to map I/O memory\n");
   1482		result = -ENOMEM;
   1483		goto err;
   1484	}
   1485
   1486	spin_lock_init(&p->lock);
   1487
   1488	skb_queue_head_init(&p->tx_list);
   1489	skb_queue_head_init(&p->rx_list);
   1490	tasklet_setup(&p->tx_clean_tasklet,
   1491		      octeon_mgmt_clean_tx_tasklet);
   1492
   1493	netdev->priv_flags |= IFF_UNICAST_FLT;
   1494
   1495	netdev->netdev_ops = &octeon_mgmt_ops;
   1496	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
   1497
   1498	netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
   1499	netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;
   1500
   1501	result = of_get_ethdev_address(pdev->dev.of_node, netdev);
   1502	if (result)
   1503		eth_hw_addr_random(netdev);
   1504
   1505	p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
   1506
   1507	result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
   1508	if (result)
   1509		goto err;
   1510
   1511	netif_carrier_off(netdev);
   1512	result = register_netdev(netdev);
   1513	if (result)
   1514		goto err;
   1515
   1516	return 0;
   1517
   1518err:
   1519	of_node_put(p->phy_np);
   1520	free_netdev(netdev);
   1521	return result;
   1522}
   1523
   1524static int octeon_mgmt_remove(struct platform_device *pdev)
   1525{
   1526	struct net_device *netdev = platform_get_drvdata(pdev);
   1527	struct octeon_mgmt *p = netdev_priv(netdev);
   1528
   1529	unregister_netdev(netdev);
   1530	of_node_put(p->phy_np);
   1531	free_netdev(netdev);
   1532	return 0;
   1533}
   1534
   1535static const struct of_device_id octeon_mgmt_match[] = {
   1536	{
   1537		.compatible = "cavium,octeon-5750-mix",
   1538	},
   1539	{},
   1540};
   1541MODULE_DEVICE_TABLE(of, octeon_mgmt_match);
   1542
   1543static struct platform_driver octeon_mgmt_driver = {
   1544	.driver = {
   1545		.name		= "octeon_mgmt",
   1546		.of_match_table = octeon_mgmt_match,
   1547	},
   1548	.probe		= octeon_mgmt_probe,
   1549	.remove		= octeon_mgmt_remove,
   1550};
   1551
   1552module_platform_driver(octeon_mgmt_driver);
   1553
   1554MODULE_SOFTDEP("pre: mdio-cavium");
   1555MODULE_DESCRIPTION(DRV_DESCRIPTION);
   1556MODULE_AUTHOR("David Daney");
   1557MODULE_LICENSE("GPL");