cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xgene_enet_main.c (53210B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/* Applied Micro X-Gene SoC Ethernet Driver
      3 *
      4 * Copyright (c) 2014, Applied Micro Circuits Corporation
      5 * Authors: Iyappan Subramanian <isubramanian@apm.com>
      6 *	    Ravi Patel <rapatel@apm.com>
      7 *	    Keyur Chudgar <kchudgar@apm.com>
      8 */
      9
     10#include <linux/gpio.h>
     11#include "xgene_enet_main.h"
     12#include "xgene_enet_hw.h"
     13#include "xgene_enet_sgmac.h"
     14#include "xgene_enet_xgmac.h"
     15
     16#define RES_ENET_CSR	0
     17#define RES_RING_CSR	1
     18#define RES_RING_CMD	2
     19
     20static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
     21{
     22	struct xgene_enet_raw_desc16 *raw_desc;
     23	int i;
     24
     25	if (!buf_pool)
     26		return;
     27
     28	for (i = 0; i < buf_pool->slots; i++) {
     29		raw_desc = &buf_pool->raw_desc16[i];
     30
     31		/* Hardware expects descriptor in little endian format */
     32		raw_desc->m0 = cpu_to_le64(i |
     33				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
     34				SET_VAL(STASH, 3));
     35	}
     36}
     37
     38static u16 xgene_enet_get_data_len(u64 bufdatalen)
     39{
     40	u16 hw_len, mask;
     41
     42	hw_len = GET_VAL(BUFDATALEN, bufdatalen);
     43
     44	if (unlikely(hw_len == 0x7800)) {
     45		return 0;
     46	} else if (!(hw_len & BIT(14))) {
     47		mask = GENMASK(13, 0);
     48		return (hw_len & mask) ? (hw_len & mask) : SIZE_16K;
     49	} else if (!(hw_len & GENMASK(13, 12))) {
     50		mask = GENMASK(11, 0);
     51		return (hw_len & mask) ? (hw_len & mask) : SIZE_4K;
     52	} else {
     53		mask = GENMASK(11, 0);
     54		return (hw_len & mask) ? (hw_len & mask) : SIZE_2K;
     55	}
     56}
     57
     58static u16 xgene_enet_set_data_len(u32 size)
     59{
     60	u16 hw_len;
     61
     62	hw_len =  (size == SIZE_4K) ? BIT(14) : 0;
     63
     64	return hw_len;
     65}
     66
     67static int xgene_enet_refill_pagepool(struct xgene_enet_desc_ring *buf_pool,
     68				      u32 nbuf)
     69{
     70	struct xgene_enet_raw_desc16 *raw_desc;
     71	struct xgene_enet_pdata *pdata;
     72	struct net_device *ndev;
     73	dma_addr_t dma_addr;
     74	struct device *dev;
     75	struct page *page;
     76	u32 slots, tail;
     77	u16 hw_len;
     78	int i;
     79
     80	if (unlikely(!buf_pool))
     81		return 0;
     82
     83	ndev = buf_pool->ndev;
     84	pdata = netdev_priv(ndev);
     85	dev = ndev_to_dev(ndev);
     86	slots = buf_pool->slots - 1;
     87	tail = buf_pool->tail;
     88
     89	for (i = 0; i < nbuf; i++) {
     90		raw_desc = &buf_pool->raw_desc16[tail];
     91
     92		page = dev_alloc_page();
     93		if (unlikely(!page))
     94			return -ENOMEM;
     95
     96		dma_addr = dma_map_page(dev, page, 0,
     97					PAGE_SIZE, DMA_FROM_DEVICE);
     98		if (unlikely(dma_mapping_error(dev, dma_addr))) {
     99			put_page(page);
    100			return -ENOMEM;
    101		}
    102
    103		hw_len = xgene_enet_set_data_len(PAGE_SIZE);
    104		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
    105					   SET_VAL(BUFDATALEN, hw_len) |
    106					   SET_BIT(COHERENT));
    107
    108		buf_pool->frag_page[tail] = page;
    109		tail = (tail + 1) & slots;
    110	}
    111
    112	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
    113	buf_pool->tail = tail;
    114
    115	return 0;
    116}
    117
    118static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
    119				     u32 nbuf)
    120{
    121	struct sk_buff *skb;
    122	struct xgene_enet_raw_desc16 *raw_desc;
    123	struct xgene_enet_pdata *pdata;
    124	struct net_device *ndev;
    125	struct device *dev;
    126	dma_addr_t dma_addr;
    127	u32 tail = buf_pool->tail;
    128	u32 slots = buf_pool->slots - 1;
    129	u16 bufdatalen, len;
    130	int i;
    131
    132	ndev = buf_pool->ndev;
    133	dev = ndev_to_dev(buf_pool->ndev);
    134	pdata = netdev_priv(ndev);
    135
    136	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
    137	len = XGENE_ENET_STD_MTU;
    138
    139	for (i = 0; i < nbuf; i++) {
    140		raw_desc = &buf_pool->raw_desc16[tail];
    141
    142		skb = netdev_alloc_skb_ip_align(ndev, len);
    143		if (unlikely(!skb))
    144			return -ENOMEM;
    145
    146		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
    147		if (dma_mapping_error(dev, dma_addr)) {
    148			netdev_err(ndev, "DMA mapping error\n");
    149			dev_kfree_skb_any(skb);
    150			return -EINVAL;
    151		}
    152
    153		buf_pool->rx_skb[tail] = skb;
    154
    155		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
    156					   SET_VAL(BUFDATALEN, bufdatalen) |
    157					   SET_BIT(COHERENT));
    158		tail = (tail + 1) & slots;
    159	}
    160
    161	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
    162	buf_pool->tail = tail;
    163
    164	return 0;
    165}
    166
    167static u8 xgene_enet_hdr_len(const void *data)
    168{
    169	const struct ethhdr *eth = data;
    170
    171	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
    172}
    173
    174static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
    175{
    176	struct device *dev = ndev_to_dev(buf_pool->ndev);
    177	struct xgene_enet_raw_desc16 *raw_desc;
    178	dma_addr_t dma_addr;
    179	int i;
    180
    181	/* Free up the buffers held by hardware */
    182	for (i = 0; i < buf_pool->slots; i++) {
    183		if (buf_pool->rx_skb[i]) {
    184			dev_kfree_skb_any(buf_pool->rx_skb[i]);
    185
    186			raw_desc = &buf_pool->raw_desc16[i];
    187			dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
    188			dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
    189					 DMA_FROM_DEVICE);
    190		}
    191	}
    192}
    193
    194static void xgene_enet_delete_pagepool(struct xgene_enet_desc_ring *buf_pool)
    195{
    196	struct device *dev = ndev_to_dev(buf_pool->ndev);
    197	dma_addr_t dma_addr;
    198	struct page *page;
    199	int i;
    200
    201	/* Free up the buffers held by hardware */
    202	for (i = 0; i < buf_pool->slots; i++) {
    203		page = buf_pool->frag_page[i];
    204		if (page) {
    205			dma_addr = buf_pool->frag_dma_addr[i];
    206			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
    207				       DMA_FROM_DEVICE);
    208			put_page(page);
    209		}
    210	}
    211}
    212
    213static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
    214{
    215	struct xgene_enet_desc_ring *rx_ring = data;
    216
    217	if (napi_schedule_prep(&rx_ring->napi)) {
    218		disable_irq_nosync(irq);
    219		__napi_schedule(&rx_ring->napi);
    220	}
    221
    222	return IRQ_HANDLED;
    223}
    224
    225static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
    226				    struct xgene_enet_raw_desc *raw_desc)
    227{
    228	struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
    229	struct sk_buff *skb;
    230	struct device *dev;
    231	skb_frag_t *frag;
    232	dma_addr_t *frag_dma_addr;
    233	u16 skb_index;
    234	u8 mss_index;
    235	u8 status;
    236	int i;
    237
    238	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
    239	skb = cp_ring->cp_skb[skb_index];
    240	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
    241
    242	dev = ndev_to_dev(cp_ring->ndev);
    243	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
    244			 skb_headlen(skb),
    245			 DMA_TO_DEVICE);
    246
    247	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
    248		frag = &skb_shinfo(skb)->frags[i];
    249		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
    250			       DMA_TO_DEVICE);
    251	}
    252
    253	if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
    254		mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
    255		spin_lock(&pdata->mss_lock);
    256		pdata->mss_refcnt[mss_index]--;
    257		spin_unlock(&pdata->mss_lock);
    258	}
    259
    260	/* Checking for error */
    261	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
    262	if (unlikely(status > 2)) {
    263		cp_ring->tx_dropped++;
    264		cp_ring->tx_errors++;
    265	}
    266
    267	if (likely(skb)) {
    268		dev_kfree_skb_any(skb);
    269	} else {
    270		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
    271	}
    272
    273	return 0;
    274}
    275
    276static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
    277{
    278	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
    279	int mss_index = -EBUSY;
    280	int i;
    281
    282	spin_lock(&pdata->mss_lock);
    283
    284	/* Reuse the slot if MSS matches */
    285	for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
    286		if (pdata->mss[i] == mss) {
    287			pdata->mss_refcnt[i]++;
    288			mss_index = i;
    289		}
    290	}
    291
    292	/* Overwrite the slot with ref_count = 0 */
    293	for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
    294		if (!pdata->mss_refcnt[i]) {
    295			pdata->mss_refcnt[i]++;
    296			pdata->mac_ops->set_mss(pdata, mss, i);
    297			pdata->mss[i] = mss;
    298			mss_index = i;
    299		}
    300	}
    301
    302	spin_unlock(&pdata->mss_lock);
    303
    304	return mss_index;
    305}
    306
    307static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
    308{
    309	struct net_device *ndev = skb->dev;
    310	struct iphdr *iph;
    311	u8 l3hlen = 0, l4hlen = 0;
    312	u8 ethhdr, proto = 0, csum_enable = 0;
    313	u32 hdr_len, mss = 0;
    314	u32 i, len, nr_frags;
    315	int mss_index;
    316
    317	ethhdr = xgene_enet_hdr_len(skb->data);
    318
    319	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
    320	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
    321		goto out;
    322
    323	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
    324		goto out;
    325
    326	iph = ip_hdr(skb);
    327	if (unlikely(ip_is_fragment(iph)))
    328		goto out;
    329
    330	if (likely(iph->protocol == IPPROTO_TCP)) {
    331		l4hlen = tcp_hdrlen(skb) >> 2;
    332		csum_enable = 1;
    333		proto = TSO_IPPROTO_TCP;
    334		if (ndev->features & NETIF_F_TSO) {
    335			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
    336			mss = skb_shinfo(skb)->gso_size;
    337
    338			if (skb_is_nonlinear(skb)) {
    339				len = skb_headlen(skb);
    340				nr_frags = skb_shinfo(skb)->nr_frags;
    341
    342				for (i = 0; i < 2 && i < nr_frags; i++)
    343					len += skb_frag_size(
    344						&skb_shinfo(skb)->frags[i]);
    345
     346				/* HW requires the header to reside within the first 3 buffers */
    347				if (unlikely(hdr_len > len)) {
    348					if (skb_linearize(skb))
    349						return 0;
    350				}
    351			}
    352
    353			if (!mss || ((skb->len - hdr_len) <= mss))
    354				goto out;
    355
    356			mss_index = xgene_enet_setup_mss(ndev, mss);
    357			if (unlikely(mss_index < 0))
    358				return -EBUSY;
    359
    360			*hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
    361		}
    362	} else if (iph->protocol == IPPROTO_UDP) {
    363		l4hlen = UDP_HDR_SIZE;
    364		csum_enable = 1;
    365	}
    366out:
    367	l3hlen = ip_hdrlen(skb) >> 2;
    368	*hopinfo |= SET_VAL(TCPHDR, l4hlen) |
    369		    SET_VAL(IPHDR, l3hlen) |
    370		    SET_VAL(ETHHDR, ethhdr) |
    371		    SET_VAL(EC, csum_enable) |
    372		    SET_VAL(IS, proto) |
    373		    SET_BIT(IC) |
    374		    SET_BIT(TYPE_ETH_WORK_MESSAGE);
    375
    376	return 0;
    377}
    378
    379static u16 xgene_enet_encode_len(u16 len)
    380{
    381	return (len == BUFLEN_16K) ? 0 : len;
    382}
    383
    384static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
    385{
    386	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
    387				    SET_VAL(BUFDATALEN, len));
    388}
    389
    390static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
    391{
    392	__le64 *exp_bufs;
    393
    394	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
    395	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
    396	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);
    397
    398	return exp_bufs;
    399}
    400
    401static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
    402{
    403	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
    404}
    405
    406static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
    407				    struct sk_buff *skb)
    408{
    409	struct device *dev = ndev_to_dev(tx_ring->ndev);
    410	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
    411	struct xgene_enet_raw_desc *raw_desc;
    412	__le64 *exp_desc = NULL, *exp_bufs = NULL;
    413	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
    414	skb_frag_t *frag;
    415	u16 tail = tx_ring->tail;
    416	u64 hopinfo = 0;
    417	u32 len, hw_len;
    418	u8 ll = 0, nv = 0, idx = 0;
    419	bool split = false;
    420	u32 size, offset, ell_bytes = 0;
    421	u32 i, fidx, nr_frags, count = 1;
    422	int ret;
    423
    424	raw_desc = &tx_ring->raw_desc[tail];
    425	tail = (tail + 1) & (tx_ring->slots - 1);
    426	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
    427
    428	ret = xgene_enet_work_msg(skb, &hopinfo);
    429	if (ret)
    430		return ret;
    431
    432	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
    433				   hopinfo);
    434
    435	len = skb_headlen(skb);
    436	hw_len = xgene_enet_encode_len(len);
    437
    438	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
    439	if (dma_mapping_error(dev, dma_addr)) {
    440		netdev_err(tx_ring->ndev, "DMA mapping error\n");
    441		return -EINVAL;
    442	}
    443
    444	/* Hardware expects descriptor in little endian format */
    445	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
    446				   SET_VAL(BUFDATALEN, hw_len) |
    447				   SET_BIT(COHERENT));
    448
    449	if (!skb_is_nonlinear(skb))
    450		goto out;
    451
    452	/* scatter gather */
    453	nv = 1;
    454	exp_desc = (void *)&tx_ring->raw_desc[tail];
    455	tail = (tail + 1) & (tx_ring->slots - 1);
    456	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
    457
    458	nr_frags = skb_shinfo(skb)->nr_frags;
    459	for (i = nr_frags; i < 4 ; i++)
    460		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);
    461
    462	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);
    463
    464	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
    465		if (!split) {
    466			frag = &skb_shinfo(skb)->frags[fidx];
    467			size = skb_frag_size(frag);
    468			offset = 0;
    469
    470			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
    471						     DMA_TO_DEVICE);
    472			if (dma_mapping_error(dev, pbuf_addr))
    473				return -EINVAL;
    474
    475			frag_dma_addr[fidx] = pbuf_addr;
    476			fidx++;
    477
    478			if (size > BUFLEN_16K)
    479				split = true;
    480		}
    481
    482		if (size > BUFLEN_16K) {
    483			len = BUFLEN_16K;
    484			size -= BUFLEN_16K;
    485		} else {
    486			len = size;
    487			split = false;
    488		}
    489
    490		dma_addr = pbuf_addr + offset;
    491		hw_len = xgene_enet_encode_len(len);
    492
    493		switch (i) {
    494		case 0:
    495		case 1:
    496		case 2:
    497			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
    498			break;
    499		case 3:
    500			if (split || (fidx != nr_frags)) {
    501				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
    502				xgene_set_addr_len(exp_bufs, idx, dma_addr,
    503						   hw_len);
    504				idx++;
    505				ell_bytes += len;
    506			} else {
    507				xgene_set_addr_len(exp_desc, i, dma_addr,
    508						   hw_len);
    509			}
    510			break;
    511		default:
    512			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
    513			idx++;
    514			ell_bytes += len;
    515			break;
    516		}
    517
    518		if (split)
    519			offset += BUFLEN_16K;
    520	}
    521	count++;
    522
    523	if (idx) {
    524		ll = 1;
    525		dma_addr = dma_map_single(dev, exp_bufs,
    526					  sizeof(u64) * MAX_EXP_BUFFS,
    527					  DMA_TO_DEVICE);
    528		if (dma_mapping_error(dev, dma_addr)) {
    529			dev_kfree_skb_any(skb);
    530			return -EINVAL;
    531		}
    532		i = ell_bytes >> LL_BYTES_LSB_LEN;
    533		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
    534					  SET_VAL(LL_BYTES_MSB, i) |
    535					  SET_VAL(LL_LEN, idx));
    536		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
    537	}
    538
    539out:
    540	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
    541				   SET_VAL(USERINFO, tx_ring->tail));
    542	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
    543	pdata->tx_level[tx_ring->cp_ring->index] += count;
    544	tx_ring->tail = tail;
    545
    546	return count;
    547}
    548
    549static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
    550					 struct net_device *ndev)
    551{
    552	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
    553	struct xgene_enet_desc_ring *tx_ring;
    554	int index = skb->queue_mapping;
    555	u32 tx_level = pdata->tx_level[index];
    556	int count;
    557
    558	tx_ring = pdata->tx_ring[index];
    559	if (tx_level < pdata->txc_level[index])
    560		tx_level += ((typeof(pdata->tx_level[index]))~0U);
    561
    562	if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
    563		netif_stop_subqueue(ndev, index);
    564		return NETDEV_TX_BUSY;
    565	}
    566
    567	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
    568		return NETDEV_TX_OK;
    569
    570	count = xgene_enet_setup_tx_desc(tx_ring, skb);
    571	if (count == -EBUSY)
    572		return NETDEV_TX_BUSY;
    573
    574	if (count <= 0) {
    575		dev_kfree_skb_any(skb);
    576		return NETDEV_TX_OK;
    577	}
    578
    579	skb_tx_timestamp(skb);
    580
    581	tx_ring->tx_packets++;
    582	tx_ring->tx_bytes += skb->len;
    583
    584	pdata->ring_ops->wr_cmd(tx_ring, count);
    585	return NETDEV_TX_OK;
    586}
    587
    588static void xgene_enet_rx_csum(struct sk_buff *skb)
    589{
    590	struct net_device *ndev = skb->dev;
    591	struct iphdr *iph = ip_hdr(skb);
    592
    593	if (!(ndev->features & NETIF_F_RXCSUM))
    594		return;
    595
    596	if (skb->protocol != htons(ETH_P_IP))
    597		return;
    598
    599	if (ip_is_fragment(iph))
    600		return;
    601
    602	if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
    603		return;
    604
    605	skb->ip_summed = CHECKSUM_UNNECESSARY;
    606}
    607
    608static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
    609				     struct xgene_enet_raw_desc *raw_desc,
    610				     struct xgene_enet_raw_desc *exp_desc)
    611{
    612	__le64 *desc = (void *)exp_desc;
    613	dma_addr_t dma_addr;
    614	struct device *dev;
    615	struct page *page;
    616	u16 slots, head;
    617	u32 frag_size;
    618	int i;
    619
    620	if (!buf_pool || !raw_desc || !exp_desc ||
    621	    (!GET_VAL(NV, le64_to_cpu(raw_desc->m0))))
    622		return;
    623
    624	dev = ndev_to_dev(buf_pool->ndev);
    625	slots = buf_pool->slots - 1;
    626	head = buf_pool->head;
    627
    628	for (i = 0; i < 4; i++) {
    629		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
    630		if (!frag_size)
    631			break;
    632
    633		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
    634		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
    635
    636		page = buf_pool->frag_page[head];
    637		put_page(page);
    638
    639		buf_pool->frag_page[head] = NULL;
    640		head = (head + 1) & slots;
    641	}
    642	buf_pool->head = head;
    643}
    644
    645/* Errata 10GE_10 and ENET_15 - Fix duplicated HW statistic counters */
    646static bool xgene_enet_errata_10GE_10(struct sk_buff *skb, u32 len, u8 status)
    647{
    648	if (status == INGRESS_CRC &&
    649	    len >= (ETHER_STD_PACKET + 1) &&
    650	    len <= (ETHER_STD_PACKET + 4) &&
    651	    skb->protocol == htons(ETH_P_8021Q))
    652		return true;
    653
    654	return false;
    655}
    656
    657/* Errata 10GE_8 and ENET_11 - allow packet with length <=64B */
    658static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status)
    659{
    660	if (status == INGRESS_PKT_LEN && len == ETHER_MIN_PACKET) {
    661		if (ntohs(eth_hdr(skb)->h_proto) < 46)
    662			return true;
    663	}
    664
    665	return false;
    666}
    667
    668static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
    669			       struct xgene_enet_raw_desc *raw_desc,
    670			       struct xgene_enet_raw_desc *exp_desc)
    671{
    672	struct xgene_enet_desc_ring *buf_pool, *page_pool;
    673	u32 datalen, frag_size, skb_index;
    674	struct xgene_enet_pdata *pdata;
    675	struct net_device *ndev;
    676	dma_addr_t dma_addr;
    677	struct sk_buff *skb;
    678	struct device *dev;
    679	struct page *page;
    680	u16 slots, head;
    681	int i, ret = 0;
    682	__le64 *desc;
    683	u8 status;
    684	bool nv;
    685
    686	ndev = rx_ring->ndev;
    687	pdata = netdev_priv(ndev);
    688	dev = ndev_to_dev(rx_ring->ndev);
    689	buf_pool = rx_ring->buf_pool;
    690	page_pool = rx_ring->page_pool;
    691
    692	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
    693			 XGENE_ENET_STD_MTU, DMA_FROM_DEVICE);
    694	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
    695	skb = buf_pool->rx_skb[skb_index];
    696	buf_pool->rx_skb[skb_index] = NULL;
    697
    698	datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
    699
    700	/* strip off CRC as HW isn't doing this */
    701	nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
    702	if (!nv)
    703		datalen -= 4;
    704
    705	skb_put(skb, datalen);
    706	prefetch(skb->data - NET_IP_ALIGN);
    707	skb->protocol = eth_type_trans(skb, ndev);
    708
    709	/* checking for error */
    710	status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
    711		  GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
    712	if (unlikely(status)) {
    713		if (xgene_enet_errata_10GE_8(skb, datalen, status)) {
    714			pdata->false_rflr++;
    715		} else if (xgene_enet_errata_10GE_10(skb, datalen, status)) {
    716			pdata->vlan_rjbr++;
    717		} else {
    718			dev_kfree_skb_any(skb);
    719			xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
    720			xgene_enet_parse_error(rx_ring, status);
    721			rx_ring->rx_dropped++;
    722			goto out;
    723		}
    724	}
    725
    726	if (!nv)
    727		goto skip_jumbo;
    728
    729	slots = page_pool->slots - 1;
    730	head = page_pool->head;
    731	desc = (void *)exp_desc;
    732
    733	for (i = 0; i < 4; i++) {
    734		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
    735		if (!frag_size)
    736			break;
    737
    738		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
    739		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
    740
    741		page = page_pool->frag_page[head];
    742		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
    743				frag_size, PAGE_SIZE);
    744
    745		datalen += frag_size;
    746
    747		page_pool->frag_page[head] = NULL;
    748		head = (head + 1) & slots;
    749	}
    750
    751	page_pool->head = head;
    752	rx_ring->npagepool -= skb_shinfo(skb)->nr_frags;
    753
    754skip_jumbo:
    755	skb_checksum_none_assert(skb);
    756	xgene_enet_rx_csum(skb);
    757
    758	rx_ring->rx_packets++;
    759	rx_ring->rx_bytes += datalen;
    760	napi_gro_receive(&rx_ring->napi, skb);
    761
    762out:
    763	if (rx_ring->npagepool <= 0) {
    764		ret = xgene_enet_refill_pagepool(page_pool, NUM_NXTBUFPOOL);
    765		rx_ring->npagepool = NUM_NXTBUFPOOL;
    766		if (ret)
    767			return ret;
    768	}
    769
    770	if (--rx_ring->nbufpool == 0) {
    771		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
    772		rx_ring->nbufpool = NUM_BUFPOOL;
    773	}
    774
    775	return ret;
    776}
    777
    778static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
    779{
    780	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
    781}
    782
    783static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
    784				   int budget)
    785{
    786	struct net_device *ndev = ring->ndev;
    787	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
    788	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
    789	u16 head = ring->head;
    790	u16 slots = ring->slots - 1;
    791	int ret, desc_count, count = 0, processed = 0;
    792	bool is_completion;
    793
    794	do {
    795		raw_desc = &ring->raw_desc[head];
    796		desc_count = 0;
    797		is_completion = false;
    798		exp_desc = NULL;
    799		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
    800			break;
    801
    802		/* read fpqnum field after dataaddr field */
    803		dma_rmb();
    804		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
    805			head = (head + 1) & slots;
    806			exp_desc = &ring->raw_desc[head];
    807
    808			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
    809				head = (head - 1) & slots;
    810				break;
    811			}
    812			dma_rmb();
    813			count++;
    814			desc_count++;
    815		}
    816		if (is_rx_desc(raw_desc)) {
    817			ret = xgene_enet_rx_frame(ring, raw_desc, exp_desc);
    818		} else {
    819			ret = xgene_enet_tx_completion(ring, raw_desc);
    820			is_completion = true;
    821		}
    822		xgene_enet_mark_desc_slot_empty(raw_desc);
    823		if (exp_desc)
    824			xgene_enet_mark_desc_slot_empty(exp_desc);
    825
    826		head = (head + 1) & slots;
    827		count++;
    828		desc_count++;
    829		processed++;
    830		if (is_completion)
    831			pdata->txc_level[ring->index] += desc_count;
    832
    833		if (ret)
    834			break;
    835	} while (--budget);
    836
    837	if (likely(count)) {
    838		pdata->ring_ops->wr_cmd(ring, -count);
    839		ring->head = head;
    840
    841		if (__netif_subqueue_stopped(ndev, ring->index))
    842			netif_start_subqueue(ndev, ring->index);
    843	}
    844
    845	return processed;
    846}
    847
    848static int xgene_enet_napi(struct napi_struct *napi, const int budget)
    849{
    850	struct xgene_enet_desc_ring *ring;
    851	int processed;
    852
    853	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
    854	processed = xgene_enet_process_ring(ring, budget);
    855
    856	if (processed != budget) {
    857		napi_complete_done(napi, processed);
    858		enable_irq(ring->irq);
    859	}
    860
    861	return processed;
    862}
    863
    864static void xgene_enet_timeout(struct net_device *ndev, unsigned int txqueue)
    865{
    866	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
    867	struct netdev_queue *txq;
    868	int i;
    869
    870	pdata->mac_ops->reset(pdata);
    871
    872	for (i = 0; i < pdata->txq_cnt; i++) {
    873		txq = netdev_get_tx_queue(ndev, i);
    874		txq_trans_cond_update(txq);
    875		netif_tx_start_queue(txq);
    876	}
    877}
    878
    879static void xgene_enet_set_irq_name(struct net_device *ndev)
    880{
    881	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
    882	struct xgene_enet_desc_ring *ring;
    883	int i;
    884
    885	for (i = 0; i < pdata->rxq_cnt; i++) {
    886		ring = pdata->rx_ring[i];
    887		if (!pdata->cq_cnt) {
    888			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
    889				 ndev->name);
    890		} else {
    891			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
    892				 ndev->name, i);
    893		}
    894	}
    895
    896	for (i = 0; i < pdata->cq_cnt; i++) {
    897		ring = pdata->tx_ring[i]->cp_ring;
    898		snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
    899			 ndev->name, i);
    900	}
    901}
    902
    903static int xgene_enet_register_irq(struct net_device *ndev)
    904{
    905	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
    906	struct device *dev = ndev_to_dev(ndev);
    907	struct xgene_enet_desc_ring *ring;
    908	int ret = 0, i;
    909
    910	xgene_enet_set_irq_name(ndev);
    911	for (i = 0; i < pdata->rxq_cnt; i++) {
    912		ring = pdata->rx_ring[i];
    913		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
    914		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
    915				       0, ring->irq_name, ring);
    916		if (ret) {
    917			netdev_err(ndev, "Failed to request irq %s\n",
    918				   ring->irq_name);
    919		}
    920	}
    921
    922	for (i = 0; i < pdata->cq_cnt; i++) {
    923		ring = pdata->tx_ring[i]->cp_ring;
    924		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
    925		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
    926				       0, ring->irq_name, ring);
    927		if (ret) {
    928			netdev_err(ndev, "Failed to request irq %s\n",
    929				   ring->irq_name);
    930		}
    931	}
    932
    933	return ret;
    934}
    935
    936static void xgene_enet_free_irq(struct net_device *ndev)
    937{
    938	struct xgene_enet_pdata *pdata;
    939	struct xgene_enet_desc_ring *ring;
    940	struct device *dev;
    941	int i;
    942
    943	pdata = netdev_priv(ndev);
    944	dev = ndev_to_dev(ndev);
    945
    946	for (i = 0; i < pdata->rxq_cnt; i++) {
    947		ring = pdata->rx_ring[i];
    948		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
    949		devm_free_irq(dev, ring->irq, ring);
    950	}
    951
    952	for (i = 0; i < pdata->cq_cnt; i++) {
    953		ring = pdata->tx_ring[i]->cp_ring;
    954		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
    955		devm_free_irq(dev, ring->irq, ring);
    956	}
    957}
    958
    959static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
    960{
    961	struct napi_struct *napi;
    962	int i;
    963
    964	for (i = 0; i < pdata->rxq_cnt; i++) {
    965		napi = &pdata->rx_ring[i]->napi;
    966		napi_enable(napi);
    967	}
    968
    969	for (i = 0; i < pdata->cq_cnt; i++) {
    970		napi = &pdata->tx_ring[i]->cp_ring->napi;
    971		napi_enable(napi);
    972	}
    973}
    974
    975static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
    976{
    977	struct napi_struct *napi;
    978	int i;
    979
    980	for (i = 0; i < pdata->rxq_cnt; i++) {
    981		napi = &pdata->rx_ring[i]->napi;
    982		napi_disable(napi);
    983	}
    984
    985	for (i = 0; i < pdata->cq_cnt; i++) {
    986		napi = &pdata->tx_ring[i]->cp_ring->napi;
    987		napi_disable(napi);
    988	}
    989}
    990
    991static int xgene_enet_open(struct net_device *ndev)
    992{
    993	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
    994	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
    995	int ret;
    996
    997	ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
    998	if (ret)
    999		return ret;
   1000
   1001	ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
   1002	if (ret)
   1003		return ret;
   1004
   1005	xgene_enet_napi_enable(pdata);
   1006	ret = xgene_enet_register_irq(ndev);
   1007	if (ret)
   1008		return ret;
   1009
   1010	if (ndev->phydev) {
   1011		phy_start(ndev->phydev);
   1012	} else {
   1013		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
   1014		netif_carrier_off(ndev);
   1015	}
   1016
   1017	mac_ops->tx_enable(pdata);
   1018	mac_ops->rx_enable(pdata);
   1019	netif_tx_start_all_queues(ndev);
   1020
   1021	return ret;
   1022}
   1023
   1024static int xgene_enet_close(struct net_device *ndev)
   1025{
   1026	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
   1027	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
   1028	int i;
   1029
   1030	netif_tx_stop_all_queues(ndev);
   1031	mac_ops->tx_disable(pdata);
   1032	mac_ops->rx_disable(pdata);
   1033
   1034	if (ndev->phydev)
   1035		phy_stop(ndev->phydev);
   1036	else
   1037		cancel_delayed_work_sync(&pdata->link_work);
   1038
   1039	xgene_enet_free_irq(ndev);
   1040	xgene_enet_napi_disable(pdata);
   1041	for (i = 0; i < pdata->rxq_cnt; i++)
   1042		xgene_enet_process_ring(pdata->rx_ring[i], -1);
   1043
   1044	return 0;
   1045}
   1046static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
   1047{
   1048	struct xgene_enet_pdata *pdata;
   1049	struct device *dev;
   1050
   1051	pdata = netdev_priv(ring->ndev);
   1052	dev = ndev_to_dev(ring->ndev);
   1053
   1054	pdata->ring_ops->clear(ring);
   1055	dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
   1056}
   1057
   1058static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
   1059{
   1060	struct xgene_enet_desc_ring *buf_pool, *page_pool;
   1061	struct xgene_enet_desc_ring *ring;
   1062	int i;
   1063
   1064	for (i = 0; i < pdata->txq_cnt; i++) {
   1065		ring = pdata->tx_ring[i];
   1066		if (ring) {
   1067			xgene_enet_delete_ring(ring);
   1068			pdata->port_ops->clear(pdata, ring);
   1069			if (pdata->cq_cnt)
   1070				xgene_enet_delete_ring(ring->cp_ring);
   1071			pdata->tx_ring[i] = NULL;
   1072		}
   1073
   1074	}
   1075
   1076	for (i = 0; i < pdata->rxq_cnt; i++) {
   1077		ring = pdata->rx_ring[i];
   1078		if (ring) {
   1079			page_pool = ring->page_pool;
   1080			if (page_pool) {
   1081				xgene_enet_delete_pagepool(page_pool);
   1082				xgene_enet_delete_ring(page_pool);
   1083				pdata->port_ops->clear(pdata, page_pool);
   1084			}
   1085
   1086			buf_pool = ring->buf_pool;
   1087			xgene_enet_delete_bufpool(buf_pool);
   1088			xgene_enet_delete_ring(buf_pool);
   1089			pdata->port_ops->clear(pdata, buf_pool);
   1090
   1091			xgene_enet_delete_ring(ring);
   1092			pdata->rx_ring[i] = NULL;
   1093		}
   1094
   1095	}
   1096}
   1097
   1098static int xgene_enet_get_ring_size(struct device *dev,
   1099				    enum xgene_enet_ring_cfgsize cfgsize)
   1100{
   1101	int size = -EINVAL;
   1102
   1103	switch (cfgsize) {
   1104	case RING_CFGSIZE_512B:
   1105		size = 0x200;
   1106		break;
   1107	case RING_CFGSIZE_2KB:
   1108		size = 0x800;
   1109		break;
   1110	case RING_CFGSIZE_16KB:
   1111		size = 0x4000;
   1112		break;
   1113	case RING_CFGSIZE_64KB:
   1114		size = 0x10000;
   1115		break;
   1116	case RING_CFGSIZE_512KB:
   1117		size = 0x80000;
   1118		break;
   1119	default:
   1120		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
   1121		break;
   1122	}
   1123
   1124	return size;
   1125}
   1126
   1127static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
   1128{
   1129	struct xgene_enet_pdata *pdata;
   1130	struct device *dev;
   1131
   1132	if (!ring)
   1133		return;
   1134
   1135	dev = ndev_to_dev(ring->ndev);
   1136	pdata = netdev_priv(ring->ndev);
   1137
   1138	if (ring->desc_addr) {
   1139		pdata->ring_ops->clear(ring);
   1140		dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
   1141	}
   1142	devm_kfree(dev, ring);
   1143}
   1144
   1145static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
   1146{
   1147	struct xgene_enet_desc_ring *page_pool;
   1148	struct device *dev = &pdata->pdev->dev;
   1149	struct xgene_enet_desc_ring *ring;
   1150	void *p;
   1151	int i;
   1152
   1153	for (i = 0; i < pdata->txq_cnt; i++) {
   1154		ring = pdata->tx_ring[i];
   1155		if (ring) {
   1156			if (ring->cp_ring && ring->cp_ring->cp_skb)
   1157				devm_kfree(dev, ring->cp_ring->cp_skb);
   1158
   1159			if (ring->cp_ring && pdata->cq_cnt)
   1160				xgene_enet_free_desc_ring(ring->cp_ring);
   1161
   1162			xgene_enet_free_desc_ring(ring);
   1163		}
   1164
   1165	}
   1166
   1167	for (i = 0; i < pdata->rxq_cnt; i++) {
   1168		ring = pdata->rx_ring[i];
   1169		if (ring) {
   1170			if (ring->buf_pool) {
   1171				if (ring->buf_pool->rx_skb)
   1172					devm_kfree(dev, ring->buf_pool->rx_skb);
   1173
   1174				xgene_enet_free_desc_ring(ring->buf_pool);
   1175			}
   1176
   1177			page_pool = ring->page_pool;
   1178			if (page_pool) {
   1179				p = page_pool->frag_page;
   1180				if (p)
   1181					devm_kfree(dev, p);
   1182
   1183				p = page_pool->frag_dma_addr;
   1184				if (p)
   1185					devm_kfree(dev, p);
   1186			}
   1187
   1188			xgene_enet_free_desc_ring(ring);
   1189		}
   1190	}
   1191}
   1192
   1193static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
   1194				 struct xgene_enet_desc_ring *ring)
   1195{
   1196	if ((pdata->enet_id == XGENE_ENET2) &&
   1197	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
   1198		return true;
   1199	}
   1200
   1201	return false;
   1202}
   1203
   1204static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
   1205					      struct xgene_enet_desc_ring *ring)
   1206{
   1207	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
   1208
   1209	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
   1210}
   1211
   1212static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
   1213			struct net_device *ndev, u32 ring_num,
   1214			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
   1215{
   1216	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
   1217	struct device *dev = ndev_to_dev(ndev);
   1218	struct xgene_enet_desc_ring *ring;
   1219	void *irq_mbox_addr;
   1220	int size;
   1221
   1222	size = xgene_enet_get_ring_size(dev, cfgsize);
   1223	if (size < 0)
   1224		return NULL;
   1225
   1226	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
   1227			    GFP_KERNEL);
   1228	if (!ring)
   1229		return NULL;
   1230
   1231	ring->ndev = ndev;
   1232	ring->num = ring_num;
   1233	ring->cfgsize = cfgsize;
   1234	ring->id = ring_id;
   1235
   1236	ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
   1237					      GFP_KERNEL | __GFP_ZERO);
   1238	if (!ring->desc_addr) {
   1239		devm_kfree(dev, ring);
   1240		return NULL;
   1241	}
   1242	ring->size = size;
   1243
   1244	if (is_irq_mbox_required(pdata, ring)) {
   1245		irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
   1246						    &ring->irq_mbox_dma,
   1247						    GFP_KERNEL | __GFP_ZERO);
   1248		if (!irq_mbox_addr) {
   1249			dmam_free_coherent(dev, size, ring->desc_addr,
   1250					   ring->dma);
   1251			devm_kfree(dev, ring);
   1252			return NULL;
   1253		}
   1254		ring->irq_mbox_addr = irq_mbox_addr;
   1255	}
   1256
   1257	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
   1258	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
   1259	ring = pdata->ring_ops->setup(ring);
   1260	netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
   1261		   ring->num, ring->size, ring->id, ring->slots);
   1262
   1263	return ring;
   1264}
   1265
   1266static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
   1267{
   1268	return (owner << 6) | (bufnum & GENMASK(5, 0));
   1269}
   1270
   1271static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
   1272{
   1273	enum xgene_ring_owner owner;
   1274
   1275	if (p->enet_id == XGENE_ENET1) {
   1276		switch (p->phy_mode) {
   1277		case PHY_INTERFACE_MODE_SGMII:
   1278			owner = RING_OWNER_ETH0;
   1279			break;
   1280		default:
   1281			owner = (!p->port_id) ? RING_OWNER_ETH0 :
   1282						RING_OWNER_ETH1;
   1283			break;
   1284		}
   1285	} else {
   1286		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
   1287	}
   1288
   1289	return owner;
   1290}
   1291
   1292static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
   1293{
   1294	struct device *dev = &pdata->pdev->dev;
   1295	u32 cpu_bufnum;
   1296	int ret;
   1297
   1298	ret = device_property_read_u32(dev, "channel", &cpu_bufnum);
   1299
   1300	return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
   1301}
   1302
   1303static int xgene_enet_create_desc_rings(struct net_device *ndev)
   1304{
   1305	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
   1306	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
   1307	struct xgene_enet_desc_ring *page_pool = NULL;
   1308	struct xgene_enet_desc_ring *buf_pool = NULL;
   1309	struct device *dev = ndev_to_dev(ndev);
   1310	u8 eth_bufnum = pdata->eth_bufnum;
   1311	u8 bp_bufnum = pdata->bp_bufnum;
   1312	u16 ring_num = pdata->ring_num;
   1313	enum xgene_ring_owner owner;
   1314	dma_addr_t dma_exp_bufs;
   1315	u16 ring_id, slots;
   1316	__le64 *exp_bufs;
   1317	int i, ret, size;
   1318	u8 cpu_bufnum;
   1319
   1320	cpu_bufnum = xgene_start_cpu_bufnum(pdata);
   1321
   1322	for (i = 0; i < pdata->rxq_cnt; i++) {
   1323		/* allocate rx descriptor ring */
   1324		owner = xgene_derive_ring_owner(pdata);
   1325		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
   1326		rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
   1327						      RING_CFGSIZE_16KB,
   1328						      ring_id);
   1329		if (!rx_ring) {
   1330			ret = -ENOMEM;
   1331			goto err;
   1332		}
   1333
   1334		/* allocate buffer pool for receiving packets */
   1335		owner = xgene_derive_ring_owner(pdata);
   1336		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
   1337		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
   1338						       RING_CFGSIZE_16KB,
   1339						       ring_id);
   1340		if (!buf_pool) {
   1341			ret = -ENOMEM;
   1342			goto err;
   1343		}
   1344
   1345		rx_ring->nbufpool = NUM_BUFPOOL;
   1346		rx_ring->npagepool = NUM_NXTBUFPOOL;
   1347		rx_ring->irq = pdata->irqs[i];
   1348		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
   1349						sizeof(struct sk_buff *),
   1350						GFP_KERNEL);
   1351		if (!buf_pool->rx_skb) {
   1352			ret = -ENOMEM;
   1353			goto err;
   1354		}
   1355
   1356		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
   1357		rx_ring->buf_pool = buf_pool;
   1358		pdata->rx_ring[i] = rx_ring;
   1359
   1360		if ((pdata->enet_id == XGENE_ENET1 &&  pdata->rxq_cnt > 4) ||
   1361		    (pdata->enet_id == XGENE_ENET2 &&  pdata->rxq_cnt > 16)) {
   1362			break;
   1363		}
   1364
   1365		/* allocate next buffer pool for jumbo packets */
   1366		owner = xgene_derive_ring_owner(pdata);
   1367		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
   1368		page_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
   1369							RING_CFGSIZE_16KB,
   1370							ring_id);
   1371		if (!page_pool) {
   1372			ret = -ENOMEM;
   1373			goto err;
   1374		}
   1375
   1376		slots = page_pool->slots;
   1377		page_pool->frag_page = devm_kcalloc(dev, slots,
   1378						    sizeof(struct page *),
   1379						    GFP_KERNEL);
   1380		if (!page_pool->frag_page) {
   1381			ret = -ENOMEM;
   1382			goto err;
   1383		}
   1384
   1385		page_pool->frag_dma_addr = devm_kcalloc(dev, slots,
   1386							sizeof(dma_addr_t),
   1387							GFP_KERNEL);
   1388		if (!page_pool->frag_dma_addr) {
   1389			ret = -ENOMEM;
   1390			goto err;
   1391		}
   1392
   1393		page_pool->dst_ring_num = xgene_enet_dst_ring_num(page_pool);
   1394		rx_ring->page_pool = page_pool;
   1395	}
   1396
   1397	for (i = 0; i < pdata->txq_cnt; i++) {
   1398		/* allocate tx descriptor ring */
   1399		owner = xgene_derive_ring_owner(pdata);
   1400		ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
   1401		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
   1402						      RING_CFGSIZE_16KB,
   1403						      ring_id);
   1404		if (!tx_ring) {
   1405			ret = -ENOMEM;
   1406			goto err;
   1407		}
   1408
   1409		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
   1410		exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
   1411					       GFP_KERNEL | __GFP_ZERO);
   1412		if (!exp_bufs) {
   1413			ret = -ENOMEM;
   1414			goto err;
   1415		}
   1416		tx_ring->exp_bufs = exp_bufs;
   1417
   1418		pdata->tx_ring[i] = tx_ring;
   1419
   1420		if (!pdata->cq_cnt) {
   1421			cp_ring = pdata->rx_ring[i];
   1422		} else {
   1423			/* allocate tx completion descriptor ring */
   1424			ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
   1425							 cpu_bufnum++);
   1426			cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
   1427							      RING_CFGSIZE_16KB,
   1428							      ring_id);
   1429			if (!cp_ring) {
   1430				ret = -ENOMEM;
   1431				goto err;
   1432			}
   1433
   1434			cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
   1435			cp_ring->index = i;
   1436		}
   1437
   1438		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
   1439					       sizeof(struct sk_buff *),
   1440					       GFP_KERNEL);
   1441		if (!cp_ring->cp_skb) {
   1442			ret = -ENOMEM;
   1443			goto err;
   1444		}
   1445
   1446		size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
   1447		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
   1448						      size, GFP_KERNEL);
   1449		if (!cp_ring->frag_dma_addr) {
   1450			devm_kfree(dev, cp_ring->cp_skb);
   1451			ret = -ENOMEM;
   1452			goto err;
   1453		}
   1454
   1455		tx_ring->cp_ring = cp_ring;
   1456		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
   1457	}
   1458
   1459	if (pdata->ring_ops->coalesce)
   1460		pdata->ring_ops->coalesce(pdata->tx_ring[0]);
   1461	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
   1462
   1463	return 0;
   1464
   1465err:
   1466	xgene_enet_free_desc_rings(pdata);
   1467	return ret;
   1468}
   1469
   1470static void xgene_enet_get_stats64(
   1471			struct net_device *ndev,
   1472			struct rtnl_link_stats64 *stats)
   1473{
   1474	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
   1475	struct xgene_enet_desc_ring *ring;
   1476	int i;
   1477
   1478	for (i = 0; i < pdata->txq_cnt; i++) {
   1479		ring = pdata->tx_ring[i];
   1480		if (ring) {
   1481			stats->tx_packets += ring->tx_packets;
   1482			stats->tx_bytes += ring->tx_bytes;
   1483			stats->tx_dropped += ring->tx_dropped;
   1484			stats->tx_errors += ring->tx_errors;
   1485		}
   1486	}
   1487
   1488	for (i = 0; i < pdata->rxq_cnt; i++) {
   1489		ring = pdata->rx_ring[i];
   1490		if (ring) {
   1491			stats->rx_packets += ring->rx_packets;
   1492			stats->rx_bytes += ring->rx_bytes;
   1493			stats->rx_dropped += ring->rx_dropped;
   1494			stats->rx_errors += ring->rx_errors +
   1495				ring->rx_length_errors +
   1496				ring->rx_crc_errors +
   1497				ring->rx_frame_errors +
   1498				ring->rx_fifo_errors;
   1499			stats->rx_length_errors += ring->rx_length_errors;
   1500			stats->rx_crc_errors += ring->rx_crc_errors;
   1501			stats->rx_frame_errors += ring->rx_frame_errors;
   1502			stats->rx_fifo_errors += ring->rx_fifo_errors;
   1503		}
   1504	}
   1505}
   1506
   1507static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
   1508{
   1509	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
   1510	int ret;
   1511
   1512	ret = eth_mac_addr(ndev, addr);
   1513	if (ret)
   1514		return ret;
   1515	pdata->mac_ops->set_mac_addr(pdata);
   1516
   1517	return ret;
   1518}
   1519
   1520static int xgene_change_mtu(struct net_device *ndev, int new_mtu)
   1521{
   1522	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
   1523	int frame_size;
   1524
   1525	if (!netif_running(ndev))
   1526		return 0;
   1527
   1528	frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;
   1529
   1530	xgene_enet_close(ndev);
   1531	ndev->mtu = new_mtu;
   1532	pdata->mac_ops->set_framesize(pdata, frame_size);
   1533	xgene_enet_open(ndev);
   1534
   1535	return 0;
   1536}
   1537
   1538static const struct net_device_ops xgene_ndev_ops = {
   1539	.ndo_open = xgene_enet_open,
   1540	.ndo_stop = xgene_enet_close,
   1541	.ndo_start_xmit = xgene_enet_start_xmit,
   1542	.ndo_tx_timeout = xgene_enet_timeout,
   1543	.ndo_get_stats64 = xgene_enet_get_stats64,
   1544	.ndo_change_mtu = xgene_change_mtu,
   1545	.ndo_set_mac_address = xgene_enet_set_mac_address,
   1546};
   1547
   1548#ifdef CONFIG_ACPI
   1549static void xgene_get_port_id_acpi(struct device *dev,
   1550				  struct xgene_enet_pdata *pdata)
   1551{
   1552	acpi_status status;
   1553	u64 temp;
   1554
   1555	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
   1556	if (ACPI_FAILURE(status)) {
   1557		pdata->port_id = 0;
   1558	} else {
   1559		pdata->port_id = temp;
   1560	}
   1561
   1562	return;
   1563}
   1564#endif
   1565
   1566static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
   1567{
   1568	u32 id = 0;
   1569
   1570	of_property_read_u32(dev->of_node, "port-id", &id);
   1571
   1572	pdata->port_id = id & BIT(0);
   1573
   1574	return;
   1575}
   1576
   1577static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
   1578{
   1579	struct device *dev = &pdata->pdev->dev;
   1580	int delay, ret;
   1581
   1582	ret = device_property_read_u32(dev, "tx-delay", &delay);
   1583	if (ret) {
   1584		pdata->tx_delay = 4;
   1585		return 0;
   1586	}
   1587
   1588	if (delay < 0 || delay > 7) {
   1589		dev_err(dev, "Invalid tx-delay specified\n");
   1590		return -EINVAL;
   1591	}
   1592
   1593	pdata->tx_delay = delay;
   1594
   1595	return 0;
   1596}
   1597
   1598static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
   1599{
   1600	struct device *dev = &pdata->pdev->dev;
   1601	int delay, ret;
   1602
   1603	ret = device_property_read_u32(dev, "rx-delay", &delay);
   1604	if (ret) {
   1605		pdata->rx_delay = 2;
   1606		return 0;
   1607	}
   1608
   1609	if (delay < 0 || delay > 7) {
   1610		dev_err(dev, "Invalid rx-delay specified\n");
   1611		return -EINVAL;
   1612	}
   1613
   1614	pdata->rx_delay = delay;
   1615
   1616	return 0;
   1617}
   1618
   1619static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
   1620{
   1621	struct platform_device *pdev = pdata->pdev;
   1622	int i, ret, max_irqs;
   1623
   1624	if (phy_interface_mode_is_rgmii(pdata->phy_mode))
   1625		max_irqs = 1;
   1626	else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
   1627		max_irqs = 2;
   1628	else
   1629		max_irqs = XGENE_MAX_ENET_IRQ;
   1630
   1631	for (i = 0; i < max_irqs; i++) {
   1632		ret = platform_get_irq(pdev, i);
   1633		if (ret <= 0) {
   1634			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
   1635				max_irqs = i;
   1636				pdata->rxq_cnt = max_irqs / 2;
   1637				pdata->txq_cnt = max_irqs / 2;
   1638				pdata->cq_cnt = max_irqs / 2;
   1639				break;
   1640			}
   1641			return ret ? : -ENXIO;
   1642		}
   1643		pdata->irqs[i] = ret;
   1644	}
   1645
   1646	return 0;
   1647}
   1648
   1649static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
   1650{
   1651	int ret;
   1652
   1653	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
   1654		return;
   1655
   1656	if (!IS_ENABLED(CONFIG_MDIO_XGENE))
   1657		return;
   1658
   1659	ret = xgene_enet_phy_connect(pdata->ndev);
   1660	if (!ret)
   1661		pdata->mdio_driver = true;
   1662}
   1663
   1664static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
   1665{
   1666	struct device *dev = &pdata->pdev->dev;
   1667
   1668	pdata->sfp_gpio_en = false;
   1669	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII ||
   1670	    (!device_property_present(dev, "sfp-gpios") &&
   1671	     !device_property_present(dev, "rxlos-gpios")))
   1672		return;
   1673
   1674	pdata->sfp_gpio_en = true;
   1675	pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
   1676	if (IS_ERR(pdata->sfp_rdy))
   1677		pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
   1678}
   1679
   1680static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
   1681{
   1682	struct platform_device *pdev;
   1683	struct net_device *ndev;
   1684	struct device *dev;
   1685	struct resource *res;
   1686	void __iomem *base_addr;
   1687	u32 offset;
   1688	int ret = 0;
   1689
   1690	pdev = pdata->pdev;
   1691	dev = &pdev->dev;
   1692	ndev = pdata->ndev;
   1693
   1694	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
   1695	if (!res) {
   1696		dev_err(dev, "Resource enet_csr not defined\n");
   1697		return -ENODEV;
   1698	}
   1699	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
   1700	if (!pdata->base_addr) {
   1701		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
   1702		return -ENOMEM;
   1703	}
   1704
   1705	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
   1706	if (!res) {
   1707		dev_err(dev, "Resource ring_csr not defined\n");
   1708		return -ENODEV;
   1709	}
   1710	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
   1711							resource_size(res));
   1712	if (!pdata->ring_csr_addr) {
   1713		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
   1714		return -ENOMEM;
   1715	}
   1716
   1717	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
   1718	if (!res) {
   1719		dev_err(dev, "Resource ring_cmd not defined\n");
   1720		return -ENODEV;
   1721	}
   1722	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
   1723							resource_size(res));
   1724	if (!pdata->ring_cmd_addr) {
   1725		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
   1726		return -ENOMEM;
   1727	}
   1728
   1729	if (dev->of_node)
   1730		xgene_get_port_id_dt(dev, pdata);
   1731#ifdef CONFIG_ACPI
   1732	else
   1733		xgene_get_port_id_acpi(dev, pdata);
   1734#endif
   1735
   1736	if (device_get_ethdev_address(dev, ndev))
   1737		eth_hw_addr_random(ndev);
   1738
   1739	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
   1740
   1741	pdata->phy_mode = device_get_phy_mode(dev);
   1742	if (pdata->phy_mode < 0) {
   1743		dev_err(dev, "Unable to get phy-connection-type\n");
   1744		return pdata->phy_mode;
   1745	}
   1746	if (!phy_interface_mode_is_rgmii(pdata->phy_mode) &&
   1747	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
   1748	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
   1749		dev_err(dev, "Incorrect phy-connection-type specified\n");
   1750		return -ENODEV;
   1751	}
   1752
   1753	ret = xgene_get_tx_delay(pdata);
   1754	if (ret)
   1755		return ret;
   1756
   1757	ret = xgene_get_rx_delay(pdata);
   1758	if (ret)
   1759		return ret;
   1760
   1761	ret = xgene_enet_get_irqs(pdata);
   1762	if (ret)
   1763		return ret;
   1764
   1765	xgene_enet_gpiod_get(pdata);
   1766
   1767	pdata->clk = devm_clk_get(&pdev->dev, NULL);
   1768	if (IS_ERR(pdata->clk)) {
   1769		if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
   1770			/* Abort if the clock is defined but couldn't be
     1771			 * retrieved. Always abort if the clock is missing on
     1772			 * a DT system as the driver can't cope with this case.
   1773			 */
   1774			if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
   1775				return PTR_ERR(pdata->clk);
   1776			/* Firmware may have set up the clock already. */
   1777			dev_info(dev, "clocks have been setup already\n");
   1778		}
   1779	}
   1780
   1781	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
   1782		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
   1783	else
   1784		base_addr = pdata->base_addr;
   1785	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
   1786	pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
   1787	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
   1788	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
   1789	if (phy_interface_mode_is_rgmii(pdata->phy_mode) ||
   1790	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
   1791		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
   1792		pdata->mcx_stats_addr =
   1793			pdata->base_addr + BLOCK_ETH_STATS_OFFSET;
   1794		offset = (pdata->enet_id == XGENE_ENET1) ?
   1795			  BLOCK_ETH_MAC_CSR_OFFSET :
   1796			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
   1797		pdata->mcx_mac_csr_addr = base_addr + offset;
   1798	} else {
   1799		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
   1800		pdata->mcx_stats_addr = base_addr + BLOCK_AXG_STATS_OFFSET;
   1801		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
   1802		pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
   1803	}
   1804	pdata->rx_buff_cnt = NUM_PKT_BUF;
   1805
   1806	return 0;
   1807}
   1808
   1809static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
   1810{
   1811	struct xgene_enet_cle *enet_cle = &pdata->cle;
   1812	struct xgene_enet_desc_ring *page_pool;
   1813	struct net_device *ndev = pdata->ndev;
   1814	struct xgene_enet_desc_ring *buf_pool;
   1815	u16 dst_ring_num, ring_id;
   1816	int i, ret;
   1817	u32 count;
   1818
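	/* Reset the port, then create and pre-fill the RX descriptor rings and buffer pools */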
   1819	ret = pdata->port_ops->reset(pdata);
   1820	if (ret)
   1821		return ret;
   1822
   1823	ret = xgene_enet_create_desc_rings(ndev);
   1824	if (ret) {
   1825		netdev_err(ndev, "Error in ring configuration\n");
   1826		return ret;
   1827	}
   1828
   1829	/* setup buffer pool */
   1830	for (i = 0; i < pdata->rxq_cnt; i++) {
   1831		buf_pool = pdata->rx_ring[i]->buf_pool;
   1832		xgene_enet_init_bufpool(buf_pool);
   1833		page_pool = pdata->rx_ring[i]->page_pool;
   1834		xgene_enet_init_bufpool(page_pool);
   1835
   1836		count = pdata->rx_buff_cnt;
   1837		ret = xgene_enet_refill_bufpool(buf_pool, count);
   1838		if (ret)
   1839			goto err;
   1840
   1841		ret = xgene_enet_refill_pagepool(page_pool, count);
   1842		if (ret)
   1843			goto err;
   1845	}
   1846
   1847	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
   1848	buf_pool = pdata->rx_ring[0]->buf_pool;
   1849	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
    1850		/* Initialize and enable the PreClassifier tree */
   1851		enet_cle->max_nodes = 512;
   1852		enet_cle->max_dbptrs = 1024;
   1853		enet_cle->parsers = 3;
   1854		enet_cle->active_parser = PARSER_ALL;
   1855		enet_cle->ptree.start_node = 0;
   1856		enet_cle->ptree.start_dbptr = 0;
   1857		enet_cle->jump_bytes = 8;
   1858		ret = pdata->cle_ops->cle_init(pdata);
   1859		if (ret) {
   1860			netdev_err(ndev, "Preclass Tree init error\n");
   1861			goto err;
   1862		}
   1864	} else {
   1867		page_pool = pdata->rx_ring[0]->page_pool;
   1868		ring_id = (page_pool) ? page_pool->id : 0;
   1869		pdata->port_ops->cle_bypass(pdata, dst_ring_num,
   1870					    buf_pool->id, ring_id);
   1871	}
   1872
   1873	ndev->max_mtu = XGENE_ENET_MAX_MTU;
   1874	pdata->phy_speed = SPEED_UNKNOWN;
   1875	pdata->mac_ops->init(pdata);
   1876
   1877	return ret;
   1878
   1879err:
   1880	xgene_enet_delete_desc_rings(pdata);
   1881	return ret;
   1882}
   1883
   1884static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
   1885{
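	/* Select MAC, port and (for XGMII) classifier callbacks plus default
	 * queue counts based on the PHY interface mode.
	 */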
   1886	switch (pdata->phy_mode) {
   1887	case PHY_INTERFACE_MODE_RGMII:
   1888	case PHY_INTERFACE_MODE_RGMII_ID:
   1889	case PHY_INTERFACE_MODE_RGMII_RXID:
   1890	case PHY_INTERFACE_MODE_RGMII_TXID:
   1891		pdata->mac_ops = &xgene_gmac_ops;
   1892		pdata->port_ops = &xgene_gport_ops;
   1893		pdata->rm = RM3;
   1894		pdata->rxq_cnt = 1;
   1895		pdata->txq_cnt = 1;
   1896		pdata->cq_cnt = 0;
   1897		break;
   1898	case PHY_INTERFACE_MODE_SGMII:
   1899		pdata->mac_ops = &xgene_sgmac_ops;
   1900		pdata->port_ops = &xgene_sgport_ops;
   1901		pdata->rm = RM1;
   1902		pdata->rxq_cnt = 1;
   1903		pdata->txq_cnt = 1;
   1904		pdata->cq_cnt = 1;
   1905		break;
   1906	default:
   1907		pdata->mac_ops = &xgene_xgmac_ops;
   1908		pdata->port_ops = &xgene_xgport_ops;
   1909		pdata->cle_ops = &xgene_cle3in_ops;
   1910		pdata->rm = RM0;
   1911		if (!pdata->rxq_cnt) {
   1912			pdata->rxq_cnt = XGENE_NUM_RX_RING;
   1913			pdata->txq_cnt = XGENE_NUM_TX_RING;
   1914			pdata->cq_cnt = XGENE_NUM_TXC_RING;
   1915		}
   1916		break;
   1917	}
   1918
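	/* Assign the starting buffer-pool and ring numbers for this port;
	 * the numbering layout differs between ENET1 and ENET2.
	 */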
   1919	if (pdata->enet_id == XGENE_ENET1) {
   1920		switch (pdata->port_id) {
   1921		case 0:
   1922			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
   1923				pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
   1924				pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
   1925				pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
   1926				pdata->ring_num = START_RING_NUM_0;
   1927			} else {
   1928				pdata->cpu_bufnum = START_CPU_BUFNUM_0;
   1929				pdata->eth_bufnum = START_ETH_BUFNUM_0;
   1930				pdata->bp_bufnum = START_BP_BUFNUM_0;
   1931				pdata->ring_num = START_RING_NUM_0;
   1932			}
   1933			break;
   1934		case 1:
   1935			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
   1936				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
   1937				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
   1938				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
   1939				pdata->ring_num = XG_START_RING_NUM_1;
   1940			} else {
   1941				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
   1942				pdata->eth_bufnum = START_ETH_BUFNUM_1;
   1943				pdata->bp_bufnum = START_BP_BUFNUM_1;
   1944				pdata->ring_num = START_RING_NUM_1;
   1945			}
   1946			break;
   1947		default:
   1948			break;
   1949		}
   1950		pdata->ring_ops = &xgene_ring1_ops;
   1951	} else {
   1952		switch (pdata->port_id) {
   1953		case 0:
   1954			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
   1955			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
   1956			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
   1957			pdata->ring_num = X2_START_RING_NUM_0;
   1958			break;
   1959		case 1:
   1960			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
   1961			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
   1962			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
   1963			pdata->ring_num = X2_START_RING_NUM_1;
   1964			break;
   1965		default:
   1966			break;
   1967		}
   1968		pdata->rm = RM0;
   1969		pdata->ring_ops = &xgene_ring2_ops;
   1970	}
   1971}
   1972
   1973static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
   1974{
   1975	struct napi_struct *napi;
   1976	int i;
   1977
   1978	for (i = 0; i < pdata->rxq_cnt; i++) {
   1979		napi = &pdata->rx_ring[i]->napi;
   1980		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
   1981			       NAPI_POLL_WEIGHT);
   1982	}
   1983
   1984	for (i = 0; i < pdata->cq_cnt; i++) {
   1985		napi = &pdata->tx_ring[i]->cp_ring->napi;
   1986		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
   1987			       NAPI_POLL_WEIGHT);
   1988	}
   1989}
   1990
   1991#ifdef CONFIG_ACPI
   1992static const struct acpi_device_id xgene_enet_acpi_match[] = {
   1993	{ "APMC0D05", XGENE_ENET1},
   1994	{ "APMC0D30", XGENE_ENET1},
   1995	{ "APMC0D31", XGENE_ENET1},
   1996	{ "APMC0D3F", XGENE_ENET1},
   1997	{ "APMC0D26", XGENE_ENET2},
   1998	{ "APMC0D25", XGENE_ENET2},
   1999	{ }
   2000};
   2001MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
   2002#endif
   2003
   2004static const struct of_device_id xgene_enet_of_match[] = {
   2005	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
   2006	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
   2007	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
   2008	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
   2009	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
   2010	{},
   2011};
   2012
   2013MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
   2014
   2015static int xgene_enet_probe(struct platform_device *pdev)
   2016{
   2017	struct net_device *ndev;
   2018	struct xgene_enet_pdata *pdata;
   2019	struct device *dev = &pdev->dev;
   2020	void (*link_state)(struct work_struct *);
   2021	const struct of_device_id *of_id;
   2022	int ret;
   2023
   2024	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
   2025				  XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);
   2026	if (!ndev)
   2027		return -ENOMEM;
   2028
   2029	pdata = netdev_priv(ndev);
   2030
   2031	pdata->pdev = pdev;
   2032	pdata->ndev = ndev;
   2033	SET_NETDEV_DEV(ndev, dev);
   2034	platform_set_drvdata(pdev, pdata);
   2035	ndev->netdev_ops = &xgene_ndev_ops;
   2036	xgene_enet_set_ethtool_ops(ndev);
   2037	ndev->features |= NETIF_F_IP_CSUM |
   2038			  NETIF_F_GSO |
   2039			  NETIF_F_GRO |
   2040			  NETIF_F_SG;
   2041
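	/* Determine the ENET generation from the DT compatible string or,
	 * failing that, from the ACPI ID table.
	 */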
   2042	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
   2043	if (of_id) {
   2044		pdata->enet_id = (enum xgene_enet_id)of_id->data;
   2045	}
   2046#ifdef CONFIG_ACPI
   2047	else {
   2048		const struct acpi_device_id *acpi_id;
   2049
   2050		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
   2051		if (acpi_id)
    2052			pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;
   2053	}
   2054#endif
   2055	if (!pdata->enet_id) {
   2056		ret = -ENODEV;
   2057		goto err;
   2058	}
   2059
   2060	ret = xgene_enet_get_resources(pdata);
   2061	if (ret)
   2062		goto err;
   2063
   2064	xgene_enet_setup_ops(pdata);
   2065	spin_lock_init(&pdata->mac_lock);
   2066
   2067	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
   2068		ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM;
   2069		spin_lock_init(&pdata->mss_lock);
   2070	}
   2071	ndev->hw_features = ndev->features;
   2072
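	/* Declare 64-bit DMA addressing for the ENET hardware */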
   2073	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
   2074	if (ret) {
   2075		netdev_err(ndev, "No usable DMA configuration\n");
   2076		goto err;
   2077	}
   2078
   2079	xgene_enet_check_phy_handle(pdata);
   2080
   2081	ret = xgene_enet_init_hw(pdata);
   2082	if (ret)
   2083		goto err2;
   2084
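	/* XGMII ports, and SGMII ports without an external MDIO driver, poll
	 * link state from a delayed work item; RGMII ports without an external
	 * MDIO driver register the built-in MDIO bus instead.
	 */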
   2085	link_state = pdata->mac_ops->link_state;
   2086	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
   2087		INIT_DELAYED_WORK(&pdata->link_work, link_state);
   2088	} else if (!pdata->mdio_driver) {
   2089		if (phy_interface_mode_is_rgmii(pdata->phy_mode))
   2090			ret = xgene_enet_mdio_config(pdata);
   2091		else
   2092			INIT_DELAYED_WORK(&pdata->link_work, link_state);
   2093
   2094		if (ret)
   2095			goto err1;
   2096	}
   2097
   2098	spin_lock_init(&pdata->stats_lock);
   2099	ret = xgene_extd_stats_init(pdata);
   2100	if (ret)
   2101		goto err1;
   2102
   2103	xgene_enet_napi_add(pdata);
   2104	ret = register_netdev(ndev);
   2105	if (ret) {
   2106		netdev_err(ndev, "Failed to register netdev\n");
   2107		goto err1;
   2108	}
   2109
   2110	return 0;
   2111
   2112err1:
   2113	/*
   2114	 * If necessary, free_netdev() will call netif_napi_del() and undo
   2115	 * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
   2116	 */
   2117
   2118	xgene_enet_delete_desc_rings(pdata);
   2119
   2120err2:
   2121	if (pdata->mdio_driver)
   2122		xgene_enet_phy_disconnect(pdata);
   2123	else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
   2124		xgene_enet_mdio_remove(pdata);
   2125err:
   2126	free_netdev(ndev);
   2127	return ret;
   2128}
   2129
   2130static int xgene_enet_remove(struct platform_device *pdev)
   2131{
   2132	struct xgene_enet_pdata *pdata;
   2133	struct net_device *ndev;
   2134
   2135	pdata = platform_get_drvdata(pdev);
   2136	ndev = pdata->ndev;
   2137
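	/* Take the interface down before detaching the PHY and freeing rings */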
   2138	rtnl_lock();
   2139	if (netif_running(ndev))
   2140		dev_close(ndev);
   2141	rtnl_unlock();
   2142
   2143	if (pdata->mdio_driver)
   2144		xgene_enet_phy_disconnect(pdata);
   2145	else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
   2146		xgene_enet_mdio_remove(pdata);
   2147
   2148	unregister_netdev(ndev);
   2149	xgene_enet_delete_desc_rings(pdata);
   2150	pdata->port_ops->shutdown(pdata);
   2151	free_netdev(ndev);
   2152
   2153	return 0;
   2154}
   2155
   2156static void xgene_enet_shutdown(struct platform_device *pdev)
   2157{
   2158	struct xgene_enet_pdata *pdata;
   2159
   2160	pdata = platform_get_drvdata(pdev);
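	/* Nothing to do if the device never finished probing */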
   2161	if (!pdata)
   2162		return;
   2163
   2164	if (!pdata->ndev)
   2165		return;
   2166
   2167	xgene_enet_remove(pdev);
   2168}
   2169
   2170static struct platform_driver xgene_enet_driver = {
   2171	.driver = {
   2172		   .name = "xgene-enet",
   2173		   .of_match_table = of_match_ptr(xgene_enet_of_match),
   2174		   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
   2175	},
   2176	.probe = xgene_enet_probe,
   2177	.remove = xgene_enet_remove,
   2178	.shutdown = xgene_enet_shutdown,
   2179};
   2180
   2181module_platform_driver(xgene_enet_driver);
   2182
   2183MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
   2184MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
   2185MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
   2186MODULE_LICENSE("GPL");