cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hip04_eth.c (27163B)


// SPDX-License-Identifier: GPL-2.0-or-later

/* Copyright (c) 2014 Linaro Ltd.
 * Copyright (c) 2014 Hisilicon Limited.
 */

#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#define SC_PPE_RESET_DREQ		0x026C

#define PPE_CFG_RX_ADDR			0x100
#define PPE_CFG_POOL_GRP		0x300
#define PPE_CFG_RX_BUF_SIZE		0x400
#define PPE_CFG_RX_FIFO_SIZE		0x500
#define PPE_CURR_BUF_CNT		0xa200

#define GE_DUPLEX_TYPE			0x08
#define GE_MAX_FRM_SIZE_REG		0x3c
#define GE_PORT_MODE			0x40
#define GE_PORT_EN			0x44
#define GE_SHORT_RUNTS_THR_REG		0x50
#define GE_TX_LOCAL_PAGE_REG		0x5c
#define GE_TRANSMIT_CONTROL_REG		0x60
#define GE_CF_CRC_STRIP_REG		0x1b0
#define GE_MODE_CHANGE_REG		0x1b4
#define GE_RECV_CONTROL_REG		0x1e0
#define GE_STATION_MAC_ADDRESS		0x210

#define PPE_CFG_BUS_CTRL_REG		0x424
#define PPE_CFG_RX_CTRL_REG		0x428

#if defined(CONFIG_HI13X1_GMAC)
#define PPE_CFG_CPU_ADD_ADDR		0x6D0
#define PPE_CFG_MAX_FRAME_LEN_REG	0x500
#define PPE_CFG_RX_PKT_MODE_REG		0x504
#define PPE_CFG_QOS_VMID_GEN		0x520
#define PPE_CFG_RX_PKT_INT		0x740
#define PPE_INTEN			0x700
#define PPE_INTSTS			0x708
#define PPE_RINT			0x704
#define PPE_CFG_STS_MODE		0x880
#else
#define PPE_CFG_CPU_ADD_ADDR		0x580
#define PPE_CFG_MAX_FRAME_LEN_REG	0x408
#define PPE_CFG_RX_PKT_MODE_REG		0x438
#define PPE_CFG_QOS_VMID_GEN		0x500
#define PPE_CFG_RX_PKT_INT		0x538
#define PPE_INTEN			0x600
#define PPE_INTSTS			0x608
#define PPE_RINT			0x604
#define PPE_CFG_STS_MODE		0x700
#endif /* CONFIG_HI13X1_GMAC */

#define PPE_HIS_RX_PKT_CNT		0x804

#define RESET_DREQ_ALL			0xffffffff

/* REG_INTERRUPT */
#define RCV_INT				BIT(10)
#define RCV_NOBUF			BIT(8)
#define RCV_DROP			BIT(7)
#define TX_DROP				BIT(6)
#define DEF_INT_ERR			(RCV_NOBUF | RCV_DROP | TX_DROP)
#define DEF_INT_MASK			(RCV_INT | DEF_INT_ERR)

/* TX descriptor config */
#define TX_FREE_MEM			BIT(0)
#define TX_READ_ALLOC_L3		BIT(1)
#if defined(CONFIG_HI13X1_GMAC)
#define TX_CLEAR_WB			BIT(7)
#define TX_RELEASE_TO_PPE		BIT(4)
#define TX_FINISH_CACHE_INV		BIT(6)
#define TX_POOL_SHIFT			16
#else
#define TX_CLEAR_WB			BIT(4)
#define TX_FINISH_CACHE_INV		BIT(2)
#endif
#define TX_L3_CHECKSUM			BIT(5)
#define TX_LOOP_BACK			BIT(11)

/* RX error */
#define RX_PKT_DROP			BIT(0)
#define RX_L2_ERR			BIT(1)
#define RX_PKT_ERR			(RX_PKT_DROP | RX_L2_ERR)

#define SGMII_SPEED_1000		0x08
#define SGMII_SPEED_100			0x07
#define SGMII_SPEED_10			0x06
#define MII_SPEED_100			0x01
#define MII_SPEED_10			0x00

#define GE_DUPLEX_FULL			BIT(0)
#define GE_DUPLEX_HALF			0x00
#define GE_MODE_CHANGE_EN		BIT(0)

#define GE_TX_AUTO_NEG			BIT(5)
#define GE_TX_ADD_CRC			BIT(6)
#define GE_TX_SHORT_PAD_THROUGH		BIT(7)

#define GE_RX_STRIP_CRC			BIT(0)
#define GE_RX_STRIP_PAD			BIT(3)
#define GE_RX_PAD_EN			BIT(4)

#define GE_AUTO_NEG_CTL			BIT(0)

#define GE_RX_INT_THRESHOLD		BIT(6)
#define GE_RX_TIMEOUT			0x04

#define GE_RX_PORT_EN			BIT(1)
#define GE_TX_PORT_EN			BIT(2)

#define PPE_CFG_RX_PKT_ALIGN		BIT(18)

#if defined(CONFIG_HI13X1_GMAC)
#define PPE_CFG_QOS_VMID_GRP_SHIFT	4
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT	7
#define PPE_CFG_STS_RX_PKT_CNT_RC	BIT(0)
#define PPE_CFG_QOS_VMID_MODE		BIT(15)
#define PPE_CFG_BUS_LOCAL_REL		(BIT(9) | BIT(15) | BIT(19) | BIT(23))

/* buf unit size is cache_line_size, which is 64, so the shift is 6 */
#define PPE_BUF_SIZE_SHIFT		6
#define PPE_TX_BUF_HOLD			BIT(31)
#define SOC_CACHE_LINE_MASK		0x3F
#else
#define PPE_CFG_QOS_VMID_GRP_SHIFT	8
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT	11
#define PPE_CFG_STS_RX_PKT_CNT_RC	BIT(12)
#define PPE_CFG_QOS_VMID_MODE		BIT(14)
#define PPE_CFG_BUS_LOCAL_REL		BIT(14)
/* buf unit size is 1, so the shift is 0 */
#define PPE_BUF_SIZE_SHIFT		0
#define PPE_TX_BUF_HOLD			0
#endif /* CONFIG_HI13X1_GMAC */

#define PPE_CFG_RX_FIFO_FSFU		BIT(11)
#define PPE_CFG_RX_DEPTH_SHIFT		16
#define PPE_CFG_RX_START_SHIFT		0

#define PPE_CFG_BUS_BIG_ENDIEN		BIT(0)

#define RX_DESC_NUM			128
#define TX_DESC_NUM			256
#define TX_NEXT(N)			(((N) + 1) & (TX_DESC_NUM-1))
#define RX_NEXT(N)			(((N) + 1) & (RX_DESC_NUM-1))

#define GMAC_PPE_RX_PKT_MAX_LEN		379
#define GMAC_MAX_PKT_LEN		1516
#define GMAC_MIN_PKT_LEN		31
#define RX_BUF_SIZE			1600
#define RESET_TIMEOUT			1000
#define TX_TIMEOUT			(6 * HZ)

#define DRV_NAME			"hip04-ether"
#define DRV_VERSION			"v1.0"

#define HIP04_MAX_TX_COALESCE_USECS	200
#define HIP04_MIN_TX_COALESCE_USECS	100
#define HIP04_MAX_TX_COALESCE_FRAMES	200
#define HIP04_MIN_TX_COALESCE_FRAMES	100

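/* Hardware descriptor layouts. The HI13X1 GMAC and the original HIP04
 * PPE use different layouts, but in both the hardware reads the fields
 * in big-endian byte order (hence the cpu_to_be32() casts in the xmit
 * path). The 64-byte alignment presumably keeps each TX descriptor in
 * a single cache line (the SoC cache line size is 64, per the comment
 * above).
 */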
struct tx_desc {
#if defined(CONFIG_HI13X1_GMAC)
	u32 reserved1[2];
	u32 send_addr;
	u16 send_size;
	u16 data_offset;
	u32 reserved2[7];
	u32 cfg;
	u32 wb_addr;
	u32 reserved3[3];
#else
	u32 send_addr;
	u32 send_size;
	u32 next_addr;
	u32 cfg;
	u32 wb_addr;
#endif
} __aligned(64);

struct rx_desc {
#if defined(CONFIG_HI13X1_GMAC)
	u32 reserved1[3];
	u16 pkt_len;
	u16 reserved_16;
	u32 reserved2[6];
	u32 pkt_err;
	u32 reserved3[5];
#else
	u16 reserved_16;
	u16 pkt_len;
	u32 reserve1[3];
	u32 pkt_err;
	u32 reserve2[4];
#endif
};

struct hip04_priv {
	void __iomem *base;
#if defined(CONFIG_HI13X1_GMAC)
	void __iomem *sysctrl_base;
#endif
	phy_interface_t phy_mode;
	int chan;
	unsigned int port;
	unsigned int group;
	unsigned int speed;
	unsigned int duplex;
	unsigned int reg_inten;

	struct napi_struct napi;
	struct device *dev;
	struct net_device *ndev;

	struct tx_desc *tx_desc;
	dma_addr_t tx_desc_dma;
	struct sk_buff *tx_skb[TX_DESC_NUM];
	dma_addr_t tx_phys[TX_DESC_NUM];
	unsigned int tx_head;

	int tx_coalesce_frames;
	int tx_coalesce_usecs;
	struct hrtimer tx_coalesce_timer;

	unsigned char *rx_buf[RX_DESC_NUM];
	dma_addr_t rx_phys[RX_DESC_NUM];
	unsigned int rx_head;
	unsigned int rx_buf_size;
	unsigned int rx_cnt_remaining;

	struct device_node *phy_node;
	struct phy_device *phy;
	struct regmap *map;
	struct work_struct tx_timeout_task;

	/* written only by tx cleanup */
	unsigned int tx_tail ____cacheline_aligned_in_smp;
};

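/* Descriptors in flight between the producer index (tx_head, advanced
 * by xmit) and the consumer index (tx_tail, advanced by reclaim).
 * Correct across wrap-around because the indices are unsigned and
 * TX_DESC_NUM is a power of two.
 */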
static inline unsigned int tx_count(unsigned int head, unsigned int tail)
{
	return (head - tail) % TX_DESC_NUM;
}

static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	priv->speed = speed;
	priv->duplex = duplex;

	switch (priv->phy_mode) {
	case PHY_INTERFACE_MODE_SGMII:
		if (speed == SPEED_1000)
			val = SGMII_SPEED_1000;
		else if (speed == SPEED_100)
			val = SGMII_SPEED_100;
		else
			val = SGMII_SPEED_10;
		break;
	case PHY_INTERFACE_MODE_MII:
		if (speed == SPEED_100)
			val = MII_SPEED_100;
		else
			val = MII_SPEED_10;
		break;
	default:
		netdev_warn(ndev, "unsupported PHY mode\n");
		val = MII_SPEED_10;
		break;
	}
	writel_relaxed(val, priv->base + GE_PORT_MODE);

	val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
	writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);

	val = GE_MODE_CHANGE_EN;
	writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
}

static void hip04_reset_dreq(struct hip04_priv *priv)
{
#if defined(CONFIG_HI13X1_GMAC)
	writel_relaxed(RESET_DREQ_ALL, priv->sysctrl_base + SC_PPE_RESET_DREQ);
#endif
}

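/* Drain the PPE's per-port buffer pool: keep reading the current
 * buffer count (and reading back RX addresses, which appears to pop
 * one buffer per read) until the pool is empty or RESET_TIMEOUT
 * iterations have passed.
 */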
static void hip04_reset_ppe(struct hip04_priv *priv)
{
	u32 val, tmp, timeout = 0;

	do {
		regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
		regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
		if (timeout++ > RESET_TIMEOUT)
			break;
	} while (val & 0xfff);
}

static void hip04_config_fifo(struct hip04_priv *priv)
{
	u32 val;

	val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
	val |= PPE_CFG_STS_RX_PKT_CNT_RC;
	writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);

	val = BIT(priv->group);
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);

	val = priv->group << PPE_CFG_QOS_VMID_GRP_SHIFT;
	val |= PPE_CFG_QOS_VMID_MODE;
	writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);

	val = RX_BUF_SIZE >> PPE_BUF_SIZE_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);

	val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
	val |= PPE_CFG_RX_FIFO_FSFU;
	val |= priv->chan << PPE_CFG_RX_START_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);

	val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);

	val = PPE_CFG_RX_PKT_ALIGN;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);

	val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
	writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);

	val = GMAC_PPE_RX_PKT_MAX_LEN;
	writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);

	val = GMAC_MAX_PKT_LEN;
	writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);

	val = GMAC_MIN_PKT_LEN;
	writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);

	val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
	val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
	writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);

	val = GE_RX_STRIP_CRC;
	writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);

	val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
	val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
	writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);

#ifndef CONFIG_HI13X1_GMAC
	val = GE_AUTO_NEG_CTL;
	writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
#endif
}

static void hip04_mac_enable(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	/* enable tx & rx */
	val = readl_relaxed(priv->base + GE_PORT_EN);
	val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
	writel_relaxed(val, priv->base + GE_PORT_EN);

	/* clear rx int */
	val = RCV_INT;
	writel_relaxed(val, priv->base + PPE_RINT);

	/* config recv int */
	val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);

	/* enable interrupt */
	priv->reg_inten = DEF_INT_MASK;
	writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
}

static void hip04_mac_disable(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	/* disable int */
	priv->reg_inten &= ~(DEF_INT_MASK);
	writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);

	/* disable tx & rx */
	val = readl_relaxed(priv->base + GE_PORT_EN);
	val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
	writel_relaxed(val, priv->base + GE_PORT_EN);
}

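/* Descriptors and buffers are handed to the hardware through
 * mailbox-style registers: a TX descriptor's physical address is
 * written to PPE_CFG_CPU_ADD_ADDR, an RX buffer's to the per-port
 * PPE_CFG_RX_ADDR. On HI13X1 the addresses are passed in cache-line
 * units (PPE_BUF_SIZE_SHIFT), with PPE_TX_BUF_HOLD OR'd into the TX
 * writes.
 */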
static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	u32 val;

	val = phys >> PPE_BUF_SIZE_SHIFT | PPE_TX_BUF_HOLD;
	writel(val, priv->base + PPE_CFG_CPU_ADD_ADDR);
}

static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	u32 val;

	val = phys >> PPE_BUF_SIZE_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, val);
}

static u32 hip04_recv_cnt(struct hip04_priv *priv)
{
	return readl(priv->base + PPE_HIS_RX_PKT_CNT);
}

static void hip04_update_mac_address(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
		       priv->base + GE_STATION_MAC_ADDRESS);
	writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
			(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
		       priv->base + GE_STATION_MAC_ADDRESS + 4);
}

static int hip04_set_mac_address(struct net_device *ndev, void *addr)
{
	eth_mac_addr(ndev, addr);
	hip04_update_mac_address(ndev);
	return 0;
}

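/* Reclaim completed TX descriptors. The ring is single-producer
 * (xmit writes tx_head) / single-consumer (only this function writes
 * tx_tail), synchronized by the smp_wmb()/smp_rmb() pairs rather than
 * a lock. Completion is detected through the write-back address:
 * wb_addr points at the descriptor's own send_addr field, which the
 * hardware is expected to clear once the frame is out (TX_CLEAR_WB);
 * @force skips that check when the interface is being torn down.
 */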
static int hip04_tx_reclaim(struct net_device *ndev, bool force)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	unsigned tx_tail = priv->tx_tail;
	struct tx_desc *desc;
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int count;

	smp_rmb();
	count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
	if (count == 0)
		goto out;

	while (count) {
		desc = &priv->tx_desc[tx_tail];
		if (desc->send_addr != 0) {
			if (force)
				desc->send_addr = 0;
			else
				break;
		}

		if (priv->tx_phys[tx_tail]) {
			dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
					 priv->tx_skb[tx_tail]->len,
					 DMA_TO_DEVICE);
			priv->tx_phys[tx_tail] = 0;
		}
		pkts_compl++;
		bytes_compl += priv->tx_skb[tx_tail]->len;
		dev_kfree_skb(priv->tx_skb[tx_tail]);
		priv->tx_skb[tx_tail] = NULL;
		tx_tail = TX_NEXT(tx_tail);
		count--;
	}

	priv->tx_tail = tx_tail;
	smp_wmb(); /* Ensure tx_tail visible to xmit */

out:
	if (pkts_compl || bytes_compl)
		netdev_completed_queue(ndev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
		netif_wake_queue(ndev);

	return count;
}

static void hip04_start_tx_timer(struct hip04_priv *priv)
{
	unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2;

	/* allow timer to fire after half the time at the earliest */
	hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns),
			       ns, HRTIMER_MODE_REL);
}

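/* Queue one skb. The descriptor is filled in big-endian form and its
 * wb_addr is pointed back at its own send_addr so that completion can
 * be detected by hip04_tx_reclaim(). Reclaim itself is deferred:
 * NAPI is scheduled once tx_coalesce_frames descriptors are
 * outstanding, otherwise the coalesce hrtimer picks the work up.
 */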
static netdev_tx_t
hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int tx_head = priv->tx_head, count;
	struct tx_desc *desc = &priv->tx_desc[tx_head];
	dma_addr_t phys;

	smp_rmb();
	count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
	if (count == (TX_DESC_NUM - 1)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, phys)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	priv->tx_skb[tx_head] = skb;
	priv->tx_phys[tx_head] = phys;

	desc->send_size = (__force u32)cpu_to_be32(skb->len);
#if defined(CONFIG_HI13X1_GMAC)
	desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV
		| TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT);
	desc->data_offset = (__force u32)cpu_to_be32(phys & SOC_CACHE_LINE_MASK);
	desc->send_addr = (__force u32)cpu_to_be32(phys & ~SOC_CACHE_LINE_MASK);
#else
	desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
	desc->send_addr = (__force u32)cpu_to_be32(phys);
#endif
	phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
	desc->wb_addr = (__force u32)cpu_to_be32(phys +
		offsetof(struct tx_desc, send_addr));
	skb_tx_timestamp(skb);

	hip04_set_xmit_desc(priv, phys);
	count++;
	netdev_sent_queue(ndev, skb->len);
	priv->tx_head = TX_NEXT(tx_head);

	stats->tx_bytes += skb->len;
	stats->tx_packets++;

	/* Ensure tx_head update visible to tx reclaim */
	smp_wmb();

	/* queue is getting full, better start cleaning up now */
	if (count >= priv->tx_coalesce_frames) {
		if (napi_schedule_prep(&priv->napi)) {
			/* disable rx interrupt and timer */
			priv->reg_inten &= ~(RCV_INT);
			writel_relaxed(DEF_INT_MASK & ~RCV_INT,
				       priv->base + PPE_INTEN);
			hrtimer_cancel(&priv->tx_coalesce_timer);
			__napi_schedule(&priv->napi);
		}
	} else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
		/* cleanup not pending yet, start a new timer */
		hip04_start_tx_timer(priv);
	}

	return NETDEV_TX_OK;
}

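/* NAPI poll. PPE_CFG_STS_RX_PKT_CNT_RC appears to make the
 * PPE_HIS_RX_PKT_CNT counter read-to-clear, so every hip04_recv_cnt()
 * read yields the number of frames that arrived since the last read;
 * rx_cnt_remaining accumulates these. A zero-length descriptor marks
 * the end of the burst, and TX reclaim is piggy-backed on the same
 * poll.
 */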
static int hip04_rx_poll(struct napi_struct *napi, int budget)
{
	struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
	struct net_device *ndev = priv->ndev;
	struct net_device_stats *stats = &ndev->stats;
	struct rx_desc *desc;
	struct sk_buff *skb;
	unsigned char *buf;
	bool last = false;
	dma_addr_t phys;
	int rx = 0;
	int tx_remaining;
	u16 len;
	u32 err;

	/* clean up tx descriptors */
	tx_remaining = hip04_tx_reclaim(ndev, false);
	priv->rx_cnt_remaining += hip04_recv_cnt(priv);
	while (priv->rx_cnt_remaining && !last) {
		buf = priv->rx_buf[priv->rx_head];
		skb = build_skb(buf, priv->rx_buf_size);
		if (unlikely(!skb)) {
			net_dbg_ratelimited("build_skb failed\n");
			goto refill;
		}

		dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
				 RX_BUF_SIZE, DMA_FROM_DEVICE);
		priv->rx_phys[priv->rx_head] = 0;

		desc = (struct rx_desc *)skb->data;
		len = be16_to_cpu((__force __be16)desc->pkt_len);
		err = be32_to_cpu((__force __be32)desc->pkt_err);

		if (len == 0) {
			dev_kfree_skb_any(skb);
			last = true;
		} else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
			dev_kfree_skb_any(skb);
			stats->rx_dropped++;
			stats->rx_errors++;
		} else {
			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, ndev);
			napi_gro_receive(&priv->napi, skb);
			stats->rx_packets++;
			stats->rx_bytes += len;
			rx++;
		}

refill:
		buf = netdev_alloc_frag(priv->rx_buf_size);
		if (!buf)
			goto done;
		phys = dma_map_single(priv->dev, buf,
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, phys))
			goto done;
		priv->rx_buf[priv->rx_head] = buf;
		priv->rx_phys[priv->rx_head] = phys;
		hip04_set_recv_desc(priv, phys);

		priv->rx_head = RX_NEXT(priv->rx_head);
		if (rx >= budget) {
			--priv->rx_cnt_remaining;
			goto done;
		}

		if (--priv->rx_cnt_remaining == 0)
			priv->rx_cnt_remaining += hip04_recv_cnt(priv);
	}

	if (!(priv->reg_inten & RCV_INT)) {
		/* enable rx interrupt */
		priv->reg_inten |= RCV_INT;
		writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
	}
	napi_complete_done(napi, rx);
done:
	/* start a new timer if necessary */
	if (rx < budget && tx_remaining)
		hip04_start_tx_timer(priv);

	return rx;
}

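/* Interrupt handler: acknowledge all sources via PPE_RINT, account
 * error drops, and on a receive interrupt mask RCV_INT and defer the
 * real work to NAPI. The coalesce timer is cancelled because the
 * poll it would have scheduled is about to run anyway.
 */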
static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	u32 ists = readl_relaxed(priv->base + PPE_INTSTS);

	if (!ists)
		return IRQ_NONE;

	writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);

	if (unlikely(ists & DEF_INT_ERR)) {
		if (ists & (RCV_NOBUF | RCV_DROP)) {
			stats->rx_errors++;
			stats->rx_dropped++;
			netdev_err(ndev, "rx drop\n");
		}
		if (ists & TX_DROP) {
			stats->tx_dropped++;
			netdev_err(ndev, "tx drop\n");
		}
	}

	if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		hrtimer_cancel(&priv->tx_coalesce_timer);
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

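/* TX coalesce timer expiry: funnel deferred TX reclaim through the
 * same NAPI poll as receive processing, masking RCV_INT just as the
 * interrupt handler does.
 */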
static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
{
	struct hip04_priv *priv;

	priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);

	if (napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		__napi_schedule(&priv->napi);
	}

	return HRTIMER_NORESTART;
}

static void hip04_adjust_link(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct phy_device *phy = priv->phy;

	if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
		hip04_config_port(ndev, phy->speed, phy->duplex);
		phy_print_status(phy);
	}
}

static int hip04_mac_open(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	priv->rx_head = 0;
	priv->rx_cnt_remaining = 0;
	priv->tx_head = 0;
	priv->tx_tail = 0;
	hip04_reset_ppe(priv);

	for (i = 0; i < RX_DESC_NUM; i++) {
		dma_addr_t phys;

		phys = dma_map_single(priv->dev, priv->rx_buf[i],
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, phys))
			return -EIO;

		priv->rx_phys[i] = phys;
		hip04_set_recv_desc(priv, phys);
	}

	if (priv->phy)
		phy_start(priv->phy);

	netdev_reset_queue(ndev);
	netif_start_queue(ndev);
	hip04_mac_enable(ndev);
	napi_enable(&priv->napi);

	return 0;
}

static int hip04_mac_stop(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);
	hip04_mac_disable(ndev);
	hip04_tx_reclaim(ndev, true);
	hip04_reset_ppe(priv);

	if (priv->phy)
		phy_stop(priv->phy);

	for (i = 0; i < RX_DESC_NUM; i++) {
		if (priv->rx_phys[i]) {
			dma_unmap_single(priv->dev, priv->rx_phys[i],
					 RX_BUF_SIZE, DMA_FROM_DEVICE);
			priv->rx_phys[i] = 0;
		}
	}

	return 0;
}

static void hip04_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}

static void hip04_tx_timeout_task(struct work_struct *work)
{
	struct hip04_priv *priv;

	priv = container_of(work, struct hip04_priv, tx_timeout_task);
	hip04_mac_stop(priv->ndev);
	hip04_mac_open(priv->ndev);
}

static int hip04_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct hip04_priv *priv = netdev_priv(netdev);

	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
	ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;

	return 0;
}

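/* Both values must lie within the 100..200 bounds defined above or
 * the request is rejected with -EINVAL. As a hypothetical example,
 * assuming the interface is named eth0:
 *
 *	ethtool -C eth0 tx-usecs 150 tx-frames 120
 */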
static int hip04_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct hip04_priv *priv = netdev_priv(netdev);

	if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
	     ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
	    (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
	     ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
		return -EINVAL;

	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;

	return 0;
}

static void hip04_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}

static const struct ethtool_ops hip04_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES,
	.get_coalesce		= hip04_get_coalesce,
	.set_coalesce		= hip04_set_coalesce,
	.get_drvinfo		= hip04_get_drvinfo,
};

static const struct net_device_ops hip04_netdev_ops = {
	.ndo_open		= hip04_mac_open,
	.ndo_stop		= hip04_mac_stop,
	.ndo_start_xmit		= hip04_mac_start_xmit,
	.ndo_set_mac_address	= hip04_set_mac_address,
	.ndo_tx_timeout		= hip04_timeout,
	.ndo_validate_addr	= eth_validate_addr,
};

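/* RX buffers are page fragments rather than skbs: each is sized
 * RX_BUF_SIZE plus room for the skb_shared_info that build_skb()
 * places at the end, so received frames can be wrapped into skbs
 * without copying.
 */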
static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	priv->tx_desc = dma_alloc_coherent(d,
					   TX_DESC_NUM * sizeof(struct tx_desc),
					   &priv->tx_desc_dma, GFP_KERNEL);
	if (!priv->tx_desc)
		return -ENOMEM;

	priv->rx_buf_size = RX_BUF_SIZE +
			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	for (i = 0; i < RX_DESC_NUM; i++) {
		priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
		if (!priv->rx_buf[i])
			return -ENOMEM;
	}

	return 0;
}

static void hip04_free_ring(struct net_device *ndev, struct device *d)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	for (i = 0; i < RX_DESC_NUM; i++)
		if (priv->rx_buf[i])
			skb_free_frag(priv->rx_buf[i]);

	for (i = 0; i < TX_DESC_NUM; i++)
		if (priv->tx_skb[i])
			dev_kfree_skb_any(priv->tx_skb[i]);

	dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
			  priv->tx_desc, priv->tx_desc_dma);
}

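/* Probe. The three cells of the "port-handle" phandle appear to
 * select the PPE port, the RX channel block (scaled by RX_DESC_NUM)
 * and the buffer pool group for this MAC; the phandle target is the
 * hip04-ppe syscon behind priv->map.
 */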
static int hip04_mac_probe(struct platform_device *pdev)
{
	struct device *d = &pdev->dev;
	struct device_node *node = d->of_node;
	struct of_phandle_args arg;
	struct net_device *ndev;
	struct hip04_priv *priv;
	int irq;
	int ret;

	ndev = alloc_etherdev(sizeof(struct hip04_priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->dev = d;
	priv->ndev = ndev;
	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto init_fail;
	}

#if defined(CONFIG_HI13X1_GMAC)
	priv->sysctrl_base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(priv->sysctrl_base)) {
		ret = PTR_ERR(priv->sysctrl_base);
		goto init_fail;
	}
#endif

	ret = of_parse_phandle_with_fixed_args(node, "port-handle", 3, 0, &arg);
	if (ret < 0) {
		dev_warn(d, "no port-handle\n");
		goto init_fail;
	}

	priv->port = arg.args[0];
	priv->chan = arg.args[1] * RX_DESC_NUM;
	priv->group = arg.args[2];

	hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	/* BQL will try to keep the TX queue as short as possible, but it can't
	 * be faster than tx_coalesce_usecs, so we need a fast timeout here,
	 * but also long enough to gather up enough frames to ensure we don't
	 * get more interrupts than necessary.
	 * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
	 */
	priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
	priv->tx_coalesce_usecs = 200;
	priv->tx_coalesce_timer.function = tx_done;

	priv->map = syscon_node_to_regmap(arg.np);
	if (IS_ERR(priv->map)) {
		dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
		ret = PTR_ERR(priv->map);
		goto init_fail;
	}

	ret = of_get_phy_mode(node, &priv->phy_mode);
	if (ret) {
		dev_warn(d, "failed to get phy-mode\n");
		goto init_fail;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		ret = -EINVAL;
		goto init_fail;
	}

	ret = devm_request_irq(d, irq, hip04_mac_interrupt,
			       0, pdev->name, ndev);
	if (ret) {
		netdev_err(ndev, "devm_request_irq failed\n");
		goto init_fail;
	}

	priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
	if (priv->phy_node) {
		priv->phy = of_phy_connect(ndev, priv->phy_node,
					   &hip04_adjust_link,
					   0, priv->phy_mode);
		if (!priv->phy) {
			ret = -EPROBE_DEFER;
			goto init_fail;
		}
	}

	INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);

	ndev->netdev_ops = &hip04_netdev_ops;
	ndev->ethtool_ops = &hip04_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->irq = irq;
	netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);

	hip04_reset_dreq(priv);
	hip04_reset_ppe(priv);
	if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
		hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);

	hip04_config_fifo(priv);
	eth_hw_addr_random(ndev);
	hip04_update_mac_address(ndev);

	ret = hip04_alloc_ring(ndev, d);
	if (ret) {
		netdev_err(ndev, "alloc ring fail\n");
		goto alloc_fail;
	}

	ret = register_netdev(ndev);
	if (ret)
		goto alloc_fail;

	return 0;

alloc_fail:
	hip04_free_ring(ndev, d);
init_fail:
	of_node_put(priv->phy_node);
	free_netdev(ndev);
	return ret;
}

static int hip04_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hip04_priv *priv = netdev_priv(ndev);
	struct device *d = &pdev->dev;

	if (priv->phy)
		phy_disconnect(priv->phy);

	hip04_free_ring(ndev, d);
	unregister_netdev(ndev);
	of_node_put(priv->phy_node);
	cancel_work_sync(&priv->tx_timeout_task);
	free_netdev(ndev);

	return 0;
}

static const struct of_device_id hip04_mac_match[] = {
	{ .compatible = "hisilicon,hip04-mac" },
	{ }
};

MODULE_DEVICE_TABLE(of, hip04_mac_match);

static struct platform_driver hip04_mac_driver = {
	.probe	= hip04_mac_probe,
	.remove	= hip04_remove,
	.driver	= {
		.name		= DRV_NAME,
		.of_match_table	= hip04_mac_match,
	},
};
module_platform_driver(hip04_mac_driver);

MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
MODULE_LICENSE("GPL");