cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

netsec.c (57631B)


      1// SPDX-License-Identifier: GPL-2.0+
      2
      3#include <linux/types.h>
      4#include <linux/clk.h>
      5#include <linux/platform_device.h>
      6#include <linux/pm_runtime.h>
      7#include <linux/acpi.h>
      8#include <linux/of_mdio.h>
      9#include <linux/of_net.h>
     10#include <linux/etherdevice.h>
     11#include <linux/interrupt.h>
     12#include <linux/io.h>
     13#include <linux/netlink.h>
     14#include <linux/bpf.h>
     15#include <linux/bpf_trace.h>
     16
     17#include <net/tcp.h>
     18#include <net/page_pool.h>
     19#include <net/ip6_checksum.h>
     20
     21#define NETSEC_REG_SOFT_RST			0x104
     22#define NETSEC_REG_COM_INIT			0x120
     23
     24#define NETSEC_REG_TOP_STATUS			0x200
     25#define NETSEC_IRQ_RX				BIT(1)
     26#define NETSEC_IRQ_TX				BIT(0)
     27
     28#define NETSEC_REG_TOP_INTEN			0x204
     29#define NETSEC_REG_INTEN_SET			0x234
     30#define NETSEC_REG_INTEN_CLR			0x238
     31
     32#define NETSEC_REG_NRM_TX_STATUS		0x400
     33#define NETSEC_REG_NRM_TX_INTEN			0x404
     34#define NETSEC_REG_NRM_TX_INTEN_SET		0x428
     35#define NETSEC_REG_NRM_TX_INTEN_CLR		0x42c
     36#define NRM_TX_ST_NTOWNR	BIT(17)
     37#define NRM_TX_ST_TR_ERR	BIT(16)
     38#define NRM_TX_ST_TXDONE	BIT(15)
     39#define NRM_TX_ST_TMREXP	BIT(14)
     40
     41#define NETSEC_REG_NRM_RX_STATUS		0x440
     42#define NETSEC_REG_NRM_RX_INTEN			0x444
     43#define NETSEC_REG_NRM_RX_INTEN_SET		0x468
     44#define NETSEC_REG_NRM_RX_INTEN_CLR		0x46c
     45#define NRM_RX_ST_RC_ERR	BIT(16)
     46#define NRM_RX_ST_PKTCNT	BIT(15)
     47#define NRM_RX_ST_TMREXP	BIT(14)
     48
     49#define NETSEC_REG_PKT_CMD_BUF			0xd0
     50
     51#define NETSEC_REG_CLK_EN			0x100
     52
     53#define NETSEC_REG_PKT_CTRL			0x140
     54
     55#define NETSEC_REG_DMA_TMR_CTRL			0x20c
     56#define NETSEC_REG_F_TAIKI_MC_VER		0x22c
     57#define NETSEC_REG_F_TAIKI_VER			0x230
     58#define NETSEC_REG_DMA_HM_CTRL			0x214
     59#define NETSEC_REG_DMA_MH_CTRL			0x220
     60#define NETSEC_REG_ADDR_DIS_CORE		0x218
     61#define NETSEC_REG_DMAC_HM_CMD_BUF		0x210
     62#define NETSEC_REG_DMAC_MH_CMD_BUF		0x21c
     63
     64#define NETSEC_REG_NRM_TX_PKTCNT		0x410
     65
     66#define NETSEC_REG_NRM_TX_DONE_PKTCNT		0x414
     67#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT	0x418
     68
     69#define NETSEC_REG_NRM_TX_TMR			0x41c
     70
     71#define NETSEC_REG_NRM_RX_PKTCNT		0x454
     72#define NETSEC_REG_NRM_RX_RXINT_PKTCNT		0x458
     73#define NETSEC_REG_NRM_TX_TXINT_TMR		0x420
     74#define NETSEC_REG_NRM_RX_RXINT_TMR		0x460
     75
     76#define NETSEC_REG_NRM_RX_TMR			0x45c
     77
     78#define NETSEC_REG_NRM_TX_DESC_START_UP		0x434
     79#define NETSEC_REG_NRM_TX_DESC_START_LW		0x408
     80#define NETSEC_REG_NRM_RX_DESC_START_UP		0x474
     81#define NETSEC_REG_NRM_RX_DESC_START_LW		0x448
     82
     83#define NETSEC_REG_NRM_TX_CONFIG		0x430
     84#define NETSEC_REG_NRM_RX_CONFIG		0x470
     85
     86#define MAC_REG_STATUS				0x1024
     87#define MAC_REG_DATA				0x11c0
     88#define MAC_REG_CMD				0x11c4
     89#define MAC_REG_FLOW_TH				0x11cc
     90#define MAC_REG_INTF_SEL			0x11d4
     91#define MAC_REG_DESC_INIT			0x11fc
     92#define MAC_REG_DESC_SOFT_RST			0x1204
     93#define NETSEC_REG_MODE_TRANS_COMP_STATUS	0x500
     94
     95#define GMAC_REG_MCR				0x0000
     96#define GMAC_REG_MFFR				0x0004
     97#define GMAC_REG_GAR				0x0010
     98#define GMAC_REG_GDR				0x0014
     99#define GMAC_REG_FCR				0x0018
    100#define GMAC_REG_BMR				0x1000
    101#define GMAC_REG_RDLAR				0x100c
    102#define GMAC_REG_TDLAR				0x1010
    103#define GMAC_REG_OMR				0x1018
    104
    105#define MHZ(n)		((n) * 1000 * 1000)
    106
    107#define NETSEC_TX_SHIFT_OWN_FIELD		31
    108#define NETSEC_TX_SHIFT_LD_FIELD		30
    109#define NETSEC_TX_SHIFT_DRID_FIELD		24
    110#define NETSEC_TX_SHIFT_PT_FIELD		21
    111#define NETSEC_TX_SHIFT_TDRID_FIELD		16
    112#define NETSEC_TX_SHIFT_CC_FIELD		15
    113#define NETSEC_TX_SHIFT_FS_FIELD		9
    114#define NETSEC_TX_LAST				8
    115#define NETSEC_TX_SHIFT_CO			7
    116#define NETSEC_TX_SHIFT_SO			6
    117#define NETSEC_TX_SHIFT_TRS_FIELD		4
    118
    119#define NETSEC_RX_PKT_OWN_FIELD			31
    120#define NETSEC_RX_PKT_LD_FIELD			30
    121#define NETSEC_RX_PKT_SDRID_FIELD		24
    122#define NETSEC_RX_PKT_FR_FIELD			23
    123#define NETSEC_RX_PKT_ER_FIELD			21
    124#define NETSEC_RX_PKT_ERR_FIELD			16
    125#define NETSEC_RX_PKT_TDRID_FIELD		12
    126#define NETSEC_RX_PKT_FS_FIELD			9
    127#define NETSEC_RX_PKT_LS_FIELD			8
    128#define NETSEC_RX_PKT_CO_FIELD			6
    129
    130#define NETSEC_RX_PKT_ERR_MASK			3
    131
    132#define NETSEC_MAX_TX_PKT_LEN			1518
    133#define NETSEC_MAX_TX_JUMBO_PKT_LEN		9018
    134
    135#define NETSEC_RING_GMAC			15
    136#define NETSEC_RING_MAX				2
    137
    138#define NETSEC_TCP_SEG_LEN_MAX			1460
    139#define NETSEC_TCP_JUMBO_SEG_LEN_MAX		8960
    140
    141#define NETSEC_RX_CKSUM_NOTAVAIL		0
    142#define NETSEC_RX_CKSUM_OK			1
    143#define NETSEC_RX_CKSUM_NG			2
    144
    145#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END	BIT(20)
    146#define NETSEC_IRQ_TRANSITION_COMPLETE		BIT(4)
    147
    148#define NETSEC_MODE_TRANS_COMP_IRQ_N2T		BIT(20)
    149#define NETSEC_MODE_TRANS_COMP_IRQ_T2N		BIT(19)
    150
    151#define NETSEC_INT_PKTCNT_MAX			2047
    152
    153#define NETSEC_FLOW_START_TH_MAX		95
    154#define NETSEC_FLOW_STOP_TH_MAX			95
    155#define NETSEC_FLOW_PAUSE_TIME_MIN		5
    156
    157#define NETSEC_CLK_EN_REG_DOM_ALL		0x3f
    158
    159#define NETSEC_PKT_CTRL_REG_MODE_NRM		BIT(28)
    160#define NETSEC_PKT_CTRL_REG_EN_JUMBO		BIT(27)
    161#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER	BIT(3)
    162#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE	BIT(2)
    163#define NETSEC_PKT_CTRL_REG_LOG_HD_ER		BIT(1)
    164#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH	BIT(0)
    165
    166#define NETSEC_CLK_EN_REG_DOM_G			BIT(5)
    167#define NETSEC_CLK_EN_REG_DOM_C			BIT(1)
    168#define NETSEC_CLK_EN_REG_DOM_D			BIT(0)
    169
    170#define NETSEC_COM_INIT_REG_DB			BIT(2)
    171#define NETSEC_COM_INIT_REG_CLS			BIT(1)
    172#define NETSEC_COM_INIT_REG_ALL			(NETSEC_COM_INIT_REG_CLS | \
    173						 NETSEC_COM_INIT_REG_DB)
    174
    175#define NETSEC_SOFT_RST_REG_RESET		0
    176#define NETSEC_SOFT_RST_REG_RUN			BIT(31)
    177
    178#define NETSEC_DMA_CTRL_REG_STOP		1
    179#define MH_CTRL__MODE_TRANS			BIT(20)
    180
    181#define NETSEC_GMAC_CMD_ST_READ			0
    182#define NETSEC_GMAC_CMD_ST_WRITE		BIT(28)
    183#define NETSEC_GMAC_CMD_ST_BUSY			BIT(31)
    184
    185#define NETSEC_GMAC_BMR_REG_COMMON		0x00412080
    186#define NETSEC_GMAC_BMR_REG_RESET		0x00020181
    187#define NETSEC_GMAC_BMR_REG_SWR			0x00000001
    188
    189#define NETSEC_GMAC_OMR_REG_ST			BIT(13)
    190#define NETSEC_GMAC_OMR_REG_SR			BIT(1)
    191
    192#define NETSEC_GMAC_MCR_REG_IBN			BIT(30)
    193#define NETSEC_GMAC_MCR_REG_CST			BIT(25)
    194#define NETSEC_GMAC_MCR_REG_JE			BIT(20)
    195#define NETSEC_MCR_PS				BIT(15)
    196#define NETSEC_GMAC_MCR_REG_FES			BIT(14)
    197#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON	0x0000280c
    198#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON	0x0001a00c
    199
    200#define NETSEC_FCR_RFE				BIT(2)
    201#define NETSEC_FCR_TFE				BIT(1)
    202
    203#define NETSEC_GMAC_GAR_REG_GW			BIT(1)
    204#define NETSEC_GMAC_GAR_REG_GB			BIT(0)
    205
    206#define NETSEC_GMAC_GAR_REG_SHIFT_PA		11
    207#define NETSEC_GMAC_GAR_REG_SHIFT_GR		6
    208#define GMAC_REG_SHIFT_CR_GAR			2
    209
    210#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ	2
    211#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ	3
    212#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ	0
    213#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ	1
    214#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ	4
    215#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ	5
    216
    217#define NETSEC_GMAC_RDLAR_REG_COMMON		0x18000
    218#define NETSEC_GMAC_TDLAR_REG_COMMON		0x1c000
    219
    220#define NETSEC_REG_NETSEC_VER_F_TAIKI		0x50000
    221
    222#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP	BIT(31)
    223#define NETSEC_REG_DESC_RING_CONFIG_CH_RST	BIT(30)
    224#define NETSEC_REG_DESC_TMR_MODE		4
    225#define NETSEC_REG_DESC_ENDIAN			0
    226
    227#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST	1
    228#define NETSEC_MAC_DESC_INIT_REG_INIT		1
    229
    230#define NETSEC_EEPROM_MAC_ADDRESS		0x00
    231#define NETSEC_EEPROM_HM_ME_ADDRESS_H		0x08
    232#define NETSEC_EEPROM_HM_ME_ADDRESS_L		0x0C
    233#define NETSEC_EEPROM_HM_ME_SIZE		0x10
    234#define NETSEC_EEPROM_MH_ME_ADDRESS_H		0x14
    235#define NETSEC_EEPROM_MH_ME_ADDRESS_L		0x18
    236#define NETSEC_EEPROM_MH_ME_SIZE		0x1C
    237#define NETSEC_EEPROM_PKT_ME_ADDRESS		0x20
    238#define NETSEC_EEPROM_PKT_ME_SIZE		0x24
    239
    240#define DESC_NUM	256
    241
    242#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
    243#define NETSEC_RXBUF_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
    244			       NET_IP_ALIGN)
    245#define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \
    246				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
    247#define NETSEC_RX_BUF_SIZE	(PAGE_SIZE - NETSEC_RX_BUF_NON_DATA)
    248
    249#define DESC_SZ	sizeof(struct netsec_de)
    250
    251#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x)	((x) & 0xffff0000)
    252
    253#define NETSEC_XDP_PASS          0
    254#define NETSEC_XDP_CONSUMED      BIT(0)
    255#define NETSEC_XDP_TX            BIT(1)
    256#define NETSEC_XDP_REDIR         BIT(2)
    257
    258enum ring_id {
    259	NETSEC_RING_TX = 0,
    260	NETSEC_RING_RX
    261};
    262
    263enum buf_type {
    264	TYPE_NETSEC_SKB = 0,
    265	TYPE_NETSEC_XDP_TX,
    266	TYPE_NETSEC_XDP_NDO,
    267};
    268
    269struct netsec_desc {
    270	union {
    271		struct sk_buff *skb;
    272		struct xdp_frame *xdpf;
    273	};
    274	dma_addr_t dma_addr;
    275	void *addr;
    276	u16 len;
    277	u8 buf_type;
    278};
    279
    280struct netsec_desc_ring {
    281	dma_addr_t desc_dma;
    282	struct netsec_desc *desc;
    283	void *vaddr;
    284	u16 head, tail;
    285	u16 xdp_xmit; /* netsec_xdp_xmit packets */
    286	struct page_pool *page_pool;
    287	struct xdp_rxq_info xdp_rxq;
    288	spinlock_t lock; /* XDP tx queue locking */
    289};
    290
    291struct netsec_priv {
    292	struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
    293	struct ethtool_coalesce et_coalesce;
    294	struct bpf_prog *xdp_prog;
    295	spinlock_t reglock; /* protect reg access */
    296	struct napi_struct napi;
    297	phy_interface_t phy_interface;
    298	struct net_device *ndev;
    299	struct device_node *phy_np;
    300	struct phy_device *phydev;
    301	struct mii_bus *mii_bus;
    302	void __iomem *ioaddr;
    303	void __iomem *eeprom_base;
    304	struct device *dev;
    305	struct clk *clk;
    306	u32 msg_enable;
    307	u32 freq;
    308	u32 phy_addr;
    309	bool rx_cksum_offload_flag;
    310};
    311
    312struct netsec_de { /* Netsec Descriptor layout */
    313	u32 attr;
    314	u32 data_buf_addr_up;
    315	u32 data_buf_addr_lw;
    316	u32 buf_len_info;
    317};
    318
    319struct netsec_tx_pkt_ctrl {
    320	u16 tcp_seg_len;
    321	bool tcp_seg_offload_flag;
    322	bool cksum_offload_flag;
    323};
    324
    325struct netsec_rx_pkt_info {
    326	int rx_cksum_result;
    327	int err_code;
    328	bool err_flag;
    329};
    330
    331static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
    332{
    333	writel(val, priv->ioaddr + reg_addr);
    334}
    335
    336static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
    337{
    338	return readl(priv->ioaddr + reg_addr);
    339}
    340
    341/************* MDIO BUS OPS FOLLOW *************/
    342
    343#define TIMEOUT_SPINS_MAC		1000
    344#define TIMEOUT_SECONDARY_MS_MAC	100
    345
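        /* Map the MDIO reference clock frequency to the clock-range (CR)
         * value programmed into the GMAC GAR register.
         */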
    346static u32 netsec_clk_type(u32 freq)
    347{
    348	if (freq < MHZ(35))
    349		return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
    350	if (freq < MHZ(60))
    351		return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
    352	if (freq < MHZ(100))
    353		return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
    354	if (freq < MHZ(150))
    355		return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
    356	if (freq < MHZ(250))
    357		return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;
    358
    359	return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
    360}
    361
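        /* Poll the register at @addr until the bits in @mask clear: first a
         * short busy-wait loop, then a slower sleeping loop before timing out.
         */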
    362static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
    363{
    364	u32 timeout = TIMEOUT_SPINS_MAC;
    365
    366	while (--timeout && netsec_read(priv, addr) & mask)
    367		cpu_relax();
    368	if (timeout)
    369		return 0;
    370
    371	timeout = TIMEOUT_SECONDARY_MS_MAC;
    372	while (--timeout && netsec_read(priv, addr) & mask)
    373		usleep_range(1000, 2000);
    374
    375	if (timeout)
    376		return 0;
    377
    378	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
    379
    380	return -ETIMEDOUT;
    381}
    382
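        /* GMAC registers are not mapped directly; they are reached indirectly
         * by writing the target offset and data through MAC_REG_CMD /
         * MAC_REG_DATA and waiting for the busy bit to clear.
         */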
    383static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
    384{
    385	netsec_write(priv, MAC_REG_DATA, value);
    386	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
    387	return netsec_wait_while_busy(priv,
    388				      MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
    389}
    390
    391static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
    392{
    393	int ret;
    394
    395	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
    396	ret = netsec_wait_while_busy(priv,
    397				     MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
    398	if (ret)
    399		return ret;
    400
    401	*read = netsec_read(priv, MAC_REG_DATA);
    402
    403	return 0;
    404}
    405
    406static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
    407				      u32 addr, u32 mask)
    408{
    409	u32 timeout = TIMEOUT_SPINS_MAC;
    410	int ret, data;
    411
    412	do {
    413		ret = netsec_mac_read(priv, addr, &data);
    414		if (ret)
    415			break;
    416		cpu_relax();
    417	} while (--timeout && (data & mask));
    418
    419	if (timeout)
    420		return 0;
    421
    422	timeout = TIMEOUT_SECONDARY_MS_MAC;
    423	do {
    424		usleep_range(1000, 2000);
    425
    426		ret = netsec_mac_read(priv, addr, &data);
    427		if (ret)
    428			break;
    429		cpu_relax();
    430	} while (--timeout && (data & mask));
    431
    432	if (timeout && !ret)
    433		return 0;
    434
    435	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
    436
    437	return -ETIMEDOUT;
    438}
    439
    440static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
    441{
    442	struct phy_device *phydev = priv->ndev->phydev;
    443	u32 value = 0;
    444
    445	value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
    446				 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;
    447
    448	if (phydev->speed != SPEED_1000)
    449		value |= NETSEC_MCR_PS;
    450
    451	if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
    452	    phydev->speed == SPEED_100)
    453		value |= NETSEC_GMAC_MCR_REG_FES;
    454
    455	value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;
    456
    457	if (phy_interface_mode_is_rgmii(priv->phy_interface))
    458		value |= NETSEC_GMAC_MCR_REG_IBN;
    459
    460	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
    461		return -ETIMEDOUT;
    462
    463	return 0;
    464}
    465
    466static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr);
    467
    468static int netsec_phy_write(struct mii_bus *bus,
    469			    int phy_addr, int reg, u16 val)
    470{
    471	int status;
    472	struct netsec_priv *priv = bus->priv;
    473
    474	if (netsec_mac_write(priv, GMAC_REG_GDR, val))
    475		return -ETIMEDOUT;
    476	if (netsec_mac_write(priv, GMAC_REG_GAR,
    477			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
    478			     reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
    479			     NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
    480			     (netsec_clk_type(priv->freq) <<
    481			      GMAC_REG_SHIFT_CR_GAR)))
    482		return -ETIMEDOUT;
    483
    484	status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
    485					    NETSEC_GMAC_GAR_REG_GB);
    486
     487	/* The Developerbox board implements an RTL8211E PHY, which has a
     488	 * compatibility problem with F_GMAC4.
     489	 * The RTL8211E expects the MDC clock to keep toggling for several
     490	 * clock cycles with MDIO high before entering the IDLE state.
     491	 * To meet this requirement, the netsec driver issues a dummy read
     492	 * (e.g. of the PHYID1 register at offset 0x2) right after each write.
     493	 */
    494	netsec_phy_read(bus, phy_addr, MII_PHYSID1);
    495
    496	return status;
    497}
    498
    499static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
    500{
    501	struct netsec_priv *priv = bus->priv;
    502	u32 data;
    503	int ret;
    504
    505	if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
    506			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
    507			     reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
    508			     (netsec_clk_type(priv->freq) <<
    509			      GMAC_REG_SHIFT_CR_GAR)))
    510		return -ETIMEDOUT;
    511
    512	ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
    513					 NETSEC_GMAC_GAR_REG_GB);
    514	if (ret)
    515		return ret;
    516
    517	ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
    518	if (ret)
    519		return ret;
    520
    521	return data;
    522}
    523
    524/************* ETHTOOL_OPS FOLLOW *************/
    525
    526static void netsec_et_get_drvinfo(struct net_device *net_device,
    527				  struct ethtool_drvinfo *info)
    528{
    529	strlcpy(info->driver, "netsec", sizeof(info->driver));
    530	strlcpy(info->bus_info, dev_name(net_device->dev.parent),
    531		sizeof(info->bus_info));
    532}
    533
    534static int netsec_et_get_coalesce(struct net_device *net_device,
    535				  struct ethtool_coalesce *et_coalesce,
    536				  struct kernel_ethtool_coalesce *kernel_coal,
    537				  struct netlink_ext_ack *extack)
    538{
    539	struct netsec_priv *priv = netdev_priv(net_device);
    540
    541	*et_coalesce = priv->et_coalesce;
    542
    543	return 0;
    544}
    545
    546static int netsec_et_set_coalesce(struct net_device *net_device,
    547				  struct ethtool_coalesce *et_coalesce,
    548				  struct kernel_ethtool_coalesce *kernel_coal,
    549				  struct netlink_ext_ack *extack)
    550{
    551	struct netsec_priv *priv = netdev_priv(net_device);
    552
    553	priv->et_coalesce = *et_coalesce;
    554
    555	if (priv->et_coalesce.tx_coalesce_usecs < 50)
    556		priv->et_coalesce.tx_coalesce_usecs = 50;
    557	if (priv->et_coalesce.tx_max_coalesced_frames < 1)
    558		priv->et_coalesce.tx_max_coalesced_frames = 1;
    559
    560	netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
    561		     priv->et_coalesce.tx_max_coalesced_frames);
    562	netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
    563		     priv->et_coalesce.tx_coalesce_usecs);
    564	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
    565	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);
    566
    567	if (priv->et_coalesce.rx_coalesce_usecs < 50)
    568		priv->et_coalesce.rx_coalesce_usecs = 50;
    569	if (priv->et_coalesce.rx_max_coalesced_frames < 1)
    570		priv->et_coalesce.rx_max_coalesced_frames = 1;
    571
    572	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
    573		     priv->et_coalesce.rx_max_coalesced_frames);
    574	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
    575		     priv->et_coalesce.rx_coalesce_usecs);
    576	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
    577	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);
    578
    579	return 0;
    580}
    581
    582static u32 netsec_et_get_msglevel(struct net_device *dev)
    583{
    584	struct netsec_priv *priv = netdev_priv(dev);
    585
    586	return priv->msg_enable;
    587}
    588
    589static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
    590{
    591	struct netsec_priv *priv = netdev_priv(dev);
    592
    593	priv->msg_enable = datum;
    594}
    595
    596static const struct ethtool_ops netsec_ethtool_ops = {
    597	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
    598				     ETHTOOL_COALESCE_MAX_FRAMES,
    599	.get_drvinfo		= netsec_et_get_drvinfo,
    600	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
    601	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
    602	.get_link		= ethtool_op_get_link,
    603	.get_coalesce		= netsec_et_get_coalesce,
    604	.set_coalesce		= netsec_et_set_coalesce,
    605	.get_msglevel		= netsec_et_get_msglevel,
    606	.set_msglevel		= netsec_et_set_msglevel,
    607};
    608
    609/************* NETDEV_OPS FOLLOW *************/
    610
    611
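        /* Program one RX descriptor: point it at the buffer, hand ownership
         * to the hardware via the OWN bit, and mirror the buffer bookkeeping
         * in the software-side dring->desc[] entry.
         */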
    612static void netsec_set_rx_de(struct netsec_priv *priv,
    613			     struct netsec_desc_ring *dring, u16 idx,
    614			     const struct netsec_desc *desc)
    615{
    616	struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
    617	u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
    618		   (1 << NETSEC_RX_PKT_FS_FIELD) |
    619		   (1 << NETSEC_RX_PKT_LS_FIELD);
    620
    621	if (idx == DESC_NUM - 1)
    622		attr |= (1 << NETSEC_RX_PKT_LD_FIELD);
    623
    624	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
    625	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
    626	de->buf_len_info = desc->len;
    627	de->attr = attr;
    628	dma_wmb();
    629
    630	dring->desc[idx].dma_addr = desc->dma_addr;
    631	dring->desc[idx].addr = desc->addr;
    632	dring->desc[idx].len = desc->len;
    633}
    634
    635static bool netsec_clean_tx_dring(struct netsec_priv *priv)
    636{
    637	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
    638	struct xdp_frame_bulk bq;
    639	struct netsec_de *entry;
    640	int tail = dring->tail;
    641	unsigned int bytes;
    642	int cnt = 0;
    643
    644	spin_lock(&dring->lock);
    645
    646	bytes = 0;
    647	xdp_frame_bulk_init(&bq);
    648	entry = dring->vaddr + DESC_SZ * tail;
    649
    650	rcu_read_lock(); /* need for xdp_return_frame_bulk */
    651
    652	while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) &&
    653	       cnt < DESC_NUM) {
    654		struct netsec_desc *desc;
    655		int eop;
    656
    657		desc = &dring->desc[tail];
    658		eop = (entry->attr >> NETSEC_TX_LAST) & 1;
    659		dma_rmb();
    660
    661		/* if buf_type is either TYPE_NETSEC_SKB or
    662		 * TYPE_NETSEC_XDP_NDO we mapped it
    663		 */
    664		if (desc->buf_type != TYPE_NETSEC_XDP_TX)
    665			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
    666					 DMA_TO_DEVICE);
    667
    668		if (!eop)
    669			goto next;
    670
    671		if (desc->buf_type == TYPE_NETSEC_SKB) {
    672			bytes += desc->skb->len;
    673			dev_kfree_skb(desc->skb);
    674		} else {
    675			bytes += desc->xdpf->len;
    676			if (desc->buf_type == TYPE_NETSEC_XDP_TX)
    677				xdp_return_frame_rx_napi(desc->xdpf);
    678			else
    679				xdp_return_frame_bulk(desc->xdpf, &bq);
    680		}
    681next:
    682		/* clean up so netsec_uninit_pkt_dring() won't free the skb
    683		 * again
    684		 */
    685		*desc = (struct netsec_desc){};
    686
    687		/* entry->attr is not going to be accessed by the NIC until
    688		 * netsec_set_tx_de() is called. No need for a dma_wmb() here
    689		 */
    690		entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
    691		/* move tail ahead */
    692		dring->tail = (tail + 1) % DESC_NUM;
    693
    694		tail = dring->tail;
    695		entry = dring->vaddr + DESC_SZ * tail;
    696		cnt++;
    697	}
    698	xdp_flush_frame_bulk(&bq);
    699
    700	rcu_read_unlock();
    701
    702	spin_unlock(&dring->lock);
    703
    704	if (!cnt)
    705		return false;
    706
    707	/* reading the register clears the irq */
    708	netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);
    709
    710	priv->ndev->stats.tx_packets += cnt;
    711	priv->ndev->stats.tx_bytes += bytes;
    712
    713	netdev_completed_queue(priv->ndev, cnt, bytes);
    714
    715	return true;
    716}
    717
    718static void netsec_process_tx(struct netsec_priv *priv)
    719{
    720	struct net_device *ndev = priv->ndev;
    721	bool cleaned;
    722
    723	cleaned = netsec_clean_tx_dring(priv);
    724
    725	if (cleaned && netif_queue_stopped(ndev)) {
     726		/* Make sure we update the value so that anyone stopping the
     727		 * queue after this reads the proper consumer idx
     728		 */
    729		smp_wmb();
    730		netif_wake_queue(ndev);
    731	}
    732}
    733
    734static void *netsec_alloc_rx_data(struct netsec_priv *priv,
    735				  dma_addr_t *dma_handle, u16 *desc_len)
    736
    737{
    738
    739	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
    740	struct page *page;
    741
    742	page = page_pool_dev_alloc_pages(dring->page_pool);
    743	if (!page)
    744		return NULL;
    745
     746	/* We allocate the same buffer length for XDP and non-XDP cases.
     747	 * The page_pool API maps the whole page and skips the headroom
     748	 * reserved for network payloads and/or XDP
     749	 */
    750	*dma_handle = page_pool_get_dma_addr(page) + NETSEC_RXBUF_HEADROOM;
    751	/* Make sure the incoming payload fits in the page for XDP and non-XDP
    752	 * cases and reserve enough space for headroom + skb_shared_info
    753	 */
    754	*desc_len = NETSEC_RX_BUF_SIZE;
    755
    756	return page_address(page);
    757}
    758
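        /* Re-arm @num RX descriptors starting at index @from, wrapping around
         * the ring as needed.
         */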
    759static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
    760{
    761	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
    762	u16 idx = from;
    763
    764	while (num) {
    765		netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);
    766		idx++;
    767		if (idx >= DESC_NUM)
    768			idx = 0;
    769		num--;
    770	}
    771}
    772
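        /* Ring the TX doorbell: report the number of newly queued descriptors
         * so the hardware starts transmitting them.
         */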
    773static void netsec_xdp_ring_tx_db(struct netsec_priv *priv, u16 pkts)
    774{
    775	if (likely(pkts))
    776		netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, pkts);
    777}
    778
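        /* Called once at the end of an RX NAPI run: flush pending
         * XDP_REDIRECT maps and kick the TX ring for XDP_TX frames instead of
         * doing it per packet.
         */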
    779static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res,
    780				   u16 pkts)
    781{
    782	if (xdp_res & NETSEC_XDP_REDIR)
    783		xdp_do_flush_map();
    784
    785	if (xdp_res & NETSEC_XDP_TX)
    786		netsec_xdp_ring_tx_db(priv, pkts);
    787}
    788
    789static void netsec_set_tx_de(struct netsec_priv *priv,
    790			     struct netsec_desc_ring *dring,
    791			     const struct netsec_tx_pkt_ctrl *tx_ctrl,
    792			     const struct netsec_desc *desc, void *buf)
    793{
    794	int idx = dring->head;
    795	struct netsec_de *de;
    796	u32 attr;
    797
    798	de = dring->vaddr + (DESC_SZ * idx);
    799
    800	attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
    801	       (1 << NETSEC_TX_SHIFT_PT_FIELD) |
    802	       (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
    803	       (1 << NETSEC_TX_SHIFT_FS_FIELD) |
    804	       (1 << NETSEC_TX_LAST) |
    805	       (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
    806	       (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
    807	       (1 << NETSEC_TX_SHIFT_TRS_FIELD);
    808	if (idx == DESC_NUM - 1)
    809		attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);
    810
    811	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
    812	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
    813	de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
    814	de->attr = attr;
    815
    816	dring->desc[idx] = *desc;
    817	if (desc->buf_type == TYPE_NETSEC_SKB)
    818		dring->desc[idx].skb = buf;
    819	else if (desc->buf_type == TYPE_NETSEC_XDP_TX ||
    820		 desc->buf_type == TYPE_NETSEC_XDP_NDO)
    821		dring->desc[idx].xdpf = buf;
    822
    823	/* move head ahead */
    824	dring->head = (dring->head + 1) % DESC_NUM;
    825}
    826
     827/* The current driver only supports one TX queue; this must be called with the ring's spin_lock held */
    828static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
    829				struct xdp_frame *xdpf, bool is_ndo)
    830
    831{
    832	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
    833	struct page *page = virt_to_page(xdpf->data);
    834	struct netsec_tx_pkt_ctrl tx_ctrl = {};
    835	struct netsec_desc tx_desc;
    836	dma_addr_t dma_handle;
    837	u16 filled;
    838
    839	if (tx_ring->head >= tx_ring->tail)
    840		filled = tx_ring->head - tx_ring->tail;
    841	else
    842		filled = tx_ring->head + DESC_NUM - tx_ring->tail;
    843
    844	if (DESC_NUM - filled <= 1)
    845		return NETSEC_XDP_CONSUMED;
    846
    847	if (is_ndo) {
     848		/* this path is for ndo_xdp_xmit; the buffer needs to be mapped
     849		 * before sending
     850		 */
    851		dma_handle = dma_map_single(priv->dev, xdpf->data, xdpf->len,
    852					    DMA_TO_DEVICE);
    853		if (dma_mapping_error(priv->dev, dma_handle))
    854			return NETSEC_XDP_CONSUMED;
    855		tx_desc.buf_type = TYPE_NETSEC_XDP_NDO;
    856	} else {
     857		/* This is the device RX buffer from the page_pool. No need to
     858		 * remap; just sync and send it
     859		 */
    860		struct netsec_desc_ring *rx_ring =
    861			&priv->desc_ring[NETSEC_RING_RX];
    862		enum dma_data_direction dma_dir =
    863			page_pool_get_dma_dir(rx_ring->page_pool);
    864
    865		dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom +
    866			sizeof(*xdpf);
    867		dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
    868					   dma_dir);
    869		tx_desc.buf_type = TYPE_NETSEC_XDP_TX;
    870	}
    871
    872	tx_desc.dma_addr = dma_handle;
    873	tx_desc.addr = xdpf->data;
    874	tx_desc.len = xdpf->len;
    875
    876	netdev_sent_queue(priv->ndev, xdpf->len);
    877	netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf);
    878
    879	return NETSEC_XDP_TX;
    880}
    881
    882static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
    883{
    884	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
    885	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
    886	u32 ret;
    887
    888	if (unlikely(!xdpf))
    889		return NETSEC_XDP_CONSUMED;
    890
    891	spin_lock(&tx_ring->lock);
    892	ret = netsec_xdp_queue_one(priv, xdpf, false);
    893	spin_unlock(&tx_ring->lock);
    894
    895	return ret;
    896}
    897
    898static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
    899			  struct xdp_buff *xdp)
    900{
    901	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
    902	unsigned int sync, len = xdp->data_end - xdp->data;
    903	u32 ret = NETSEC_XDP_PASS;
    904	struct page *page;
    905	int err;
    906	u32 act;
    907
    908	act = bpf_prog_run_xdp(prog, xdp);
    909
     910	/* Because of xdp_adjust_tail, the DMA sync for_device must cover the maximum length the CPU may have touched */
    911	sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM;
    912	sync = max(sync, len);
    913
    914	switch (act) {
    915	case XDP_PASS:
    916		ret = NETSEC_XDP_PASS;
    917		break;
    918	case XDP_TX:
    919		ret = netsec_xdp_xmit_back(priv, xdp);
    920		if (ret != NETSEC_XDP_TX) {
    921			page = virt_to_head_page(xdp->data);
    922			page_pool_put_page(dring->page_pool, page, sync, true);
    923		}
    924		break;
    925	case XDP_REDIRECT:
    926		err = xdp_do_redirect(priv->ndev, xdp, prog);
    927		if (!err) {
    928			ret = NETSEC_XDP_REDIR;
    929		} else {
    930			ret = NETSEC_XDP_CONSUMED;
    931			page = virt_to_head_page(xdp->data);
    932			page_pool_put_page(dring->page_pool, page, sync, true);
    933		}
    934		break;
    935	default:
    936		bpf_warn_invalid_xdp_action(priv->ndev, prog, act);
    937		fallthrough;
    938	case XDP_ABORTED:
    939		trace_xdp_exception(priv->ndev, prog, act);
    940		fallthrough;	/* handle aborts by dropping packet */
    941	case XDP_DROP:
    942		ret = NETSEC_XDP_CONSUMED;
    943		page = virt_to_head_page(xdp->data);
    944		page_pool_put_page(dring->page_pool, page, sync, true);
    945		break;
    946	}
    947
    948	return ret;
    949}
    950
    951static int netsec_process_rx(struct netsec_priv *priv, int budget)
    952{
    953	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
    954	struct net_device *ndev = priv->ndev;
    955	struct netsec_rx_pkt_info rx_info;
    956	enum dma_data_direction dma_dir;
    957	struct bpf_prog *xdp_prog;
    958	struct xdp_buff xdp;
    959	u16 xdp_xmit = 0;
    960	u32 xdp_act = 0;
    961	int done = 0;
    962
    963	xdp_init_buff(&xdp, PAGE_SIZE, &dring->xdp_rxq);
    964
    965	xdp_prog = READ_ONCE(priv->xdp_prog);
    966	dma_dir = page_pool_get_dma_dir(dring->page_pool);
    967
    968	while (done < budget) {
    969		u16 idx = dring->tail;
    970		struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
    971		struct netsec_desc *desc = &dring->desc[idx];
    972		struct page *page = virt_to_page(desc->addr);
    973		u32 xdp_result = NETSEC_XDP_PASS;
    974		struct sk_buff *skb = NULL;
    975		u16 pkt_len, desc_len;
    976		dma_addr_t dma_handle;
    977		void *buf_addr;
    978
    979		if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
    980			/* reading the register clears the irq */
    981			netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
    982			break;
    983		}
    984
     985		/* This barrier is needed to keep us from reading
    986		 * any other fields out of the netsec_de until we have
    987		 * verified the descriptor has been written back
    988		 */
    989		dma_rmb();
    990		done++;
    991
    992		pkt_len = de->buf_len_info >> 16;
    993		rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
    994			NETSEC_RX_PKT_ERR_MASK;
    995		rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
    996		if (rx_info.err_flag) {
    997			netif_err(priv, drv, priv->ndev,
    998				  "%s: rx fail err(%d)\n", __func__,
    999				  rx_info.err_code);
   1000			ndev->stats.rx_dropped++;
   1001			dring->tail = (dring->tail + 1) % DESC_NUM;
   1002			/* reuse buffer page frag */
   1003			netsec_rx_fill(priv, idx, 1);
   1004			continue;
   1005		}
   1006		rx_info.rx_cksum_result =
   1007			(de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
   1008
   1009		/* allocate a fresh buffer and map it to the hardware.
   1010		 * This will eventually replace the old buffer in the hardware
   1011		 */
   1012		buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
   1013
   1014		if (unlikely(!buf_addr))
   1015			break;
   1016
   1017		dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
   1018					dma_dir);
   1019		prefetch(desc->addr);
   1020
   1021		xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM,
   1022				 pkt_len, false);
   1023
   1024		if (xdp_prog) {
   1025			xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
   1026			if (xdp_result != NETSEC_XDP_PASS) {
   1027				xdp_act |= xdp_result;
   1028				if (xdp_result == NETSEC_XDP_TX)
   1029					xdp_xmit++;
   1030				goto next;
   1031			}
   1032		}
   1033		skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA);
   1034
   1035		if (unlikely(!skb)) {
    1036			/* If building the skb fails, recycle_direct will either unmap
    1037			 * and free the page or refill the cache, depending on the
    1038			 * cache state. Since we already paid the allocation cost,
    1039			 * try to put the page back into the cache
   1040			 */
   1041			page_pool_put_page(dring->page_pool, page, pkt_len,
   1042					   true);
   1043			netif_err(priv, drv, priv->ndev,
   1044				  "rx failed to build skb\n");
   1045			break;
   1046		}
   1047		skb_mark_for_recycle(skb);
   1048
   1049		skb_reserve(skb, xdp.data - xdp.data_hard_start);
   1050		skb_put(skb, xdp.data_end - xdp.data);
   1051		skb->protocol = eth_type_trans(skb, priv->ndev);
   1052
   1053		if (priv->rx_cksum_offload_flag &&
   1054		    rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
   1055			skb->ip_summed = CHECKSUM_UNNECESSARY;
   1056
   1057next:
   1058		if (skb)
   1059			napi_gro_receive(&priv->napi, skb);
   1060		if (skb || xdp_result) {
   1061			ndev->stats.rx_packets++;
   1062			ndev->stats.rx_bytes += xdp.data_end - xdp.data;
   1063		}
   1064
   1065		/* Update the descriptor with fresh buffers */
   1066		desc->len = desc_len;
   1067		desc->dma_addr = dma_handle;
   1068		desc->addr = buf_addr;
   1069
   1070		netsec_rx_fill(priv, idx, 1);
   1071		dring->tail = (dring->tail + 1) % DESC_NUM;
   1072	}
   1073	netsec_finalize_xdp_rx(priv, xdp_act, xdp_xmit);
   1074
   1075	return done;
   1076}
   1077
   1078static int netsec_napi_poll(struct napi_struct *napi, int budget)
   1079{
   1080	struct netsec_priv *priv;
   1081	int done;
   1082
   1083	priv = container_of(napi, struct netsec_priv, napi);
   1084
   1085	netsec_process_tx(priv);
   1086	done = netsec_process_rx(priv, budget);
   1087
   1088	if (done < budget && napi_complete_done(napi, done)) {
   1089		unsigned long flags;
   1090
   1091		spin_lock_irqsave(&priv->reglock, flags);
   1092		netsec_write(priv, NETSEC_REG_INTEN_SET,
   1093			     NETSEC_IRQ_RX | NETSEC_IRQ_TX);
   1094		spin_unlock_irqrestore(&priv->reglock, flags);
   1095	}
   1096
   1097	return done;
   1098}
   1099
   1100
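        /* Number of descriptors currently in use, i.e. the distance from tail
         * to head modulo DESC_NUM.
         */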
   1101static int netsec_desc_used(struct netsec_desc_ring *dring)
   1102{
   1103	int used;
   1104
   1105	if (dring->head >= dring->tail)
   1106		used = dring->head - dring->tail;
   1107	else
   1108		used = dring->head + DESC_NUM - dring->tail;
   1109
   1110	return used;
   1111}
   1112
   1113static int netsec_check_stop_tx(struct netsec_priv *priv, int used)
   1114{
   1115	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
   1116
   1117	/* keep tail from touching the queue */
   1118	if (DESC_NUM - used < 2) {
   1119		netif_stop_queue(priv->ndev);
   1120
   1121		/* Make sure we read the updated value in case
   1122		 * descriptors got freed
   1123		 */
   1124		smp_rmb();
   1125
   1126		used = netsec_desc_used(dring);
   1127		if (DESC_NUM - used < 2)
   1128			return NETDEV_TX_BUSY;
   1129
   1130		netif_wake_queue(priv->ndev);
   1131	}
   1132
   1133	return 0;
   1134}
   1135
   1136static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
   1137					    struct net_device *ndev)
   1138{
   1139	struct netsec_priv *priv = netdev_priv(ndev);
   1140	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
   1141	struct netsec_tx_pkt_ctrl tx_ctrl = {};
   1142	struct netsec_desc tx_desc;
   1143	u16 tso_seg_len = 0;
   1144	int filled;
   1145
   1146	spin_lock_bh(&dring->lock);
   1147	filled = netsec_desc_used(dring);
   1148	if (netsec_check_stop_tx(priv, filled)) {
   1149		spin_unlock_bh(&dring->lock);
   1150		net_warn_ratelimited("%s %s Tx queue full\n",
   1151				     dev_name(priv->dev), ndev->name);
   1152		return NETDEV_TX_BUSY;
   1153	}
   1154
   1155	if (skb->ip_summed == CHECKSUM_PARTIAL)
   1156		tx_ctrl.cksum_offload_flag = true;
   1157
   1158	if (skb_is_gso(skb))
   1159		tso_seg_len = skb_shinfo(skb)->gso_size;
   1160
   1161	if (tso_seg_len > 0) {
   1162		if (skb->protocol == htons(ETH_P_IP)) {
   1163			ip_hdr(skb)->tot_len = 0;
   1164			tcp_hdr(skb)->check =
   1165				~tcp_v4_check(0, ip_hdr(skb)->saddr,
   1166					      ip_hdr(skb)->daddr, 0);
   1167		} else {
   1168			tcp_v6_gso_csum_prep(skb);
   1169		}
   1170
   1171		tx_ctrl.tcp_seg_offload_flag = true;
   1172		tx_ctrl.tcp_seg_len = tso_seg_len;
   1173	}
   1174
   1175	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
   1176					  skb_headlen(skb), DMA_TO_DEVICE);
   1177	if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
   1178		spin_unlock_bh(&dring->lock);
   1179		netif_err(priv, drv, priv->ndev,
   1180			  "%s: DMA mapping failed\n", __func__);
   1181		ndev->stats.tx_dropped++;
   1182		dev_kfree_skb_any(skb);
   1183		return NETDEV_TX_OK;
   1184	}
   1185	tx_desc.addr = skb->data;
   1186	tx_desc.len = skb_headlen(skb);
   1187	tx_desc.buf_type = TYPE_NETSEC_SKB;
   1188
   1189	skb_tx_timestamp(skb);
   1190	netdev_sent_queue(priv->ndev, skb->len);
   1191
   1192	netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
   1193	spin_unlock_bh(&dring->lock);
   1194	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */
   1195
   1196	return NETDEV_TX_OK;
   1197}
   1198
   1199static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
   1200{
   1201	struct netsec_desc_ring *dring = &priv->desc_ring[id];
   1202	struct netsec_desc *desc;
   1203	u16 idx;
   1204
   1205	if (!dring->vaddr || !dring->desc)
   1206		return;
   1207	for (idx = 0; idx < DESC_NUM; idx++) {
   1208		desc = &dring->desc[idx];
   1209		if (!desc->addr)
   1210			continue;
   1211
   1212		if (id == NETSEC_RING_RX) {
   1213			struct page *page = virt_to_page(desc->addr);
   1214
   1215			page_pool_put_full_page(dring->page_pool, page, false);
   1216		} else if (id == NETSEC_RING_TX) {
   1217			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
   1218					 DMA_TO_DEVICE);
   1219			dev_kfree_skb(desc->skb);
   1220		}
   1221	}
   1222
   1223	/* Rx is currently using page_pool */
   1224	if (id == NETSEC_RING_RX) {
   1225		if (xdp_rxq_info_is_reg(&dring->xdp_rxq))
   1226			xdp_rxq_info_unreg(&dring->xdp_rxq);
   1227		page_pool_destroy(dring->page_pool);
   1228	}
   1229
   1230	memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
   1231	memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);
   1232
   1233	dring->head = 0;
   1234	dring->tail = 0;
   1235
   1236	if (id == NETSEC_RING_TX)
   1237		netdev_reset_queue(priv->ndev);
   1238}
   1239
   1240static void netsec_free_dring(struct netsec_priv *priv, int id)
   1241{
   1242	struct netsec_desc_ring *dring = &priv->desc_ring[id];
   1243
   1244	if (dring->vaddr) {
   1245		dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
   1246				  dring->vaddr, dring->desc_dma);
   1247		dring->vaddr = NULL;
   1248	}
   1249
   1250	kfree(dring->desc);
   1251	dring->desc = NULL;
   1252}
   1253
   1254static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
   1255{
   1256	struct netsec_desc_ring *dring = &priv->desc_ring[id];
   1257
   1258	dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
   1259					  &dring->desc_dma, GFP_KERNEL);
   1260	if (!dring->vaddr)
   1261		goto err;
   1262
   1263	dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
   1264	if (!dring->desc)
   1265		goto err;
   1266
   1267	return 0;
   1268err:
   1269	netsec_free_dring(priv, id);
   1270
   1271	return -ENOMEM;
   1272}
   1273
   1274static void netsec_setup_tx_dring(struct netsec_priv *priv)
   1275{
   1276	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
   1277	int i;
   1278
   1279	for (i = 0; i < DESC_NUM; i++) {
   1280		struct netsec_de *de;
   1281
   1282		de = dring->vaddr + (DESC_SZ * i);
   1283		/* de->attr is not going to be accessed by the NIC
   1284		 * until netsec_set_tx_de() is called.
   1285		 * No need for a dma_wmb() here
   1286		 */
   1287		de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
   1288	}
   1289}
   1290
   1291static int netsec_setup_rx_dring(struct netsec_priv *priv)
   1292{
   1293	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
   1294	struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
   1295	struct page_pool_params pp_params = {
   1296		.order = 0,
   1297		/* internal DMA mapping in page_pool */
   1298		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
   1299		.pool_size = DESC_NUM,
   1300		.nid = NUMA_NO_NODE,
   1301		.dev = priv->dev,
   1302		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
   1303		.offset = NETSEC_RXBUF_HEADROOM,
   1304		.max_len = NETSEC_RX_BUF_SIZE,
   1305	};
   1306	int i, err;
   1307
   1308	dring->page_pool = page_pool_create(&pp_params);
   1309	if (IS_ERR(dring->page_pool)) {
   1310		err = PTR_ERR(dring->page_pool);
   1311		dring->page_pool = NULL;
   1312		goto err_out;
   1313	}
   1314
   1315	err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0, priv->napi.napi_id);
   1316	if (err)
   1317		goto err_out;
   1318
   1319	err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_POOL,
   1320					 dring->page_pool);
   1321	if (err)
   1322		goto err_out;
   1323
   1324	for (i = 0; i < DESC_NUM; i++) {
   1325		struct netsec_desc *desc = &dring->desc[i];
   1326		dma_addr_t dma_handle;
   1327		void *buf;
   1328		u16 len;
   1329
   1330		buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
   1331
   1332		if (!buf) {
   1333			err = -ENOMEM;
   1334			goto err_out;
   1335		}
   1336		desc->dma_addr = dma_handle;
   1337		desc->addr = buf;
   1338		desc->len = len;
   1339	}
   1340
   1341	netsec_rx_fill(priv, 0, DESC_NUM);
   1342
   1343	return 0;
   1344
   1345err_out:
   1346	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
   1347	return err;
   1348}
   1349
   1350static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
   1351					   u32 addr_h, u32 addr_l, u32 size)
   1352{
   1353	u64 base = (u64)addr_h << 32 | addr_l;
   1354	void __iomem *ucode;
   1355	u32 i;
   1356
   1357	ucode = ioremap(base, size * sizeof(u32));
   1358	if (!ucode)
   1359		return -ENOMEM;
   1360
   1361	for (i = 0; i < size; i++)
   1362		netsec_write(priv, reg, readl(ucode + i * 4));
   1363
   1364	iounmap(ucode);
   1365	return 0;
   1366}
   1367
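        /* The addresses and sizes of the HM, MH and packet-engine microcode
         * regions are read from the attached EEPROM; each region is streamed
         * word by word into its command buffer register.
         */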
   1368static int netsec_netdev_load_microcode(struct netsec_priv *priv)
   1369{
   1370	u32 addr_h, addr_l, size;
   1371	int err;
   1372
   1373	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
   1374	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
   1375	size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
   1376	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
   1377					      addr_h, addr_l, size);
   1378	if (err)
   1379		return err;
   1380
   1381	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
   1382	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
   1383	size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
   1384	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
   1385					      addr_h, addr_l, size);
   1386	if (err)
   1387		return err;
   1388
   1389	addr_h = 0;
   1390	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
   1391	size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
   1392	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
   1393					      addr_h, addr_l, size);
   1394	if (err)
   1395		return err;
   1396
   1397	return 0;
   1398}
   1399
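        /* Full controller (re)initialisation: stop the DMA engines, soft-reset
         * the core, program the descriptor ring base addresses, optionally
         * reload the microcode, and switch the engine back to normal mode.
         */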
   1400static int netsec_reset_hardware(struct netsec_priv *priv,
   1401				 bool load_ucode)
   1402{
   1403	u32 value;
   1404	int err;
   1405
   1406	/* stop DMA engines */
   1407	if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
   1408		netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
   1409			     NETSEC_DMA_CTRL_REG_STOP);
   1410		netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
   1411			     NETSEC_DMA_CTRL_REG_STOP);
   1412
   1413		while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
   1414		       NETSEC_DMA_CTRL_REG_STOP)
   1415			cpu_relax();
   1416
   1417		while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
   1418		       NETSEC_DMA_CTRL_REG_STOP)
   1419			cpu_relax();
   1420	}
   1421
   1422	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
   1423	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
   1424	netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);
   1425
   1426	while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
   1427		cpu_relax();
   1428
   1429	/* set desc_start addr */
   1430	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
   1431		     upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
   1432	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
   1433		     lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
   1434
   1435	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
   1436		     upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
   1437	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
   1438		     lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
   1439
    1440	/* set normal TX and RX descriptor ring config */
   1441	netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
   1442		     1 << NETSEC_REG_DESC_ENDIAN);
   1443	netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
   1444		     1 << NETSEC_REG_DESC_ENDIAN);
   1445
   1446	if (load_ucode) {
   1447		err = netsec_netdev_load_microcode(priv);
   1448		if (err) {
   1449			netif_err(priv, probe, priv->ndev,
   1450				  "%s: failed to load microcode (%d)\n",
   1451				  __func__, err);
   1452			return err;
   1453		}
   1454	}
   1455
   1456	/* start DMA engines */
   1457	netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
   1458	netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);
   1459
   1460	usleep_range(1000, 2000);
   1461
   1462	if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
   1463	      NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
   1464		netif_err(priv, probe, priv->ndev,
   1465			  "microengine start failed\n");
   1466		return -ENXIO;
   1467	}
   1468	netsec_write(priv, NETSEC_REG_TOP_STATUS,
   1469		     NETSEC_TOP_IRQ_REG_CODE_LOAD_END);
   1470
   1471	value = NETSEC_PKT_CTRL_REG_MODE_NRM;
   1472	if (priv->ndev->mtu > ETH_DATA_LEN)
   1473		value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;
   1474
   1475	/* change to normal mode */
   1476	netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
   1477	netsec_write(priv, NETSEC_REG_PKT_CTRL, value);
   1478
   1479	while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
   1480		NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
   1481		cpu_relax();
   1482
   1483	/* clear any pending EMPTY/ERR irq status */
   1484	netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);
   1485
   1486	/* Disable TX & RX intr */
   1487	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
   1488
   1489	return 0;
   1490}
   1491
   1492static int netsec_start_gmac(struct netsec_priv *priv)
   1493{
   1494	struct phy_device *phydev = priv->ndev->phydev;
   1495	u32 value = 0;
   1496	int ret;
   1497
   1498	if (phydev->speed != SPEED_1000)
   1499		value = (NETSEC_GMAC_MCR_REG_CST |
   1500			 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);
   1501
   1502	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
   1503		return -ETIMEDOUT;
   1504	if (netsec_mac_write(priv, GMAC_REG_BMR,
   1505			     NETSEC_GMAC_BMR_REG_RESET))
   1506		return -ETIMEDOUT;
   1507
    1508	/* Wait for the soft reset to complete */
   1509	usleep_range(1000, 5000);
   1510
   1511	ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
   1512	if (ret)
   1513		return ret;
   1514	if (value & NETSEC_GMAC_BMR_REG_SWR)
   1515		return -EAGAIN;
   1516
   1517	netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
   1518	if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
   1519		return -ETIMEDOUT;
   1520
   1521	netsec_write(priv, MAC_REG_DESC_INIT, 1);
   1522	if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
   1523		return -ETIMEDOUT;
   1524
   1525	if (netsec_mac_write(priv, GMAC_REG_BMR,
   1526			     NETSEC_GMAC_BMR_REG_COMMON))
   1527		return -ETIMEDOUT;
   1528	if (netsec_mac_write(priv, GMAC_REG_RDLAR,
   1529			     NETSEC_GMAC_RDLAR_REG_COMMON))
   1530		return -ETIMEDOUT;
   1531	if (netsec_mac_write(priv, GMAC_REG_TDLAR,
   1532			     NETSEC_GMAC_TDLAR_REG_COMMON))
   1533		return -ETIMEDOUT;
   1534	if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
   1535		return -ETIMEDOUT;
   1536
   1537	ret = netsec_mac_update_to_phy_state(priv);
   1538	if (ret)
   1539		return ret;
   1540
   1541	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
   1542	if (ret)
   1543		return ret;
   1544
   1545	value |= NETSEC_GMAC_OMR_REG_SR;
   1546	value |= NETSEC_GMAC_OMR_REG_ST;
   1547
   1548	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
   1549	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
   1550
   1551	netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce, NULL, NULL);
   1552
   1553	if (netsec_mac_write(priv, GMAC_REG_OMR, value))
   1554		return -ETIMEDOUT;
   1555
   1556	return 0;
   1557}
   1558
   1559static int netsec_stop_gmac(struct netsec_priv *priv)
   1560{
   1561	u32 value;
   1562	int ret;
   1563
   1564	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
   1565	if (ret)
   1566		return ret;
   1567	value &= ~NETSEC_GMAC_OMR_REG_SR;
   1568	value &= ~NETSEC_GMAC_OMR_REG_ST;
   1569
   1570	/* disable all interrupts */
   1571	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
   1572	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
   1573
   1574	return netsec_mac_write(priv, GMAC_REG_OMR, value);
   1575}
   1576
   1577static void netsec_phy_adjust_link(struct net_device *ndev)
   1578{
   1579	struct netsec_priv *priv = netdev_priv(ndev);
   1580
   1581	if (ndev->phydev->link)
   1582		netsec_start_gmac(priv);
   1583	else
   1584		netsec_stop_gmac(priv);
   1585
   1586	phy_print_status(ndev->phydev);
   1587}
   1588
   1589static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
   1590{
   1591	struct netsec_priv *priv = dev_id;
   1592	u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
   1593	unsigned long flags;
   1594
   1595	/* Disable interrupts */
   1596	if (status & NETSEC_IRQ_TX) {
   1597		val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
   1598		netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
   1599	}
   1600	if (status & NETSEC_IRQ_RX) {
   1601		val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
   1602		netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
   1603	}
   1604
   1605	spin_lock_irqsave(&priv->reglock, flags);
   1606	netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
   1607	spin_unlock_irqrestore(&priv->reglock, flags);
   1608
   1609	napi_schedule(&priv->napi);
   1610
   1611	return IRQ_HANDLED;
   1612}
   1613
   1614static int netsec_netdev_open(struct net_device *ndev)
   1615{
   1616	struct netsec_priv *priv = netdev_priv(ndev);
   1617	int ret;
   1618
   1619	pm_runtime_get_sync(priv->dev);
   1620
   1621	netsec_setup_tx_dring(priv);
   1622	ret = netsec_setup_rx_dring(priv);
   1623	if (ret) {
   1624		netif_err(priv, probe, priv->ndev,
   1625			  "%s: fail setup ring\n", __func__);
   1626		goto err1;
   1627	}
   1628
   1629	ret = request_irq(priv->ndev->irq, netsec_irq_handler,
   1630			  IRQF_SHARED, "netsec", priv);
   1631	if (ret) {
   1632		netif_err(priv, drv, priv->ndev, "request_irq failed\n");
   1633		goto err2;
   1634	}
   1635
   1636	if (dev_of_node(priv->dev)) {
   1637		if (!of_phy_connect(priv->ndev, priv->phy_np,
   1638				    netsec_phy_adjust_link, 0,
   1639				    priv->phy_interface)) {
   1640			netif_err(priv, link, priv->ndev, "missing PHY\n");
   1641			ret = -ENODEV;
   1642			goto err3;
   1643		}
   1644	} else {
   1645		ret = phy_connect_direct(priv->ndev, priv->phydev,
   1646					 netsec_phy_adjust_link,
   1647					 priv->phy_interface);
   1648		if (ret) {
   1649			netif_err(priv, link, priv->ndev,
   1650				  "phy_connect_direct() failed (%d)\n", ret);
   1651			goto err3;
   1652		}
   1653	}
   1654
   1655	phy_start(ndev->phydev);
   1656
   1657	netsec_start_gmac(priv);
   1658	napi_enable(&priv->napi);
   1659	netif_start_queue(ndev);
   1660
   1661	/* Enable TX+RX intr. */
   1662	netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
   1663
   1664	return 0;
   1665err3:
   1666	free_irq(priv->ndev->irq, priv);
   1667err2:
   1668	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
   1669err1:
   1670	pm_runtime_put_sync(priv->dev);
   1671	return ret;
   1672}
   1673
   1674static int netsec_netdev_stop(struct net_device *ndev)
   1675{
   1676	int ret;
   1677	struct netsec_priv *priv = netdev_priv(ndev);
   1678
   1679	netif_stop_queue(priv->ndev);
   1680	dma_wmb();
   1681
   1682	napi_disable(&priv->napi);
   1683
   1684	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
   1685	netsec_stop_gmac(priv);
   1686
   1687	free_irq(priv->ndev->irq, priv);
   1688
   1689	netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
   1690	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
   1691
   1692	phy_stop(ndev->phydev);
   1693	phy_disconnect(ndev->phydev);
   1694
   1695	ret = netsec_reset_hardware(priv, false);
   1696
   1697	pm_runtime_put_sync(priv->dev);
   1698
   1699	return ret;
   1700}
   1701
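/* One-time netdev initialisation: allocate the TX/RX descriptor rings and
 * reset the hardware while the PHY is temporarily powered down.
 */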
   1702static int netsec_netdev_init(struct net_device *ndev)
   1703{
   1704	struct netsec_priv *priv = netdev_priv(ndev);
   1705	int ret;
   1706	u16 data;
   1707
   1708	BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);
   1709
   1710	ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
   1711	if (ret)
   1712		return ret;
   1713
   1714	ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
   1715	if (ret)
   1716		goto err1;
   1717
    1718	/* Power the PHY down while the hardware is reset */
   1719	data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR);
   1720	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR,
   1721			 data | BMCR_PDOWN);
   1722
   1723	ret = netsec_reset_hardware(priv, true);
   1724	if (ret)
   1725		goto err2;
   1726
   1727	/* Restore phy power state */
   1728	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
   1729
   1730	spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
   1731	spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);
   1732
   1733	return 0;
   1734err2:
   1735	netsec_free_dring(priv, NETSEC_RING_RX);
   1736err1:
   1737	netsec_free_dring(priv, NETSEC_RING_TX);
   1738	return ret;
   1739}
   1740
   1741static void netsec_netdev_uninit(struct net_device *ndev)
   1742{
   1743	struct netsec_priv *priv = netdev_priv(ndev);
   1744
   1745	netsec_free_dring(priv, NETSEC_RING_RX);
   1746	netsec_free_dring(priv, NETSEC_RING_TX);
   1747}
   1748
   1749static int netsec_netdev_set_features(struct net_device *ndev,
   1750				      netdev_features_t features)
   1751{
   1752	struct netsec_priv *priv = netdev_priv(ndev);
   1753
   1754	priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);
   1755
   1756	return 0;
   1757}
   1758
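/* ndo_xdp_xmit handler: queue the frames on the TX ring under the ring lock
 * and only ring the TX doorbell when XDP_XMIT_FLUSH is set.
 */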
   1759static int netsec_xdp_xmit(struct net_device *ndev, int n,
   1760			   struct xdp_frame **frames, u32 flags)
   1761{
   1762	struct netsec_priv *priv = netdev_priv(ndev);
   1763	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
   1764	int i, nxmit = 0;
   1765
   1766	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
   1767		return -EINVAL;
   1768
   1769	spin_lock(&tx_ring->lock);
   1770	for (i = 0; i < n; i++) {
   1771		struct xdp_frame *xdpf = frames[i];
   1772		int err;
   1773
   1774		err = netsec_xdp_queue_one(priv, xdpf, true);
   1775		if (err != NETSEC_XDP_TX)
   1776			break;
   1777
   1778		tx_ring->xdp_xmit++;
   1779		nxmit++;
   1780	}
   1781	spin_unlock(&tx_ring->lock);
   1782
   1783	if (unlikely(flags & XDP_XMIT_FLUSH)) {
   1784		netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit);
   1785		tx_ring->xdp_xmit = 0;
   1786	}
   1787
   1788	return nxmit;
   1789}
   1790
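/* Install or remove an XDP program, restarting the interface around the swap
 * if it is currently running.
 */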
   1791static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog,
   1792			    struct netlink_ext_ack *extack)
   1793{
   1794	struct net_device *dev = priv->ndev;
   1795	struct bpf_prog *old_prog;
   1796
    1797	/* For now, only support standard MTU-sized frames */
   1798	if (prog && dev->mtu > 1500) {
   1799		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
   1800		return -EOPNOTSUPP;
   1801	}
   1802
   1803	if (netif_running(dev))
   1804		netsec_netdev_stop(dev);
   1805
   1806	/* Detach old prog, if any */
   1807	old_prog = xchg(&priv->xdp_prog, prog);
   1808	if (old_prog)
   1809		bpf_prog_put(old_prog);
   1810
   1811	if (netif_running(dev))
   1812		netsec_netdev_open(dev);
   1813
   1814	return 0;
   1815}
   1816
   1817static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp)
   1818{
   1819	struct netsec_priv *priv = netdev_priv(ndev);
   1820
   1821	switch (xdp->command) {
   1822	case XDP_SETUP_PROG:
   1823		return netsec_xdp_setup(priv, xdp->prog, xdp->extack);
   1824	default:
   1825		return -EINVAL;
   1826	}
   1827}
   1828
   1829static const struct net_device_ops netsec_netdev_ops = {
   1830	.ndo_init		= netsec_netdev_init,
   1831	.ndo_uninit		= netsec_netdev_uninit,
   1832	.ndo_open		= netsec_netdev_open,
   1833	.ndo_stop		= netsec_netdev_stop,
   1834	.ndo_start_xmit		= netsec_netdev_start_xmit,
   1835	.ndo_set_features	= netsec_netdev_set_features,
   1836	.ndo_set_mac_address    = eth_mac_addr,
   1837	.ndo_validate_addr	= eth_validate_addr,
   1838	.ndo_eth_ioctl		= phy_do_ioctl,
   1839	.ndo_xdp_xmit		= netsec_xdp_xmit,
   1840	.ndo_bpf		= netsec_xdp,
   1841};
   1842
   1843static int netsec_of_probe(struct platform_device *pdev,
   1844			   struct netsec_priv *priv, u32 *phy_addr)
   1845{
   1846	int err;
   1847
   1848	err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_interface);
   1849	if (err) {
   1850		dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
   1851		return err;
   1852	}
   1853
   1854	priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
   1855	if (!priv->phy_np) {
   1856		dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
   1857		return -EINVAL;
   1858	}
   1859
   1860	*phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np);
   1861
   1862	priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
   1863	if (IS_ERR(priv->clk))
   1864		return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
   1865				     "phy_ref_clk not found\n");
   1866	priv->freq = clk_get_rate(priv->clk);
   1867
   1868	return 0;
   1869}
   1870
   1871static int netsec_acpi_probe(struct platform_device *pdev,
   1872			     struct netsec_priv *priv, u32 *phy_addr)
   1873{
   1874	int ret;
   1875
   1876	if (!IS_ENABLED(CONFIG_ACPI))
   1877		return -ENODEV;
   1878
   1879	/* ACPI systems are assumed to configure the PHY in firmware, so
   1880	 * there is really no need to discover the PHY mode from the DSDT.
   1881	 * Since firmware is known to exist in the field that configures the
   1882	 * PHY correctly but passes the wrong mode string in the phy-mode
   1883	 * device property, we have no choice but to ignore it.
   1884	 */
   1885	priv->phy_interface = PHY_INTERFACE_MODE_NA;
   1886
   1887	ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
   1888	if (ret)
   1889		return dev_err_probe(&pdev->dev, ret,
   1890				     "missing required property 'phy-channel'\n");
   1891
   1892	ret = device_property_read_u32(&pdev->dev,
   1893				       "socionext,phy-clock-frequency",
   1894				       &priv->freq);
   1895	if (ret)
   1896		return dev_err_probe(&pdev->dev, ret,
   1897				     "missing required property 'socionext,phy-clock-frequency'\n");
   1898	return 0;
   1899}
   1900
   1901static void netsec_unregister_mdio(struct netsec_priv *priv)
   1902{
   1903	struct phy_device *phydev = priv->phydev;
   1904
   1905	if (!dev_of_node(priv->dev) && phydev) {
   1906		phy_device_remove(phydev);
   1907		phy_device_free(phydev);
   1908	}
   1909
   1910	mdiobus_unregister(priv->mii_bus);
   1911}
   1912
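/* Register the MDIO bus. On DT systems the bus is registered from the 'mdio'
 * subnode (or the device node itself for older firmware); otherwise PHY
 * auto-probing is masked and the single PHY at phy_addr is registered by hand.
 */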
   1913static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
   1914{
   1915	struct mii_bus *bus;
   1916	int ret;
   1917
   1918	bus = devm_mdiobus_alloc(priv->dev);
   1919	if (!bus)
   1920		return -ENOMEM;
   1921
   1922	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
   1923	bus->priv = priv;
   1924	bus->name = "SNI NETSEC MDIO";
   1925	bus->read = netsec_phy_read;
   1926	bus->write = netsec_phy_write;
   1927	bus->parent = priv->dev;
   1928	priv->mii_bus = bus;
   1929
   1930	if (dev_of_node(priv->dev)) {
   1931		struct device_node *mdio_node, *parent = dev_of_node(priv->dev);
   1932
   1933		mdio_node = of_get_child_by_name(parent, "mdio");
   1934		if (mdio_node) {
   1935			parent = mdio_node;
   1936		} else {
    1937			/* Older firmware doesn't populate the mdio subnode;
    1938			 * tolerate that here so firmware can be upgraded in due time.
    1939			 */
   1940			dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
   1941		}
   1942
   1943		ret = of_mdiobus_register(bus, parent);
   1944		of_node_put(mdio_node);
   1945
   1946		if (ret) {
   1947			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
   1948			return ret;
   1949		}
   1950	} else {
   1951		/* Mask out all PHYs from auto probing. */
   1952		bus->phy_mask = ~0;
   1953		ret = mdiobus_register(bus);
   1954		if (ret) {
   1955			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
   1956			return ret;
   1957		}
   1958
   1959		priv->phydev = get_phy_device(bus, phy_addr, false);
   1960		if (IS_ERR(priv->phydev)) {
   1961			ret = PTR_ERR(priv->phydev);
   1962			dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
   1963			priv->phydev = NULL;
   1964			return -ENODEV;
   1965		}
   1966
   1967		ret = phy_device_register(priv->phydev);
   1968		if (ret) {
   1969			mdiobus_unregister(bus);
   1970			dev_err(priv->dev,
   1971				"phy_device_register err(%d)\n", ret);
   1972		}
   1973	}
   1974
   1975	return ret;
   1976}
   1977
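/* Map the register and EEPROM regions, determine the MAC address, parse the
 * DT or ACPI properties, verify the hardware revision and register the MDIO
 * bus and the net_device.
 */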
   1978static int netsec_probe(struct platform_device *pdev)
   1979{
   1980	struct resource *mmio_res, *eeprom_res;
   1981	struct netsec_priv *priv;
   1982	u32 hw_ver, phy_addr = 0;
   1983	struct net_device *ndev;
   1984	int ret;
   1985	int irq;
   1986
   1987	mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   1988	if (!mmio_res) {
   1989		dev_err(&pdev->dev, "No MMIO resource found.\n");
   1990		return -ENODEV;
   1991	}
   1992
   1993	eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
   1994	if (!eeprom_res) {
   1995		dev_info(&pdev->dev, "No EEPROM resource found.\n");
   1996		return -ENODEV;
   1997	}
   1998
   1999	irq = platform_get_irq(pdev, 0);
   2000	if (irq < 0)
   2001		return irq;
   2002
   2003	ndev = alloc_etherdev(sizeof(*priv));
   2004	if (!ndev)
   2005		return -ENOMEM;
   2006
   2007	priv = netdev_priv(ndev);
   2008
   2009	spin_lock_init(&priv->reglock);
   2010	SET_NETDEV_DEV(ndev, &pdev->dev);
   2011	platform_set_drvdata(pdev, priv);
   2012	ndev->irq = irq;
   2013	priv->dev = &pdev->dev;
   2014	priv->ndev = ndev;
   2015
   2016	priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
   2017			   NETIF_MSG_LINK | NETIF_MSG_PROBE;
   2018
   2019	priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
   2020				    resource_size(mmio_res));
   2021	if (!priv->ioaddr) {
   2022		dev_err(&pdev->dev, "devm_ioremap() failed\n");
   2023		ret = -ENXIO;
   2024		goto free_ndev;
   2025	}
   2026
   2027	priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
   2028					 resource_size(eeprom_res));
   2029	if (!priv->eeprom_base) {
   2030		dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
   2031		ret = -ENXIO;
   2032		goto free_ndev;
   2033	}
   2034
   2035	ret = device_get_ethdev_address(&pdev->dev, ndev);
   2036	if (ret && priv->eeprom_base) {
   2037		void __iomem *macp = priv->eeprom_base +
   2038					NETSEC_EEPROM_MAC_ADDRESS;
   2039		u8 addr[ETH_ALEN];
   2040
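		/* The MAC address bytes are stored byte-swapped within two
		 * 32-bit words in the EEPROM; reassemble them here.
		 */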
   2041		addr[0] = readb(macp + 3);
   2042		addr[1] = readb(macp + 2);
   2043		addr[2] = readb(macp + 1);
   2044		addr[3] = readb(macp + 0);
   2045		addr[4] = readb(macp + 7);
   2046		addr[5] = readb(macp + 6);
   2047		eth_hw_addr_set(ndev, addr);
   2048	}
   2049
   2050	if (!is_valid_ether_addr(ndev->dev_addr)) {
   2051		dev_warn(&pdev->dev, "No MAC address found, using random\n");
   2052		eth_hw_addr_random(ndev);
   2053	}
   2054
   2055	if (dev_of_node(&pdev->dev))
   2056		ret = netsec_of_probe(pdev, priv, &phy_addr);
   2057	else
   2058		ret = netsec_acpi_probe(pdev, priv, &phy_addr);
   2059	if (ret)
   2060		goto free_ndev;
   2061
   2062	priv->phy_addr = phy_addr;
   2063
   2064	if (!priv->freq) {
   2065		dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
   2066		ret = -ENODEV;
   2067		goto free_ndev;
   2068	}
   2069
    2070	/* interrupt coalescing defaults tuned for throughput */
   2071	priv->et_coalesce.rx_coalesce_usecs = 500;
   2072	priv->et_coalesce.rx_max_coalesced_frames = 8;
   2073	priv->et_coalesce.tx_coalesce_usecs = 500;
   2074	priv->et_coalesce.tx_max_coalesced_frames = 8;
   2075
   2076	ret = device_property_read_u32(&pdev->dev, "max-frame-size",
   2077				       &ndev->max_mtu);
   2078	if (ret < 0)
   2079		ndev->max_mtu = ETH_DATA_LEN;
   2080
    2081	/* Runtime PM covers probe only; open/close take their own references */
   2082	pm_runtime_enable(&pdev->dev);
   2083	pm_runtime_get_sync(&pdev->dev);
   2084
   2085	hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
   2086	/* this driver only supports F_TAIKI style NETSEC */
   2087	if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
   2088	    NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
   2089		ret = -ENODEV;
   2090		goto pm_disable;
   2091	}
   2092
   2093	dev_info(&pdev->dev, "hardware revision %d.%d\n",
   2094		 hw_ver >> 16, hw_ver & 0xffff);
   2095
   2096	netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT);
   2097
   2098	ndev->netdev_ops = &netsec_netdev_ops;
   2099	ndev->ethtool_ops = &netsec_ethtool_ops;
   2100
   2101	ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
   2102				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
   2103	ndev->hw_features = ndev->features;
   2104
   2105	priv->rx_cksum_offload_flag = true;
   2106
   2107	ret = netsec_register_mdio(priv, phy_addr);
   2108	if (ret)
   2109		goto unreg_napi;
   2110
   2111	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
   2112		dev_warn(&pdev->dev, "Failed to set DMA mask\n");
   2113
   2114	ret = register_netdev(ndev);
   2115	if (ret) {
   2116		netif_err(priv, probe, ndev, "register_netdev() failed\n");
   2117		goto unreg_mii;
   2118	}
   2119
   2120	pm_runtime_put_sync(&pdev->dev);
   2121	return 0;
   2122
   2123unreg_mii:
   2124	netsec_unregister_mdio(priv);
   2125unreg_napi:
   2126	netif_napi_del(&priv->napi);
   2127pm_disable:
   2128	pm_runtime_put_sync(&pdev->dev);
   2129	pm_runtime_disable(&pdev->dev);
   2130free_ndev:
   2131	free_netdev(ndev);
   2132	dev_err(&pdev->dev, "init failed\n");
   2133
   2134	return ret;
   2135}
   2136
   2137static int netsec_remove(struct platform_device *pdev)
   2138{
   2139	struct netsec_priv *priv = platform_get_drvdata(pdev);
   2140
   2141	unregister_netdev(priv->ndev);
   2142
   2143	netsec_unregister_mdio(priv);
   2144
   2145	netif_napi_del(&priv->napi);
   2146
   2147	pm_runtime_disable(&pdev->dev);
   2148	free_netdev(priv->ndev);
   2149
   2150	return 0;
   2151}
   2152
   2153#ifdef CONFIG_PM
   2154static int netsec_runtime_suspend(struct device *dev)
   2155{
   2156	struct netsec_priv *priv = dev_get_drvdata(dev);
   2157
   2158	netsec_write(priv, NETSEC_REG_CLK_EN, 0);
   2159
   2160	clk_disable_unprepare(priv->clk);
   2161
   2162	return 0;
   2163}
   2164
   2165static int netsec_runtime_resume(struct device *dev)
   2166{
   2167	struct netsec_priv *priv = dev_get_drvdata(dev);
   2168
   2169	clk_prepare_enable(priv->clk);
   2170
   2171	netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
   2172					       NETSEC_CLK_EN_REG_DOM_C |
   2173					       NETSEC_CLK_EN_REG_DOM_G);
   2174	return 0;
   2175}
   2176#endif
   2177
   2178static const struct dev_pm_ops netsec_pm_ops = {
   2179	SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
   2180};
   2181
   2182static const struct of_device_id netsec_dt_ids[] = {
   2183	{ .compatible = "socionext,synquacer-netsec" },
   2184	{ }
   2185};
   2186MODULE_DEVICE_TABLE(of, netsec_dt_ids);
   2187
   2188#ifdef CONFIG_ACPI
   2189static const struct acpi_device_id netsec_acpi_ids[] = {
   2190	{ "SCX0001" },
   2191	{ }
   2192};
   2193MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
   2194#endif
   2195
   2196static struct platform_driver netsec_driver = {
   2197	.probe	= netsec_probe,
   2198	.remove	= netsec_remove,
   2199	.driver = {
   2200		.name = "netsec",
   2201		.pm = &netsec_pm_ops,
   2202		.of_match_table = netsec_dt_ids,
   2203		.acpi_match_table = ACPI_PTR(netsec_acpi_ids),
   2204	},
   2205};
   2206module_platform_driver(netsec_driver);
   2207
   2208MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
   2209MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
   2210MODULE_DESCRIPTION("NETSEC Ethernet driver");
   2211MODULE_LICENSE("GPL");