cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mtk_eth_soc.c (96308B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 *
      4 *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
      5 *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
      6 *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
      7 */
      8
      9#include <linux/of_device.h>
     10#include <linux/of_mdio.h>
     11#include <linux/of_net.h>
     12#include <linux/of_address.h>
     13#include <linux/mfd/syscon.h>
     14#include <linux/regmap.h>
     15#include <linux/clk.h>
     16#include <linux/pm_runtime.h>
     17#include <linux/if_vlan.h>
     18#include <linux/reset.h>
     19#include <linux/tcp.h>
     20#include <linux/interrupt.h>
     21#include <linux/pinctrl/devinfo.h>
     22#include <linux/phylink.h>
     23#include <linux/jhash.h>
     24#include <linux/bitfield.h>
     25#include <net/dsa.h>
     26
     27#include "mtk_eth_soc.h"
     28#include "mtk_wed.h"
     29
     30static int mtk_msg_level = -1;
     31module_param_named(msg_level, mtk_msg_level, int, 0);
     32MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
     33
     34#define MTK_ETHTOOL_STAT(x) { #x, \
     35			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
     36
     37static const struct mtk_reg_map mtk_reg_map = {
     38	.tx_irq_mask		= 0x1a1c,
     39	.tx_irq_status		= 0x1a18,
     40	.pdma = {
     41		.rx_ptr		= 0x0900,
     42		.rx_cnt_cfg	= 0x0904,
     43		.pcrx_ptr	= 0x0908,
     44		.glo_cfg	= 0x0a04,
     45		.rst_idx	= 0x0a08,
     46		.delay_irq	= 0x0a0c,
     47		.irq_status	= 0x0a20,
     48		.irq_mask	= 0x0a28,
     49		.int_grp	= 0x0a50,
     50	},
     51	.qdma = {
     52		.qtx_cfg	= 0x1800,
     53		.rx_ptr		= 0x1900,
     54		.rx_cnt_cfg	= 0x1904,
     55		.qcrx_ptr	= 0x1908,
     56		.glo_cfg	= 0x1a04,
     57		.rst_idx	= 0x1a08,
     58		.delay_irq	= 0x1a0c,
     59		.fc_th		= 0x1a10,
     60		.int_grp	= 0x1a20,
     61		.hred		= 0x1a44,
     62		.ctx_ptr	= 0x1b00,
     63		.dtx_ptr	= 0x1b04,
     64		.crx_ptr	= 0x1b10,
     65		.drx_ptr	= 0x1b14,
     66		.fq_head	= 0x1b20,
     67		.fq_tail	= 0x1b24,
     68		.fq_count	= 0x1b28,
     69		.fq_blen	= 0x1b2c,
     70	},
     71	.gdm1_cnt		= 0x2400,
     72};
     73
     74static const struct mtk_reg_map mt7628_reg_map = {
     75	.tx_irq_mask		= 0x0a28,
     76	.tx_irq_status		= 0x0a20,
     77	.pdma = {
     78		.rx_ptr		= 0x0900,
     79		.rx_cnt_cfg	= 0x0904,
     80		.pcrx_ptr	= 0x0908,
     81		.glo_cfg	= 0x0a04,
     82		.rst_idx	= 0x0a08,
     83		.delay_irq	= 0x0a0c,
     84		.irq_status	= 0x0a20,
     85		.irq_mask	= 0x0a28,
     86		.int_grp	= 0x0a50,
     87	},
     88};
     89
     90static const struct mtk_reg_map mt7986_reg_map = {
     91	.tx_irq_mask		= 0x461c,
     92	.tx_irq_status		= 0x4618,
     93	.pdma = {
     94		.rx_ptr		= 0x6100,
     95		.rx_cnt_cfg	= 0x6104,
     96		.pcrx_ptr	= 0x6108,
     97		.glo_cfg	= 0x6204,
     98		.rst_idx	= 0x6208,
     99		.delay_irq	= 0x620c,
    100		.irq_status	= 0x6220,
    101		.irq_mask	= 0x6228,
    102		.int_grp	= 0x6250,
    103	},
    104	.qdma = {
    105		.qtx_cfg	= 0x4400,
    106		.rx_ptr		= 0x4500,
    107		.rx_cnt_cfg	= 0x4504,
    108		.qcrx_ptr	= 0x4508,
    109		.glo_cfg	= 0x4604,
    110		.rst_idx	= 0x4608,
    111		.delay_irq	= 0x460c,
    112		.fc_th		= 0x4610,
    113		.int_grp	= 0x4620,
    114		.hred		= 0x4644,
    115		.ctx_ptr	= 0x4700,
    116		.dtx_ptr	= 0x4704,
    117		.crx_ptr	= 0x4710,
    118		.drx_ptr	= 0x4714,
    119		.fq_head	= 0x4720,
    120		.fq_tail	= 0x4724,
    121		.fq_count	= 0x4728,
    122		.fq_blen	= 0x472c,
    123	},
    124	.gdm1_cnt		= 0x1c00,
    125};
    126
    127/* strings used by ethtool */
    128static const struct mtk_ethtool_stats {
    129	char str[ETH_GSTRING_LEN];
    130	u32 offset;
    131} mtk_ethtool_stats[] = {
    132	MTK_ETHTOOL_STAT(tx_bytes),
    133	MTK_ETHTOOL_STAT(tx_packets),
    134	MTK_ETHTOOL_STAT(tx_skip),
    135	MTK_ETHTOOL_STAT(tx_collisions),
    136	MTK_ETHTOOL_STAT(rx_bytes),
    137	MTK_ETHTOOL_STAT(rx_packets),
    138	MTK_ETHTOOL_STAT(rx_overflow),
    139	MTK_ETHTOOL_STAT(rx_fcs_errors),
    140	MTK_ETHTOOL_STAT(rx_short_errors),
    141	MTK_ETHTOOL_STAT(rx_long_errors),
    142	MTK_ETHTOOL_STAT(rx_checksum_errors),
    143	MTK_ETHTOOL_STAT(rx_flow_control_packets),
    144};
    145
    146static const char * const mtk_clks_source_name[] = {
    147	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
    148	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
    149	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
    150	"sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
    151};
    152
    153void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
    154{
    155	__raw_writel(val, eth->base + reg);
    156}
    157
    158u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
    159{
    160	return __raw_readl(eth->base + reg);
    161}
    162
    163static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
    164{
    165	u32 val;
    166
    167	val = mtk_r32(eth, reg);
    168	val &= ~mask;
    169	val |= set;
    170	mtk_w32(eth, val, reg);
    171	return reg;
    172}
    173
    174static int mtk_mdio_busy_wait(struct mtk_eth *eth)
    175{
    176	unsigned long t_start = jiffies;
    177
    178	while (1) {
    179		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
    180			return 0;
    181		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
    182			break;
    183		cond_resched();
    184	}
    185
    186	dev_err(eth->dev, "mdio: MDIO timeout\n");
    187	return -ETIMEDOUT;
    188}
    189
    190static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
    191			   u32 write_data)
    192{
    193	int ret;
    194
    195	ret = mtk_mdio_busy_wait(eth);
    196	if (ret < 0)
    197		return ret;
    198
    199	if (phy_reg & MII_ADDR_C45) {
    200		mtk_w32(eth, PHY_IAC_ACCESS |
    201			     PHY_IAC_START_C45 |
    202			     PHY_IAC_CMD_C45_ADDR |
    203			     PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
    204			     PHY_IAC_ADDR(phy_addr) |
    205			     PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
    206			MTK_PHY_IAC);
    207
    208		ret = mtk_mdio_busy_wait(eth);
    209		if (ret < 0)
    210			return ret;
    211
    212		mtk_w32(eth, PHY_IAC_ACCESS |
    213			     PHY_IAC_START_C45 |
    214			     PHY_IAC_CMD_WRITE |
    215			     PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
    216			     PHY_IAC_ADDR(phy_addr) |
    217			     PHY_IAC_DATA(write_data),
    218			MTK_PHY_IAC);
    219	} else {
    220		mtk_w32(eth, PHY_IAC_ACCESS |
    221			     PHY_IAC_START_C22 |
    222			     PHY_IAC_CMD_WRITE |
    223			     PHY_IAC_REG(phy_reg) |
    224			     PHY_IAC_ADDR(phy_addr) |
    225			     PHY_IAC_DATA(write_data),
    226			MTK_PHY_IAC);
    227	}
    228
    229	ret = mtk_mdio_busy_wait(eth);
    230	if (ret < 0)
    231		return ret;
    232
    233	return 0;
    234}
    235
    236static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
    237{
    238	int ret;
    239
    240	ret = mtk_mdio_busy_wait(eth);
    241	if (ret < 0)
    242		return ret;
    243
    244	if (phy_reg & MII_ADDR_C45) {
    245		mtk_w32(eth, PHY_IAC_ACCESS |
    246			     PHY_IAC_START_C45 |
    247			     PHY_IAC_CMD_C45_ADDR |
    248			     PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
    249			     PHY_IAC_ADDR(phy_addr) |
    250			     PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
    251			MTK_PHY_IAC);
    252
    253		ret = mtk_mdio_busy_wait(eth);
    254		if (ret < 0)
    255			return ret;
    256
    257		mtk_w32(eth, PHY_IAC_ACCESS |
    258			     PHY_IAC_START_C45 |
    259			     PHY_IAC_CMD_C45_READ |
    260			     PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
    261			     PHY_IAC_ADDR(phy_addr),
    262			MTK_PHY_IAC);
    263	} else {
    264		mtk_w32(eth, PHY_IAC_ACCESS |
    265			     PHY_IAC_START_C22 |
    266			     PHY_IAC_CMD_C22_READ |
    267			     PHY_IAC_REG(phy_reg) |
    268			     PHY_IAC_ADDR(phy_addr),
    269			MTK_PHY_IAC);
    270	}
    271
    272	ret = mtk_mdio_busy_wait(eth);
    273	if (ret < 0)
    274		return ret;
    275
    276	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
    277}
    278
    279static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
    280			  int phy_reg, u16 val)
    281{
    282	struct mtk_eth *eth = bus->priv;
    283
    284	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
    285}
    286
    287static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
    288{
    289	struct mtk_eth *eth = bus->priv;
    290
    291	return _mtk_mdio_read(eth, phy_addr, phy_reg);
    292}
    293
    294static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
    295				     phy_interface_t interface)
    296{
    297	u32 val;
    298
    299	/* Check DDR memory type.
    300	 * Currently TRGMII mode with DDR2 memory is not supported.
    301	 */
    302	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
    303	if (interface == PHY_INTERFACE_MODE_TRGMII &&
    304	    val & SYSCFG_DRAM_TYPE_DDR2) {
    305		dev_err(eth->dev,
    306			"TRGMII mode with DDR2 memory is not supported!\n");
    307		return -EOPNOTSUPP;
    308	}
    309
    310	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
    311		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
    312
    313	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
    314			   ETHSYS_TRGMII_MT7621_MASK, val);
    315
    316	return 0;
    317}
    318
    319static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
    320				   phy_interface_t interface, int speed)
    321{
    322	u32 val;
    323	int ret;
    324
    325	if (interface == PHY_INTERFACE_MODE_TRGMII) {
    326		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
    327		val = 500000000;
    328		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
    329		if (ret)
    330			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
    331		return;
    332	}
    333
    334	val = (speed == SPEED_1000) ?
    335		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
    336	mtk_w32(eth, val, INTF_MODE);
    337
    338	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
    339			   ETHSYS_TRGMII_CLK_SEL362_5,
    340			   ETHSYS_TRGMII_CLK_SEL362_5);
    341
    342	val = (speed == SPEED_1000) ? 250000000 : 500000000;
    343	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
    344	if (ret)
    345		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
    346
    347	val = (speed == SPEED_1000) ?
    348		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
    349	mtk_w32(eth, val, TRGMII_RCK_CTRL);
    350
    351	val = (speed == SPEED_1000) ?
    352		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
    353	mtk_w32(eth, val, TRGMII_TCK_CTRL);
    354}
    355
    356static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
    357					      phy_interface_t interface)
    358{
    359	struct mtk_mac *mac = container_of(config, struct mtk_mac,
    360					   phylink_config);
    361	struct mtk_eth *eth = mac->hw;
    362	unsigned int sid;
    363
    364	if (interface == PHY_INTERFACE_MODE_SGMII ||
    365	    phy_interface_mode_is_8023z(interface)) {
    366		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
    367		       0 : mac->id;
    368
    369		return mtk_sgmii_select_pcs(eth->sgmii, sid);
    370	}
    371
    372	return NULL;
    373}
    374
    375static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
    376			   const struct phylink_link_state *state)
    377{
    378	struct mtk_mac *mac = container_of(config, struct mtk_mac,
    379					   phylink_config);
    380	struct mtk_eth *eth = mac->hw;
    381	int val, ge_mode, err = 0;
    382	u32 i;
    383
     384	/* MT76x8 has no hardware settings for the MAC */
    385	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
    386	    mac->interface != state->interface) {
    387		/* Setup soc pin functions */
    388		switch (state->interface) {
    389		case PHY_INTERFACE_MODE_TRGMII:
    390			if (mac->id)
    391				goto err_phy;
    392			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
    393					  MTK_GMAC1_TRGMII))
    394				goto err_phy;
    395			fallthrough;
    396		case PHY_INTERFACE_MODE_RGMII_TXID:
    397		case PHY_INTERFACE_MODE_RGMII_RXID:
    398		case PHY_INTERFACE_MODE_RGMII_ID:
    399		case PHY_INTERFACE_MODE_RGMII:
    400		case PHY_INTERFACE_MODE_MII:
    401		case PHY_INTERFACE_MODE_REVMII:
    402		case PHY_INTERFACE_MODE_RMII:
    403			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
    404				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
    405				if (err)
    406					goto init_err;
    407			}
    408			break;
    409		case PHY_INTERFACE_MODE_1000BASEX:
    410		case PHY_INTERFACE_MODE_2500BASEX:
    411		case PHY_INTERFACE_MODE_SGMII:
    412			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
    413				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
    414				if (err)
    415					goto init_err;
    416			}
    417			break;
    418		case PHY_INTERFACE_MODE_GMII:
    419			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
    420				err = mtk_gmac_gephy_path_setup(eth, mac->id);
    421				if (err)
    422					goto init_err;
    423			}
    424			break;
    425		default:
    426			goto err_phy;
    427		}
    428
    429		/* Setup clock for 1st gmac */
    430		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
    431		    !phy_interface_mode_is_8023z(state->interface) &&
    432		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
    433			if (MTK_HAS_CAPS(mac->hw->soc->caps,
    434					 MTK_TRGMII_MT7621_CLK)) {
    435				if (mt7621_gmac0_rgmii_adjust(mac->hw,
    436							      state->interface))
    437					goto err_phy;
    438			} else {
    439				/* FIXME: this is incorrect. Not only does it
    440				 * use state->speed (which is not guaranteed
    441				 * to be correct) but it also makes use of it
    442				 * in a code path that will only be reachable
    443				 * when the PHY interface mode changes, not
    444				 * when the speed changes. Consequently, RGMII
    445				 * is probably broken.
    446				 */
    447				mtk_gmac0_rgmii_adjust(mac->hw,
    448						       state->interface,
    449						       state->speed);
    450
    451				/* mt7623_pad_clk_setup */
    452				for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
    453					mtk_w32(mac->hw,
    454						TD_DM_DRVP(8) | TD_DM_DRVN(8),
    455						TRGMII_TD_ODT(i));
    456
    457				/* Assert/release MT7623 RXC reset */
    458				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
    459					TRGMII_RCK_CTRL);
    460				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
    461			}
    462		}
    463
    464		ge_mode = 0;
    465		switch (state->interface) {
    466		case PHY_INTERFACE_MODE_MII:
    467		case PHY_INTERFACE_MODE_GMII:
    468			ge_mode = 1;
    469			break;
    470		case PHY_INTERFACE_MODE_REVMII:
    471			ge_mode = 2;
    472			break;
    473		case PHY_INTERFACE_MODE_RMII:
    474			if (mac->id)
    475				goto err_phy;
    476			ge_mode = 3;
    477			break;
    478		default:
    479			break;
    480		}
    481
    482		/* put the gmac into the right mode */
    483		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
    484		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
    485		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
    486		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
    487
    488		mac->interface = state->interface;
    489	}
    490
    491	/* SGMII */
    492	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
    493	    phy_interface_mode_is_8023z(state->interface)) {
     494		/* The path from GMAC to SGMII will be enabled once the
     495		 * SGMIISYS setup is done.
     496		 */
    497		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
    498
    499		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
    500				   SYSCFG0_SGMII_MASK,
    501				   ~(u32)SYSCFG0_SGMII_MASK);
    502
    503		/* Save the syscfg0 value for mac_finish */
    504		mac->syscfg0 = val;
    505	} else if (phylink_autoneg_inband(mode)) {
    506		dev_err(eth->dev,
    507			"In-band mode not supported in non SGMII mode!\n");
    508		return;
    509	}
    510
    511	return;
    512
    513err_phy:
    514	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
    515		mac->id, phy_modes(state->interface));
    516	return;
    517
    518init_err:
    519	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
    520		mac->id, phy_modes(state->interface), err);
    521}
    522
    523static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
    524			  phy_interface_t interface)
    525{
    526	struct mtk_mac *mac = container_of(config, struct mtk_mac,
    527					   phylink_config);
    528	struct mtk_eth *eth = mac->hw;
    529	u32 mcr_cur, mcr_new;
    530
    531	/* Enable SGMII */
    532	if (interface == PHY_INTERFACE_MODE_SGMII ||
    533	    phy_interface_mode_is_8023z(interface))
    534		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
    535				   SYSCFG0_SGMII_MASK, mac->syscfg0);
    536
    537	/* Setup gmac */
    538	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
    539	mcr_new = mcr_cur;
    540	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
    541		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
    542
    543	/* Only update control register when needed! */
    544	if (mcr_new != mcr_cur)
    545		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
    546
    547	return 0;
    548}
    549
    550static void mtk_mac_pcs_get_state(struct phylink_config *config,
    551				  struct phylink_link_state *state)
    552{
    553	struct mtk_mac *mac = container_of(config, struct mtk_mac,
    554					   phylink_config);
    555	u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
    556
    557	state->link = (pmsr & MAC_MSR_LINK);
    558	state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
    559
    560	switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
    561	case 0:
    562		state->speed = SPEED_10;
    563		break;
    564	case MAC_MSR_SPEED_100:
    565		state->speed = SPEED_100;
    566		break;
    567	case MAC_MSR_SPEED_1000:
    568		state->speed = SPEED_1000;
    569		break;
    570	default:
    571		state->speed = SPEED_UNKNOWN;
    572		break;
    573	}
    574
    575	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
    576	if (pmsr & MAC_MSR_RX_FC)
    577		state->pause |= MLO_PAUSE_RX;
    578	if (pmsr & MAC_MSR_TX_FC)
    579		state->pause |= MLO_PAUSE_TX;
    580}
    581
    582static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
    583			      phy_interface_t interface)
    584{
    585	struct mtk_mac *mac = container_of(config, struct mtk_mac,
    586					   phylink_config);
    587	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
    588
    589	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
    590	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
    591}
    592
    593static void mtk_mac_link_up(struct phylink_config *config,
    594			    struct phy_device *phy,
    595			    unsigned int mode, phy_interface_t interface,
    596			    int speed, int duplex, bool tx_pause, bool rx_pause)
    597{
    598	struct mtk_mac *mac = container_of(config, struct mtk_mac,
    599					   phylink_config);
    600	u32 mcr;
    601
    602	mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
    603	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
    604		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
    605		 MAC_MCR_FORCE_RX_FC);
    606
    607	/* Configure speed */
    608	switch (speed) {
    609	case SPEED_2500:
    610	case SPEED_1000:
    611		mcr |= MAC_MCR_SPEED_1000;
    612		break;
    613	case SPEED_100:
    614		mcr |= MAC_MCR_SPEED_100;
    615		break;
    616	}
    617
    618	/* Configure duplex */
    619	if (duplex == DUPLEX_FULL)
    620		mcr |= MAC_MCR_FORCE_DPX;
    621
    622	/* Configure pause modes - phylink will avoid these for half duplex */
    623	if (tx_pause)
    624		mcr |= MAC_MCR_FORCE_TX_FC;
    625	if (rx_pause)
    626		mcr |= MAC_MCR_FORCE_RX_FC;
    627
    628	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
    629	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
    630}
    631
    632static const struct phylink_mac_ops mtk_phylink_ops = {
    633	.validate = phylink_generic_validate,
    634	.mac_select_pcs = mtk_mac_select_pcs,
    635	.mac_pcs_get_state = mtk_mac_pcs_get_state,
    636	.mac_config = mtk_mac_config,
    637	.mac_finish = mtk_mac_finish,
    638	.mac_link_down = mtk_mac_link_down,
    639	.mac_link_up = mtk_mac_link_up,
    640};
    641
    642static int mtk_mdio_init(struct mtk_eth *eth)
    643{
    644	struct device_node *mii_np;
    645	int ret;
    646
    647	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
    648	if (!mii_np) {
    649		dev_err(eth->dev, "no %s child node found", "mdio-bus");
    650		return -ENODEV;
    651	}
    652
    653	if (!of_device_is_available(mii_np)) {
    654		ret = -ENODEV;
    655		goto err_put_node;
    656	}
    657
    658	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
    659	if (!eth->mii_bus) {
    660		ret = -ENOMEM;
    661		goto err_put_node;
    662	}
    663
    664	eth->mii_bus->name = "mdio";
    665	eth->mii_bus->read = mtk_mdio_read;
    666	eth->mii_bus->write = mtk_mdio_write;
    667	eth->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
    668	eth->mii_bus->priv = eth;
    669	eth->mii_bus->parent = eth->dev;
    670
    671	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
    672	ret = of_mdiobus_register(eth->mii_bus, mii_np);
    673
    674err_put_node:
    675	of_node_put(mii_np);
    676	return ret;
    677}
    678
    679static void mtk_mdio_cleanup(struct mtk_eth *eth)
    680{
    681	if (!eth->mii_bus)
    682		return;
    683
    684	mdiobus_unregister(eth->mii_bus);
    685}
    686
    687static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
    688{
    689	unsigned long flags;
    690	u32 val;
    691
    692	spin_lock_irqsave(&eth->tx_irq_lock, flags);
    693	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
    694	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
    695	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
    696}
    697
    698static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
    699{
    700	unsigned long flags;
    701	u32 val;
    702
    703	spin_lock_irqsave(&eth->tx_irq_lock, flags);
    704	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
    705	mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
    706	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
    707}
    708
    709static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
    710{
    711	unsigned long flags;
    712	u32 val;
    713
    714	spin_lock_irqsave(&eth->rx_irq_lock, flags);
    715	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
    716	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
    717	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
    718}
    719
    720static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
    721{
    722	unsigned long flags;
    723	u32 val;
    724
    725	spin_lock_irqsave(&eth->rx_irq_lock, flags);
    726	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
    727	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
    728	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
    729}
    730
    731static int mtk_set_mac_address(struct net_device *dev, void *p)
    732{
    733	int ret = eth_mac_addr(dev, p);
    734	struct mtk_mac *mac = netdev_priv(dev);
    735	struct mtk_eth *eth = mac->hw;
    736	const char *macaddr = dev->dev_addr;
    737
    738	if (ret)
    739		return ret;
    740
    741	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
    742		return -EBUSY;
    743
    744	spin_lock_bh(&mac->hw->page_lock);
    745	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
    746		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
    747			MT7628_SDM_MAC_ADRH);
    748		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
    749			(macaddr[4] << 8) | macaddr[5],
    750			MT7628_SDM_MAC_ADRL);
    751	} else {
    752		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
    753			MTK_GDMA_MAC_ADRH(mac->id));
    754		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
    755			(macaddr[4] << 8) | macaddr[5],
    756			MTK_GDMA_MAC_ADRL(mac->id));
    757	}
    758	spin_unlock_bh(&mac->hw->page_lock);
    759
    760	return 0;
    761}
    762
    763void mtk_stats_update_mac(struct mtk_mac *mac)
    764{
    765	struct mtk_hw_stats *hw_stats = mac->hw_stats;
    766	struct mtk_eth *eth = mac->hw;
    767
    768	u64_stats_update_begin(&hw_stats->syncp);
    769
    770	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
    771		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
    772		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
    773		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
    774		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
    775		hw_stats->rx_checksum_errors +=
    776			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
    777	} else {
    778		const struct mtk_reg_map *reg_map = eth->soc->reg_map;
    779		unsigned int offs = hw_stats->reg_offset;
    780		u64 stats;
    781
    782		hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
    783		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
    784		if (stats)
    785			hw_stats->rx_bytes += (stats << 32);
    786		hw_stats->rx_packets +=
    787			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
    788		hw_stats->rx_overflow +=
    789			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
    790		hw_stats->rx_fcs_errors +=
    791			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
    792		hw_stats->rx_short_errors +=
    793			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
    794		hw_stats->rx_long_errors +=
    795			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
    796		hw_stats->rx_checksum_errors +=
    797			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
    798		hw_stats->rx_flow_control_packets +=
    799			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
    800		hw_stats->tx_skip +=
    801			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
    802		hw_stats->tx_collisions +=
    803			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
    804		hw_stats->tx_bytes +=
    805			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
    806		stats =  mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
    807		if (stats)
    808			hw_stats->tx_bytes += (stats << 32);
    809		hw_stats->tx_packets +=
    810			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
    811	}
    812
    813	u64_stats_update_end(&hw_stats->syncp);
    814}
    815
    816static void mtk_stats_update(struct mtk_eth *eth)
    817{
    818	int i;
    819
    820	for (i = 0; i < MTK_MAC_COUNT; i++) {
    821		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
    822			continue;
    823		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
    824			mtk_stats_update_mac(eth->mac[i]);
    825			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
    826		}
    827	}
    828}
    829
    830static void mtk_get_stats64(struct net_device *dev,
    831			    struct rtnl_link_stats64 *storage)
    832{
    833	struct mtk_mac *mac = netdev_priv(dev);
    834	struct mtk_hw_stats *hw_stats = mac->hw_stats;
    835	unsigned int start;
    836
    837	if (netif_running(dev) && netif_device_present(dev)) {
    838		if (spin_trylock_bh(&hw_stats->stats_lock)) {
    839			mtk_stats_update_mac(mac);
    840			spin_unlock_bh(&hw_stats->stats_lock);
    841		}
    842	}
    843
    844	do {
    845		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
    846		storage->rx_packets = hw_stats->rx_packets;
    847		storage->tx_packets = hw_stats->tx_packets;
    848		storage->rx_bytes = hw_stats->rx_bytes;
    849		storage->tx_bytes = hw_stats->tx_bytes;
    850		storage->collisions = hw_stats->tx_collisions;
    851		storage->rx_length_errors = hw_stats->rx_short_errors +
    852			hw_stats->rx_long_errors;
    853		storage->rx_over_errors = hw_stats->rx_overflow;
    854		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
    855		storage->rx_errors = hw_stats->rx_checksum_errors;
    856		storage->tx_aborted_errors = hw_stats->tx_skip;
    857	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
    858
    859	storage->tx_errors = dev->stats.tx_errors;
    860	storage->rx_dropped = dev->stats.rx_dropped;
    861	storage->tx_dropped = dev->stats.tx_dropped;
    862}
    863
    864static inline int mtk_max_frag_size(int mtu)
    865{
    866	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
    867	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
    868		mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
    869
    870	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
    871		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
    872}
    873
    874static inline int mtk_max_buf_size(int frag_size)
    875{
    876	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
    877		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
    878
    879	WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
    880
    881	return buf_size;
    882}
    883
    884static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
    885			    struct mtk_rx_dma_v2 *dma_rxd)
    886{
    887	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
    888	if (!(rxd->rxd2 & RX_DMA_DONE))
    889		return false;
    890
    891	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
    892	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
    893	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
    894	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
    895		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
    896		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
    897	}
    898
    899	return true;
    900}
    901
    902static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
    903{
    904	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
    905	unsigned long data;
    906
    907	data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
    908				get_order(size));
    909
    910	return (void *)data;
    911}
    912
     913/* the qdma core needs scratch memory to be set up */
    914static int mtk_init_fq_dma(struct mtk_eth *eth)
    915{
    916	const struct mtk_soc_data *soc = eth->soc;
    917	dma_addr_t phy_ring_tail;
    918	int cnt = MTK_DMA_SIZE;
    919	dma_addr_t dma_addr;
    920	int i;
    921
    922	eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
    923					       cnt * soc->txrx.txd_size,
    924					       &eth->phy_scratch_ring,
    925					       GFP_KERNEL);
    926	if (unlikely(!eth->scratch_ring))
    927		return -ENOMEM;
    928
    929	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
    930	if (unlikely(!eth->scratch_head))
    931		return -ENOMEM;
    932
    933	dma_addr = dma_map_single(eth->dma_dev,
    934				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
    935				  DMA_FROM_DEVICE);
    936	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
    937		return -ENOMEM;
    938
    939	phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
    940
    941	for (i = 0; i < cnt; i++) {
    942		struct mtk_tx_dma_v2 *txd;
    943
    944		txd = eth->scratch_ring + i * soc->txrx.txd_size;
    945		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
    946		if (i < cnt - 1)
    947			txd->txd2 = eth->phy_scratch_ring +
    948				    (i + 1) * soc->txrx.txd_size;
    949
    950		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
    951		txd->txd4 = 0;
    952		if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
    953			txd->txd5 = 0;
    954			txd->txd6 = 0;
    955			txd->txd7 = 0;
    956			txd->txd8 = 0;
    957		}
    958	}
    959
    960	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
    961	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
    962	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
    963	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
    964
    965	return 0;
    966}
    967
    968static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
    969{
    970	return ring->dma + (desc - ring->phys);
    971}
    972
    973static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
    974					     void *txd, u32 txd_size)
    975{
    976	int idx = (txd - ring->dma) / txd_size;
    977
    978	return &ring->buf[idx];
    979}
    980
    981static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
    982				       struct mtk_tx_dma *dma)
    983{
    984	return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
    985}
    986
    987static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
    988{
    989	return (dma - ring->dma) / txd_size;
    990}
    991
    992static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
    993			 bool napi)
    994{
    995	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
    996		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
    997			dma_unmap_single(eth->dma_dev,
    998					 dma_unmap_addr(tx_buf, dma_addr0),
    999					 dma_unmap_len(tx_buf, dma_len0),
   1000					 DMA_TO_DEVICE);
   1001		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
   1002			dma_unmap_page(eth->dma_dev,
   1003				       dma_unmap_addr(tx_buf, dma_addr0),
   1004				       dma_unmap_len(tx_buf, dma_len0),
   1005				       DMA_TO_DEVICE);
   1006		}
   1007	} else {
   1008		if (dma_unmap_len(tx_buf, dma_len0)) {
   1009			dma_unmap_page(eth->dma_dev,
   1010				       dma_unmap_addr(tx_buf, dma_addr0),
   1011				       dma_unmap_len(tx_buf, dma_len0),
   1012				       DMA_TO_DEVICE);
   1013		}
   1014
   1015		if (dma_unmap_len(tx_buf, dma_len1)) {
   1016			dma_unmap_page(eth->dma_dev,
   1017				       dma_unmap_addr(tx_buf, dma_addr1),
   1018				       dma_unmap_len(tx_buf, dma_len1),
   1019				       DMA_TO_DEVICE);
   1020		}
   1021	}
   1022
   1023	tx_buf->flags = 0;
   1024	if (tx_buf->skb &&
   1025	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
   1026		if (napi)
   1027			napi_consume_skb(tx_buf->skb, napi);
   1028		else
   1029			dev_kfree_skb_any(tx_buf->skb);
   1030	}
   1031	tx_buf->skb = NULL;
   1032}
   1033
   1034static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
   1035			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
   1036			 size_t size, int idx)
   1037{
   1038	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
   1039		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
   1040		dma_unmap_len_set(tx_buf, dma_len0, size);
   1041	} else {
   1042		if (idx & 1) {
   1043			txd->txd3 = mapped_addr;
   1044			txd->txd2 |= TX_DMA_PLEN1(size);
   1045			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
   1046			dma_unmap_len_set(tx_buf, dma_len1, size);
   1047		} else {
   1048			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
   1049			txd->txd1 = mapped_addr;
   1050			txd->txd2 = TX_DMA_PLEN0(size);
   1051			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
   1052			dma_unmap_len_set(tx_buf, dma_len0, size);
   1053		}
   1054	}
   1055}
   1056
   1057static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
   1058				   struct mtk_tx_dma_desc_info *info)
   1059{
   1060	struct mtk_mac *mac = netdev_priv(dev);
   1061	struct mtk_eth *eth = mac->hw;
   1062	struct mtk_tx_dma *desc = txd;
   1063	u32 data;
   1064
   1065	WRITE_ONCE(desc->txd1, info->addr);
   1066
   1067	data = TX_DMA_SWC | TX_DMA_PLEN0(info->size);
   1068	if (info->last)
   1069		data |= TX_DMA_LS0;
   1070	WRITE_ONCE(desc->txd3, data);
   1071
   1072	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
   1073	if (info->first) {
   1074		if (info->gso)
   1075			data |= TX_DMA_TSO;
   1076		/* tx checksum offload */
   1077		if (info->csum)
   1078			data |= TX_DMA_CHKSUM;
   1079		/* vlan header offload */
   1080		if (info->vlan)
   1081			data |= TX_DMA_INS_VLAN | info->vlan_tci;
   1082	}
   1083	WRITE_ONCE(desc->txd4, data);
   1084}
   1085
   1086static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
   1087				   struct mtk_tx_dma_desc_info *info)
   1088{
   1089	struct mtk_mac *mac = netdev_priv(dev);
   1090	struct mtk_tx_dma_v2 *desc = txd;
   1091	struct mtk_eth *eth = mac->hw;
   1092	u32 data;
   1093
   1094	WRITE_ONCE(desc->txd1, info->addr);
   1095
   1096	data = TX_DMA_PLEN0(info->size);
   1097	if (info->last)
   1098		data |= TX_DMA_LS0;
   1099	WRITE_ONCE(desc->txd3, data);
   1100
   1101	if (!info->qid && mac->id)
   1102		info->qid = MTK_QDMA_GMAC2_QID;
   1103
   1104	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
   1105	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
   1106	WRITE_ONCE(desc->txd4, data);
   1107
   1108	data = 0;
   1109	if (info->first) {
   1110		if (info->gso)
   1111			data |= TX_DMA_TSO_V2;
   1112		/* tx checksum offload */
   1113		if (info->csum)
   1114			data |= TX_DMA_CHKSUM_V2;
   1115	}
   1116	WRITE_ONCE(desc->txd5, data);
   1117
   1118	data = 0;
   1119	if (info->first && info->vlan)
   1120		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
   1121	WRITE_ONCE(desc->txd6, data);
   1122
   1123	WRITE_ONCE(desc->txd7, 0);
   1124	WRITE_ONCE(desc->txd8, 0);
   1125}
   1126
   1127static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
   1128				struct mtk_tx_dma_desc_info *info)
   1129{
   1130	struct mtk_mac *mac = netdev_priv(dev);
   1131	struct mtk_eth *eth = mac->hw;
   1132
   1133	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
   1134		mtk_tx_set_dma_desc_v2(dev, txd, info);
   1135	else
   1136		mtk_tx_set_dma_desc_v1(dev, txd, info);
   1137}
   1138
   1139static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
   1140		      int tx_num, struct mtk_tx_ring *ring, bool gso)
   1141{
   1142	struct mtk_tx_dma_desc_info txd_info = {
   1143		.size = skb_headlen(skb),
   1144		.gso = gso,
   1145		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
   1146		.vlan = skb_vlan_tag_present(skb),
   1147		.qid = skb->mark & MTK_QDMA_TX_MASK,
   1148		.vlan_tci = skb_vlan_tag_get(skb),
   1149		.first = true,
   1150		.last = !skb_is_nonlinear(skb),
   1151	};
   1152	struct mtk_mac *mac = netdev_priv(dev);
   1153	struct mtk_eth *eth = mac->hw;
   1154	const struct mtk_soc_data *soc = eth->soc;
   1155	struct mtk_tx_dma *itxd, *txd;
   1156	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
   1157	struct mtk_tx_buf *itx_buf, *tx_buf;
   1158	int i, n_desc = 1;
   1159	int k = 0;
   1160
   1161	itxd = ring->next_free;
   1162	itxd_pdma = qdma_to_pdma(ring, itxd);
   1163	if (itxd == ring->last_free)
   1164		return -ENOMEM;
   1165
   1166	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
   1167	memset(itx_buf, 0, sizeof(*itx_buf));
   1168
   1169	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
   1170				       DMA_TO_DEVICE);
   1171	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
   1172		return -ENOMEM;
   1173
   1174	mtk_tx_set_dma_desc(dev, itxd, &txd_info);
   1175
   1176	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
   1177	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
   1178			  MTK_TX_FLAGS_FPORT1;
   1179	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
   1180		     k++);
   1181
   1182	/* TX SG offload */
   1183	txd = itxd;
   1184	txd_pdma = qdma_to_pdma(ring, txd);
   1185
   1186	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
   1187		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
   1188		unsigned int offset = 0;
   1189		int frag_size = skb_frag_size(frag);
   1190
   1191		while (frag_size) {
   1192			bool new_desc = true;
   1193
   1194			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
   1195			    (i & 0x1)) {
   1196				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
   1197				txd_pdma = qdma_to_pdma(ring, txd);
   1198				if (txd == ring->last_free)
   1199					goto err_dma;
   1200
   1201				n_desc++;
   1202			} else {
   1203				new_desc = false;
   1204			}
   1205
   1206			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
   1207			txd_info.size = min_t(unsigned int, frag_size,
   1208					      soc->txrx.dma_max_len);
   1209			txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
   1210			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
   1211					!(frag_size - txd_info.size);
   1212			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
   1213							 offset, txd_info.size,
   1214							 DMA_TO_DEVICE);
   1215			if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
   1216				goto err_dma;
   1217
   1218			mtk_tx_set_dma_desc(dev, txd, &txd_info);
   1219
   1220			tx_buf = mtk_desc_to_tx_buf(ring, txd,
   1221						    soc->txrx.txd_size);
   1222			if (new_desc)
   1223				memset(tx_buf, 0, sizeof(*tx_buf));
   1224			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
   1225			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
   1226			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
   1227					 MTK_TX_FLAGS_FPORT1;
   1228
   1229			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
   1230				     txd_info.size, k++);
   1231
   1232			frag_size -= txd_info.size;
   1233			offset += txd_info.size;
   1234		}
   1235	}
   1236
    1237	/* store skb for cleanup */
   1238	itx_buf->skb = skb;
   1239
   1240	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
   1241		if (k & 0x1)
   1242			txd_pdma->txd2 |= TX_DMA_LS0;
   1243		else
   1244			txd_pdma->txd2 |= TX_DMA_LS1;
   1245	}
   1246
   1247	netdev_sent_queue(dev, skb->len);
   1248	skb_tx_timestamp(skb);
   1249
   1250	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
   1251	atomic_sub(n_desc, &ring->free_count);
   1252
   1253	/* make sure that all changes to the dma ring are flushed before we
   1254	 * continue
   1255	 */
   1256	wmb();
   1257
   1258	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
   1259		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
   1260		    !netdev_xmit_more())
   1261			mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
   1262	} else {
   1263		int next_idx;
   1264
   1265		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
   1266					 ring->dma_size);
   1267		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
   1268	}
   1269
   1270	return 0;
   1271
   1272err_dma:
   1273	do {
   1274		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
   1275
   1276		/* unmap dma */
   1277		mtk_tx_unmap(eth, tx_buf, false);
   1278
   1279		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
   1280		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
   1281			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
   1282
   1283		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
   1284		itxd_pdma = qdma_to_pdma(ring, itxd);
   1285	} while (itxd != txd);
   1286
   1287	return -ENOMEM;
   1288}
   1289
   1290static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
   1291{
   1292	int i, nfrags = 1;
   1293	skb_frag_t *frag;
   1294
   1295	if (skb_is_gso(skb)) {
   1296		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
   1297			frag = &skb_shinfo(skb)->frags[i];
   1298			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
   1299					       eth->soc->txrx.dma_max_len);
   1300		}
   1301	} else {
   1302		nfrags += skb_shinfo(skb)->nr_frags;
   1303	}
   1304
   1305	return nfrags;
   1306}
   1307
   1308static int mtk_queue_stopped(struct mtk_eth *eth)
   1309{
   1310	int i;
   1311
   1312	for (i = 0; i < MTK_MAC_COUNT; i++) {
   1313		if (!eth->netdev[i])
   1314			continue;
   1315		if (netif_queue_stopped(eth->netdev[i]))
   1316			return 1;
   1317	}
   1318
   1319	return 0;
   1320}
   1321
   1322static void mtk_wake_queue(struct mtk_eth *eth)
   1323{
   1324	int i;
   1325
   1326	for (i = 0; i < MTK_MAC_COUNT; i++) {
   1327		if (!eth->netdev[i])
   1328			continue;
   1329		netif_wake_queue(eth->netdev[i]);
   1330	}
   1331}
   1332
   1333static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
   1334{
   1335	struct mtk_mac *mac = netdev_priv(dev);
   1336	struct mtk_eth *eth = mac->hw;
   1337	struct mtk_tx_ring *ring = &eth->tx_ring;
   1338	struct net_device_stats *stats = &dev->stats;
   1339	bool gso = false;
   1340	int tx_num;
   1341
    1342	/* normally we can rely on the stack not calling this more than once;
    1343	 * however, we have 2 queues running on the same ring, so we need to
    1344	 * lock the ring access
   1345	 */
   1346	spin_lock(&eth->page_lock);
   1347
   1348	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
   1349		goto drop;
   1350
   1351	tx_num = mtk_cal_txd_req(eth, skb);
   1352	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
   1353		netif_stop_queue(dev);
   1354		netif_err(eth, tx_queued, dev,
   1355			  "Tx Ring full when queue awake!\n");
   1356		spin_unlock(&eth->page_lock);
   1357		return NETDEV_TX_BUSY;
   1358	}
   1359
   1360	/* TSO: fill MSS info in tcp checksum field */
   1361	if (skb_is_gso(skb)) {
   1362		if (skb_cow_head(skb, 0)) {
   1363			netif_warn(eth, tx_err, dev,
   1364				   "GSO expand head fail.\n");
   1365			goto drop;
   1366		}
   1367
   1368		if (skb_shinfo(skb)->gso_type &
   1369				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
   1370			gso = true;
   1371			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
   1372		}
   1373	}
   1374
   1375	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
   1376		goto drop;
   1377
   1378	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
   1379		netif_stop_queue(dev);
   1380
   1381	spin_unlock(&eth->page_lock);
   1382
   1383	return NETDEV_TX_OK;
   1384
   1385drop:
   1386	spin_unlock(&eth->page_lock);
   1387	stats->tx_dropped++;
   1388	dev_kfree_skb_any(skb);
   1389	return NETDEV_TX_OK;
   1390}
   1391
   1392static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
   1393{
   1394	int i;
   1395	struct mtk_rx_ring *ring;
   1396	int idx;
   1397
   1398	if (!eth->hwlro)
   1399		return &eth->rx_ring[0];
   1400
   1401	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
   1402		struct mtk_rx_dma *rxd;
   1403
   1404		ring = &eth->rx_ring[i];
   1405		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
   1406		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
   1407		if (rxd->rxd2 & RX_DMA_DONE) {
   1408			ring->calc_idx_update = true;
   1409			return ring;
   1410		}
   1411	}
   1412
   1413	return NULL;
   1414}
   1415
   1416static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
   1417{
   1418	struct mtk_rx_ring *ring;
   1419	int i;
   1420
   1421	if (!eth->hwlro) {
   1422		ring = &eth->rx_ring[0];
   1423		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
   1424	} else {
   1425		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
   1426			ring = &eth->rx_ring[i];
   1427			if (ring->calc_idx_update) {
   1428				ring->calc_idx_update = false;
   1429				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
   1430			}
   1431		}
   1432	}
   1433}
   1434
   1435static int mtk_poll_rx(struct napi_struct *napi, int budget,
   1436		       struct mtk_eth *eth)
   1437{
   1438	struct dim_sample dim_sample = {};
   1439	struct mtk_rx_ring *ring;
   1440	int idx;
   1441	struct sk_buff *skb;
   1442	u8 *data, *new_data;
   1443	struct mtk_rx_dma_v2 *rxd, trxd;
   1444	int done = 0, bytes = 0;
   1445
   1446	while (done < budget) {
   1447		struct net_device *netdev;
   1448		unsigned int pktlen;
   1449		dma_addr_t dma_addr;
   1450		u32 hash, reason;
   1451		int mac = 0;
   1452
   1453		ring = mtk_get_rx_ring(eth);
   1454		if (unlikely(!ring))
   1455			goto rx_done;
   1456
   1457		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
   1458		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
   1459		data = ring->data[idx];
   1460
   1461		if (!mtk_rx_get_desc(eth, &trxd, rxd))
   1462			break;
   1463
    1464		/* find out which mac the packet comes from. values start at 1 */
   1465		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
   1466			mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
   1467		else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
   1468			 !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
   1469			mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
   1470
   1471		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
   1472			     !eth->netdev[mac]))
   1473			goto release_desc;
   1474
   1475		netdev = eth->netdev[mac];
   1476
   1477		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
   1478			goto release_desc;
   1479
   1480		/* alloc new buffer */
   1481		if (ring->frag_size <= PAGE_SIZE)
   1482			new_data = napi_alloc_frag(ring->frag_size);
   1483		else
   1484			new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
   1485		if (unlikely(!new_data)) {
   1486			netdev->stats.rx_dropped++;
   1487			goto release_desc;
   1488		}
   1489		dma_addr = dma_map_single(eth->dma_dev,
   1490					  new_data + NET_SKB_PAD +
   1491					  eth->ip_align,
   1492					  ring->buf_size,
   1493					  DMA_FROM_DEVICE);
   1494		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
   1495			skb_free_frag(new_data);
   1496			netdev->stats.rx_dropped++;
   1497			goto release_desc;
   1498		}
   1499
   1500		dma_unmap_single(eth->dma_dev, trxd.rxd1,
   1501				 ring->buf_size, DMA_FROM_DEVICE);
   1502
   1503		/* receive data */
   1504		skb = build_skb(data, ring->frag_size);
   1505		if (unlikely(!skb)) {
   1506			skb_free_frag(data);
   1507			netdev->stats.rx_dropped++;
   1508			goto skip_rx;
   1509		}
   1510		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
   1511
   1512		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
   1513		skb->dev = netdev;
   1514		skb_put(skb, pktlen);
   1515		if (trxd.rxd4 & eth->soc->txrx.rx_dma_l4_valid)
   1516			skb->ip_summed = CHECKSUM_UNNECESSARY;
   1517		else
   1518			skb_checksum_none_assert(skb);
   1519		skb->protocol = eth_type_trans(skb, netdev);
   1520		bytes += pktlen;
   1521
   1522		hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
   1523		if (hash != MTK_RXD4_FOE_ENTRY) {
   1524			hash = jhash_1word(hash, 0);
   1525			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
   1526		}
   1527
   1528		reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
   1529		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
   1530			mtk_ppe_check_skb(eth->ppe, skb,
   1531					  trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
   1532
   1533		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
   1534			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
   1535				if (trxd.rxd3 & RX_DMA_VTAG_V2)
   1536					__vlan_hwaccel_put_tag(skb,
   1537						htons(RX_DMA_VPID(trxd.rxd4)),
   1538						RX_DMA_VID(trxd.rxd4));
   1539			} else if (trxd.rxd2 & RX_DMA_VTAG) {
   1540				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
   1541						       RX_DMA_VID(trxd.rxd3));
   1542			}
   1543
    1544			/* If the device is attached to a dsa switch, the special
    1545			 * tag inserted in the VLAN field by the hw switch can be
    1546			 * offloaded by RX HW VLAN offload. Clear vlan info.
    1547			 */
   1548			if (netdev_uses_dsa(netdev))
   1549				__vlan_hwaccel_clear_tag(skb);
   1550		}
   1551
   1552		skb_record_rx_queue(skb, 0);
   1553		napi_gro_receive(napi, skb);
   1554
   1555skip_rx:
   1556		ring->data[idx] = new_data;
   1557		rxd->rxd1 = (unsigned int)dma_addr;
   1558
   1559release_desc:
   1560		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
   1561			rxd->rxd2 = RX_DMA_LSO;
   1562		else
   1563			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
   1564
   1565		ring->calc_idx = idx;
   1566
   1567		done++;
   1568	}
   1569
   1570rx_done:
   1571	if (done) {
   1572		/* make sure that all changes to the dma ring are flushed before
   1573		 * we continue
   1574		 */
   1575		wmb();
   1576		mtk_update_rx_cpu_idx(eth);
   1577	}
   1578
   1579	eth->rx_packets += done;
   1580	eth->rx_bytes += bytes;
   1581	dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
   1582			  &dim_sample);
   1583	net_dim(&eth->rx_dim, dim_sample);
   1584
   1585	return done;
   1586}
   1587
   1588static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
   1589			    unsigned int *done, unsigned int *bytes)
   1590{
   1591	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
   1592	struct mtk_tx_ring *ring = &eth->tx_ring;
   1593	struct mtk_tx_dma *desc;
   1594	struct sk_buff *skb;
   1595	struct mtk_tx_buf *tx_buf;
   1596	u32 cpu, dma;
   1597
   1598	cpu = ring->last_free_ptr;
   1599	dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
   1600
   1601	desc = mtk_qdma_phys_to_virt(ring, cpu);
   1602
   1603	while ((cpu != dma) && budget) {
   1604		u32 next_cpu = desc->txd2;
   1605		int mac = 0;
   1606
   1607		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
   1608		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
   1609			break;
   1610
   1611		tx_buf = mtk_desc_to_tx_buf(ring, desc,
   1612					    eth->soc->txrx.txd_size);
   1613		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
   1614			mac = 1;
   1615
   1616		skb = tx_buf->skb;
   1617		if (!skb)
   1618			break;
   1619
   1620		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
   1621			bytes[mac] += skb->len;
   1622			done[mac]++;
   1623			budget--;
   1624		}
   1625		mtk_tx_unmap(eth, tx_buf, true);
   1626
   1627		ring->last_free = desc;
   1628		atomic_inc(&ring->free_count);
   1629
   1630		cpu = next_cpu;
   1631	}
   1632
   1633	ring->last_free_ptr = cpu;
   1634	mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
   1635
   1636	return budget;
   1637}
   1638
   1639static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
   1640			    unsigned int *done, unsigned int *bytes)
   1641{
   1642	struct mtk_tx_ring *ring = &eth->tx_ring;
   1643	struct mtk_tx_dma *desc;
   1644	struct sk_buff *skb;
   1645	struct mtk_tx_buf *tx_buf;
   1646	u32 cpu, dma;
   1647
   1648	cpu = ring->cpu_idx;
   1649	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
   1650
   1651	while ((cpu != dma) && budget) {
   1652		tx_buf = &ring->buf[cpu];
   1653		skb = tx_buf->skb;
   1654		if (!skb)
   1655			break;
   1656
   1657		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
   1658			bytes[0] += skb->len;
   1659			done[0]++;
   1660			budget--;
   1661		}
   1662
   1663		mtk_tx_unmap(eth, tx_buf, true);
   1664
   1665		desc = ring->dma + cpu * eth->soc->txrx.txd_size;
   1666		ring->last_free = desc;
   1667		atomic_inc(&ring->free_count);
   1668
   1669		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
   1670	}
   1671
   1672	ring->cpu_idx = cpu;
   1673
   1674	return budget;
   1675}
   1676
   1677static int mtk_poll_tx(struct mtk_eth *eth, int budget)
   1678{
   1679	struct mtk_tx_ring *ring = &eth->tx_ring;
   1680	struct dim_sample dim_sample = {};
   1681	unsigned int done[MTK_MAX_DEVS];
   1682	unsigned int bytes[MTK_MAX_DEVS];
   1683	int total = 0, i;
   1684
   1685	memset(done, 0, sizeof(done));
   1686	memset(bytes, 0, sizeof(bytes));
   1687
   1688	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
   1689		budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
   1690	else
   1691		budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
   1692
   1693	for (i = 0; i < MTK_MAC_COUNT; i++) {
   1694		if (!eth->netdev[i] || !done[i])
   1695			continue;
   1696		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
   1697		total += done[i];
   1698		eth->tx_packets += done[i];
   1699		eth->tx_bytes += bytes[i];
   1700	}
   1701
   1702	dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
   1703			  &dim_sample);
   1704	net_dim(&eth->tx_dim, dim_sample);
   1705
   1706	if (mtk_queue_stopped(eth) &&
   1707	    (atomic_read(&ring->free_count) > ring->thresh))
   1708		mtk_wake_queue(eth);
   1709
   1710	return total;
   1711}
   1712
   1713static void mtk_handle_status_irq(struct mtk_eth *eth)
   1714{
   1715	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
   1716
   1717	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
   1718		mtk_stats_update(eth);
   1719		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
   1720			MTK_INT_STATUS2);
   1721	}
   1722}
   1723
   1724static int mtk_napi_tx(struct napi_struct *napi, int budget)
   1725{
   1726	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
   1727	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
   1728	int tx_done = 0;
   1729
   1730	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
   1731		mtk_handle_status_irq(eth);
   1732	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
   1733	tx_done = mtk_poll_tx(eth, budget);
   1734
   1735	if (unlikely(netif_msg_intr(eth))) {
   1736		dev_info(eth->dev,
   1737			 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
   1738			 mtk_r32(eth, reg_map->tx_irq_status),
   1739			 mtk_r32(eth, reg_map->tx_irq_mask));
   1740	}
   1741
   1742	if (tx_done == budget)
   1743		return budget;
   1744
   1745	if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
   1746		return budget;
   1747
   1748	if (napi_complete_done(napi, tx_done))
   1749		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
   1750
   1751	return tx_done;
   1752}
   1753
   1754static int mtk_napi_rx(struct napi_struct *napi, int budget)
   1755{
   1756	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
   1757	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
   1758	int rx_done_total = 0;
   1759
   1760	mtk_handle_status_irq(eth);
   1761
   1762	do {
   1763		int rx_done;
   1764
   1765		mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
   1766			reg_map->pdma.irq_status);
   1767		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
   1768		rx_done_total += rx_done;
   1769
   1770		if (unlikely(netif_msg_intr(eth))) {
   1771			dev_info(eth->dev,
   1772				 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
   1773				 mtk_r32(eth, reg_map->pdma.irq_status),
   1774				 mtk_r32(eth, reg_map->pdma.irq_mask));
   1775		}
   1776
   1777		if (rx_done_total == budget)
   1778			return budget;
   1779
   1780	} while (mtk_r32(eth, reg_map->pdma.irq_status) &
   1781		 eth->soc->txrx.rx_irq_done_mask);
   1782
   1783	if (napi_complete_done(napi, rx_done_total))
   1784		mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
   1785
   1786	return rx_done_total;
   1787}
   1788
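       /* Allocate and initialise the TX descriptor ring: descriptors are
        * linked into a ring through their txd2 next pointers; on PDMA-only
        * SoCs a second, hardware-visible ring (dma_pdma) is allocated
        * alongside. Finally the ring base and index registers are programmed.
        */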
   1789static int mtk_tx_alloc(struct mtk_eth *eth)
   1790{
   1791	const struct mtk_soc_data *soc = eth->soc;
   1792	struct mtk_tx_ring *ring = &eth->tx_ring;
   1793	int i, sz = soc->txrx.txd_size;
   1794	struct mtk_tx_dma_v2 *txd;
   1795
   1796	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
   1797			       GFP_KERNEL);
   1798	if (!ring->buf)
   1799		goto no_tx_mem;
   1800
   1801	ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
   1802				       &ring->phys, GFP_KERNEL);
   1803	if (!ring->dma)
   1804		goto no_tx_mem;
   1805
   1806	for (i = 0; i < MTK_DMA_SIZE; i++) {
   1807		int next = (i + 1) % MTK_DMA_SIZE;
   1808		u32 next_ptr = ring->phys + next * sz;
   1809
   1810		txd = ring->dma + i * sz;
   1811		txd->txd2 = next_ptr;
   1812		txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
   1813		txd->txd4 = 0;
   1814		if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
   1815			txd->txd5 = 0;
   1816			txd->txd6 = 0;
   1817			txd->txd7 = 0;
   1818			txd->txd8 = 0;
   1819		}
   1820	}
   1821
   1822	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
   1823	 * only as a software bookkeeping framework; the real HW descriptors
   1824	 * are the PDMA descriptors in ring->dma_pdma.
   1825	 */
   1826	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
   1827		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
   1828						    &ring->phys_pdma, GFP_KERNEL);
   1829		if (!ring->dma_pdma)
   1830			goto no_tx_mem;
   1831
   1832		for (i = 0; i < MTK_DMA_SIZE; i++) {
   1833			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
   1834			ring->dma_pdma[i].txd4 = 0;
   1835		}
   1836	}
   1837
   1838	ring->dma_size = MTK_DMA_SIZE;
   1839	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
   1840	ring->next_free = ring->dma;
   1841	ring->last_free = (void *)txd;
   1842	ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
   1843	ring->thresh = MAX_SKB_FRAGS;
   1844
   1845	/* make sure that all changes to the dma ring are flushed before we
   1846	 * continue
   1847	 */
   1848	wmb();
   1849
   1850	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
   1851		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
   1852		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
   1853		mtk_w32(eth,
   1854			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
   1855			soc->reg_map->qdma.crx_ptr);
   1856		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
   1857		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
   1858			soc->reg_map->qdma.qtx_cfg);
   1859	} else {
   1860		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
   1861		mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
   1862		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
   1863		mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
   1864	}
   1865
   1866	return 0;
   1867
   1868no_tx_mem:
   1869	return -ENOMEM;
   1870}
   1871
   1872static void mtk_tx_clean(struct mtk_eth *eth)
   1873{
   1874	const struct mtk_soc_data *soc = eth->soc;
   1875	struct mtk_tx_ring *ring = &eth->tx_ring;
   1876	int i;
   1877
   1878	if (ring->buf) {
   1879		for (i = 0; i < MTK_DMA_SIZE; i++)
   1880			mtk_tx_unmap(eth, &ring->buf[i], false);
   1881		kfree(ring->buf);
   1882		ring->buf = NULL;
   1883	}
   1884
   1885	if (ring->dma) {
   1886		dma_free_coherent(eth->dma_dev,
   1887				  MTK_DMA_SIZE * soc->txrx.txd_size,
   1888				  ring->dma, ring->phys);
   1889		ring->dma = NULL;
   1890	}
   1891
   1892	if (ring->dma_pdma) {
   1893		dma_free_coherent(eth->dma_dev,
   1894				  MTK_DMA_SIZE * soc->txrx.txd_size,
   1895				  ring->dma_pdma, ring->phys_pdma);
   1896		ring->dma_pdma = NULL;
   1897	}
   1898}
   1899
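       /* Allocate an RX ring (normal, HW-LRO or QDMA flavour): allocate and
        * DMA-map one receive buffer per descriptor, initialise the
        * descriptors and program the ring base, size and CPU index registers.
        */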
   1900static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
   1901{
   1902	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
   1903	struct mtk_rx_ring *ring;
   1904	int rx_data_len, rx_dma_size;
   1905	int i;
   1906
   1907	if (rx_flag == MTK_RX_FLAGS_QDMA) {
   1908		if (ring_no)
   1909			return -EINVAL;
   1910		ring = &eth->rx_ring_qdma;
   1911	} else {
   1912		ring = &eth->rx_ring[ring_no];
   1913	}
   1914
   1915	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
   1916		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
   1917		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
   1918	} else {
   1919		rx_data_len = ETH_DATA_LEN;
   1920		rx_dma_size = MTK_DMA_SIZE;
   1921	}
   1922
   1923	ring->frag_size = mtk_max_frag_size(rx_data_len);
   1924	ring->buf_size = mtk_max_buf_size(ring->frag_size);
   1925	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
   1926			     GFP_KERNEL);
   1927	if (!ring->data)
   1928		return -ENOMEM;
   1929
   1930	for (i = 0; i < rx_dma_size; i++) {
   1931		if (ring->frag_size <= PAGE_SIZE)
   1932			ring->data[i] = netdev_alloc_frag(ring->frag_size);
   1933		else
   1934			ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
   1935		if (!ring->data[i])
   1936			return -ENOMEM;
   1937	}
   1938
   1939	ring->dma = dma_alloc_coherent(eth->dma_dev,
   1940				       rx_dma_size * eth->soc->txrx.rxd_size,
   1941				       &ring->phys, GFP_KERNEL);
   1942	if (!ring->dma)
   1943		return -ENOMEM;
   1944
   1945	for (i = 0; i < rx_dma_size; i++) {
   1946		struct mtk_rx_dma_v2 *rxd;
   1947
   1948		dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
   1949				ring->data[i] + NET_SKB_PAD + eth->ip_align,
   1950				ring->buf_size,
   1951				DMA_FROM_DEVICE);
   1952		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
   1953			return -ENOMEM;
   1954
   1955		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
   1956		rxd->rxd1 = (unsigned int)dma_addr;
   1957
   1958		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
   1959			rxd->rxd2 = RX_DMA_LSO;
   1960		else
   1961			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
   1962
   1963		rxd->rxd3 = 0;
   1964		rxd->rxd4 = 0;
   1965		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
   1966			rxd->rxd5 = 0;
   1967			rxd->rxd6 = 0;
   1968			rxd->rxd7 = 0;
   1969			rxd->rxd8 = 0;
   1970		}
   1971	}
   1972	ring->dma_size = rx_dma_size;
   1973	ring->calc_idx_update = false;
   1974	ring->calc_idx = rx_dma_size - 1;
   1975	if (rx_flag == MTK_RX_FLAGS_QDMA)
   1976		ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
   1977				    ring_no * MTK_QRX_OFFSET;
   1978	else
   1979		ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
   1980				    ring_no * MTK_QRX_OFFSET;
   1981	/* make sure that all changes to the dma ring are flushed before we
   1982	 * continue
   1983	 */
   1984	wmb();
   1985
   1986	if (rx_flag == MTK_RX_FLAGS_QDMA) {
   1987		mtk_w32(eth, ring->phys,
   1988			reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
   1989		mtk_w32(eth, rx_dma_size,
   1990			reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
   1991		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
   1992			reg_map->qdma.rst_idx);
   1993	} else {
   1994		mtk_w32(eth, ring->phys,
   1995			reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
   1996		mtk_w32(eth, rx_dma_size,
   1997			reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
   1998		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
   1999			reg_map->pdma.rst_idx);
   2000	}
   2001	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
   2002
   2003	return 0;
   2004}
   2005
   2006static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
   2007{
   2008	int i;
   2009
   2010	if (ring->data && ring->dma) {
   2011		for (i = 0; i < ring->dma_size; i++) {
   2012			struct mtk_rx_dma *rxd;
   2013
   2014			if (!ring->data[i])
   2015				continue;
   2016
   2017			rxd = ring->dma + i * eth->soc->txrx.rxd_size;
   2018			if (!rxd->rxd1)
   2019				continue;
   2020
   2021			dma_unmap_single(eth->dma_dev, rxd->rxd1,
   2022					 ring->buf_size, DMA_FROM_DEVICE);
   2023			skb_free_frag(ring->data[i]);
   2024		}
   2025		kfree(ring->data);
   2026		ring->data = NULL;
   2027	}
   2028
   2029	if (ring->dma) {
   2030		dma_free_coherent(eth->dma_dev,
   2031				  ring->dma_size * eth->soc->txrx.rxd_size,
   2032				  ring->dma, ring->phys);
   2033		ring->dma = NULL;
   2034	}
   2035}
   2036
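       /* Configure the HW LRO engine: put every RX ring except ring 0 into
        * auto-learn mode, set the aggregation timers, counters and
        * thresholds, then enable LRO in MTK_PDMA_LRO_CTRL_DW0.
        */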
   2037static int mtk_hwlro_rx_init(struct mtk_eth *eth)
   2038{
   2039	int i;
   2040	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
   2041	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
   2042
   2043	/* set LRO rings to auto-learn modes */
   2044	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
   2045
   2046	/* validate LRO ring */
   2047	ring_ctrl_dw2 |= MTK_RING_VLD;
   2048
   2049	/* set AGE timer (unit: 20us) */
   2050	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
   2051	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
   2052
   2053	/* set max AGG timer (unit: 20us) */
   2054	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
   2055
   2056	/* set max LRO AGG count */
   2057	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
   2058	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
   2059
   2060	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
   2061		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
   2062		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
   2063		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
   2064	}
   2065
   2066	/* IPv4 checksum update enable */
   2067	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
   2068
   2069	/* switch priority comparison to packet count mode */
   2070	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
   2071
   2072	/* bandwidth threshold setting */
   2073	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
   2074
   2075	/* auto-learn score delta setting */
   2076	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
   2077
   2078	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
   2079	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
   2080		MTK_PDMA_LRO_ALT_REFRESH_TIMER);
   2081
   2082	/* set HW LRO mode & the max aggregation count for rx packets */
   2083	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
   2084
   2085	/* the minimum remaining room of SDL0 in the RXD for LRO aggregation */
   2086	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
   2087
   2088	/* enable HW LRO */
   2089	lro_ctrl_dw0 |= MTK_LRO_EN;
   2090
   2091	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
   2092	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
   2093
   2094	return 0;
   2095}
   2096
   2097static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
   2098{
   2099	int i;
   2100	u32 val;
   2101
   2102	/* relinquish lro rings, flush aggregated packets */
   2103	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
   2104
   2105	/* wait for the relinquishment to complete */
   2106	for (i = 0; i < 10; i++) {
   2107		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
   2108		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
   2109			msleep(20);
   2110			continue;
   2111		}
   2112		break;
   2113	}
   2114
   2115	/* invalidate lro rings */
   2116	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
   2117		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
   2118
   2119	/* disable HW LRO */
   2120	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
   2121}
   2122
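       /* Program a destination IPv4 address into an LRO ring: clear
        * MTK_RING_MYIP_VLD while the DIP register is updated, then
        * re-validate the ring.
        */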
   2123static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
   2124{
   2125	u32 reg_val;
   2126
   2127	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
   2128
   2129	/* invalidate the IP setting */
   2130	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
   2131
   2132	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
   2133
   2134	/* validate the IP setting */
   2135	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
   2136}
   2137
   2138static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
   2139{
   2140	u32 reg_val;
   2141
   2142	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
   2143
   2144	/* invalidate the IP setting */
   2145	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
   2146
   2147	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
   2148}
   2149
   2150static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
   2151{
   2152	int cnt = 0;
   2153	int i;
   2154
   2155	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
   2156		if (mac->hwlro_ip[i])
   2157			cnt++;
   2158	}
   2159
   2160	return cnt;
   2161}
   2162
   2163static int mtk_hwlro_add_ipaddr(struct net_device *dev,
   2164				struct ethtool_rxnfc *cmd)
   2165{
   2166	struct ethtool_rx_flow_spec *fsp =
   2167		(struct ethtool_rx_flow_spec *)&cmd->fs;
   2168	struct mtk_mac *mac = netdev_priv(dev);
   2169	struct mtk_eth *eth = mac->hw;
   2170	int hwlro_idx;
   2171
   2172	if ((fsp->flow_type != TCP_V4_FLOW) ||
   2173	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
   2174	    (fsp->location > 1))
   2175		return -EINVAL;
   2176
   2177	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
   2178	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
   2179
   2180	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
   2181
   2182	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
   2183
   2184	return 0;
   2185}
   2186
   2187static int mtk_hwlro_del_ipaddr(struct net_device *dev,
   2188				struct ethtool_rxnfc *cmd)
   2189{
   2190	struct ethtool_rx_flow_spec *fsp =
   2191		(struct ethtool_rx_flow_spec *)&cmd->fs;
   2192	struct mtk_mac *mac = netdev_priv(dev);
   2193	struct mtk_eth *eth = mac->hw;
   2194	int hwlro_idx;
   2195
   2196	if (fsp->location > 1)
   2197		return -EINVAL;
   2198
   2199	mac->hwlro_ip[fsp->location] = 0;
   2200	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
   2201
   2202	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
   2203
   2204	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
   2205
   2206	return 0;
   2207}
   2208
   2209static void mtk_hwlro_netdev_disable(struct net_device *dev)
   2210{
   2211	struct mtk_mac *mac = netdev_priv(dev);
   2212	struct mtk_eth *eth = mac->hw;
   2213	int i, hwlro_idx;
   2214
   2215	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
   2216		mac->hwlro_ip[i] = 0;
   2217		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
   2218
   2219		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
   2220	}
   2221
   2222	mac->hwlro_ip_cnt = 0;
   2223}
   2224
   2225static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
   2226				    struct ethtool_rxnfc *cmd)
   2227{
   2228	struct mtk_mac *mac = netdev_priv(dev);
   2229	struct ethtool_rx_flow_spec *fsp =
   2230		(struct ethtool_rx_flow_spec *)&cmd->fs;
   2231
   2232	if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
   2233		return -EINVAL;
   2234
   2235	/* only the TCP destination IPv4 address is meaningful; other fields are ignored */
   2236	fsp->flow_type = TCP_V4_FLOW;
   2237	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
   2238	fsp->m_u.tcp_ip4_spec.ip4dst = 0;
   2239
   2240	fsp->h_u.tcp_ip4_spec.ip4src = 0;
   2241	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
   2242	fsp->h_u.tcp_ip4_spec.psrc = 0;
   2243	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
   2244	fsp->h_u.tcp_ip4_spec.pdst = 0;
   2245	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
   2246	fsp->h_u.tcp_ip4_spec.tos = 0;
   2247	fsp->m_u.tcp_ip4_spec.tos = 0xff;
   2248
   2249	return 0;
   2250}
   2251
   2252static int mtk_hwlro_get_fdir_all(struct net_device *dev,
   2253				  struct ethtool_rxnfc *cmd,
   2254				  u32 *rule_locs)
   2255{
   2256	struct mtk_mac *mac = netdev_priv(dev);
   2257	int cnt = 0;
   2258	int i;
   2259
   2260	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
   2261		if (mac->hwlro_ip[i]) {
   2262			rule_locs[cnt] = i;
   2263			cnt++;
   2264		}
   2265	}
   2266
   2267	cmd->rule_cnt = cnt;
   2268
   2269	return 0;
   2270}
   2271
   2272static netdev_features_t mtk_fix_features(struct net_device *dev,
   2273					  netdev_features_t features)
   2274{
   2275	if (!(features & NETIF_F_LRO)) {
   2276		struct mtk_mac *mac = netdev_priv(dev);
   2277		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
   2278
   2279		if (ip_cnt) {
   2280			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
   2281
   2282			features |= NETIF_F_LRO;
   2283		}
   2284	}
   2285
   2286	return features;
   2287}
   2288
   2289static int mtk_set_features(struct net_device *dev, netdev_features_t features)
   2290{
   2291	int err = 0;
   2292
   2293	if (!((dev->features ^ features) & NETIF_F_LRO))
   2294		return 0;
   2295
   2296	if (!(features & NETIF_F_LRO))
   2297		mtk_hwlro_netdev_disable(dev);
   2298
   2299	return err;
   2300}
   2301
   2302/* wait for DMA to finish whatever it is doing before we start using it again */
   2303static int mtk_dma_busy_wait(struct mtk_eth *eth)
   2304{
   2305	unsigned int reg;
   2306	int ret;
   2307	u32 val;
   2308
   2309	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
   2310		reg = eth->soc->reg_map->qdma.glo_cfg;
   2311	else
   2312		reg = eth->soc->reg_map->pdma.glo_cfg;
   2313
   2314	ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
   2315					!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
   2316					5, MTK_DMA_BUSY_TIMEOUT_US);
   2317	if (ret)
   2318		dev_err(eth->dev, "DMA init timeout\n");
   2319
   2320	return ret;
   2321}
   2322
   2323static int mtk_dma_init(struct mtk_eth *eth)
   2324{
   2325	int err;
   2326	u32 i;
   2327
   2328	if (mtk_dma_busy_wait(eth))
   2329		return -EBUSY;
   2330
   2331	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
   2332		/* QDMA needs scratch memory for internal reordering of the
   2333		 * descriptors
   2334		 */
   2335		err = mtk_init_fq_dma(eth);
   2336		if (err)
   2337			return err;
   2338	}
   2339
   2340	err = mtk_tx_alloc(eth);
   2341	if (err)
   2342		return err;
   2343
   2344	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
   2345		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
   2346		if (err)
   2347			return err;
   2348	}
   2349
   2350	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
   2351	if (err)
   2352		return err;
   2353
   2354	if (eth->hwlro) {
   2355		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
   2356			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
   2357			if (err)
   2358				return err;
   2359		}
   2360		err = mtk_hwlro_rx_init(eth);
   2361		if (err)
   2362			return err;
   2363	}
   2364
   2365	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
   2366		/* Enable random early drop and set drop threshold
   2367		 * automatically
   2368		 */
   2369		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
   2370			FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
   2371		mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
   2372	}
   2373
   2374	return 0;
   2375}
   2376
   2377static void mtk_dma_free(struct mtk_eth *eth)
   2378{
   2379	const struct mtk_soc_data *soc = eth->soc;
   2380	int i;
   2381
   2382	for (i = 0; i < MTK_MAC_COUNT; i++)
   2383		if (eth->netdev[i])
   2384			netdev_reset_queue(eth->netdev[i]);
   2385	if (eth->scratch_ring) {
   2386		dma_free_coherent(eth->dma_dev,
   2387				  MTK_DMA_SIZE * soc->txrx.txd_size,
   2388				  eth->scratch_ring, eth->phy_scratch_ring);
   2389		eth->scratch_ring = NULL;
   2390		eth->phy_scratch_ring = 0;
   2391	}
   2392	mtk_tx_clean(eth);
   2393	mtk_rx_clean(eth, &eth->rx_ring[0]);
   2394	mtk_rx_clean(eth, &eth->rx_ring_qdma);
   2395
   2396	if (eth->hwlro) {
   2397		mtk_hwlro_rx_uninit(eth);
   2398		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
   2399			mtk_rx_clean(eth, &eth->rx_ring[i]);
   2400	}
   2401
   2402	kfree(eth->scratch_head);
   2403}
   2404
   2405static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
   2406{
   2407	struct mtk_mac *mac = netdev_priv(dev);
   2408	struct mtk_eth *eth = mac->hw;
   2409
   2410	eth->netdev[mac->id]->stats.tx_errors++;
   2411	netif_err(eth, tx_err, dev,
   2412		  "transmit timed out\n");
   2413	schedule_work(&eth->pending_work);
   2414}
   2415
   2416static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
   2417{
   2418	struct mtk_eth *eth = _eth;
   2419
   2420	eth->rx_events++;
   2421	if (likely(napi_schedule_prep(&eth->rx_napi))) {
   2422		__napi_schedule(&eth->rx_napi);
   2423		mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
   2424	}
   2425
   2426	return IRQ_HANDLED;
   2427}
   2428
   2429static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
   2430{
   2431	struct mtk_eth *eth = _eth;
   2432
   2433	eth->tx_events++;
   2434	if (likely(napi_schedule_prep(&eth->tx_napi))) {
   2435		__napi_schedule(&eth->tx_napi);
   2436		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
   2437	}
   2438
   2439	return IRQ_HANDLED;
   2440}
   2441
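       /* Combined interrupt handler used when RX and TX share a single IRQ
        * line: check which interrupts are enabled and pending, and dispatch
        * to the RX and/or TX handlers accordingly.
        */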
   2442static irqreturn_t mtk_handle_irq(int irq, void *_eth)
   2443{
   2444	struct mtk_eth *eth = _eth;
   2445	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
   2446
   2447	if (mtk_r32(eth, reg_map->pdma.irq_mask) &
   2448	    eth->soc->txrx.rx_irq_done_mask) {
   2449		if (mtk_r32(eth, reg_map->pdma.irq_status) &
   2450		    eth->soc->txrx.rx_irq_done_mask)
   2451			mtk_handle_irq_rx(irq, _eth);
   2452	}
   2453	if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
   2454		if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
   2455			mtk_handle_irq_tx(irq, _eth);
   2456	}
   2457
   2458	return IRQ_HANDLED;
   2459}
   2460
   2461#ifdef CONFIG_NET_POLL_CONTROLLER
   2462static void mtk_poll_controller(struct net_device *dev)
   2463{
   2464	struct mtk_mac *mac = netdev_priv(dev);
   2465	struct mtk_eth *eth = mac->hw;
   2466
   2467	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
   2468	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
   2469	mtk_handle_irq_rx(eth->irq[2], dev);
   2470	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
   2471	mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
   2472}
   2473#endif
   2474
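       /* Allocate all TX/RX rings via mtk_dma_init() and enable the TX/RX
        * DMA engines in the QDMA and/or PDMA global configuration registers.
        */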
   2475static int mtk_start_dma(struct mtk_eth *eth)
   2476{
   2477	u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
   2478	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
   2479	int err;
   2480
   2481	err = mtk_dma_init(eth);
   2482	if (err) {
   2483		mtk_dma_free(eth);
   2484		return err;
   2485	}
   2486
   2487	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
   2488		val = mtk_r32(eth, reg_map->qdma.glo_cfg);
   2489		val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
   2490		       MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
   2491		       MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
   2492
   2493		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
   2494			val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
   2495			       MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
   2496			       MTK_CHK_DDONE_EN;
   2497		else
   2498			val |= MTK_RX_BT_32DWORDS;
   2499		mtk_w32(eth, val, reg_map->qdma.glo_cfg);
   2500
   2501		mtk_w32(eth,
   2502			MTK_RX_DMA_EN | rx_2b_offset |
   2503			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
   2504			reg_map->pdma.glo_cfg);
   2505	} else {
   2506		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
   2507			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
   2508			reg_map->pdma.glo_cfg);
   2509	}
   2510
   2511	return 0;
   2512}
   2513
   2514static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
   2515{
   2516	int i;
   2517
   2518	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
   2519		return;
   2520
   2521	for (i = 0; i < MTK_MAC_COUNT; i++) {
   2522		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
   2523
   2524		/* by default, set up the forward port to send frames to the PDMA */
   2525		val &= ~0xffff;
   2526
   2527		/* Enable RX checksum */
   2528		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
   2529
   2530		val |= config;
   2531
   2532		if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
   2533			val |= MTK_GDMA_SPECIAL_TAG;
   2534
   2535		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
   2536	}
   2537	/* Reset and enable PSE */
   2538	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
   2539	mtk_w32(eth, 0, MTK_RST_GL);
   2540}
   2541
   2542static int mtk_open(struct net_device *dev)
   2543{
   2544	struct mtk_mac *mac = netdev_priv(dev);
   2545	struct mtk_eth *eth = mac->hw;
   2546	int err;
   2547
   2548	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
   2549	if (err) {
   2550		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
   2551			   err);
   2552		return err;
   2553	}
   2554
   2555	/* we run 2 netdevs on the same dma ring so we only bring it up once */
   2556	if (!refcount_read(&eth->dma_refcnt)) {
   2557		u32 gdm_config = MTK_GDMA_TO_PDMA;
   2558
   2559		err = mtk_start_dma(eth);
   2560		if (err)
   2561			return err;
   2562
   2563		if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
   2564			gdm_config = MTK_GDMA_TO_PPE;
   2565
   2566		mtk_gdm_config(eth, gdm_config);
   2567
   2568		napi_enable(&eth->tx_napi);
   2569		napi_enable(&eth->rx_napi);
   2570		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
   2571		mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
   2572		refcount_set(&eth->dma_refcnt, 1);
   2573	}
   2574	else
   2575		refcount_inc(&eth->dma_refcnt);
   2576
   2577	phylink_start(mac->phylink);
   2578	netif_start_queue(dev);
   2579	return 0;
   2580}
   2581
   2582static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
   2583{
   2584	u32 val;
   2585	int i;
   2586
   2587	/* stop the dma engine */
   2588	spin_lock_bh(&eth->page_lock);
   2589	val = mtk_r32(eth, glo_cfg);
   2590	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
   2591		glo_cfg);
   2592	spin_unlock_bh(&eth->page_lock);
   2593
   2594	/* wait for dma stop */
   2595	for (i = 0; i < 10; i++) {
   2596		val = mtk_r32(eth, glo_cfg);
   2597		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
   2598			msleep(20);
   2599			continue;
   2600		}
   2601		break;
   2602	}
   2603}
   2604
   2605static int mtk_stop(struct net_device *dev)
   2606{
   2607	struct mtk_mac *mac = netdev_priv(dev);
   2608	struct mtk_eth *eth = mac->hw;
   2609
   2610	phylink_stop(mac->phylink);
   2611
   2612	netif_tx_disable(dev);
   2613
   2614	phylink_disconnect_phy(mac->phylink);
   2615
   2616	/* only shutdown DMA if this is the last user */
   2617	if (!refcount_dec_and_test(&eth->dma_refcnt))
   2618		return 0;
   2619
   2620	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
   2621
   2622	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
   2623	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
   2624	napi_disable(&eth->tx_napi);
   2625	napi_disable(&eth->rx_napi);
   2626
   2627	cancel_work_sync(&eth->rx_dim.work);
   2628	cancel_work_sync(&eth->tx_dim.work);
   2629
   2630	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
   2631		mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
   2632	mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
   2633
   2634	mtk_dma_free(eth);
   2635
   2636	if (eth->soc->offload_version)
   2637		mtk_ppe_stop(eth->ppe);
   2638
   2639	return 0;
   2640}
   2641
   2642static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
   2643{
   2644	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
   2645			   reset_bits,
   2646			   reset_bits);
   2647
   2648	usleep_range(1000, 1100);
   2649	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
   2650			   reset_bits,
   2651			   ~reset_bits);
   2652	mdelay(10);
   2653}
   2654
   2655static void mtk_clk_disable(struct mtk_eth *eth)
   2656{
   2657	int clk;
   2658
   2659	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
   2660		clk_disable_unprepare(eth->clks[clk]);
   2661}
   2662
   2663static int mtk_clk_enable(struct mtk_eth *eth)
   2664{
   2665	int clk, ret;
   2666
   2667	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
   2668		ret = clk_prepare_enable(eth->clks[clk]);
   2669		if (ret)
   2670			goto err_disable_clks;
   2671	}
   2672
   2673	return 0;
   2674
   2675err_disable_clks:
   2676	while (--clk >= 0)
   2677		clk_disable_unprepare(eth->clks[clk]);
   2678
   2679	return ret;
   2680}
   2681
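       /* Net DIM worker for the RX path: translate the selected moderation
        * profile into the PDMA (and, on QDMA-capable SoCs, the QDMA)
        * delay-interrupt register, clamping the 20us time units and packet
        * counts to the register field widths.
        */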
   2682static void mtk_dim_rx(struct work_struct *work)
   2683{
   2684	struct dim *dim = container_of(work, struct dim, work);
   2685	struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
   2686	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
   2687	struct dim_cq_moder cur_profile;
   2688	u32 val, cur;
   2689
   2690	cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
   2691						dim->profile_ix);
   2692	spin_lock_bh(&eth->dim_lock);
   2693
   2694	val = mtk_r32(eth, reg_map->pdma.delay_irq);
   2695	val &= MTK_PDMA_DELAY_TX_MASK;
   2696	val |= MTK_PDMA_DELAY_RX_EN;
   2697
   2698	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
   2699	val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
   2700
   2701	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
   2702	val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
   2703
   2704	mtk_w32(eth, val, reg_map->pdma.delay_irq);
   2705	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
   2706		mtk_w32(eth, val, reg_map->qdma.delay_irq);
   2707
   2708	spin_unlock_bh(&eth->dim_lock);
   2709
   2710	dim->state = DIM_START_MEASURE;
   2711}
   2712
   2713static void mtk_dim_tx(struct work_struct *work)
   2714{
   2715	struct dim *dim = container_of(work, struct dim, work);
   2716	struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
   2717	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
   2718	struct dim_cq_moder cur_profile;
   2719	u32 val, cur;
   2720
   2721	cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
   2722						dim->profile_ix);
   2723	spin_lock_bh(&eth->dim_lock);
   2724
   2725	val = mtk_r32(eth, reg_map->pdma.delay_irq);
   2726	val &= MTK_PDMA_DELAY_RX_MASK;
   2727	val |= MTK_PDMA_DELAY_TX_EN;
   2728
   2729	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
   2730	val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
   2731
   2732	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
   2733	val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
   2734
   2735	mtk_w32(eth, val, reg_map->pdma.delay_irq);
   2736	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
   2737		mtk_w32(eth, val, reg_map->qdma.delay_irq);
   2738
   2739	spin_unlock_bh(&eth->dim_lock);
   2740
   2741	dim->state = DIM_START_MEASURE;
   2742}
   2743
   2744static int mtk_hw_init(struct mtk_eth *eth)
   2745{
   2746	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
   2747		       ETHSYS_DMA_AG_MAP_PPE;
   2748	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
   2749	int i, val, ret;
   2750
   2751	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
   2752		return 0;
   2753
   2754	pm_runtime_enable(eth->dev);
   2755	pm_runtime_get_sync(eth->dev);
   2756
   2757	ret = mtk_clk_enable(eth);
   2758	if (ret)
   2759		goto err_disable_pm;
   2760
   2761	if (eth->ethsys)
   2762		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
   2763				   of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
   2764
   2765	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
   2766		ret = device_reset(eth->dev);
   2767		if (ret) {
   2768			dev_err(eth->dev, "MAC reset failed!\n");
   2769			goto err_disable_pm;
   2770		}
   2771
   2772		/* set interrupt delays based on current Net DIM sample */
   2773		mtk_dim_rx(&eth->rx_dim.work);
   2774		mtk_dim_tx(&eth->tx_dim.work);
   2775
   2776		/* disable delay and normal interrupt */
   2777		mtk_tx_irq_disable(eth, ~0);
   2778		mtk_rx_irq_disable(eth, ~0);
   2779
   2780		return 0;
   2781	}
   2782
   2783	val = RSTCTRL_FE | RSTCTRL_PPE;
   2784	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
   2785		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
   2786
   2787		val |= RSTCTRL_ETH;
   2788		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
   2789			val |= RSTCTRL_PPE1;
   2790	}
   2791
   2792	ethsys_reset(eth, val);
   2793
   2794	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
   2795		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
   2796			     0x3ffffff);
   2797
   2798		/* Set FE to PDMAv2 if necessary */
   2799		val = mtk_r32(eth, MTK_FE_GLO_MISC);
   2800		mtk_w32(eth,  val | BIT(4), MTK_FE_GLO_MISC);
   2801	}
   2802
   2803	if (eth->pctl) {
   2804		/* Set GE2 driving and slew rate */
   2805		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
   2806
   2807		/* set GE2 TDSEL */
   2808		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
   2809
   2810		/* set GE2 TUNE */
   2811		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
   2812	}
   2813
   2814	/* Set link-down as the default for each GMAC. The per-MAC MCR is
   2815	 * programmed with the appropriate value once mtk_mac_config() is
   2816	 * invoked.
   2817	 */
   2818	for (i = 0; i < MTK_MAC_COUNT; i++)
   2819		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
   2820
   2821	/* Tell the CDM to parse the MTK special tag on frames coming from the
   2822	 * CPU; this also works for untagged packets.
   2823	 */
   2824	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
   2825	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
   2826
   2827	/* Enable RX VLAN offloading */
   2828	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
   2829
   2830	/* set interrupt delays based on current Net DIM sample */
   2831	mtk_dim_rx(&eth->rx_dim.work);
   2832	mtk_dim_tx(&eth->tx_dim.work);
   2833
   2834	/* disable delay and normal interrupt */
   2835	mtk_tx_irq_disable(eth, ~0);
   2836	mtk_rx_irq_disable(eth, ~0);
   2837
   2838	/* FE int grouping */
   2839	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
   2840	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
   2841	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
   2842	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
   2843	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
   2844
   2845	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
   2846		/* PSE should not drop port8 and port9 packets */
   2847		mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
   2848
   2849		/* PSE Free Queue Flow Control  */
   2850		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
   2851
   2852		/* PSE config input queue threshold */
   2853		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
   2854		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
   2855		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
   2856		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
   2857		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
   2858		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
   2859		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
   2860		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
   2861
   2862		/* PSE config output queue threshold */
   2863		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
   2864		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
   2865		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
   2866		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
   2867		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
   2868		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
   2869		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
   2870		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
   2871
   2872		/* GDM and CDM Threshold */
   2873		mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
   2874		mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
   2875		mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
   2876		mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
   2877		mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
   2878		mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
   2879	}
   2880
   2881	return 0;
   2882
   2883err_disable_pm:
   2884	pm_runtime_put_sync(eth->dev);
   2885	pm_runtime_disable(eth->dev);
   2886
   2887	return ret;
   2888}
   2889
   2890static int mtk_hw_deinit(struct mtk_eth *eth)
   2891{
   2892	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
   2893		return 0;
   2894
   2895	mtk_clk_disable(eth);
   2896
   2897	pm_runtime_put_sync(eth->dev);
   2898	pm_runtime_disable(eth->dev);
   2899
   2900	return 0;
   2901}
   2902
   2903static int __init mtk_init(struct net_device *dev)
   2904{
   2905	struct mtk_mac *mac = netdev_priv(dev);
   2906	struct mtk_eth *eth = mac->hw;
   2907	int ret;
   2908
   2909	ret = of_get_ethdev_address(mac->of_node, dev);
   2910	if (ret) {
   2911		/* If the MAC address is invalid, fall back to a random MAC address */
   2912		eth_hw_addr_random(dev);
   2913		dev_err(eth->dev, "generated random MAC address %pM\n",
   2914			dev->dev_addr);
   2915	}
   2916
   2917	return 0;
   2918}
   2919
   2920static void mtk_uninit(struct net_device *dev)
   2921{
   2922	struct mtk_mac *mac = netdev_priv(dev);
   2923	struct mtk_eth *eth = mac->hw;
   2924
   2925	phylink_disconnect_phy(mac->phylink);
   2926	mtk_tx_irq_disable(eth, ~0);
   2927	mtk_rx_irq_disable(eth, ~0);
   2928}
   2929
   2930static int mtk_change_mtu(struct net_device *dev, int new_mtu)
   2931{
   2932	int length = new_mtu + MTK_RX_ETH_HLEN;
   2933	struct mtk_mac *mac = netdev_priv(dev);
   2934	struct mtk_eth *eth = mac->hw;
   2935	u32 mcr_cur, mcr_new;
   2936
   2937	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
   2938		mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
   2939		mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
   2940
   2941		if (length <= 1518)
   2942			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
   2943		else if (length <= 1536)
   2944			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
   2945		else if (length <= 1552)
   2946			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
   2947		else
   2948			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
   2949
   2950		if (mcr_new != mcr_cur)
   2951			mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
   2952	}
   2953
   2954	dev->mtu = new_mtu;
   2955
   2956	return 0;
   2957}
   2958
   2959static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
   2960{
   2961	struct mtk_mac *mac = netdev_priv(dev);
   2962
   2963	switch (cmd) {
   2964	case SIOCGMIIPHY:
   2965	case SIOCGMIIREG:
   2966	case SIOCSMIIREG:
   2967		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
   2968	default:
   2969		break;
   2970	}
   2971
   2972	return -EOPNOTSUPP;
   2973}
   2974
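       /* Reset worker scheduled from mtk_tx_timeout(): under RTNL, stop all
        * running netdevs, re-initialise the hardware and bring the affected
        * interfaces back up.
        */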
   2975static void mtk_pending_work(struct work_struct *work)
   2976{
   2977	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
   2978	int err, i;
   2979	unsigned long restart = 0;
   2980
   2981	rtnl_lock();
   2982
   2983	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
   2984
   2985	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
   2986		cpu_relax();
   2987
   2988	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
   2989	/* stop all devices to make sure that dma is properly shut down */
   2990	for (i = 0; i < MTK_MAC_COUNT; i++) {
   2991		if (!eth->netdev[i])
   2992			continue;
   2993		mtk_stop(eth->netdev[i]);
   2994		__set_bit(i, &restart);
   2995	}
   2996	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
   2997
   2998	/* restart underlying hardware such as power, clock, pin mux
   2999	 * and the connected phy
   3000	 */
   3001	mtk_hw_deinit(eth);
   3002
   3003	if (eth->dev->pins)
   3004		pinctrl_select_state(eth->dev->pins->p,
   3005				     eth->dev->pins->default_state);
   3006	mtk_hw_init(eth);
   3007
   3008	/* restart DMA and enable IRQs */
   3009	for (i = 0; i < MTK_MAC_COUNT; i++) {
   3010		if (!test_bit(i, &restart))
   3011			continue;
   3012		err = mtk_open(eth->netdev[i]);
   3013		if (err) {
   3014			netif_alert(eth, ifup, eth->netdev[i],
   3015			      "Driver up/down cycle failed, closing device.\n");
   3016			dev_close(eth->netdev[i]);
   3017		}
   3018	}
   3019
   3020	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
   3021
   3022	clear_bit_unlock(MTK_RESETTING, &eth->state);
   3023
   3024	rtnl_unlock();
   3025}
   3026
   3027static int mtk_free_dev(struct mtk_eth *eth)
   3028{
   3029	int i;
   3030
   3031	for (i = 0; i < MTK_MAC_COUNT; i++) {
   3032		if (!eth->netdev[i])
   3033			continue;
   3034		free_netdev(eth->netdev[i]);
   3035	}
   3036
   3037	return 0;
   3038}
   3039
   3040static int mtk_unreg_dev(struct mtk_eth *eth)
   3041{
   3042	int i;
   3043
   3044	for (i = 0; i < MTK_MAC_COUNT; i++) {
   3045		if (!eth->netdev[i])
   3046			continue;
   3047		unregister_netdev(eth->netdev[i]);
   3048	}
   3049
   3050	return 0;
   3051}
   3052
   3053static int mtk_cleanup(struct mtk_eth *eth)
   3054{
   3055	mtk_unreg_dev(eth);
   3056	mtk_free_dev(eth);
   3057	cancel_work_sync(&eth->pending_work);
   3058
   3059	return 0;
   3060}
   3061
   3062static int mtk_get_link_ksettings(struct net_device *ndev,
   3063				  struct ethtool_link_ksettings *cmd)
   3064{
   3065	struct mtk_mac *mac = netdev_priv(ndev);
   3066
   3067	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
   3068		return -EBUSY;
   3069
   3070	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
   3071}
   3072
   3073static int mtk_set_link_ksettings(struct net_device *ndev,
   3074				  const struct ethtool_link_ksettings *cmd)
   3075{
   3076	struct mtk_mac *mac = netdev_priv(ndev);
   3077
   3078	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
   3079		return -EBUSY;
   3080
   3081	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
   3082}
   3083
   3084static void mtk_get_drvinfo(struct net_device *dev,
   3085			    struct ethtool_drvinfo *info)
   3086{
   3087	struct mtk_mac *mac = netdev_priv(dev);
   3088
   3089	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
   3090	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
   3091	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
   3092}
   3093
   3094static u32 mtk_get_msglevel(struct net_device *dev)
   3095{
   3096	struct mtk_mac *mac = netdev_priv(dev);
   3097
   3098	return mac->hw->msg_enable;
   3099}
   3100
   3101static void mtk_set_msglevel(struct net_device *dev, u32 value)
   3102{
   3103	struct mtk_mac *mac = netdev_priv(dev);
   3104
   3105	mac->hw->msg_enable = value;
   3106}
   3107
   3108static int mtk_nway_reset(struct net_device *dev)
   3109{
   3110	struct mtk_mac *mac = netdev_priv(dev);
   3111
   3112	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
   3113		return -EBUSY;
   3114
   3115	if (!mac->phylink)
   3116		return -ENOTSUPP;
   3117
   3118	return phylink_ethtool_nway_reset(mac->phylink);
   3119}
   3120
   3121static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
   3122{
   3123	int i;
   3124
   3125	switch (stringset) {
   3126	case ETH_SS_STATS:
   3127		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
   3128			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
   3129			data += ETH_GSTRING_LEN;
   3130		}
   3131		break;
   3132	}
   3133}
   3134
   3135static int mtk_get_sset_count(struct net_device *dev, int sset)
   3136{
   3137	switch (sset) {
   3138	case ETH_SS_STATS:
   3139		return ARRAY_SIZE(mtk_ethtool_stats);
   3140	default:
   3141		return -EOPNOTSUPP;
   3142	}
   3143}
   3144
   3145static void mtk_get_ethtool_stats(struct net_device *dev,
   3146				  struct ethtool_stats *stats, u64 *data)
   3147{
   3148	struct mtk_mac *mac = netdev_priv(dev);
   3149	struct mtk_hw_stats *hwstats = mac->hw_stats;
   3150	u64 *data_src, *data_dst;
   3151	unsigned int start;
   3152	int i;
   3153
   3154	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
   3155		return;
   3156
   3157	if (netif_running(dev) && netif_device_present(dev)) {
   3158		if (spin_trylock_bh(&hwstats->stats_lock)) {
   3159			mtk_stats_update_mac(mac);
   3160			spin_unlock_bh(&hwstats->stats_lock);
   3161		}
   3162	}
   3163
   3164	data_src = (u64 *)hwstats;
   3165
   3166	do {
   3167		data_dst = data;
   3168		start = u64_stats_fetch_begin_irq(&hwstats->syncp);
   3169
   3170		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
   3171			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
   3172	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
   3173}
   3174
   3175static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
   3176			 u32 *rule_locs)
   3177{
   3178	int ret = -EOPNOTSUPP;
   3179
   3180	switch (cmd->cmd) {
   3181	case ETHTOOL_GRXRINGS:
   3182		if (dev->hw_features & NETIF_F_LRO) {
   3183			cmd->data = MTK_MAX_RX_RING_NUM;
   3184			ret = 0;
   3185		}
   3186		break;
   3187	case ETHTOOL_GRXCLSRLCNT:
   3188		if (dev->hw_features & NETIF_F_LRO) {
   3189			struct mtk_mac *mac = netdev_priv(dev);
   3190
   3191			cmd->rule_cnt = mac->hwlro_ip_cnt;
   3192			ret = 0;
   3193		}
   3194		break;
   3195	case ETHTOOL_GRXCLSRULE:
   3196		if (dev->hw_features & NETIF_F_LRO)
   3197			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
   3198		break;
   3199	case ETHTOOL_GRXCLSRLALL:
   3200		if (dev->hw_features & NETIF_F_LRO)
   3201			ret = mtk_hwlro_get_fdir_all(dev, cmd,
   3202						     rule_locs);
   3203		break;
   3204	default:
   3205		break;
   3206	}
   3207
   3208	return ret;
   3209}
   3210
   3211static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
   3212{
   3213	int ret = -EOPNOTSUPP;
   3214
   3215	switch (cmd->cmd) {
   3216	case ETHTOOL_SRXCLSRLINS:
   3217		if (dev->hw_features & NETIF_F_LRO)
   3218			ret = mtk_hwlro_add_ipaddr(dev, cmd);
   3219		break;
   3220	case ETHTOOL_SRXCLSRLDEL:
   3221		if (dev->hw_features & NETIF_F_LRO)
   3222			ret = mtk_hwlro_del_ipaddr(dev, cmd);
   3223		break;
   3224	default:
   3225		break;
   3226	}
   3227
   3228	return ret;
   3229}
   3230
   3231static const struct ethtool_ops mtk_ethtool_ops = {
   3232	.get_link_ksettings	= mtk_get_link_ksettings,
   3233	.set_link_ksettings	= mtk_set_link_ksettings,
   3234	.get_drvinfo		= mtk_get_drvinfo,
   3235	.get_msglevel		= mtk_get_msglevel,
   3236	.set_msglevel		= mtk_set_msglevel,
   3237	.nway_reset		= mtk_nway_reset,
   3238	.get_link		= ethtool_op_get_link,
   3239	.get_strings		= mtk_get_strings,
   3240	.get_sset_count		= mtk_get_sset_count,
   3241	.get_ethtool_stats	= mtk_get_ethtool_stats,
   3242	.get_rxnfc		= mtk_get_rxnfc,
   3243	.set_rxnfc              = mtk_set_rxnfc,
   3244};
   3245
   3246static const struct net_device_ops mtk_netdev_ops = {
   3247	.ndo_init		= mtk_init,
   3248	.ndo_uninit		= mtk_uninit,
   3249	.ndo_open		= mtk_open,
   3250	.ndo_stop		= mtk_stop,
   3251	.ndo_start_xmit		= mtk_start_xmit,
   3252	.ndo_set_mac_address	= mtk_set_mac_address,
   3253	.ndo_validate_addr	= eth_validate_addr,
   3254	.ndo_eth_ioctl		= mtk_do_ioctl,
   3255	.ndo_change_mtu		= mtk_change_mtu,
   3256	.ndo_tx_timeout		= mtk_tx_timeout,
   3257	.ndo_get_stats64        = mtk_get_stats64,
   3258	.ndo_fix_features	= mtk_fix_features,
   3259	.ndo_set_features	= mtk_set_features,
   3260#ifdef CONFIG_NET_POLL_CONTROLLER
   3261	.ndo_poll_controller	= mtk_poll_controller,
   3262#endif
   3263	.ndo_setup_tc		= mtk_eth_setup_tc,
   3264};
   3265
   3266static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
   3267{
   3268	const __be32 *_id = of_get_property(np, "reg", NULL);
   3269	phy_interface_t phy_mode;
   3270	struct phylink *phylink;
   3271	struct mtk_mac *mac;
   3272	int id, err;
   3273
   3274	if (!_id) {
   3275		dev_err(eth->dev, "missing mac id\n");
   3276		return -EINVAL;
   3277	}
   3278
   3279	id = be32_to_cpup(_id);
   3280	if (id >= MTK_MAC_COUNT) {
   3281		dev_err(eth->dev, "%d is not a valid mac id\n", id);
   3282		return -EINVAL;
   3283	}
   3284
   3285	if (eth->netdev[id]) {
   3286		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
   3287		return -EINVAL;
   3288	}
   3289
   3290	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
   3291	if (!eth->netdev[id]) {
   3292		dev_err(eth->dev, "alloc_etherdev failed\n");
   3293		return -ENOMEM;
   3294	}
   3295	mac = netdev_priv(eth->netdev[id]);
   3296	eth->mac[id] = mac;
   3297	mac->id = id;
   3298	mac->hw = eth;
   3299	mac->of_node = np;
   3300
   3301	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
   3302	mac->hwlro_ip_cnt = 0;
   3303
   3304	mac->hw_stats = devm_kzalloc(eth->dev,
   3305				     sizeof(*mac->hw_stats),
   3306				     GFP_KERNEL);
   3307	if (!mac->hw_stats) {
   3308		dev_err(eth->dev, "failed to allocate counter memory\n");
   3309		err = -ENOMEM;
   3310		goto free_netdev;
   3311	}
   3312	spin_lock_init(&mac->hw_stats->stats_lock);
   3313	u64_stats_init(&mac->hw_stats->syncp);
   3314	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
   3315
   3316	/* phylink create */
   3317	err = of_get_phy_mode(np, &phy_mode);
   3318	if (err) {
   3319		dev_err(eth->dev, "incorrect phy-mode\n");
   3320		goto free_netdev;
   3321	}
   3322
   3323	/* mac config is not set */
   3324	mac->interface = PHY_INTERFACE_MODE_NA;
   3325	mac->speed = SPEED_UNKNOWN;
   3326
   3327	mac->phylink_config.dev = &eth->netdev[id]->dev;
   3328	mac->phylink_config.type = PHYLINK_NETDEV;
   3329	/* This driver makes use of state->speed in mac_config */
   3330	mac->phylink_config.legacy_pre_march2020 = true;
   3331	mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
   3332		MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
   3333
   3334	__set_bit(PHY_INTERFACE_MODE_MII,
   3335		  mac->phylink_config.supported_interfaces);
   3336	__set_bit(PHY_INTERFACE_MODE_GMII,
   3337		  mac->phylink_config.supported_interfaces);
   3338
   3339	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
   3340		phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
   3341
   3342	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
   3343		__set_bit(PHY_INTERFACE_MODE_TRGMII,
   3344			  mac->phylink_config.supported_interfaces);
   3345
   3346	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
   3347		__set_bit(PHY_INTERFACE_MODE_SGMII,
   3348			  mac->phylink_config.supported_interfaces);
   3349		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
   3350			  mac->phylink_config.supported_interfaces);
   3351		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
   3352			  mac->phylink_config.supported_interfaces);
   3353	}
   3354
   3355	phylink = phylink_create(&mac->phylink_config,
   3356				 of_fwnode_handle(mac->of_node),
   3357				 phy_mode, &mtk_phylink_ops);
   3358	if (IS_ERR(phylink)) {
   3359		err = PTR_ERR(phylink);
   3360		goto free_netdev;
   3361	}
   3362
   3363	mac->phylink = phylink;
   3364
   3365	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
   3366	eth->netdev[id]->watchdog_timeo = 5 * HZ;
   3367	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
   3368	eth->netdev[id]->base_addr = (unsigned long)eth->base;
   3369
   3370	eth->netdev[id]->hw_features = eth->soc->hw_features;
   3371	if (eth->hwlro)
   3372		eth->netdev[id]->hw_features |= NETIF_F_LRO;
   3373
   3374	eth->netdev[id]->vlan_features = eth->soc->hw_features &
   3375		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
   3376	eth->netdev[id]->features |= eth->soc->hw_features;
   3377	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
   3378
   3379	eth->netdev[id]->irq = eth->irq[0];
   3380	eth->netdev[id]->dev.of_node = np;
   3381
   3382	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
   3383		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
   3384	else
   3385		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
   3386
   3387	return 0;
   3388
   3389free_netdev:
   3390	free_netdev(eth->netdev[id]);
   3391	return err;
   3392}
   3393
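       /* Change the device used for DMA API mappings: close every interface
        * that is up, swap eth->dma_dev, then re-open those interfaces so
        * that their rings are re-allocated against the new device.
        */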
   3394void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
   3395{
   3396	struct net_device *dev, *tmp;
   3397	LIST_HEAD(dev_list);
   3398	int i;
   3399
   3400	rtnl_lock();
   3401
   3402	for (i = 0; i < MTK_MAC_COUNT; i++) {
   3403		dev = eth->netdev[i];
   3404
   3405		if (!dev || !(dev->flags & IFF_UP))
   3406			continue;
   3407
   3408		list_add_tail(&dev->close_list, &dev_list);
   3409	}
   3410
   3411	dev_close_many(&dev_list, false);
   3412
   3413	eth->dma_dev = dma_dev;
   3414
   3415	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
   3416		list_del_init(&dev->close_list);
   3417		dev_open(dev, NULL);
   3418	}
   3419
   3420	rtnl_unlock();
   3421}
   3422
   3423static int mtk_probe(struct platform_device *pdev)
   3424{
   3425	struct device_node *mac_np;
   3426	struct mtk_eth *eth;
   3427	int err, i;
   3428
   3429	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
   3430	if (!eth)
   3431		return -ENOMEM;
   3432
   3433	eth->soc = of_device_get_match_data(&pdev->dev);
   3434
   3435	eth->dev = &pdev->dev;
   3436	eth->dma_dev = &pdev->dev;
   3437	eth->base = devm_platform_ioremap_resource(pdev, 0);
   3438	if (IS_ERR(eth->base))
   3439		return PTR_ERR(eth->base);
   3440
   3441	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
   3442		eth->ip_align = NET_IP_ALIGN;
   3443
   3444	spin_lock_init(&eth->page_lock);
   3445	spin_lock_init(&eth->tx_irq_lock);
   3446	spin_lock_init(&eth->rx_irq_lock);
   3447	spin_lock_init(&eth->dim_lock);
   3448
   3449	eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
   3450	INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
   3451
   3452	eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
   3453	INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
   3454
   3455	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
   3456		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
   3457							      "mediatek,ethsys");
   3458		if (IS_ERR(eth->ethsys)) {
   3459			dev_err(&pdev->dev, "no ethsys regmap found\n");
   3460			return PTR_ERR(eth->ethsys);
   3461		}
   3462	}
   3463
   3464	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
   3465		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
   3466							     "mediatek,infracfg");
   3467		if (IS_ERR(eth->infra)) {
   3468			dev_err(&pdev->dev, "no infracfg regmap found\n");
   3469			return PTR_ERR(eth->infra);
   3470		}
   3471	}
   3472
   3473	if (of_dma_is_coherent(pdev->dev.of_node)) {
   3474		struct regmap *cci;
   3475
   3476		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
   3477						      "cci-control-port");
   3478		/* enable CPU/bus coherency */
   3479		if (!IS_ERR(cci))
   3480			regmap_write(cci, 0, 3);
   3481	}
   3482
   3483	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
   3484		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
   3485					  GFP_KERNEL);
   3486		if (!eth->sgmii)
   3487			return -ENOMEM;
   3488
   3489		err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
   3490				     eth->soc->ana_rgc3);
   3491
   3492		if (err)
   3493			return err;
   3494	}
   3495
   3496	if (eth->soc->required_pctl) {
   3497		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
   3498							    "mediatek,pctl");
   3499		if (IS_ERR(eth->pctl)) {
   3500			dev_err(&pdev->dev, "no pctl regmap found\n");
   3501			return PTR_ERR(eth->pctl);
   3502		}
   3503	}
   3504
   3505	for (i = 0;; i++) {
   3506		struct device_node *np = of_parse_phandle(pdev->dev.of_node,
   3507							  "mediatek,wed", i);
   3508		static const u32 wdma_regs[] = {
   3509			MTK_WDMA0_BASE,
   3510			MTK_WDMA1_BASE
   3511		};
   3512		void __iomem *wdma;
   3513
   3514		if (!np || i >= ARRAY_SIZE(wdma_regs))
   3515			break;
   3516
   3517		wdma = eth->base + wdma_regs[i];
   3518		mtk_wed_add_hw(np, eth, wdma, i);
   3519	}
   3520
   3521	for (i = 0; i < 3; i++) {
   3522		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
   3523			eth->irq[i] = eth->irq[0];
   3524		else
   3525			eth->irq[i] = platform_get_irq(pdev, i);
   3526		if (eth->irq[i] < 0) {
   3527			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
   3528			return -ENXIO;
   3529		}
   3530	}
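	/*
	 * Look up every clock named in mtk_clks_source_name[]; only clocks
	 * flagged in required_clks are mandatory, missing optional clocks
	 * are simply left as NULL.
	 */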
   3531	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
   3532		eth->clks[i] = devm_clk_get(eth->dev,
   3533					    mtk_clks_source_name[i]);
   3534		if (IS_ERR(eth->clks[i])) {
   3535			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
   3536				return -EPROBE_DEFER;
   3537			if (eth->soc->required_clks & BIT(i)) {
   3538				dev_err(&pdev->dev, "clock %s not found\n",
   3539					mtk_clks_source_name[i]);
   3540				return -EINVAL;
   3541			}
   3542			eth->clks[i] = NULL;
   3543		}
   3544	}
   3545
   3546	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
   3547	INIT_WORK(&eth->pending_work, mtk_pending_work);
   3548
   3549	err = mtk_hw_init(eth);
   3550	if (err)
   3551		return err;
   3552
   3553	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
   3554
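	/*
	 * Create one MAC per enabled "mediatek,eth-mac" child node. A
	 * simplified, illustrative device-tree fragment (not taken from a
	 * real board file) would look like:
	 *
	 *	ethernet@1b100000 {
	 *		compatible = "mediatek,mt7623-eth";
	 *		...
	 *		mac@0 {
	 *			compatible = "mediatek,eth-mac";
	 *			reg = <0>;
	 *		};
	 *	};
	 */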
   3555	for_each_child_of_node(pdev->dev.of_node, mac_np) {
   3556		if (!of_device_is_compatible(mac_np,
   3557					     "mediatek,eth-mac"))
   3558			continue;
   3559
   3560		if (!of_device_is_available(mac_np))
   3561			continue;
   3562
   3563		err = mtk_add_mac(eth, mac_np);
   3564		if (err) {
   3565			of_node_put(mac_np);
   3566			goto err_deinit_hw;
   3567		}
   3568	}
   3569
   3570	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
   3571		err = devm_request_irq(eth->dev, eth->irq[0],
   3572				       mtk_handle_irq, 0,
   3573				       dev_name(eth->dev), eth);
   3574	} else {
   3575		err = devm_request_irq(eth->dev, eth->irq[1],
   3576				       mtk_handle_irq_tx, 0,
   3577				       dev_name(eth->dev), eth);
   3578		if (err)
   3579			goto err_free_dev;
   3580
   3581		err = devm_request_irq(eth->dev, eth->irq[2],
   3582				       mtk_handle_irq_rx, 0,
   3583				       dev_name(eth->dev), eth);
   3584	}
   3585	if (err)
   3586		goto err_free_dev;
   3587
   3588	/* No MT7628/88 support yet */
   3589	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
   3590		err = mtk_mdio_init(eth);
   3591		if (err)
   3592			goto err_free_dev;
   3593	}
   3594
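	/*
	 * SoCs with a packet processing engine (PPE): set it up and register
	 * the flow-offload hooks so that forwarded flows can bypass the CPU.
	 */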
   3595	if (eth->soc->offload_version) {
   3596		eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
   3597		if (!eth->ppe) {
   3598			err = -ENOMEM;
   3599			goto err_free_dev;
   3600		}
   3601
   3602		err = mtk_eth_offload_init(eth);
   3603		if (err)
   3604			goto err_free_dev;
   3605	}
   3606
   3607	for (i = 0; i < MTK_MAX_DEVS; i++) {
   3608		if (!eth->netdev[i])
   3609			continue;
   3610
   3611		err = register_netdev(eth->netdev[i]);
   3612		if (err) {
   3613			dev_err(eth->dev, "error bringing up device\n");
   3614			goto err_deinit_mdio;
    3615		}
    3616		netif_info(eth, probe, eth->netdev[i],
    3617			   "mediatek frame engine at 0x%08lx, irq %d\n",
    3618			   eth->netdev[i]->base_addr, eth->irq[0]);
   3619	}
   3620
   3621	/* we run 2 devices on the same DMA ring so we need a dummy device
   3622	 * for NAPI to work
   3623	 */
   3624	init_dummy_netdev(&eth->dummy_dev);
   3625	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
   3626		       NAPI_POLL_WEIGHT);
   3627	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
   3628		       NAPI_POLL_WEIGHT);
   3629
   3630	platform_set_drvdata(pdev, eth);
   3631
   3632	return 0;
   3633
   3634err_deinit_mdio:
   3635	mtk_mdio_cleanup(eth);
   3636err_free_dev:
   3637	mtk_free_dev(eth);
   3638err_deinit_hw:
   3639	mtk_hw_deinit(eth);
   3640
   3641	return err;
   3642}
   3643
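/*
 * mtk_remove() - tear down what mtk_probe() set up: stop every registered
 * net_device so DMA is quiesced, disconnect the PHYs, deinitialise the
 * hardware and release the NAPI contexts, rings and MDIO bus.
 */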
   3644static int mtk_remove(struct platform_device *pdev)
   3645{
   3646	struct mtk_eth *eth = platform_get_drvdata(pdev);
   3647	struct mtk_mac *mac;
   3648	int i;
   3649
   3650	/* stop all devices to make sure that dma is properly shut down */
   3651	for (i = 0; i < MTK_MAC_COUNT; i++) {
   3652		if (!eth->netdev[i])
   3653			continue;
   3654		mtk_stop(eth->netdev[i]);
   3655		mac = netdev_priv(eth->netdev[i]);
   3656		phylink_disconnect_phy(mac->phylink);
   3657	}
   3658
   3659	mtk_hw_deinit(eth);
   3660
   3661	netif_napi_del(&eth->tx_napi);
   3662	netif_napi_del(&eth->rx_napi);
   3663	mtk_cleanup(eth);
   3664	mtk_mdio_cleanup(eth);
   3665
   3666	return 0;
   3667}
   3668
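/*
 * Per-SoC configuration: register layout, capability flags, required
 * clocks and the TX/RX descriptor geometry used by the DMA code.
 */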
   3669static const struct mtk_soc_data mt2701_data = {
   3670	.reg_map = &mtk_reg_map,
   3671	.caps = MT7623_CAPS | MTK_HWLRO,
   3672	.hw_features = MTK_HW_FEATURES,
   3673	.required_clks = MT7623_CLKS_BITMAP,
   3674	.required_pctl = true,
   3675	.txrx = {
   3676		.txd_size = sizeof(struct mtk_tx_dma),
   3677		.rxd_size = sizeof(struct mtk_rx_dma),
   3678		.rx_irq_done_mask = MTK_RX_DONE_INT,
   3679		.rx_dma_l4_valid = RX_DMA_L4_VALID,
   3680		.dma_max_len = MTK_TX_DMA_BUF_LEN,
   3681		.dma_len_offset = 16,
   3682	},
   3683};
   3684
   3685static const struct mtk_soc_data mt7621_data = {
   3686	.reg_map = &mtk_reg_map,
   3687	.caps = MT7621_CAPS,
   3688	.hw_features = MTK_HW_FEATURES,
   3689	.required_clks = MT7621_CLKS_BITMAP,
   3690	.required_pctl = false,
   3691	.offload_version = 2,
   3692	.txrx = {
   3693		.txd_size = sizeof(struct mtk_tx_dma),
   3694		.rxd_size = sizeof(struct mtk_rx_dma),
   3695		.rx_irq_done_mask = MTK_RX_DONE_INT,
   3696		.rx_dma_l4_valid = RX_DMA_L4_VALID,
   3697		.dma_max_len = MTK_TX_DMA_BUF_LEN,
   3698		.dma_len_offset = 16,
   3699	},
   3700};
   3701
   3702static const struct mtk_soc_data mt7622_data = {
   3703	.reg_map = &mtk_reg_map,
   3704	.ana_rgc3 = 0x2028,
   3705	.caps = MT7622_CAPS | MTK_HWLRO,
   3706	.hw_features = MTK_HW_FEATURES,
   3707	.required_clks = MT7622_CLKS_BITMAP,
   3708	.required_pctl = false,
   3709	.offload_version = 2,
   3710	.txrx = {
   3711		.txd_size = sizeof(struct mtk_tx_dma),
   3712		.rxd_size = sizeof(struct mtk_rx_dma),
   3713		.rx_irq_done_mask = MTK_RX_DONE_INT,
   3714		.rx_dma_l4_valid = RX_DMA_L4_VALID,
   3715		.dma_max_len = MTK_TX_DMA_BUF_LEN,
   3716		.dma_len_offset = 16,
   3717	},
   3718};
   3719
   3720static const struct mtk_soc_data mt7623_data = {
   3721	.reg_map = &mtk_reg_map,
   3722	.caps = MT7623_CAPS | MTK_HWLRO,
   3723	.hw_features = MTK_HW_FEATURES,
   3724	.required_clks = MT7623_CLKS_BITMAP,
   3725	.required_pctl = true,
   3726	.offload_version = 2,
   3727	.txrx = {
   3728		.txd_size = sizeof(struct mtk_tx_dma),
   3729		.rxd_size = sizeof(struct mtk_rx_dma),
   3730		.rx_irq_done_mask = MTK_RX_DONE_INT,
   3731		.rx_dma_l4_valid = RX_DMA_L4_VALID,
   3732		.dma_max_len = MTK_TX_DMA_BUF_LEN,
   3733		.dma_len_offset = 16,
   3734	},
   3735};
   3736
   3737static const struct mtk_soc_data mt7629_data = {
   3738	.reg_map = &mtk_reg_map,
   3739	.ana_rgc3 = 0x128,
   3740	.caps = MT7629_CAPS | MTK_HWLRO,
   3741	.hw_features = MTK_HW_FEATURES,
   3742	.required_clks = MT7629_CLKS_BITMAP,
   3743	.required_pctl = false,
   3744	.txrx = {
   3745		.txd_size = sizeof(struct mtk_tx_dma),
   3746		.rxd_size = sizeof(struct mtk_rx_dma),
   3747		.rx_irq_done_mask = MTK_RX_DONE_INT,
   3748		.rx_dma_l4_valid = RX_DMA_L4_VALID,
   3749		.dma_max_len = MTK_TX_DMA_BUF_LEN,
   3750		.dma_len_offset = 16,
   3751	},
   3752};
   3753
   3754static const struct mtk_soc_data mt7986_data = {
   3755	.reg_map = &mt7986_reg_map,
   3756	.ana_rgc3 = 0x128,
   3757	.caps = MT7986_CAPS,
   3758	.required_clks = MT7986_CLKS_BITMAP,
   3759	.required_pctl = false,
   3760	.txrx = {
   3761		.txd_size = sizeof(struct mtk_tx_dma_v2),
   3762		.rxd_size = sizeof(struct mtk_rx_dma_v2),
   3763		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
   3764		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
   3765		.dma_len_offset = 8,
   3766	},
   3767};
   3768
   3769static const struct mtk_soc_data rt5350_data = {
   3770	.reg_map = &mt7628_reg_map,
   3771	.caps = MT7628_CAPS,
   3772	.hw_features = MTK_HW_FEATURES_MT7628,
   3773	.required_clks = MT7628_CLKS_BITMAP,
   3774	.required_pctl = false,
   3775	.txrx = {
   3776		.txd_size = sizeof(struct mtk_tx_dma),
   3777		.rxd_size = sizeof(struct mtk_rx_dma),
   3778		.rx_irq_done_mask = MTK_RX_DONE_INT,
   3779		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
   3780		.dma_max_len = MTK_TX_DMA_BUF_LEN,
   3781		.dma_len_offset = 16,
   3782	},
   3783};
   3784
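/* Bind each supported compatible string to its SoC configuration. */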
   3785const struct of_device_id of_mtk_match[] = {
   3786	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
   3787	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
   3788	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
   3789	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
   3790	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
   3791	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
   3792	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
   3793	{},
   3794};
   3795MODULE_DEVICE_TABLE(of, of_mtk_match);
   3796
   3797static struct platform_driver mtk_driver = {
   3798	.probe = mtk_probe,
   3799	.remove = mtk_remove,
   3800	.driver = {
   3801		.name = "mtk_soc_eth",
   3802		.of_match_table = of_mtk_match,
   3803	},
   3804};
   3805
   3806module_platform_driver(mtk_driver);
   3807
   3808MODULE_LICENSE("GPL");
   3809MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
   3810MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");