cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

am65-cpsw-nuss.c (77480B)


      1// SPDX-License-Identifier: GPL-2.0
      2/* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
      3 *
      4 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
      5 *
      6 */
      7
      8#include <linux/clk.h>
      9#include <linux/etherdevice.h>
     10#include <linux/if_vlan.h>
     11#include <linux/interrupt.h>
     12#include <linux/irqdomain.h>
     13#include <linux/kernel.h>
     14#include <linux/kmemleak.h>
     15#include <linux/module.h>
     16#include <linux/netdevice.h>
     17#include <linux/net_tstamp.h>
     18#include <linux/of.h>
     19#include <linux/of_mdio.h>
     20#include <linux/of_net.h>
     21#include <linux/of_device.h>
     22#include <linux/phylink.h>
     23#include <linux/phy/phy.h>
     24#include <linux/platform_device.h>
     25#include <linux/pm_runtime.h>
     26#include <linux/regmap.h>
     27#include <linux/mfd/syscon.h>
     28#include <linux/sys_soc.h>
     29#include <linux/dma/ti-cppi5.h>
     30#include <linux/dma/k3-udma-glue.h>
     31#include <net/switchdev.h>
     32
     33#include "cpsw_ale.h"
     34#include "cpsw_sl.h"
     35#include "am65-cpsw-nuss.h"
     36#include "am65-cpsw-switchdev.h"
     37#include "k3-cppi-desc-pool.h"
     38#include "am65-cpts.h"
     39
     40#define AM65_CPSW_SS_BASE	0x0
     41#define AM65_CPSW_SGMII_BASE	0x100
     42#define AM65_CPSW_XGMII_BASE	0x2100
     43#define AM65_CPSW_CPSW_NU_BASE	0x20000
     44#define AM65_CPSW_NU_PORTS_BASE	0x1000
     45#define AM65_CPSW_NU_FRAM_BASE	0x12000
     46#define AM65_CPSW_NU_STATS_BASE	0x1a000
     47#define AM65_CPSW_NU_ALE_BASE	0x1e000
     48#define AM65_CPSW_NU_CPTS_BASE	0x1d000
     49
     50#define AM65_CPSW_NU_PORTS_OFFSET	0x1000
     51#define AM65_CPSW_NU_STATS_PORT_OFFSET	0x200
     52#define AM65_CPSW_NU_FRAM_PORT_OFFSET	0x200
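/* Illustrative sketch (not part of the driver): the per-port register
 * windows follow from the offsets above. Assuming cpsw_base points at the
 * CPSW_NU core, port N's window is derived as:
 *
 *	void __iomem *port = cpsw_base + AM65_CPSW_NU_PORTS_BASE +
 *			     N * AM65_CPSW_NU_PORTS_OFFSET;
 *
 * e.g. host port 0 at cpsw_base + 0x1000, external port 1 at + 0x2000.
 */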
     53
     54#define AM65_CPSW_MAX_PORTS	8
     55
     56#define AM65_CPSW_MIN_PACKET_SIZE	VLAN_ETH_ZLEN
     57#define AM65_CPSW_MAX_PACKET_SIZE	(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
     58
     59#define AM65_CPSW_REG_CTL		0x004
     60#define AM65_CPSW_REG_STAT_PORT_EN	0x014
     61#define AM65_CPSW_REG_PTYPE		0x018
     62
     63#define AM65_CPSW_P0_REG_CTL			0x004
     64#define AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET	0x008
     65
     66#define AM65_CPSW_PORT_REG_PRI_CTL		0x01c
     67#define AM65_CPSW_PORT_REG_RX_PRI_MAP		0x020
     68#define AM65_CPSW_PORT_REG_RX_MAXLEN		0x024
     69
     70#define AM65_CPSW_PORTN_REG_SA_L		0x308
     71#define AM65_CPSW_PORTN_REG_SA_H		0x30c
     72#define AM65_CPSW_PORTN_REG_TS_CTL              0x310
     73#define AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG	0x314
     74#define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG	0x318
     75#define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2       0x31C
     76
     77#define AM65_CPSW_CTL_VLAN_AWARE		BIT(1)
     78#define AM65_CPSW_CTL_P0_ENABLE			BIT(2)
     79#define AM65_CPSW_CTL_P0_TX_CRC_REMOVE		BIT(13)
     80#define AM65_CPSW_CTL_P0_RX_PAD			BIT(14)
     81
     82/* AM65_CPSW_P0_REG_CTL */
     83#define AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN	BIT(0)
     84
     85/* AM65_CPSW_PORT_REG_PRI_CTL */
     86#define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN	BIT(8)
     87
     88/* AM65_CPSW_PN_TS_CTL register fields */
     89#define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN		BIT(4)
     90#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN	BIT(5)
     91#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT2_EN	BIT(6)
     92#define AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN		BIT(7)
     93#define AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN		BIT(10)
     94#define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN	BIT(11)
     95#define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT	16
     96
     97/* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
     98#define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT	16
     99
    100/* AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 */
    101#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107	BIT(16)
    102#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129	BIT(17)
    103#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130	BIT(18)
    104#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131	BIT(19)
    105#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132	BIT(20)
    106#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319	BIT(21)
    107#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320	BIT(22)
    108#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO BIT(23)
    109
    110/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
    111#define AM65_CPSW_TS_EVENT_MSG_TYPE_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
    112
    113#define AM65_CPSW_TS_SEQ_ID_OFFSET (0x1e)
    114
    115#define AM65_CPSW_TS_TX_ANX_ALL_EN		\
    116	(AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN |	\
    117	 AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN |	\
    118	 AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)
    119
    120#define AM65_CPSW_ALE_AGEOUT_DEFAULT	30
    121/* Number of TX/RX descriptors */
    122#define AM65_CPSW_MAX_TX_DESC	500
    123#define AM65_CPSW_MAX_RX_DESC	500
    124
    125#define AM65_CPSW_NAV_PS_DATA_SIZE 16
    126#define AM65_CPSW_NAV_SW_DATA_SIZE 16
    127
    128#define AM65_CPSW_DEBUG	(NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK | \
    129			 NETIF_MSG_IFUP	| NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
    130			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
    131
    132static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
    133				      const u8 *dev_addr)
    134{
    135	u32 mac_hi = (dev_addr[0] << 0) | (dev_addr[1] << 8) |
    136		     (dev_addr[2] << 16) | (dev_addr[3] << 24);
    137	u32 mac_lo = (dev_addr[4] << 0) | (dev_addr[5] << 8);
    138
    139	writel(mac_hi, slave->port_base + AM65_CPSW_PORTN_REG_SA_H);
    140	writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
    141}
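/* Worked example (illustrative): for dev_addr 00:11:22:33:44:55 the
 * function above writes SA_H = 0x33221100 and SA_L = 0x00005544, i.e.
 * octets 0-3 packed LSB-first into SA_H and octets 4-5 into SA_L.
 */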
    142
    143static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
    144{
    145	cpsw_sl_reset(port->slave.mac_sl, 100);
    146	/* Max length register has to be restored after MAC SL reset */
    147	writel(AM65_CPSW_MAX_PACKET_SIZE,
    148	       port->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
    149}
    150
    151static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
    152{
    153	common->nuss_ver = readl(common->ss_base);
    154	common->cpsw_ver = readl(common->cpsw_base);
    155	dev_info(common->dev,
     156		 "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X, ports: %u, quirks: %08x\n",
    157		common->nuss_ver,
    158		common->cpsw_ver,
    159		common->port_num + 1,
    160		common->pdata.quirks);
    161}
    162
    163static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
    164					    __be16 proto, u16 vid)
    165{
    166	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
    167	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
    168	u32 port_mask, unreg_mcast = 0;
    169	int ret;
    170
    171	if (!common->is_emac_mode)
    172		return 0;
    173
    174	if (!netif_running(ndev) || !vid)
    175		return 0;
    176
    177	ret = pm_runtime_resume_and_get(common->dev);
    178	if (ret < 0)
    179		return ret;
    180
    181	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
    182	if (!vid)
    183		unreg_mcast = port_mask;
    184	dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
    185	ret = cpsw_ale_vlan_add_modify(common->ale, vid, port_mask,
    186				       unreg_mcast, port_mask, 0);
    187
    188	pm_runtime_put(common->dev);
    189	return ret;
    190}
    191
    192static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
    193					     __be16 proto, u16 vid)
    194{
    195	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
    196	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
    197	int ret;
    198
    199	if (!common->is_emac_mode)
    200		return 0;
    201
    202	if (!netif_running(ndev) || !vid)
    203		return 0;
    204
    205	ret = pm_runtime_resume_and_get(common->dev);
    206	if (ret < 0)
    207		return ret;
    208
    209	dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
    210	ret = cpsw_ale_del_vlan(common->ale, vid,
    211				BIT(port->port_id) | ALE_PORT_HOST);
    212
    213	pm_runtime_put(common->dev);
    214	return ret;
    215}
    216
    217static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
    218					bool promisc)
    219{
    220	struct am65_cpsw_common *common = port->common;
    221
    222	if (promisc && !common->is_emac_mode) {
     223		dev_dbg(common->dev, "promisc mode requested in switch mode\n");
    224		return;
    225	}
    226
    227	if (promisc) {
    228		/* Enable promiscuous mode */
    229		cpsw_ale_control_set(common->ale, port->port_id,
    230				     ALE_PORT_MACONLY_CAF, 1);
    231		dev_dbg(common->dev, "promisc enabled\n");
    232	} else {
    233		/* Disable promiscuous mode */
    234		cpsw_ale_control_set(common->ale, port->port_id,
    235				     ALE_PORT_MACONLY_CAF, 0);
    236		dev_dbg(common->dev, "promisc disabled\n");
    237	}
    238}
    239
    240static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
    241{
    242	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
    243	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
    244	u32 port_mask;
    245	bool promisc;
    246
    247	promisc = !!(ndev->flags & IFF_PROMISC);
    248	am65_cpsw_slave_set_promisc(port, promisc);
    249
    250	if (promisc)
    251		return;
    252
    253	/* Restore allmulti on vlans if necessary */
    254	cpsw_ale_set_allmulti(common->ale,
    255			      ndev->flags & IFF_ALLMULTI, port->port_id);
    256
    257	port_mask = ALE_PORT_HOST;
    258	/* Clear all mcast from ALE */
    259	cpsw_ale_flush_multicast(common->ale, port_mask, -1);
    260
    261	if (!netdev_mc_empty(ndev)) {
    262		struct netdev_hw_addr *ha;
    263
    264		/* program multicast address list into ALE register */
    265		netdev_for_each_mc_addr(ha, ndev) {
    266			cpsw_ale_add_mcast(common->ale, ha->addr,
    267					   port_mask, 0, 0, 0);
    268		}
    269	}
    270}
    271
    272static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
    273					       unsigned int txqueue)
    274{
    275	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
    276	struct am65_cpsw_tx_chn *tx_chn;
    277	struct netdev_queue *netif_txq;
    278	unsigned long trans_start;
    279
    280	netif_txq = netdev_get_tx_queue(ndev, txqueue);
    281	tx_chn = &common->tx_chns[txqueue];
    282	trans_start = READ_ONCE(netif_txq->trans_start);
    283
    284	netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
    285		   txqueue,
    286		   netif_tx_queue_stopped(netif_txq),
    287		   jiffies_to_msecs(jiffies - trans_start),
    288		   dql_avail(&netif_txq->dql),
    289		   k3_cppi_desc_pool_avail(tx_chn->desc_pool));
    290
    291	if (netif_tx_queue_stopped(netif_txq)) {
    292		/* try recover if stopped by us */
    293		txq_trans_update(netif_txq);
    294		netif_tx_wake_queue(netif_txq);
    295	}
    296}
    297
    298static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
    299				  struct sk_buff *skb)
    300{
    301	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
    302	struct cppi5_host_desc_t *desc_rx;
    303	struct device *dev = common->dev;
    304	u32 pkt_len = skb_tailroom(skb);
    305	dma_addr_t desc_dma;
    306	dma_addr_t buf_dma;
    307	void *swdata;
    308
    309	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
    310	if (!desc_rx) {
    311		dev_err(dev, "Failed to allocate RXFDQ descriptor\n");
    312		return -ENOMEM;
    313	}
    314	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
    315
    316	buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len,
    317				 DMA_FROM_DEVICE);
    318	if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
    319		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
    320		dev_err(dev, "Failed to map rx skb buffer\n");
    321		return -EINVAL;
    322	}
    323
    324	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
    325			 AM65_CPSW_NAV_PS_DATA_SIZE);
    326	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
    327	cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
    328	swdata = cppi5_hdesc_get_swdata(desc_rx);
    329	*((void **)swdata) = skb;
    330
    331	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
    332}
    333
    334void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
    335{
    336	struct am65_cpsw_host *host_p = am65_common_get_host(common);
    337	u32 val, pri_map;
    338
    339	/* P0 set Receive Priority Type */
    340	val = readl(host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
    341
    342	if (common->pf_p0_rx_ptype_rrobin) {
    343		val |= AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
     344		/* Enet port FIFOs work in fixed priority mode only, so
     345		 * reset P0_Rx_Pri_Map so that all packets go to Enet FIFO 0
    346		 */
    347		pri_map = 0x0;
    348	} else {
    349		val &= ~AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
    350		/* restore P0_Rx_Pri_Map */
    351		pri_map = 0x76543210;
    352	}
    353
    354	writel(pri_map, host_p->port_base + AM65_CPSW_PORT_REG_RX_PRI_MAP);
    355	writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
    356}
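/* Illustrative reading of the pri_map values used above: each nibble n of
 * P0_Rx_Pri_Map selects the RX FIFO for packets of priority n, so
 * 0x76543210 is the identity mapping (priority n -> FIFO n) while 0x0
 * steers every priority into FIFO 0, as round-robin mode requires.
 */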
    357
    358static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
    359static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
    360static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
    361static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
    362
    363static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
    364				      netdev_features_t features)
    365{
    366	struct am65_cpsw_host *host_p = am65_common_get_host(common);
    367	int port_idx, i, ret;
    368	struct sk_buff *skb;
    369	u32 val, port_mask;
    370
    371	if (common->usage_count)
    372		return 0;
    373
    374	/* Control register */
    375	writel(AM65_CPSW_CTL_P0_ENABLE | AM65_CPSW_CTL_P0_TX_CRC_REMOVE |
    376	       AM65_CPSW_CTL_VLAN_AWARE | AM65_CPSW_CTL_P0_RX_PAD,
    377	       common->cpsw_base + AM65_CPSW_REG_CTL);
    378	/* Max length register */
    379	writel(AM65_CPSW_MAX_PACKET_SIZE,
    380	       host_p->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
    381	/* set base flow_id */
    382	writel(common->rx_flow_id_base,
    383	       host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
      384	/* en tx csum offload (P0 "RX" is the host->switch, i.e. TX, direction) */
    385	writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN, host_p->port_base + AM65_CPSW_P0_REG_CTL);
    386
    387	am65_cpsw_nuss_set_p0_ptype(common);
    388
      389	/* enable statistics */
    390	val = BIT(HOST_PORT_NUM);
    391	for (port_idx = 0; port_idx < common->port_num; port_idx++) {
    392		struct am65_cpsw_port *port = &common->ports[port_idx];
    393
    394		if (!port->disabled)
    395			val |=  BIT(port->port_id);
    396	}
    397	writel(val, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
    398
    399	/* disable priority elevation */
    400	writel(0, common->cpsw_base + AM65_CPSW_REG_PTYPE);
    401
    402	cpsw_ale_start(common->ale);
    403
    404	/* limit to one RX flow only */
    405	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
    406			     ALE_DEFAULT_THREAD_ID, 0);
    407	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
    408			     ALE_DEFAULT_THREAD_ENABLE, 1);
      409	/* switch to vlan aware mode */
    410	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
    411	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
    412			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
    413
    414	/* default vlan cfg: create mask based on enabled ports */
    415	port_mask = GENMASK(common->port_num, 0) &
    416		    ~common->disabled_ports_mask;
    417
    418	cpsw_ale_add_vlan(common->ale, 0, port_mask,
    419			  port_mask, port_mask,
    420			  port_mask & ~ALE_PORT_HOST);
    421
    422	if (common->is_emac_mode)
    423		am65_cpsw_init_host_port_emac(common);
    424	else
    425		am65_cpsw_init_host_port_switch(common);
    426
    427	for (i = 0; i < common->rx_chns.descs_num; i++) {
    428		skb = __netdev_alloc_skb_ip_align(NULL,
    429						  AM65_CPSW_MAX_PACKET_SIZE,
    430						  GFP_KERNEL);
    431		if (!skb) {
    432			dev_err(common->dev, "cannot allocate skb\n");
    433			return -ENOMEM;
    434		}
    435
    436		ret = am65_cpsw_nuss_rx_push(common, skb);
    437		if (ret < 0) {
    438			dev_err(common->dev,
    439				"cannot submit skb to channel rx, error %d\n",
    440				ret);
    441			kfree_skb(skb);
    442			return ret;
    443		}
    444		kmemleak_not_leak(skb);
    445	}
    446	k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
    447
    448	for (i = 0; i < common->tx_ch_num; i++) {
    449		ret = k3_udma_glue_enable_tx_chn(common->tx_chns[i].tx_chn);
    450		if (ret)
    451			return ret;
    452		napi_enable(&common->tx_chns[i].napi_tx);
    453	}
    454
    455	napi_enable(&common->napi_rx);
    456	if (common->rx_irq_disabled) {
    457		common->rx_irq_disabled = false;
    458		enable_irq(common->rx_chns.irq);
    459	}
    460
    461	dev_dbg(common->dev, "cpsw_nuss started\n");
    462	return 0;
    463}
    464
    465static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
    466static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
    467
    468static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
    469{
    470	int i;
    471
    472	if (common->usage_count != 1)
    473		return 0;
    474
    475	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
    476			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
    477
    478	/* shutdown tx channels */
    479	atomic_set(&common->tdown_cnt, common->tx_ch_num);
    480	/* ensure new tdown_cnt value is visible */
    481	smp_mb__after_atomic();
    482	reinit_completion(&common->tdown_complete);
    483
    484	for (i = 0; i < common->tx_ch_num; i++)
    485		k3_udma_glue_tdown_tx_chn(common->tx_chns[i].tx_chn, false);
    486
    487	i = wait_for_completion_timeout(&common->tdown_complete,
    488					msecs_to_jiffies(1000));
    489	if (!i)
    490		dev_err(common->dev, "tx timeout\n");
    491	for (i = 0; i < common->tx_ch_num; i++)
    492		napi_disable(&common->tx_chns[i].napi_tx);
    493
    494	for (i = 0; i < common->tx_ch_num; i++) {
    495		k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
    496					  &common->tx_chns[i],
    497					  am65_cpsw_nuss_tx_cleanup);
    498		k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
    499	}
    500
    501	k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
    502	napi_disable(&common->napi_rx);
    503
    504	for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
    505		k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, i,
    506					  &common->rx_chns,
    507					  am65_cpsw_nuss_rx_cleanup, !!i);
    508
    509	k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
    510
    511	cpsw_ale_stop(common->ale);
    512
    513	writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
    514	writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
    515
    516	dev_dbg(common->dev, "cpsw_nuss stopped\n");
    517	return 0;
    518}
    519
    520static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
    521{
    522	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
    523	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
    524	int ret;
    525
    526	phylink_stop(port->slave.phylink);
    527
    528	netif_tx_stop_all_queues(ndev);
    529
    530	phylink_disconnect_phy(port->slave.phylink);
    531
    532	ret = am65_cpsw_nuss_common_stop(common);
    533	if (ret)
    534		return ret;
    535
    536	common->usage_count--;
    537	pm_runtime_put(common->dev);
    538	return 0;
    539}
    540
    541static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
    542{
    543	struct am65_cpsw_port *port = arg;
    544
    545	if (!vdev)
    546		return 0;
    547
    548	return am65_cpsw_nuss_ndo_slave_add_vid(port->ndev, 0, vid);
    549}
    550
    551static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
    552{
    553	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
    554	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
    555	int ret, i;
    556
    557	ret = pm_runtime_resume_and_get(common->dev);
    558	if (ret < 0)
    559		return ret;
    560
    561	/* Notify the stack of the actual queue counts. */
    562	ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
    563	if (ret) {
    564		dev_err(common->dev, "cannot set real number of tx queues\n");
     565		goto runtime_put;
    566	}
    567
    568	ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
    569	if (ret) {
    570		dev_err(common->dev, "cannot set real number of rx queues\n");
     571		goto runtime_put;
    572	}
    573
    574	for (i = 0; i < common->tx_ch_num; i++)
    575		netdev_tx_reset_queue(netdev_get_tx_queue(ndev, i));
    576
    577	ret = am65_cpsw_nuss_common_open(common, ndev->features);
    578	if (ret)
     579		goto runtime_put;
    580
    581	common->usage_count++;
    582
    583	am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
    584
    585	if (common->is_emac_mode)
    586		am65_cpsw_init_port_emac_ale(port);
    587	else
    588		am65_cpsw_init_port_switch_ale(port);
    589
     590	/* mac_sl should be configured via the phylink interface */
    591	am65_cpsw_sl_ctl_reset(port);
    592
    593	ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET,
    594			       port->slave.phy_if);
    595	if (ret)
    596		goto error_cleanup;
    597
    598	ret = phylink_of_phy_connect(port->slave.phylink, port->slave.phy_node, 0);
    599	if (ret)
    600		goto error_cleanup;
    601
    602	/* restore vlan configurations */
    603	vlan_for_each(ndev, cpsw_restore_vlans, port);
    604
    605	phylink_start(port->slave.phylink);
    606
    607	return 0;
    608
    609error_cleanup:
    610	am65_cpsw_nuss_ndo_slave_stop(ndev);
     611	return ret;

runtime_put:
	/* drop the pm_runtime reference taken above on early errors */
	pm_runtime_put(common->dev);
	return ret;
    612}
    613
    614static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
    615{
    616	struct am65_cpsw_rx_chn *rx_chn = data;
    617	struct cppi5_host_desc_t *desc_rx;
    618	struct sk_buff *skb;
    619	dma_addr_t buf_dma;
    620	u32 buf_dma_len;
    621	void **swdata;
    622
    623	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
    624	swdata = cppi5_hdesc_get_swdata(desc_rx);
    625	skb = *swdata;
    626	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
    627	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
    628
    629	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
    630	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
    631
    632	dev_kfree_skb_any(skb);
    633}
    634
    635static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata)
    636{
    637	struct skb_shared_hwtstamps *ssh;
    638	u64 ns;
    639
    640	ns = ((u64)psdata[1] << 32) | psdata[0];
    641
    642	ssh = skb_hwtstamps(skb);
    643	memset(ssh, 0, sizeof(*ssh));
    644	ssh->hwtstamp = ns_to_ktime(ns);
    645}
    646
    647/* RX psdata[2] word format - checksum information */
    648#define AM65_CPSW_RX_PSD_CSUM_ADD	GENMASK(15, 0)
    649#define AM65_CPSW_RX_PSD_CSUM_ERR	BIT(16)
    650#define AM65_CPSW_RX_PSD_IS_FRAGMENT	BIT(17)
    651#define AM65_CPSW_RX_PSD_IS_TCP		BIT(18)
    652#define AM65_CPSW_RX_PSD_IPV6_VALID	BIT(19)
    653#define AM65_CPSW_RX_PSD_IPV4_VALID	BIT(20)
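/* Worked example (illustrative): csum_info = 0x0010FFFF decodes as
 * IPV4_VALID set (BIT(20)), CSUM_ERR and IS_FRAGMENT clear, and
 * CSUM_ADD = 0xFFFF - a verified, non-fragmented IPv4 packet, which
 * am65_cpsw_nuss_rx_csum() below marks CHECKSUM_UNNECESSARY.
 */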
    654
    655static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
    656{
     657	/* HW can verify the checksum of IPv4/IPv6 TCP/UDP packets.
     658	 * The csum information is provided in the psdata[2] word:
     659	 * AM65_CPSW_RX_PSD_CSUM_ERR bit - indicates a csum error
     660	 * AM65_CPSW_RX_PSD_IPV6_VALID and AM65_CPSW_RX_PSD_IPV4_VALID
     661	 * bits - indicate an IPv4/IPv6 packet
     662	 * AM65_CPSW_RX_PSD_IS_FRAGMENT bit - indicates a fragmented packet
     663	 * AM65_CPSW_RX_PSD_CSUM_ADD has the value 0xFFFF for non-fragmented
     664	 * packets, or the csum value for fragmented packets, if !AM65_CPSW_RX_PSD_CSUM_ERR
     665	 */
    666	skb_checksum_none_assert(skb);
    667
    668	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
    669		return;
    670
    671	if ((csum_info & (AM65_CPSW_RX_PSD_IPV6_VALID |
    672			  AM65_CPSW_RX_PSD_IPV4_VALID)) &&
    673			  !(csum_info & AM65_CPSW_RX_PSD_CSUM_ERR)) {
    674		/* csum for fragmented packets is unsupported */
    675		if (!(csum_info & AM65_CPSW_RX_PSD_IS_FRAGMENT))
    676			skb->ip_summed = CHECKSUM_UNNECESSARY;
    677	}
    678}
    679
    680static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
    681				     u32 flow_idx)
    682{
    683	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
    684	u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
    685	struct am65_cpsw_ndev_priv *ndev_priv;
    686	struct am65_cpsw_ndev_stats *stats;
    687	struct cppi5_host_desc_t *desc_rx;
    688	struct device *dev = common->dev;
    689	struct sk_buff *skb, *new_skb;
    690	dma_addr_t desc_dma, buf_dma;
    691	struct am65_cpsw_port *port;
    692	struct net_device *ndev;
    693	void **swdata;
    694	u32 *psdata;
    695	int ret = 0;
    696
    697	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
    698	if (ret) {
    699		if (ret != -ENODATA)
    700			dev_err(dev, "RX: pop chn fail %d\n", ret);
    701		return ret;
    702	}
    703
    704	if (cppi5_desc_is_tdcm(desc_dma)) {
    705		dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
    706		return 0;
    707	}
    708
    709	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
    710	dev_dbg(dev, "%s flow_idx: %u desc %pad\n",
    711		__func__, flow_idx, &desc_dma);
    712
    713	swdata = cppi5_hdesc_get_swdata(desc_rx);
    714	skb = *swdata;
    715	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
    716	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
    717	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
    718	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
    719	dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
    720	port = am65_common_get_port(common, port_id);
    721	ndev = port->ndev;
    722	skb->dev = ndev;
    723
    724	psdata = cppi5_hdesc_get_psdata(desc_rx);
    725	/* add RX timestamp */
    726	if (port->rx_ts_enabled)
    727		am65_cpsw_nuss_rx_ts(skb, psdata);
    728	csum_info = psdata[2];
    729	dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
    730
    731	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
    732
    733	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
    734
    735	new_skb = netdev_alloc_skb_ip_align(ndev, AM65_CPSW_MAX_PACKET_SIZE);
    736	if (new_skb) {
    737		ndev_priv = netdev_priv(ndev);
    738		am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
    739		skb_put(skb, pkt_len);
    740		skb->protocol = eth_type_trans(skb, ndev);
    741		am65_cpsw_nuss_rx_csum(skb, csum_info);
    742		napi_gro_receive(&common->napi_rx, skb);
    743
    744		stats = this_cpu_ptr(ndev_priv->stats);
    745
    746		u64_stats_update_begin(&stats->syncp);
    747		stats->rx_packets++;
    748		stats->rx_bytes += pkt_len;
    749		u64_stats_update_end(&stats->syncp);
    750		kmemleak_not_leak(new_skb);
    751	} else {
    752		ndev->stats.rx_dropped++;
    753		new_skb = skb;
    754	}
    755
    756	if (netif_dormant(ndev)) {
    757		dev_kfree_skb_any(new_skb);
    758		ndev->stats.rx_dropped++;
    759		return 0;
    760	}
    761
    762	ret = am65_cpsw_nuss_rx_push(common, new_skb);
    763	if (WARN_ON(ret < 0)) {
    764		dev_kfree_skb_any(new_skb);
    765		ndev->stats.rx_errors++;
    766		ndev->stats.rx_dropped++;
    767	}
    768
    769	return ret;
    770}
    771
    772static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
    773{
    774	struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
    775	int flow = AM65_CPSW_MAX_RX_FLOWS;
    776	int cur_budget, ret;
    777	int num_rx = 0;
    778
    779	/* process every flow */
    780	while (flow--) {
    781		cur_budget = budget - num_rx;
    782
    783		while (cur_budget--) {
    784			ret = am65_cpsw_nuss_rx_packets(common, flow);
    785			if (ret)
    786				break;
    787			num_rx++;
    788		}
    789
    790		if (num_rx >= budget)
    791			break;
    792	}
    793
    794	dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
    795
    796	if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
    797		if (common->rx_irq_disabled) {
    798			common->rx_irq_disabled = false;
    799			enable_irq(common->rx_chns.irq);
    800		}
    801	}
    802
    803	return num_rx;
    804}
    805
    806static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
    807				     struct cppi5_host_desc_t *desc)
    808{
    809	struct cppi5_host_desc_t *first_desc, *next_desc;
    810	dma_addr_t buf_dma, next_desc_dma;
    811	u32 buf_dma_len;
    812
    813	first_desc = desc;
    814	next_desc = first_desc;
    815
    816	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
    817	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
    818
    819	dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);
    820
    821	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
    822	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
    823	while (next_desc_dma) {
    824		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
    825						       next_desc_dma);
    826		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
    827		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
    828
    829		dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
    830			       DMA_TO_DEVICE);
    831
    832		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
    833		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
    834
    835		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
    836	}
    837
    838	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
    839}
    840
    841static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
    842{
    843	struct am65_cpsw_tx_chn *tx_chn = data;
    844	struct cppi5_host_desc_t *desc_tx;
    845	struct sk_buff *skb;
    846	void **swdata;
    847
    848	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
    849	swdata = cppi5_hdesc_get_swdata(desc_tx);
    850	skb = *(swdata);
    851	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
    852
    853	dev_kfree_skb_any(skb);
    854}
    855
    856static struct sk_buff *
    857am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
    858			       dma_addr_t desc_dma)
    859{
    860	struct am65_cpsw_ndev_priv *ndev_priv;
    861	struct am65_cpsw_ndev_stats *stats;
    862	struct cppi5_host_desc_t *desc_tx;
    863	struct net_device *ndev;
    864	struct sk_buff *skb;
    865	void **swdata;
    866
    867	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
    868					     desc_dma);
    869	swdata = cppi5_hdesc_get_swdata(desc_tx);
    870	skb = *(swdata);
    871	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
    872
    873	ndev = skb->dev;
    874
    875	am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
    876
    877	ndev_priv = netdev_priv(ndev);
    878	stats = this_cpu_ptr(ndev_priv->stats);
    879	u64_stats_update_begin(&stats->syncp);
    880	stats->tx_packets++;
    881	stats->tx_bytes += skb->len;
    882	u64_stats_update_end(&stats->syncp);
    883
    884	return skb;
    885}
    886
    887static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
    888				   struct netdev_queue *netif_txq)
    889{
    890	if (netif_tx_queue_stopped(netif_txq)) {
     891		/* Check whether the queue is stopped due to stalled
     892		 * tx dma; if so, wake the queue, since we now have
     893		 * free descriptors for tx
    894		 */
    895		__netif_tx_lock(netif_txq, smp_processor_id());
    896		if (netif_running(ndev) &&
    897		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= MAX_SKB_FRAGS))
    898			netif_tx_wake_queue(netif_txq);
    899
    900		__netif_tx_unlock(netif_txq);
    901	}
    902}
    903
    904static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
    905					   int chn, unsigned int budget)
    906{
    907	struct device *dev = common->dev;
    908	struct am65_cpsw_tx_chn *tx_chn;
    909	struct netdev_queue *netif_txq;
    910	unsigned int total_bytes = 0;
    911	struct net_device *ndev;
    912	struct sk_buff *skb;
    913	dma_addr_t desc_dma;
    914	int res, num_tx = 0;
    915
    916	tx_chn = &common->tx_chns[chn];
    917
    918	while (true) {
    919		spin_lock(&tx_chn->lock);
    920		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
    921		spin_unlock(&tx_chn->lock);
    922		if (res == -ENODATA)
    923			break;
    924
    925		if (cppi5_desc_is_tdcm(desc_dma)) {
    926			if (atomic_dec_and_test(&common->tdown_cnt))
    927				complete(&common->tdown_complete);
    928			break;
    929		}
    930
    931		skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
    932		total_bytes = skb->len;
    933		ndev = skb->dev;
    934		napi_consume_skb(skb, budget);
    935		num_tx++;
    936
    937		netif_txq = netdev_get_tx_queue(ndev, chn);
    938
     939		netdev_tx_completed_queue(netif_txq, 1, total_bytes); /* one skb per iteration */
    940
    941		am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
    942	}
    943
    944	dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
    945
    946	return num_tx;
    947}
    948
    949static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
    950					      int chn, unsigned int budget)
    951{
    952	struct device *dev = common->dev;
    953	struct am65_cpsw_tx_chn *tx_chn;
    954	struct netdev_queue *netif_txq;
    955	unsigned int total_bytes = 0;
    956	struct net_device *ndev;
    957	struct sk_buff *skb;
    958	dma_addr_t desc_dma;
    959	int res, num_tx = 0;
    960
    961	tx_chn = &common->tx_chns[chn];
    962
    963	while (true) {
    964		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
    965		if (res == -ENODATA)
    966			break;
    967
    968		if (cppi5_desc_is_tdcm(desc_dma)) {
    969			if (atomic_dec_and_test(&common->tdown_cnt))
    970				complete(&common->tdown_complete);
    971			break;
    972		}
    973
    974		skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
    975
    976		ndev = skb->dev;
    977		total_bytes += skb->len;
    978		napi_consume_skb(skb, budget);
    979		num_tx++;
    980	}
    981
    982	if (!num_tx)
    983		return 0;
    984
    985	netif_txq = netdev_get_tx_queue(ndev, chn);
    986
    987	netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
    988
    989	am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
    990
    991	dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
    992
    993	return num_tx;
    994}
    995
    996static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
    997{
    998	struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
    999	int num_tx;
   1000
   1001	if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
   1002		num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id, budget);
   1003	else
   1004		num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id, budget);
   1005
   1006	if (num_tx >= budget)
   1007		return budget;
   1008
   1009	if (napi_complete_done(napi_tx, num_tx))
   1010		enable_irq(tx_chn->irq);
   1011
   1012	return 0;
   1013}
   1014
   1015static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
   1016{
   1017	struct am65_cpsw_common *common = dev_id;
   1018
   1019	common->rx_irq_disabled = true;
   1020	disable_irq_nosync(irq);
   1021	napi_schedule(&common->napi_rx);
   1022
   1023	return IRQ_HANDLED;
   1024}
   1025
   1026static irqreturn_t am65_cpsw_nuss_tx_irq(int irq, void *dev_id)
   1027{
   1028	struct am65_cpsw_tx_chn *tx_chn = dev_id;
   1029
   1030	disable_irq_nosync(irq);
   1031	napi_schedule(&tx_chn->napi_tx);
   1032
   1033	return IRQ_HANDLED;
   1034}
   1035
   1036static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
   1037						 struct net_device *ndev)
   1038{
   1039	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
   1040	struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
   1041	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
   1042	struct device *dev = common->dev;
   1043	struct am65_cpsw_tx_chn *tx_chn;
   1044	struct netdev_queue *netif_txq;
   1045	dma_addr_t desc_dma, buf_dma;
   1046	int ret, q_idx, i;
   1047	void **swdata;
   1048	u32 *psdata;
   1049	u32 pkt_len;
   1050
   1051	/* padding enabled in hw */
   1052	pkt_len = skb_headlen(skb);
   1053
   1054	/* SKB TX timestamp */
   1055	if (port->tx_ts_enabled)
   1056		am65_cpts_prep_tx_timestamp(common->cpts, skb);
   1057
   1058	q_idx = skb_get_queue_mapping(skb);
   1059	dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);
   1060
   1061	tx_chn = &common->tx_chns[q_idx];
   1062	netif_txq = netdev_get_tx_queue(ndev, q_idx);
   1063
   1064	/* Map the linear buffer */
   1065	buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len,
   1066				 DMA_TO_DEVICE);
   1067	if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
   1068		dev_err(dev, "Failed to map tx skb buffer\n");
   1069		ndev->stats.tx_errors++;
   1070		goto err_free_skb;
   1071	}
   1072
   1073	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
   1074	if (!first_desc) {
   1075		dev_dbg(dev, "Failed to allocate descriptor\n");
   1076		dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len,
   1077				 DMA_TO_DEVICE);
   1078		goto busy_stop_q;
   1079	}
   1080
   1081	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
   1082			 AM65_CPSW_NAV_PS_DATA_SIZE);
   1083	cppi5_desc_set_pktids(&first_desc->hdr, 0, 0x3FFF);
   1084	cppi5_hdesc_set_pkttype(first_desc, 0x7);
   1085	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);
   1086
   1087	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
   1088	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
   1089	swdata = cppi5_hdesc_get_swdata(first_desc);
   1090	*(swdata) = skb;
   1091	psdata = cppi5_hdesc_get_psdata(first_desc);
   1092
   1093	/* HW csum offload if enabled */
   1094	psdata[2] = 0;
   1095	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
   1096		unsigned int cs_start, cs_offset;
   1097
   1098		cs_start = skb_transport_offset(skb);
   1099		cs_offset = cs_start + skb->csum_offset;
    1100		/* HW numbers bytes starting from 1 */
   1101		psdata[2] = ((cs_offset + 1) << 24) |
   1102			    ((cs_start + 1) << 16) | (skb->len - cs_start);
   1103		dev_dbg(dev, "%s tx psdata:%#x\n", __func__, psdata[2]);
   1104	}
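	/* Worked example (illustrative): for plain TCP over IPv4 with no
	 * VLAN, cs_start = 34 (14B Ethernet + 20B IP) and cs_offset =
	 * 34 + 16 = 50, giving psdata[2] = (51 << 24) | (35 << 16) |
	 * (skb->len - 34); the +1s reflect the HW's 1-based byte numbering.
	 */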
   1105
   1106	if (!skb_is_nonlinear(skb))
   1107		goto done_tx;
   1108
   1109	dev_dbg(dev, "fragmented SKB\n");
   1110
   1111	/* Handle the case where skb is fragmented in pages */
   1112	cur_desc = first_desc;
   1113	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
   1114		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
   1115		u32 frag_size = skb_frag_size(frag);
   1116
   1117		next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
   1118		if (!next_desc) {
   1119			dev_err(dev, "Failed to allocate descriptor\n");
   1120			goto busy_free_descs;
   1121		}
   1122
   1123		buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
   1124					   DMA_TO_DEVICE);
   1125		if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
   1126			dev_err(dev, "Failed to map tx skb page\n");
   1127			k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
   1128			ndev->stats.tx_errors++;
   1129			goto err_free_descs;
   1130		}
   1131
   1132		cppi5_hdesc_reset_hbdesc(next_desc);
   1133		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
   1134		cppi5_hdesc_attach_buf(next_desc,
   1135				       buf_dma, frag_size, buf_dma, frag_size);
   1136
   1137		desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
   1138						      next_desc);
   1139		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
   1140		cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
   1141
   1142		pkt_len += frag_size;
   1143		cur_desc = next_desc;
   1144	}
   1145	WARN_ON(pkt_len != skb->len);
   1146
   1147done_tx:
   1148	skb_tx_timestamp(skb);
   1149
   1150	/* report bql before sending packet */
   1151	netdev_tx_sent_queue(netif_txq, pkt_len);
   1152
   1153	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
   1154	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
   1155	if (AM65_CPSW_IS_CPSW2G(common)) {
   1156		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
   1157	} else {
   1158		spin_lock_bh(&tx_chn->lock);
   1159		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
   1160		spin_unlock_bh(&tx_chn->lock);
   1161	}
   1162	if (ret) {
   1163		dev_err(dev, "can't push desc %d\n", ret);
   1164		/* inform bql */
   1165		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
   1166		ndev->stats.tx_errors++;
   1167		goto err_free_descs;
   1168	}
   1169
   1170	if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
   1171		netif_tx_stop_queue(netif_txq);
    1172		/* Barrier, so that stop_queue is visible to other cpus */
   1173		smp_mb__after_atomic();
   1174		dev_dbg(dev, "netif_tx_stop_queue %d\n", q_idx);
   1175
   1176		/* re-check for smp */
   1177		if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
   1178		    MAX_SKB_FRAGS) {
   1179			netif_tx_wake_queue(netif_txq);
   1180			dev_dbg(dev, "netif_tx_wake_queue %d\n", q_idx);
   1181		}
   1182	}
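	/* Note (illustrative): the re-check above closes a race with the
	 * completion path - descriptors may be freed between the avail test
	 * and netif_tx_stop_queue(), so after the barrier the count is
	 * sampled again and the queue is woken if space already appeared.
	 */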
   1183
   1184	return NETDEV_TX_OK;
   1185
   1186err_free_descs:
   1187	am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
   1188err_free_skb:
   1189	ndev->stats.tx_dropped++;
   1190	dev_kfree_skb_any(skb);
   1191	return NETDEV_TX_OK;
   1192
   1193busy_free_descs:
   1194	am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
   1195busy_stop_q:
   1196	netif_tx_stop_queue(netif_txq);
   1197	return NETDEV_TX_BUSY;
   1198}
   1199
   1200static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
   1201						    void *addr)
   1202{
   1203	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
   1204	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
   1205	struct sockaddr *sockaddr = (struct sockaddr *)addr;
   1206	int ret;
   1207
   1208	ret = eth_prepare_mac_addr_change(ndev, addr);
   1209	if (ret < 0)
   1210		return ret;
   1211
   1212	ret = pm_runtime_resume_and_get(common->dev);
   1213	if (ret < 0)
   1214		return ret;
   1215
   1216	cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
   1217			   HOST_PORT_NUM, 0, 0);
   1218	cpsw_ale_add_ucast(common->ale, sockaddr->sa_data,
   1219			   HOST_PORT_NUM, ALE_SECURE, 0);
   1220
   1221	am65_cpsw_port_set_sl_mac(port, addr);
   1222	eth_commit_mac_addr_change(ndev, sockaddr);
   1223
   1224	pm_runtime_put(common->dev);
   1225
   1226	return 0;
   1227}
   1228
   1229static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
   1230				       struct ifreq *ifr)
   1231{
   1232	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
   1233	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
   1234	u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
   1235	struct hwtstamp_config cfg;
   1236
   1237	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
   1238		return -EOPNOTSUPP;
   1239
   1240	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
   1241		return -EFAULT;
   1242
   1243	/* TX HW timestamp */
   1244	switch (cfg.tx_type) {
   1245	case HWTSTAMP_TX_OFF:
   1246	case HWTSTAMP_TX_ON:
   1247		break;
   1248	default:
   1249		return -ERANGE;
   1250	}
   1251
   1252	switch (cfg.rx_filter) {
   1253	case HWTSTAMP_FILTER_NONE:
   1254		port->rx_ts_enabled = false;
   1255		break;
   1256	case HWTSTAMP_FILTER_ALL:
   1257	case HWTSTAMP_FILTER_SOME:
   1258	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
   1259	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
   1260	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
   1261	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
   1262	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
   1263	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
   1264	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
   1265	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
   1266	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
   1267	case HWTSTAMP_FILTER_PTP_V2_EVENT:
   1268	case HWTSTAMP_FILTER_PTP_V2_SYNC:
   1269	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
   1270	case HWTSTAMP_FILTER_NTP_ALL:
   1271		port->rx_ts_enabled = true;
   1272		cfg.rx_filter = HWTSTAMP_FILTER_ALL;
   1273		break;
   1274	default:
   1275		return -ERANGE;
   1276	}
   1277
   1278	port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON);
   1279
   1280	/* cfg TX timestamp */
   1281	seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
   1282		  AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT) | ETH_P_1588;
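	/* Worked example (illustrative): with AM65_CPSW_TS_SEQ_ID_OFFSET =
	 * 0x1e and ETH_P_1588 = 0x88F7, this yields
	 * seq_id = (0x1e << 16) | 0x88F7 = 0x001e88f7.
	 */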
   1283
   1284	ts_vlan_ltype = ETH_P_8021Q;
   1285
   1286	ts_ctrl_ltype2 = ETH_P_1588 |
   1287			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 |
   1288			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 |
   1289			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 |
   1290			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 |
   1291			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 |
   1292			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 |
   1293			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 |
   1294			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO;
   1295
   1296	ts_ctrl = AM65_CPSW_TS_EVENT_MSG_TYPE_BITS <<
   1297		  AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT;
   1298
   1299	if (port->tx_ts_enabled)
   1300		ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
   1301			   AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;
   1302
   1303	writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
   1304	writel(ts_vlan_ltype, port->port_base +
   1305	       AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
   1306	writel(ts_ctrl_ltype2, port->port_base +
   1307	       AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
   1308	writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);
   1309
   1310	/* en/dis RX timestamp */
   1311	am65_cpts_rx_enable(common->cpts, port->rx_ts_enabled);
   1312
   1313	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
   1314}
   1315
   1316static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
   1317				       struct ifreq *ifr)
   1318{
   1319	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
   1320	struct hwtstamp_config cfg;
   1321
   1322	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
   1323		return -EOPNOTSUPP;
   1324
   1325	cfg.flags = 0;
   1326	cfg.tx_type = port->tx_ts_enabled ?
   1327		      HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
   1328	cfg.rx_filter = port->rx_ts_enabled ?
   1329			HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
   1330
   1331	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
   1332}
   1333
   1334static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
   1335					  struct ifreq *req, int cmd)
   1336{
   1337	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
   1338
   1339	if (!netif_running(ndev))
   1340		return -EINVAL;
   1341
   1342	switch (cmd) {
   1343	case SIOCSHWTSTAMP:
   1344		return am65_cpsw_nuss_hwtstamp_set(ndev, req);
   1345	case SIOCGHWTSTAMP:
   1346		return am65_cpsw_nuss_hwtstamp_get(ndev, req);
   1347	}
   1348
   1349	return phylink_mii_ioctl(port->slave.phylink, req, cmd);
   1350}
   1351
   1352static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
   1353					 struct rtnl_link_stats64 *stats)
   1354{
   1355	struct am65_cpsw_ndev_priv *ndev_priv = netdev_priv(dev);
   1356	unsigned int start;
   1357	int cpu;
   1358
   1359	for_each_possible_cpu(cpu) {
   1360		struct am65_cpsw_ndev_stats *cpu_stats;
   1361		u64 rx_packets;
   1362		u64 rx_bytes;
   1363		u64 tx_packets;
   1364		u64 tx_bytes;
   1365
   1366		cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu);
   1367		do {
   1368			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
   1369			rx_packets = cpu_stats->rx_packets;
   1370			rx_bytes   = cpu_stats->rx_bytes;
   1371			tx_packets = cpu_stats->tx_packets;
   1372			tx_bytes   = cpu_stats->tx_bytes;
   1373		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
   1374
   1375		stats->rx_packets += rx_packets;
   1376		stats->rx_bytes   += rx_bytes;
   1377		stats->tx_packets += tx_packets;
   1378		stats->tx_bytes   += tx_bytes;
   1379	}
   1380
   1381	stats->rx_errors	= dev->stats.rx_errors;
   1382	stats->rx_dropped	= dev->stats.rx_dropped;
   1383	stats->tx_dropped	= dev->stats.tx_dropped;
   1384}
   1385
   1386static struct devlink_port *am65_cpsw_ndo_get_devlink_port(struct net_device *ndev)
   1387{
   1388	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
   1389
   1390	return &port->devlink_port;
   1391}
   1392
   1393static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
   1394	.ndo_open		= am65_cpsw_nuss_ndo_slave_open,
   1395	.ndo_stop		= am65_cpsw_nuss_ndo_slave_stop,
   1396	.ndo_start_xmit		= am65_cpsw_nuss_ndo_slave_xmit,
   1397	.ndo_set_rx_mode	= am65_cpsw_nuss_ndo_slave_set_rx_mode,
   1398	.ndo_get_stats64        = am65_cpsw_nuss_ndo_get_stats,
   1399	.ndo_validate_addr	= eth_validate_addr,
   1400	.ndo_set_mac_address	= am65_cpsw_nuss_ndo_slave_set_mac_address,
   1401	.ndo_tx_timeout		= am65_cpsw_nuss_ndo_host_tx_timeout,
   1402	.ndo_vlan_rx_add_vid	= am65_cpsw_nuss_ndo_slave_add_vid,
   1403	.ndo_vlan_rx_kill_vid	= am65_cpsw_nuss_ndo_slave_kill_vid,
   1404	.ndo_eth_ioctl		= am65_cpsw_nuss_ndo_slave_ioctl,
   1405	.ndo_setup_tc           = am65_cpsw_qos_ndo_setup_tc,
   1406	.ndo_get_devlink_port   = am65_cpsw_ndo_get_devlink_port,
   1407};
   1408
   1409static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode,
   1410				      const struct phylink_link_state *state)
   1411{
   1412	/* Currently not used */
   1413}
   1414
   1415static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode,
   1416					 phy_interface_t interface)
   1417{
   1418	struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
   1419							  phylink_config);
   1420	struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
   1421	struct am65_cpsw_common *common = port->common;
   1422	struct net_device *ndev = port->ndev;
   1423	int tmo;
   1424
   1425	/* disable forwarding */
   1426	cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
   1427
   1428	cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
   1429
   1430	tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
   1431	dev_dbg(common->dev, "down msc_sl %08x tmo %d\n",
   1432		cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS), tmo);
   1433
   1434	cpsw_sl_ctl_reset(port->slave.mac_sl);
   1435
   1436	am65_cpsw_qos_link_down(ndev);
   1437	netif_tx_stop_all_queues(ndev);
   1438}
   1439
   1440static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy_device *phy,
   1441				       unsigned int mode, phy_interface_t interface, int speed,
   1442				       int duplex, bool tx_pause, bool rx_pause)
   1443{
   1444	struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
   1445							  phylink_config);
   1446	struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
   1447	struct am65_cpsw_common *common = port->common;
   1448	u32 mac_control = CPSW_SL_CTL_GMII_EN;
   1449	struct net_device *ndev = port->ndev;
   1450
   1451	if (speed == SPEED_1000)
   1452		mac_control |= CPSW_SL_CTL_GIG;
   1453	if (speed == SPEED_10 && interface == PHY_INTERFACE_MODE_RGMII)
    1454		/* Can be used with in-band mode only */
   1455		mac_control |= CPSW_SL_CTL_EXT_EN;
   1456	if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII)
   1457		mac_control |= CPSW_SL_CTL_IFCTL_A;
   1458	if (duplex)
   1459		mac_control |= CPSW_SL_CTL_FULLDUPLEX;
   1460
   1461	/* rx_pause/tx_pause */
   1462	if (rx_pause)
   1463		mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
   1464
   1465	if (tx_pause)
   1466		mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
   1467
   1468	cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
   1469
   1470	/* enable forwarding */
   1471	cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
   1472
   1473	am65_cpsw_qos_link_up(ndev, speed);
   1474	netif_tx_wake_all_queues(ndev);
   1475}
   1476
   1477static const struct phylink_mac_ops am65_cpsw_phylink_mac_ops = {
   1478	.validate = phylink_generic_validate,
   1479	.mac_config = am65_cpsw_nuss_mac_config,
   1480	.mac_link_down = am65_cpsw_nuss_mac_link_down,
   1481	.mac_link_up = am65_cpsw_nuss_mac_link_up,
   1482};
   1483
   1484static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
   1485{
   1486	struct am65_cpsw_common *common = port->common;
   1487
   1488	if (!port->disabled)
   1489		return;
   1490
   1491	cpsw_ale_control_set(common->ale, port->port_id,
   1492			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
   1493
   1494	cpsw_sl_reset(port->slave.mac_sl, 100);
   1495	cpsw_sl_ctl_reset(port->slave.mac_sl);
   1496}
   1497
   1498static void am65_cpsw_nuss_free_tx_chns(void *data)
   1499{
   1500	struct am65_cpsw_common *common = data;
   1501	int i;
   1502
   1503	for (i = 0; i < common->tx_ch_num; i++) {
   1504		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
   1505
   1506		if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
   1507			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
   1508
   1509		if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
   1510			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
   1511
   1512		memset(tx_chn, 0, sizeof(*tx_chn));
   1513	}
   1514}
   1515
   1516void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
   1517{
   1518	struct device *dev = common->dev;
   1519	int i;
   1520
   1521	devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);
   1522
   1523	for (i = 0; i < common->tx_ch_num; i++) {
   1524		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
   1525
   1526		if (tx_chn->irq)
   1527			devm_free_irq(dev, tx_chn->irq, tx_chn);
   1528
   1529		netif_napi_del(&tx_chn->napi_tx);
   1530
   1531		if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
   1532			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
   1533
   1534		if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
   1535			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
   1536
   1537		memset(tx_chn, 0, sizeof(*tx_chn));
   1538	}
   1539}
   1540
   1541static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
   1542{
   1543	u32  max_desc_num = ALIGN(AM65_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS);
   1544	struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 };
   1545	struct device *dev = common->dev;
   1546	struct k3_ring_cfg ring_cfg = {
   1547		.elm_size = K3_RINGACC_RING_ELSIZE_8,
   1548		.mode = K3_RINGACC_RING_MODE_RING,
   1549		.flags = 0
   1550	};
   1551	u32 hdesc_size;
   1552	int i, ret = 0;
   1553
   1554	hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
   1555					   AM65_CPSW_NAV_SW_DATA_SIZE);
   1556
   1557	tx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
   1558	tx_cfg.tx_cfg = ring_cfg;
   1559	tx_cfg.txcq_cfg = ring_cfg;
   1560	tx_cfg.tx_cfg.size = max_desc_num;
   1561	tx_cfg.txcq_cfg.size = max_desc_num;
   1562
   1563	for (i = 0; i < common->tx_ch_num; i++) {
   1564		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
   1565
   1566		snprintf(tx_chn->tx_chn_name,
   1567			 sizeof(tx_chn->tx_chn_name), "tx%d", i);
   1568
   1569		spin_lock_init(&tx_chn->lock);
   1570		tx_chn->common = common;
   1571		tx_chn->id = i;
   1572		tx_chn->descs_num = max_desc_num;
   1573
   1574		tx_chn->tx_chn =
   1575			k3_udma_glue_request_tx_chn(dev,
   1576						    tx_chn->tx_chn_name,
   1577						    &tx_cfg);
   1578		if (IS_ERR(tx_chn->tx_chn)) {
   1579			ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn),
   1580					    "Failed to request tx dma channel\n");
   1581			goto err;
   1582		}
   1583		tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
   1584
   1585		tx_chn->desc_pool = k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
   1586								  tx_chn->descs_num,
   1587								  hdesc_size,
   1588								  tx_chn->tx_chn_name);
   1589		if (IS_ERR(tx_chn->desc_pool)) {
   1590			ret = PTR_ERR(tx_chn->desc_pool);
    1591			dev_err(dev, "Failed to create tx desc pool %d\n", ret);
   1592			goto err;
   1593		}
   1594
    1595		tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
    1596		if (tx_chn->irq <= 0) {
    1597			dev_err(dev, "Failed to get tx dma irq %d\n", tx_chn->irq);
    1598			ret = tx_chn->irq ?: -ENXIO;
    1599			goto err;
    1600		}
   1601
   1602		snprintf(tx_chn->tx_chn_name,
   1603			 sizeof(tx_chn->tx_chn_name), "%s-tx%d",
   1604			 dev_name(dev), tx_chn->id);
   1605	}
   1606
   1607err:
   1608	i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
   1609	if (i) {
   1610		dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
   1611		return i;
   1612	}
   1613
   1614	return ret;
   1615}
   1616
   1617static void am65_cpsw_nuss_free_rx_chns(void *data)
   1618{
   1619	struct am65_cpsw_common *common = data;
   1620	struct am65_cpsw_rx_chn *rx_chn;
   1621
   1622	rx_chn = &common->rx_chns;
   1623
   1624	if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
   1625		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
   1626
   1627	if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
   1628		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
   1629}
   1630
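        	/* Set up the single RX DMA channel with AM65_CPSW_MAX_RX_FLOWS
        	 * flows.  The free-descriptor queue (FDQ) ring ID is taken from
        	 * flow 0 and, via K3_RINGACC_RING_SHARED, reused by all other
        	 * flows, so every flow refills buffers from one shared FDQ.
        	 */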
   1631static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
   1632{
   1633	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
   1634	struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
   1635	u32  max_desc_num = AM65_CPSW_MAX_RX_DESC;
   1636	struct device *dev = common->dev;
   1637	u32 hdesc_size;
   1638	u32 fdqring_id;
   1639	int i, ret = 0;
   1640
   1641	hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
   1642					   AM65_CPSW_NAV_SW_DATA_SIZE);
   1643
   1644	rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
   1645	rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS;
   1646	rx_cfg.flow_id_base = common->rx_flow_id_base;
   1647
   1648	/* init all flows */
   1649	rx_chn->dev = dev;
   1650	rx_chn->descs_num = max_desc_num;
   1651
   1652	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
   1653	if (IS_ERR(rx_chn->rx_chn)) {
   1654		ret = dev_err_probe(dev, PTR_ERR(rx_chn->rx_chn),
   1655				    "Failed to request rx dma channel\n");
   1656		goto err;
   1657	}
   1658	rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
   1659
   1660	rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
   1661							  rx_chn->descs_num,
   1662							  hdesc_size, "rx");
   1663	if (IS_ERR(rx_chn->desc_pool)) {
   1664		ret = PTR_ERR(rx_chn->desc_pool);
    1665		dev_err(dev, "Failed to create rx desc pool %d\n", ret);
   1666		goto err;
   1667	}
   1668
   1669	common->rx_flow_id_base =
   1670			k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
   1671	dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);
   1672
   1673	fdqring_id = K3_RINGACC_RING_ID_ANY;
   1674	for (i = 0; i < rx_cfg.flow_id_num; i++) {
   1675		struct k3_ring_cfg rxring_cfg = {
   1676			.elm_size = K3_RINGACC_RING_ELSIZE_8,
   1677			.mode = K3_RINGACC_RING_MODE_RING,
   1678			.flags = 0,
   1679		};
   1680		struct k3_ring_cfg fdqring_cfg = {
   1681			.elm_size = K3_RINGACC_RING_ELSIZE_8,
   1682			.flags = K3_RINGACC_RING_SHARED,
   1683		};
   1684		struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
   1685			.rx_cfg = rxring_cfg,
   1686			.rxfdq_cfg = fdqring_cfg,
   1687			.ring_rxq_id = K3_RINGACC_RING_ID_ANY,
   1688			.src_tag_lo_sel =
   1689				K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
   1690		};
   1691
   1692		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
   1693		rx_flow_cfg.rx_cfg.size = max_desc_num;
   1694		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
   1695		rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
   1696
   1697		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
   1698						i, &rx_flow_cfg);
   1699		if (ret) {
   1700			dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
   1701			goto err;
   1702		}
   1703		if (!i)
   1704			fdqring_id =
   1705				k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
   1706								i);
   1707
   1708		rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
   1709
   1710		if (rx_chn->irq <= 0) {
   1711			dev_err(dev, "Failed to get rx dma irq %d\n",
   1712				rx_chn->irq);
   1713			ret = -ENXIO;
   1714			goto err;
   1715		}
   1716	}
   1717
   1718err:
   1719	i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
   1720	if (i) {
   1721		dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
   1722		return i;
   1723	}
   1724
   1725	return ret;
   1726}
   1727
   1728static int am65_cpsw_nuss_init_host_p(struct am65_cpsw_common *common)
   1729{
   1730	struct am65_cpsw_host *host_p = am65_common_get_host(common);
   1731
   1732	host_p->common = common;
   1733	host_p->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE;
   1734	host_p->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE;
   1735
   1736	return 0;
   1737}
   1738
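        	/* Read the factory MAC address for a port from an e-fuse region
        	 * exposed through syscon.  The property carries a phandle plus a
        	 * register offset; an illustrative (board-specific, not normative)
        	 * binding could look like:
        	 *
        	 *	ti,syscon-efuse = <&mcu_conf 0x200>;
        	 *
        	 * mac_hi holds the two most significant octets, mac_lo the other
        	 * four: e.g. mac_hi = 0x0000aabb and mac_lo = 0xccddeeff decode to
        	 * aa:bb:cc:dd:ee:ff.
        	 */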
   1739static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
   1740					   int slave, u8 *mac_addr)
   1741{
   1742	u32 mac_lo, mac_hi, offset;
   1743	struct regmap *syscon;
   1744	int ret;
   1745
   1746	syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse");
   1747	if (IS_ERR(syscon)) {
   1748		if (PTR_ERR(syscon) == -ENODEV)
   1749			return 0;
   1750		return PTR_ERR(syscon);
   1751	}
   1752
   1753	ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1,
   1754					 &offset);
   1755	if (ret)
   1756		return ret;
   1757
   1758	regmap_read(syscon, offset, &mac_lo);
   1759	regmap_read(syscon, offset + 4, &mac_hi);
   1760
   1761	mac_addr[0] = (mac_hi >> 8) & 0xff;
   1762	mac_addr[1] = mac_hi & 0xff;
   1763	mac_addr[2] = (mac_lo >> 24) & 0xff;
   1764	mac_addr[3] = (mac_lo >> 16) & 0xff;
   1765	mac_addr[4] = (mac_lo >> 8) & 0xff;
   1766	mac_addr[5] = mac_lo & 0xff;
   1767
   1768	return 0;
   1769}
   1770
   1771static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
   1772{
   1773	struct device *dev = common->dev;
   1774	struct device_node *node;
   1775	struct am65_cpts *cpts;
   1776	void __iomem *reg_base;
   1777
   1778	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
   1779		return 0;
   1780
   1781	node = of_get_child_by_name(dev->of_node, "cpts");
   1782	if (!node) {
   1783		dev_err(dev, "%s cpts not found\n", __func__);
   1784		return -ENOENT;
   1785	}
   1786
   1787	reg_base = common->cpsw_base + AM65_CPSW_NU_CPTS_BASE;
   1788	cpts = am65_cpts_create(dev, reg_base, node);
   1789	if (IS_ERR(cpts)) {
   1790		int ret = PTR_ERR(cpts);
   1791
   1792		of_node_put(node);
   1793		if (ret == -EOPNOTSUPP) {
   1794			dev_info(dev, "cpts disabled\n");
   1795			return 0;
   1796		}
   1797
   1798		dev_err(dev, "cpts create err %d\n", ret);
   1799		return ret;
   1800	}
   1801	common->cpts = cpts;
    1802	/* Forbid PM runtime if CPTS is running.
    1803	 * K3 CPSWxG modules may completely lose context during ON->OFF
    1804	 * transitions depending on integration:
    1805	 * AM65x/J721E MCU CPSW2G: context is preserved
    1806	 * J721E MAIN_CPSW9G: context is lost
    1807	 */
   1808	pm_runtime_forbid(dev);
   1809
   1810	return 0;
   1811}
   1812
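        	/* Walk the "ethernet-ports" container and set up each "port"
        	 * child.  A sketch of a port node this parser accepts
        	 * (illustrative values, not taken from any particular board):
        	 *
        	 *	port@1 {
        	 *		reg = <1>;
        	 *		label = "port1";
        	 *		phys = <&phy_gmii_sel 1>;
        	 *		phy-mode = "rgmii-rxid";
        	 *		mac-address = [00 00 00 00 00 00];
        	 *	};
        	 *
        	 * Disabled ports are only recorded in disabled_ports_mask; for
        	 * enabled ports the PHY, phy-mode and MAC address are resolved.
        	 */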
   1813static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
   1814{
   1815	struct device_node *node, *port_np;
   1816	struct device *dev = common->dev;
   1817	int ret;
   1818
   1819	node = of_get_child_by_name(dev->of_node, "ethernet-ports");
   1820	if (!node)
   1821		return -ENOENT;
   1822
   1823	for_each_child_of_node(node, port_np) {
   1824		struct am65_cpsw_port *port;
   1825		u32 port_id;
   1826
   1827		/* it is not a slave port node, continue */
   1828		if (strcmp(port_np->name, "port"))
   1829			continue;
   1830
   1831		ret = of_property_read_u32(port_np, "reg", &port_id);
   1832		if (ret < 0) {
   1833			dev_err(dev, "%pOF error reading port_id %d\n",
   1834				port_np, ret);
   1835			goto of_node_put;
   1836		}
   1837
   1838		if (!port_id || port_id > common->port_num) {
   1839			dev_err(dev, "%pOF has invalid port_id %u %s\n",
   1840				port_np, port_id, port_np->name);
   1841			ret = -EINVAL;
   1842			goto of_node_put;
   1843		}
   1844
   1845		port = am65_common_get_port(common, port_id);
   1846		port->port_id = port_id;
   1847		port->common = common;
   1848		port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE +
   1849				  AM65_CPSW_NU_PORTS_OFFSET * (port_id);
   1850		port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
   1851				  (AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
   1852		port->name = of_get_property(port_np, "label", NULL);
   1853		port->fetch_ram_base =
   1854				common->cpsw_base + AM65_CPSW_NU_FRAM_BASE +
   1855				(AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
   1856
   1857		port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
   1858		if (IS_ERR(port->slave.mac_sl)) {
   1859			ret = PTR_ERR(port->slave.mac_sl);
   1860			goto of_node_put;
   1861		}
   1862
   1863		port->disabled = !of_device_is_available(port_np);
   1864		if (port->disabled) {
   1865			common->disabled_ports_mask |= BIT(port->port_id);
   1866			continue;
   1867		}
   1868
   1869		port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL);
   1870		if (IS_ERR(port->slave.ifphy)) {
   1871			ret = PTR_ERR(port->slave.ifphy);
   1872			dev_err(dev, "%pOF error retrieving port phy: %d\n",
   1873				port_np, ret);
   1874			goto of_node_put;
   1875		}
   1876
   1877		port->slave.mac_only =
   1878				of_property_read_bool(port_np, "ti,mac-only");
   1879
   1880		/* get phy/link info */
   1881		port->slave.phy_node = port_np;
   1882		ret = of_get_phy_mode(port_np, &port->slave.phy_if);
   1883		if (ret) {
   1884			dev_err(dev, "%pOF read phy-mode err %d\n",
   1885				port_np, ret);
   1886			goto of_node_put;
   1887		}
   1888
   1889		ret = of_get_mac_address(port_np, port->slave.mac_addr);
   1890		if (ret) {
   1891			am65_cpsw_am654_get_efuse_macid(port_np,
   1892							port->port_id,
   1893							port->slave.mac_addr);
   1894			if (!is_valid_ether_addr(port->slave.mac_addr)) {
   1895				eth_random_addr(port->slave.mac_addr);
    1896				dev_err(dev, "Using random MAC address\n");
   1897			}
   1898		}
   1899	}
   1900	of_node_put(node);
   1901
    1902	/* is there at least one external port? */
   1903	if (!(~common->disabled_ports_mask & GENMASK(common->port_num, 1))) {
    1904		dev_err(dev, "No external ports are available\n");
   1905		return -ENODEV;
   1906	}
   1907
   1908	return 0;
   1909
   1910of_node_put:
   1911	of_node_put(port_np);
   1912	of_node_put(node);
   1913	return ret;
   1914}
   1915
   1916static void am65_cpsw_pcpu_stats_free(void *data)
   1917{
   1918	struct am65_cpsw_ndev_stats __percpu *stats = data;
   1919
   1920	free_percpu(stats);
   1921}
   1922
   1923static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
   1924{
   1925	struct am65_cpsw_port *port;
   1926	int i;
   1927
   1928	for (i = 0; i < common->port_num; i++) {
   1929		port = &common->ports[i];
   1930		if (port->slave.phylink)
   1931			phylink_destroy(port->slave.phylink);
   1932	}
   1933}
   1934
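        	/* Allocate and initialize the net_device of one external port:
        	 * MAC address, MTU limits, feature flags and a phylink instance
        	 * bound to the port's phy-mode.  register_netdev() happens later,
        	 * in am65_cpsw_nuss_register_ndevs(), once DMA resources exist.
        	 */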
   1935static int
   1936am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
   1937{
   1938	struct am65_cpsw_ndev_priv *ndev_priv;
   1939	struct device *dev = common->dev;
   1940	struct am65_cpsw_port *port;
   1941	struct phylink *phylink;
   1942	int ret;
   1943
   1944	port = &common->ports[port_idx];
   1945
   1946	if (port->disabled)
   1947		return 0;
   1948
   1949	/* alloc netdev */
   1950	port->ndev = devm_alloc_etherdev_mqs(common->dev,
   1951					     sizeof(struct am65_cpsw_ndev_priv),
   1952					     AM65_CPSW_MAX_TX_QUEUES,
   1953					     AM65_CPSW_MAX_RX_QUEUES);
   1954	if (!port->ndev) {
   1955		dev_err(dev, "error allocating slave net_device %u\n",
   1956			port->port_id);
   1957		return -ENOMEM;
   1958	}
   1959
   1960	ndev_priv = netdev_priv(port->ndev);
   1961	ndev_priv->port = port;
   1962	ndev_priv->msg_enable = AM65_CPSW_DEBUG;
   1963	SET_NETDEV_DEV(port->ndev, dev);
   1964
   1965	eth_hw_addr_set(port->ndev, port->slave.mac_addr);
   1966
   1967	port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
   1968	port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
   1969	port->ndev->hw_features = NETIF_F_SG |
   1970				  NETIF_F_RXCSUM |
   1971				  NETIF_F_HW_CSUM |
   1972				  NETIF_F_HW_TC;
   1973	port->ndev->features = port->ndev->hw_features |
   1974			       NETIF_F_HW_VLAN_CTAG_FILTER;
    1975	port->ndev->vlan_features |= NETIF_F_SG;
   1976	port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
   1977	port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
   1978
   1979	/* Configuring Phylink */
   1980	port->slave.phylink_config.dev = &port->ndev->dev;
   1981	port->slave.phylink_config.type = PHYLINK_NETDEV;
   1982	port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
   1983
   1984	phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
   1985
   1986	phylink = phylink_create(&port->slave.phylink_config,
   1987				 of_node_to_fwnode(port->slave.phy_node),
   1988				 port->slave.phy_if,
   1989				 &am65_cpsw_phylink_mac_ops);
   1990	if (IS_ERR(phylink))
   1991		return PTR_ERR(phylink);
   1992
   1993	port->slave.phylink = phylink;
   1994
    1995	/* Disable TX checksum offload by default due to HW erratum i2027 */
   1996	if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
   1997		port->ndev->features &= ~NETIF_F_HW_CSUM;
   1998
   1999	ndev_priv->stats = netdev_alloc_pcpu_stats(struct am65_cpsw_ndev_stats);
   2000	if (!ndev_priv->stats)
   2001		return -ENOMEM;
   2002
   2003	ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free,
   2004				       ndev_priv->stats);
   2005	if (ret)
   2006		dev_err(dev, "failed to add percpu stat free action %d\n", ret);
   2007
   2008	if (!common->dma_ndev)
   2009		common->dma_ndev = port->ndev;
   2010
   2011	return ret;
   2012}
   2013
   2014static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
   2015{
   2016	int ret;
   2017	int i;
   2018
   2019	for (i = 0; i < common->port_num; i++) {
   2020		ret = am65_cpsw_nuss_init_port_ndev(common, i);
   2021		if (ret)
   2022			return ret;
   2023	}
   2024
   2025	netif_napi_add(common->dma_ndev, &common->napi_rx,
   2026		       am65_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT);
   2027
   2028	return ret;
   2029}
   2030
   2031static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
   2032{
   2033	struct device *dev = common->dev;
   2034	int i, ret = 0;
   2035
   2036	for (i = 0; i < common->tx_ch_num; i++) {
   2037		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
   2038
   2039		netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
   2040				  am65_cpsw_nuss_tx_poll);
   2041
   2042		ret = devm_request_irq(dev, tx_chn->irq,
   2043				       am65_cpsw_nuss_tx_irq,
   2044				       IRQF_TRIGGER_HIGH,
   2045				       tx_chn->tx_chn_name, tx_chn);
   2046		if (ret) {
   2047			dev_err(dev, "failure requesting tx%u irq %u, %d\n",
   2048				tx_chn->id, tx_chn->irq, ret);
   2049			goto err;
   2050		}
   2051	}
   2052
   2053err:
   2054	return ret;
   2055}
   2056
   2057static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
   2058{
   2059	struct am65_cpsw_port *port;
   2060	int i;
   2061
   2062	for (i = 0; i < common->port_num; i++) {
   2063		port = &common->ports[i];
   2064		if (port->ndev)
   2065			unregister_netdev(port->ndev);
   2066	}
   2067}
   2068
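        	/* skb->offload_fwd_mark tells the bridge that forwarding between
        	 * ports already happened in hardware.  Set it only once every
        	 * enabled external port has joined the same bridge; with partial
        	 * membership the switch must not claim packets as offloaded.
        	 */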
   2069static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *common)
   2070{
   2071	int set_val = 0;
   2072	int i;
   2073
   2074	if (common->br_members == (GENMASK(common->port_num, 1) & ~common->disabled_ports_mask))
   2075		set_val = 1;
   2076
   2077	dev_dbg(common->dev, "set offload_fwd_mark %d\n", set_val);
   2078
   2079	for (i = 1; i <= common->port_num; i++) {
   2080		struct am65_cpsw_port *port = am65_common_get_port(common, i);
   2081		struct am65_cpsw_ndev_priv *priv;
   2082
   2083		if (!port->ndev)
   2084			continue;
   2085
   2086		priv = am65_ndev_to_priv(port->ndev);
   2087		priv->offload_fwd_mark = set_val;
   2088	}
   2089}
   2090
   2091bool am65_cpsw_port_dev_check(const struct net_device *ndev)
   2092{
   2093	if (ndev->netdev_ops == &am65_cpsw_nuss_netdev_ops) {
   2094		struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
   2095
   2096		return !common->is_emac_mode;
   2097	}
   2098
   2099	return false;
   2100}
   2101
   2102static int am65_cpsw_netdevice_port_link(struct net_device *ndev,
   2103					 struct net_device *br_ndev,
   2104					 struct netlink_ext_ack *extack)
   2105{
   2106	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
   2107	struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
   2108	int err;
   2109
   2110	if (!common->br_members) {
   2111		common->hw_bridge_dev = br_ndev;
   2112	} else {
    2113		/* Adding the port to a second bridge is
    2114		 * unsupported
    2115		 */
   2116		if (common->hw_bridge_dev != br_ndev)
   2117			return -EOPNOTSUPP;
   2118	}
   2119
   2120	err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
   2121					    false, extack);
   2122	if (err)
   2123		return err;
   2124
   2125	common->br_members |= BIT(priv->port->port_id);
   2126
   2127	am65_cpsw_port_offload_fwd_mark_update(common);
   2128
   2129	return NOTIFY_DONE;
   2130}
   2131
   2132static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev)
   2133{
   2134	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
   2135	struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
   2136
   2137	switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL);
   2138
   2139	common->br_members &= ~BIT(priv->port->port_id);
   2140
   2141	am65_cpsw_port_offload_fwd_mark_update(common);
   2142
   2143	if (!common->br_members)
   2144		common->hw_bridge_dev = NULL;
   2145}
   2146
   2147/* netdev notifier */
   2148static int am65_cpsw_netdevice_event(struct notifier_block *unused,
   2149				     unsigned long event, void *ptr)
   2150{
   2151	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
   2152	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
   2153	struct netdev_notifier_changeupper_info *info;
   2154	int ret = NOTIFY_DONE;
   2155
   2156	if (!am65_cpsw_port_dev_check(ndev))
   2157		return NOTIFY_DONE;
   2158
   2159	switch (event) {
   2160	case NETDEV_CHANGEUPPER:
   2161		info = ptr;
   2162
   2163		if (netif_is_bridge_master(info->upper_dev)) {
   2164			if (info->linking)
   2165				ret = am65_cpsw_netdevice_port_link(ndev,
   2166								    info->upper_dev,
   2167								    extack);
   2168			else
   2169				am65_cpsw_netdevice_port_unlink(ndev);
   2170		}
   2171		break;
   2172	default:
   2173		return NOTIFY_DONE;
   2174	}
   2175
   2176	return notifier_from_errno(ret);
   2177}
   2178
   2179static int am65_cpsw_register_notifiers(struct am65_cpsw_common *cpsw)
   2180{
   2181	int ret = 0;
   2182
   2183	if (AM65_CPSW_IS_CPSW2G(cpsw) ||
   2184	    !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
   2185		return 0;
   2186
   2187	cpsw->am65_cpsw_netdevice_nb.notifier_call = &am65_cpsw_netdevice_event;
   2188	ret = register_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
   2189	if (ret) {
   2190		dev_err(cpsw->dev, "can't register netdevice notifier\n");
   2191		return ret;
   2192	}
   2193
   2194	ret = am65_cpsw_switchdev_register_notifiers(cpsw);
   2195	if (ret)
   2196		unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
   2197
   2198	return ret;
   2199}
   2200
   2201static void am65_cpsw_unregister_notifiers(struct am65_cpsw_common *cpsw)
   2202{
   2203	if (AM65_CPSW_IS_CPSW2G(cpsw) ||
   2204	    !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
   2205		return;
   2206
   2207	am65_cpsw_switchdev_unregister_notifiers(cpsw);
   2208	unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
   2209}
   2210
   2211static const struct devlink_ops am65_cpsw_devlink_ops = {};
   2212
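        	/* Install an ALE entry for the IEEE 802.1D STP multicast address
        	 * (eth_stp_addr, 01:80:c2:00:00:00) so that BPDUs keep reaching
        	 * the host port even while a bridge port is in a blocking or
        	 * learning state.
        	 */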
   2213static void am65_cpsw_init_stp_ale_entry(struct am65_cpsw_common *cpsw)
   2214{
   2215	cpsw_ale_add_mcast(cpsw->ale, eth_stp_addr, ALE_PORT_HOST, ALE_SUPER, 0,
   2216			   ALE_MCAST_BLOCK_LEARN_FWD);
   2217}
   2218
   2219static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common)
   2220{
   2221	struct am65_cpsw_host *host = am65_common_get_host(common);
   2222
   2223	writel(common->default_vlan, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
   2224
   2225	am65_cpsw_init_stp_ale_entry(common);
   2226
   2227	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
   2228	dev_dbg(common->dev, "Set P0_UNI_FLOOD\n");
   2229	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
   2230}
   2231
   2232static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common)
   2233{
   2234	struct am65_cpsw_host *host = am65_common_get_host(common);
   2235
   2236	writel(0, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
   2237
   2238	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
   2239	dev_dbg(common->dev, "unset P0_UNI_FLOOD\n");
   2240
    2241	/* learning makes no sense in multi-mac mode */
   2242	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
   2243}
   2244
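        	/* devlink "switch_mode" runtime parameter.  Illustrative usage
        	 * (the devlink handle depends on the platform bus address):
        	 *
        	 *	devlink dev param set platform/8000000.ethernet \
        	 *		name switch_mode value true cmode runtime
        	 */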
   2245static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
   2246					struct devlink_param_gset_ctx *ctx)
   2247{
   2248	struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
   2249	struct am65_cpsw_common *common = dl_priv->common;
   2250
   2251	dev_dbg(common->dev, "%s id:%u\n", __func__, id);
   2252
   2253	if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
   2254		return -EOPNOTSUPP;
   2255
   2256	ctx->val.vbool = !common->is_emac_mode;
   2257
   2258	return 0;
   2259}
   2260
    2261	static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port)
   2262{
   2263	struct am65_cpsw_slave_data *slave = &port->slave;
   2264	struct am65_cpsw_common *common = port->common;
   2265	u32 port_mask;
   2266
   2267	writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
   2268
   2269	if (slave->mac_only)
   2270		/* enable mac-only mode on port */
   2271		cpsw_ale_control_set(common->ale, port->port_id,
   2272				     ALE_PORT_MACONLY, 1);
   2273
   2274	cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_NOLEARN, 1);
   2275
   2276	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
   2277
   2278	cpsw_ale_add_ucast(common->ale, port->ndev->dev_addr,
   2279			   HOST_PORT_NUM, ALE_SECURE, slave->port_vlan);
   2280	cpsw_ale_add_mcast(common->ale, port->ndev->broadcast,
   2281			   port_mask, ALE_VLAN, slave->port_vlan, ALE_MCAST_FWD_2);
   2282}
   2283
   2284static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port)
   2285{
   2286	struct am65_cpsw_slave_data *slave = &port->slave;
   2287	struct am65_cpsw_common *cpsw = port->common;
   2288	u32 port_mask;
   2289
   2290	cpsw_ale_control_set(cpsw->ale, port->port_id,
   2291			     ALE_PORT_NOLEARN, 0);
   2292
   2293	cpsw_ale_add_ucast(cpsw->ale, port->ndev->dev_addr,
   2294			   HOST_PORT_NUM, ALE_SECURE | ALE_BLOCKED | ALE_VLAN,
   2295			   slave->port_vlan);
   2296
   2297	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
   2298
   2299	cpsw_ale_add_mcast(cpsw->ale, port->ndev->broadcast,
   2300			   port_mask, ALE_VLAN, slave->port_vlan,
   2301			   ALE_MCAST_FWD_2);
   2302
   2303	writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
   2304
   2305	cpsw_ale_control_set(cpsw->ale, port->port_id,
   2306			     ALE_PORT_MACONLY, 0);
   2307}
   2308
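        	/* Toggle between multi-MAC (EMAC) and switch mode.  If no slave
        	 * ndev is up, only the per-port VLANs need updating; otherwise the
        	 * ALE is put into bypass, its table is flushed, and the host and
        	 * slave ports are re-programmed before bypass is lifted again.
        	 */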
   2309static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
   2310					struct devlink_param_gset_ctx *ctx)
   2311{
   2312	struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
   2313	struct am65_cpsw_common *cpsw = dl_priv->common;
   2314	bool switch_en = ctx->val.vbool;
   2315	bool if_running = false;
   2316	int i;
   2317
   2318	dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
   2319
   2320	if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
   2321		return -EOPNOTSUPP;
   2322
   2323	if (switch_en == !cpsw->is_emac_mode)
   2324		return 0;
   2325
   2326	if (!switch_en && cpsw->br_members) {
   2327		dev_err(cpsw->dev, "Remove ports from bridge before disabling switch mode\n");
   2328		return -EINVAL;
   2329	}
   2330
   2331	rtnl_lock();
   2332
   2333	cpsw->is_emac_mode = !switch_en;
   2334
   2335	for (i = 0; i < cpsw->port_num; i++) {
   2336		struct net_device *sl_ndev = cpsw->ports[i].ndev;
   2337
   2338		if (!sl_ndev || !netif_running(sl_ndev))
   2339			continue;
   2340
   2341		if_running = true;
   2342	}
   2343
   2344	if (!if_running) {
   2345		/* all ndevs are down */
   2346		for (i = 0; i < cpsw->port_num; i++) {
   2347			struct net_device *sl_ndev = cpsw->ports[i].ndev;
   2348			struct am65_cpsw_slave_data *slave;
   2349
   2350			if (!sl_ndev)
   2351				continue;
   2352
   2353			slave = am65_ndev_to_slave(sl_ndev);
   2354			if (switch_en)
   2355				slave->port_vlan = cpsw->default_vlan;
   2356			else
   2357				slave->port_vlan = 0;
   2358		}
   2359
   2360		goto exit;
   2361	}
   2362
   2363	cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
   2364	/* clean up ALE table */
   2365	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_CLEAR, 1);
   2366	cpsw_ale_control_get(cpsw->ale, HOST_PORT_NUM, ALE_AGEOUT);
   2367
   2368	if (switch_en) {
   2369		dev_info(cpsw->dev, "Enable switch mode\n");
   2370
   2371		am65_cpsw_init_host_port_switch(cpsw);
   2372
   2373		for (i = 0; i < cpsw->port_num; i++) {
   2374			struct net_device *sl_ndev = cpsw->ports[i].ndev;
   2375			struct am65_cpsw_slave_data *slave;
   2376			struct am65_cpsw_port *port;
   2377
   2378			if (!sl_ndev)
   2379				continue;
   2380
   2381			port = am65_ndev_to_port(sl_ndev);
   2382			slave = am65_ndev_to_slave(sl_ndev);
   2383			slave->port_vlan = cpsw->default_vlan;
   2384
   2385			if (netif_running(sl_ndev))
   2386				am65_cpsw_init_port_switch_ale(port);
   2387		}
   2388
   2389	} else {
   2390		dev_info(cpsw->dev, "Disable switch mode\n");
   2391
   2392		am65_cpsw_init_host_port_emac(cpsw);
   2393
   2394		for (i = 0; i < cpsw->port_num; i++) {
   2395			struct net_device *sl_ndev = cpsw->ports[i].ndev;
   2396			struct am65_cpsw_port *port;
   2397
   2398			if (!sl_ndev)
   2399				continue;
   2400
   2401			port = am65_ndev_to_port(sl_ndev);
   2402			port->slave.port_vlan = 0;
   2403			if (netif_running(sl_ndev))
   2404				am65_cpsw_init_port_emac_ale(port);
   2405		}
   2406	}
   2407	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_BYPASS, 0);
   2408exit:
   2409	rtnl_unlock();
   2410
   2411	return 0;
   2412}
   2413
   2414static const struct devlink_param am65_cpsw_devlink_params[] = {
   2415	DEVLINK_PARAM_DRIVER(AM65_CPSW_DL_PARAM_SWITCH_MODE, "switch_mode",
   2416			     DEVLINK_PARAM_TYPE_BOOL,
   2417			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
   2418			     am65_cpsw_dl_switch_mode_get,
   2419			     am65_cpsw_dl_switch_mode_set, NULL),
   2420};
   2421
   2422static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
   2423{
   2424	struct devlink_port_attrs attrs = {};
   2425	struct am65_cpsw_devlink *dl_priv;
   2426	struct device *dev = common->dev;
   2427	struct devlink_port *dl_port;
   2428	struct am65_cpsw_port *port;
   2429	int ret = 0;
   2430	int i;
   2431
   2432	common->devlink =
   2433		devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv), dev);
   2434	if (!common->devlink)
   2435		return -ENOMEM;
   2436
   2437	dl_priv = devlink_priv(common->devlink);
   2438	dl_priv->common = common;
   2439
    2440	/* Provide a devlink hook to switch mode when multiple external ports
    2441	 * are present and the NUSS switchdev driver is enabled.
    2442	 */
   2443	if (!AM65_CPSW_IS_CPSW2G(common) &&
   2444	    IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
   2445		ret = devlink_params_register(common->devlink,
   2446					      am65_cpsw_devlink_params,
   2447					      ARRAY_SIZE(am65_cpsw_devlink_params));
   2448		if (ret) {
   2449			dev_err(dev, "devlink params reg fail ret:%d\n", ret);
   2450			goto dl_unreg;
   2451		}
   2452	}
   2453
   2454	for (i = 1; i <= common->port_num; i++) {
   2455		port = am65_common_get_port(common, i);
   2456		dl_port = &port->devlink_port;
   2457
   2458		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
   2459		attrs.phys.port_number = port->port_id;
   2460		attrs.switch_id.id_len = sizeof(resource_size_t);
   2461		memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len);
   2462		devlink_port_attrs_set(dl_port, &attrs);
   2463
   2464		ret = devlink_port_register(common->devlink, dl_port, port->port_id);
   2465		if (ret) {
   2466			dev_err(dev, "devlink_port reg fail for port %d, ret:%d\n",
   2467				port->port_id, ret);
   2468			goto dl_port_unreg;
   2469		}
   2470		devlink_port_type_eth_set(dl_port, port->ndev);
   2471	}
   2472	devlink_register(common->devlink);
   2473	return ret;
   2474
   2475dl_port_unreg:
   2476	for (i = i - 1; i >= 1; i--) {
   2477		port = am65_common_get_port(common, i);
   2478		dl_port = &port->devlink_port;
   2479
   2480		devlink_port_unregister(dl_port);
   2481	}
   2482dl_unreg:
   2483	devlink_free(common->devlink);
   2484	return ret;
   2485}
   2486
   2487static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
   2488{
   2489	struct devlink_port *dl_port;
   2490	struct am65_cpsw_port *port;
   2491	int i;
   2492
   2493	devlink_unregister(common->devlink);
   2494
   2495	for (i = 1; i <= common->port_num; i++) {
   2496		port = am65_common_get_port(common, i);
   2497		dl_port = &port->devlink_port;
   2498
   2499		devlink_port_unregister(dl_port);
   2500	}
   2501
   2502	if (!AM65_CPSW_IS_CPSW2G(common) &&
   2503	    IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
   2504		devlink_params_unregister(common->devlink,
   2505					  am65_cpsw_devlink_params,
   2506					  ARRAY_SIZE(am65_cpsw_devlink_params));
   2507
   2508	devlink_free(common->devlink);
   2509}
   2510
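        	/* Request the TX/RX IRQs and register all slave net_devices; this
        	 * must run after the DMA channels exist.  On failure, already
        	 * registered ndevs are unregistered again.
        	 */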
   2511static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
   2512{
   2513	struct device *dev = common->dev;
   2514	struct am65_cpsw_port *port;
   2515	int ret = 0, i;
   2516
   2517	ret = am65_cpsw_nuss_ndev_add_tx_napi(common);
   2518	if (ret)
   2519		return ret;
   2520
   2521	ret = devm_request_irq(dev, common->rx_chns.irq,
   2522			       am65_cpsw_nuss_rx_irq,
   2523			       IRQF_TRIGGER_HIGH, dev_name(dev), common);
   2524	if (ret) {
   2525		dev_err(dev, "failure requesting rx irq %u, %d\n",
   2526			common->rx_chns.irq, ret);
   2527		return ret;
   2528	}
   2529
   2530	for (i = 0; i < common->port_num; i++) {
   2531		port = &common->ports[i];
   2532
   2533		if (!port->ndev)
   2534			continue;
   2535
   2536		ret = register_netdev(port->ndev);
   2537		if (ret) {
   2538			dev_err(dev, "error registering slave net device%i %d\n",
   2539				i, ret);
   2540			goto err_cleanup_ndev;
   2541		}
   2542	}
   2543
   2544	ret = am65_cpsw_register_notifiers(common);
   2545	if (ret)
   2546		goto err_cleanup_ndev;
   2547
   2548	ret = am65_cpsw_nuss_register_devlink(common);
   2549	if (ret)
   2550		goto clean_unregister_notifiers;
   2551
    2552	/* can't auto-unregister the ndevs using devm_add_action() due to the
    2553	 * devres release sequence in the driver core for DMA resources
    2554	 */
   2555
   2556	return 0;
   2557clean_unregister_notifiers:
   2558	am65_cpsw_unregister_notifiers(common);
   2559err_cleanup_ndev:
   2560	am65_cpsw_nuss_cleanup_ndev(common);
   2561
   2562	return ret;
   2563}
   2564
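        	/* Re-create the TX channels with a new channel count.  The old
        	 * channels are expected to have been torn down first (e.g. via
        	 * am65_cpsw_nuss_remove_tx_chns() from the ethtool set_channels
        	 * path).
        	 */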
   2565int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
   2566{
   2567	int ret;
   2568
   2569	common->tx_ch_num = num_tx;
   2570	ret = am65_cpsw_nuss_init_tx_chns(common);
   2571	if (ret)
   2572		return ret;
   2573
   2574	return am65_cpsw_nuss_ndev_add_tx_napi(common);
   2575}
   2576
   2577struct am65_cpsw_soc_pdata {
   2578	u32	quirks_dis;
   2579};
   2580
   2581static const struct am65_cpsw_soc_pdata am65x_soc_sr2_0 = {
   2582	.quirks_dis = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
   2583};
   2584
   2585static const struct soc_device_attribute am65_cpsw_socinfo[] = {
   2586	{ .family = "AM65X",
   2587	  .revision = "SR2.0",
   2588	  .data = &am65x_soc_sr2_0
   2589	},
   2590	{/* sentinel */}
   2591};
   2592
   2593static const struct am65_cpsw_pdata am65x_sr1_0 = {
   2594	.quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
   2595	.ale_dev_id = "am65x-cpsw2g",
   2596	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
   2597};
   2598
   2599static const struct am65_cpsw_pdata j721e_pdata = {
   2600	.quirks = 0,
   2601	.ale_dev_id = "am65x-cpsw2g",
   2602	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
   2603};
   2604
   2605static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
   2606	.quirks = 0,
   2607	.ale_dev_id = "am64-cpswxg",
   2608	.fdqring_mode = K3_RINGACC_RING_MODE_RING,
   2609};
   2610
   2611static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
   2612	{ .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
   2613	{ .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
   2614	{ .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata},
   2615	{ /* sentinel */ },
   2616};
   2617MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
   2618
   2619static void am65_cpsw_nuss_apply_socinfo(struct am65_cpsw_common *common)
   2620{
   2621	const struct soc_device_attribute *soc;
   2622
   2623	soc = soc_device_match(am65_cpsw_socinfo);
   2624	if (soc && soc->data) {
   2625		const struct am65_cpsw_soc_pdata *socdata = soc->data;
   2626
   2627		/* disable quirks */
   2628		common->pdata.quirks &= ~socdata->quirks_dis;
   2629	}
   2630}
   2631
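        	/* Probe order matters: I/O mapping and clocks first, then MDIO,
        	 * DMA channels, host port, slave ports, ALE and CPTS, and only
        	 * then the net_devices, since registering an ndev exposes the
        	 * port to userspace.
        	 */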
   2632static int am65_cpsw_nuss_probe(struct platform_device *pdev)
   2633{
   2634	struct cpsw_ale_params ale_params = { 0 };
   2635	const struct of_device_id *of_id;
   2636	struct device *dev = &pdev->dev;
   2637	struct am65_cpsw_common *common;
   2638	struct device_node *node;
   2639	struct resource *res;
   2640	struct clk *clk;
   2641	u64 id_temp;
   2642	int ret, i;
   2643
   2644	common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
   2645	if (!common)
   2646		return -ENOMEM;
   2647	common->dev = dev;
   2648
   2649	of_id = of_match_device(am65_cpsw_nuss_of_mtable, dev);
   2650	if (!of_id)
   2651		return -EINVAL;
   2652	common->pdata = *(const struct am65_cpsw_pdata *)of_id->data;
   2653
   2654	am65_cpsw_nuss_apply_socinfo(common);
   2655
   2656	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpsw_nuss");
   2657	common->ss_base = devm_ioremap_resource(&pdev->dev, res);
   2658	if (IS_ERR(common->ss_base))
   2659		return PTR_ERR(common->ss_base);
   2660	common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE;
   2661	/* Use device's physical base address as switch id */
   2662	id_temp = cpu_to_be64(res->start);
   2663	memcpy(common->switch_id, &id_temp, sizeof(res->start));
   2664
   2665	node = of_get_child_by_name(dev->of_node, "ethernet-ports");
   2666	if (!node)
   2667		return -ENOENT;
   2668	common->port_num = of_get_child_count(node);
   2669	of_node_put(node);
   2670	if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
   2671		return -ENOENT;
   2672
   2673	common->rx_flow_id_base = -1;
   2674	init_completion(&common->tdown_complete);
   2675	common->tx_ch_num = 1;
   2676	common->pf_p0_rx_ptype_rrobin = false;
   2677	common->default_vlan = 1;
   2678
   2679	common->ports = devm_kcalloc(dev, common->port_num,
   2680				     sizeof(*common->ports),
   2681				     GFP_KERNEL);
   2682	if (!common->ports)
   2683		return -ENOMEM;
   2684
   2685	clk = devm_clk_get(dev, "fck");
   2686	if (IS_ERR(clk))
   2687		return dev_err_probe(dev, PTR_ERR(clk), "getting fck clock\n");
   2688	common->bus_freq = clk_get_rate(clk);
   2689
   2690	pm_runtime_enable(dev);
   2691	ret = pm_runtime_resume_and_get(dev);
   2692	if (ret < 0) {
   2693		pm_runtime_disable(dev);
   2694		return ret;
   2695	}
   2696
   2697	node = of_get_child_by_name(dev->of_node, "mdio");
   2698	if (!node) {
   2699		dev_warn(dev, "MDIO node not found\n");
   2700	} else if (of_device_is_available(node)) {
   2701		struct platform_device *mdio_pdev;
   2702
   2703		mdio_pdev = of_platform_device_create(node, NULL, dev);
   2704		if (!mdio_pdev) {
   2705			ret = -ENODEV;
   2706			goto err_pm_clear;
   2707		}
   2708
    2709		common->mdio_dev = &mdio_pdev->dev;
   2710	}
   2711	of_node_put(node);
   2712
   2713	am65_cpsw_nuss_get_ver(common);
   2714
   2715	/* init tx channels */
   2716	ret = am65_cpsw_nuss_init_tx_chns(common);
   2717	if (ret)
   2718		goto err_of_clear;
   2719	ret = am65_cpsw_nuss_init_rx_chns(common);
   2720	if (ret)
   2721		goto err_of_clear;
   2722
   2723	ret = am65_cpsw_nuss_init_host_p(common);
   2724	if (ret)
   2725		goto err_of_clear;
   2726
   2727	ret = am65_cpsw_nuss_init_slave_ports(common);
   2728	if (ret)
   2729		goto err_of_clear;
   2730
   2731	/* init common data */
   2732	ale_params.dev = dev;
   2733	ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
   2734	ale_params.ale_ports = common->port_num + 1;
   2735	ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE;
   2736	ale_params.dev_id = common->pdata.ale_dev_id;
   2737	ale_params.bus_freq = common->bus_freq;
   2738
   2739	common->ale = cpsw_ale_create(&ale_params);
   2740	if (IS_ERR(common->ale)) {
   2741		dev_err(dev, "error initializing ale engine\n");
   2742		ret = PTR_ERR(common->ale);
   2743		goto err_of_clear;
   2744	}
   2745
   2746	ret = am65_cpsw_init_cpts(common);
   2747	if (ret)
   2748		goto err_of_clear;
   2749
   2750	/* init ports */
   2751	for (i = 0; i < common->port_num; i++)
   2752		am65_cpsw_nuss_slave_disable_unused(&common->ports[i]);
   2753
   2754	dev_set_drvdata(dev, common);
   2755
   2756	common->is_emac_mode = true;
   2757
   2758	ret = am65_cpsw_nuss_init_ndevs(common);
   2759	if (ret)
   2760		goto err_free_phylink;
   2761
   2762	ret = am65_cpsw_nuss_register_ndevs(common);
   2763	if (ret)
   2764		goto err_free_phylink;
   2765
   2766	pm_runtime_put(dev);
   2767	return 0;
   2768
   2769err_free_phylink:
   2770	am65_cpsw_nuss_phylink_cleanup(common);
   2771err_of_clear:
   2772	of_platform_device_destroy(common->mdio_dev, NULL);
   2773err_pm_clear:
   2774	pm_runtime_put_sync(dev);
   2775	pm_runtime_disable(dev);
   2776	return ret;
   2777}
   2778
   2779static int am65_cpsw_nuss_remove(struct platform_device *pdev)
   2780{
   2781	struct device *dev = &pdev->dev;
   2782	struct am65_cpsw_common *common;
   2783	int ret;
   2784
   2785	common = dev_get_drvdata(dev);
   2786
   2787	ret = pm_runtime_resume_and_get(&pdev->dev);
   2788	if (ret < 0)
   2789		return ret;
   2790
   2791	am65_cpsw_nuss_phylink_cleanup(common);
   2792	am65_cpsw_unregister_devlink(common);
   2793	am65_cpsw_unregister_notifiers(common);
   2794
    2795	/* must unregister ndevs here because the driver core's release_driver
    2796	 * routine calls dma_deconfigure(dev) before devres_release_all(dev)
    2797	 */
   2798	am65_cpsw_nuss_cleanup_ndev(common);
   2799
   2800	of_platform_device_destroy(common->mdio_dev, NULL);
   2801
   2802	pm_runtime_put_sync(&pdev->dev);
   2803	pm_runtime_disable(&pdev->dev);
   2804	return 0;
   2805}
   2806
   2807static struct platform_driver am65_cpsw_nuss_driver = {
   2808	.driver = {
   2809		.name	 = AM65_CPSW_DRV_NAME,
   2810		.of_match_table = am65_cpsw_nuss_of_mtable,
   2811	},
   2812	.probe = am65_cpsw_nuss_probe,
   2813	.remove = am65_cpsw_nuss_remove,
   2814};
   2815
   2816module_platform_driver(am65_cpsw_nuss_driver);
   2817
   2818MODULE_LICENSE("GPL v2");
   2819MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
   2820MODULE_DESCRIPTION("TI AM65 CPSW Ethernet driver");