cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

lantiq_gswip.c (65861B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs
      4 *
      5 * Copyright (C) 2010 Lantiq Deutschland
      6 * Copyright (C) 2012 John Crispin <john@phrozen.org>
      7 * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
      8 *
      9 * The VLAN and bridge model the GSWIP hardware uses does not directly
     10 * match the model DSA uses.
     11 *
     12 * The hardware has 64 possible table entries for bridges with one VLAN
     13 * ID, one flow ID and a list of ports for each bridge. All entries which
     14 * match the same flow ID are combined in the MAC learning table and
     15 * act as one global bridge.
     16 * The hardware does not support VLAN filtering on the port, but only on
     17 * the bridge; this driver converts the DSA model to the hardware model.
     18 *
     19 * The CPU gets all the exception frames which do not match any forwarding
     20 * rule and the CPU port is also added to all bridges. This makes it possible
     21 * to handle all the special cases easily in software.
     22 * At initialization the driver allocates one bridge table entry for
     23 * each switch port, which is used when the port is not part of an
     24 * explicit bridge. This prevents frames from being forwarded
     25 * between all LAN ports by default.
     26 */
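/*
 * Illustrative sketch, not part of the driver: what the per-port
 * "single port bridge" entries described above look like in the two PCE
 * tables (see gswip_add_single_port_br() below). A user port only shares a
 * forwarding domain with the CPU port until it joins a real bridge.
 *
 *   Active VLAN table (GSWIP_TABLE_ACTIVE_VLAN):
 *     index = port + 1, key[0] = 0 (VID), val[0] = port + 1 (FID)
 *
 *   VLAN mapping table (GSWIP_TABLE_VLAN_MAPPING):
 *     index = port + 1, val[1] = BIT(port) | BIT(cpu_port)
 */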
     27
     28#include <linux/clk.h>
     29#include <linux/delay.h>
     30#include <linux/etherdevice.h>
     31#include <linux/firmware.h>
     32#include <linux/if_bridge.h>
     33#include <linux/if_vlan.h>
     34#include <linux/iopoll.h>
     35#include <linux/mfd/syscon.h>
     36#include <linux/module.h>
     37#include <linux/of_mdio.h>
     38#include <linux/of_net.h>
     39#include <linux/of_platform.h>
     40#include <linux/phy.h>
     41#include <linux/phylink.h>
     42#include <linux/platform_device.h>
     43#include <linux/regmap.h>
     44#include <linux/reset.h>
     45#include <net/dsa.h>
     46#include <dt-bindings/mips/lantiq_rcu_gphy.h>
     47
     48#include "lantiq_pce.h"
     49
     50/* GSWIP MDIO Registers */
     51#define GSWIP_MDIO_GLOB			0x00
     52#define  GSWIP_MDIO_GLOB_ENABLE		BIT(15)
     53#define GSWIP_MDIO_CTRL			0x08
     54#define  GSWIP_MDIO_CTRL_BUSY		BIT(12)
     55#define  GSWIP_MDIO_CTRL_RD		BIT(11)
     56#define  GSWIP_MDIO_CTRL_WR		BIT(10)
     57#define  GSWIP_MDIO_CTRL_PHYAD_MASK	0x1f
     58#define  GSWIP_MDIO_CTRL_PHYAD_SHIFT	5
     59#define  GSWIP_MDIO_CTRL_REGAD_MASK	0x1f
     60#define GSWIP_MDIO_READ			0x09
     61#define GSWIP_MDIO_WRITE		0x0A
     62#define GSWIP_MDIO_MDC_CFG0		0x0B
     63#define GSWIP_MDIO_MDC_CFG1		0x0C
     64#define GSWIP_MDIO_PHYp(p)		(0x15 - (p))
     65#define  GSWIP_MDIO_PHY_LINK_MASK	0x6000
     66#define  GSWIP_MDIO_PHY_LINK_AUTO	0x0000
     67#define  GSWIP_MDIO_PHY_LINK_DOWN	0x4000
     68#define  GSWIP_MDIO_PHY_LINK_UP		0x2000
     69#define  GSWIP_MDIO_PHY_SPEED_MASK	0x1800
     70#define  GSWIP_MDIO_PHY_SPEED_AUTO	0x1800
     71#define  GSWIP_MDIO_PHY_SPEED_M10	0x0000
     72#define  GSWIP_MDIO_PHY_SPEED_M100	0x0800
     73#define  GSWIP_MDIO_PHY_SPEED_G1	0x1000
     74#define  GSWIP_MDIO_PHY_FDUP_MASK	0x0600
     75#define  GSWIP_MDIO_PHY_FDUP_AUTO	0x0000
     76#define  GSWIP_MDIO_PHY_FDUP_EN		0x0200
     77#define  GSWIP_MDIO_PHY_FDUP_DIS	0x0600
     78#define  GSWIP_MDIO_PHY_FCONTX_MASK	0x0180
     79#define  GSWIP_MDIO_PHY_FCONTX_AUTO	0x0000
     80#define  GSWIP_MDIO_PHY_FCONTX_EN	0x0100
     81#define  GSWIP_MDIO_PHY_FCONTX_DIS	0x0180
     82#define  GSWIP_MDIO_PHY_FCONRX_MASK	0x0060
     83#define  GSWIP_MDIO_PHY_FCONRX_AUTO	0x0000
     84#define  GSWIP_MDIO_PHY_FCONRX_EN	0x0020
     85#define  GSWIP_MDIO_PHY_FCONRX_DIS	0x0060
     86#define  GSWIP_MDIO_PHY_ADDR_MASK	0x001f
     87#define  GSWIP_MDIO_PHY_MASK		(GSWIP_MDIO_PHY_ADDR_MASK | \
     88					 GSWIP_MDIO_PHY_FCONRX_MASK | \
     89					 GSWIP_MDIO_PHY_FCONTX_MASK | \
     90					 GSWIP_MDIO_PHY_LINK_MASK | \
     91					 GSWIP_MDIO_PHY_SPEED_MASK | \
     92					 GSWIP_MDIO_PHY_FDUP_MASK)
     93
     94/* GSWIP MII Registers */
     95#define GSWIP_MII_CFGp(p)		(0x2 * (p))
     96#define  GSWIP_MII_CFG_RESET		BIT(15)
     97#define  GSWIP_MII_CFG_EN		BIT(14)
     98#define  GSWIP_MII_CFG_ISOLATE		BIT(13)
     99#define  GSWIP_MII_CFG_LDCLKDIS		BIT(12)
    100#define  GSWIP_MII_CFG_RGMII_IBS	BIT(8)
    101#define  GSWIP_MII_CFG_RMII_CLK		BIT(7)
    102#define  GSWIP_MII_CFG_MODE_MIIP	0x0
    103#define  GSWIP_MII_CFG_MODE_MIIM	0x1
    104#define  GSWIP_MII_CFG_MODE_RMIIP	0x2
    105#define  GSWIP_MII_CFG_MODE_RMIIM	0x3
    106#define  GSWIP_MII_CFG_MODE_RGMII	0x4
    107#define  GSWIP_MII_CFG_MODE_GMII	0x9
    108#define  GSWIP_MII_CFG_MODE_MASK	0xf
    109#define  GSWIP_MII_CFG_RATE_M2P5	0x00
    110#define  GSWIP_MII_CFG_RATE_M25	0x10
    111#define  GSWIP_MII_CFG_RATE_M125	0x20
    112#define  GSWIP_MII_CFG_RATE_M50	0x30
    113#define  GSWIP_MII_CFG_RATE_AUTO	0x40
    114#define  GSWIP_MII_CFG_RATE_MASK	0x70
    115#define GSWIP_MII_PCDU0			0x01
    116#define GSWIP_MII_PCDU1			0x03
    117#define GSWIP_MII_PCDU5			0x05
    118#define  GSWIP_MII_PCDU_TXDLY_MASK	GENMASK(2, 0)
    119#define  GSWIP_MII_PCDU_RXDLY_MASK	GENMASK(9, 7)
    120
    121/* GSWIP Core Registers */
    122#define GSWIP_SWRES			0x000
    123#define  GSWIP_SWRES_R1			BIT(1)	/* GSWIP Software reset */
    124#define  GSWIP_SWRES_R0			BIT(0)	/* GSWIP Hardware reset */
    125#define GSWIP_VERSION			0x013
    126#define  GSWIP_VERSION_REV_SHIFT	0
    127#define  GSWIP_VERSION_REV_MASK		GENMASK(7, 0)
    128#define  GSWIP_VERSION_MOD_SHIFT	8
    129#define  GSWIP_VERSION_MOD_MASK		GENMASK(15, 8)
    130#define   GSWIP_VERSION_2_0		0x100
    131#define   GSWIP_VERSION_2_1		0x021
    132#define   GSWIP_VERSION_2_2		0x122
    133#define   GSWIP_VERSION_2_2_ETC		0x022
    134
    135#define GSWIP_BM_RAM_VAL(x)		(0x043 - (x))
    136#define GSWIP_BM_RAM_ADDR		0x044
    137#define GSWIP_BM_RAM_CTRL		0x045
    138#define  GSWIP_BM_RAM_CTRL_BAS		BIT(15)
    139#define  GSWIP_BM_RAM_CTRL_OPMOD	BIT(5)
    140#define  GSWIP_BM_RAM_CTRL_ADDR_MASK	GENMASK(4, 0)
    141#define GSWIP_BM_QUEUE_GCTRL		0x04A
    142#define  GSWIP_BM_QUEUE_GCTRL_GL_MOD	BIT(10)
    143/* buffer management Port Configuration Register */
    144#define GSWIP_BM_PCFGp(p)		(0x080 + ((p) * 2))
    145#define  GSWIP_BM_PCFG_CNTEN		BIT(0)	/* RMON Counter Enable */
    146#define  GSWIP_BM_PCFG_IGCNT		BIT(1)	/* Ingress Special Tag RMON count */
    147/* buffer management Port Control Register */
    148#define GSWIP_BM_RMON_CTRLp(p)		(0x81 + ((p) * 2))
    149#define  GSWIP_BM_CTRL_RMON_RAM1_RES	BIT(0)	/* Software Reset for RMON RAM 1 */
    150#define  GSWIP_BM_CTRL_RMON_RAM2_RES	BIT(1)	/* Software Reset for RMON RAM 2 */
    151
    152/* PCE */
    153#define GSWIP_PCE_TBL_KEY(x)		(0x447 - (x))
    154#define GSWIP_PCE_TBL_MASK		0x448
    155#define GSWIP_PCE_TBL_VAL(x)		(0x44D - (x))
    156#define GSWIP_PCE_TBL_ADDR		0x44E
    157#define GSWIP_PCE_TBL_CTRL		0x44F
    158#define  GSWIP_PCE_TBL_CTRL_BAS		BIT(15)
    159#define  GSWIP_PCE_TBL_CTRL_TYPE	BIT(13)
    160#define  GSWIP_PCE_TBL_CTRL_VLD		BIT(12)
    161#define  GSWIP_PCE_TBL_CTRL_KEYFORM	BIT(11)
    162#define  GSWIP_PCE_TBL_CTRL_GMAP_MASK	GENMASK(10, 7)
    163#define  GSWIP_PCE_TBL_CTRL_OPMOD_MASK	GENMASK(6, 5)
    164#define  GSWIP_PCE_TBL_CTRL_OPMOD_ADRD	0x00
    165#define  GSWIP_PCE_TBL_CTRL_OPMOD_ADWR	0x20
    166#define  GSWIP_PCE_TBL_CTRL_OPMOD_KSRD	0x40
    167#define  GSWIP_PCE_TBL_CTRL_OPMOD_KSWR	0x60
    168#define  GSWIP_PCE_TBL_CTRL_ADDR_MASK	GENMASK(4, 0)
    169#define GSWIP_PCE_PMAP1			0x453	/* Monitoring port map */
    170#define GSWIP_PCE_PMAP2			0x454	/* Default Multicast port map */
    171#define GSWIP_PCE_PMAP3			0x455	/* Default Unknown Unicast port map */
    172#define GSWIP_PCE_GCTRL_0		0x456
    173#define  GSWIP_PCE_GCTRL_0_MTFL		BIT(0)  /* MAC Table Flushing */
    174#define  GSWIP_PCE_GCTRL_0_MC_VALID	BIT(3)
    175#define  GSWIP_PCE_GCTRL_0_VLAN		BIT(14) /* VLAN aware Switching */
    176#define GSWIP_PCE_GCTRL_1		0x457
    177#define  GSWIP_PCE_GCTRL_1_MAC_GLOCK	BIT(2)	/* MAC Address table lock */
    178#define  GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD	BIT(3) /* Mac address table lock forwarding mode */
    179#define GSWIP_PCE_PCTRL_0p(p)		(0x480 + ((p) * 0xA))
    180#define  GSWIP_PCE_PCTRL_0_TVM		BIT(5)	/* Transparent VLAN mode */
    181#define  GSWIP_PCE_PCTRL_0_VREP		BIT(6)	/* VLAN Replace Mode */
    182#define  GSWIP_PCE_PCTRL_0_INGRESS	BIT(11)	/* Accept special tag in ingress */
    183#define  GSWIP_PCE_PCTRL_0_PSTATE_LISTEN	0x0
    184#define  GSWIP_PCE_PCTRL_0_PSTATE_RX		0x1
    185#define  GSWIP_PCE_PCTRL_0_PSTATE_TX		0x2
    186#define  GSWIP_PCE_PCTRL_0_PSTATE_LEARNING	0x3
    187#define  GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING	0x7
    188#define  GSWIP_PCE_PCTRL_0_PSTATE_MASK	GENMASK(2, 0)
    189#define GSWIP_PCE_VCTRL(p)		(0x485 + ((p) * 0xA))
    190#define  GSWIP_PCE_VCTRL_UVR		BIT(0)	/* Unknown VLAN Rule */
    191#define  GSWIP_PCE_VCTRL_VIMR		BIT(3)	/* VLAN Ingress Member violation rule */
    192#define  GSWIP_PCE_VCTRL_VEMR		BIT(4)	/* VLAN Egress Member violation rule */
    193#define  GSWIP_PCE_VCTRL_VSR		BIT(5)	/* VLAN Security */
    194#define  GSWIP_PCE_VCTRL_VID0		BIT(6)	/* Priority Tagged Rule */
    195#define GSWIP_PCE_DEFPVID(p)		(0x486 + ((p) * 0xA))
    196
    197#define GSWIP_MAC_FLEN			0x8C5
    198#define GSWIP_MAC_CTRL_0p(p)		(0x903 + ((p) * 0xC))
    199#define  GSWIP_MAC_CTRL_0_PADEN		BIT(8)
    200#define  GSWIP_MAC_CTRL_0_FCS_EN	BIT(7)
    201#define  GSWIP_MAC_CTRL_0_FCON_MASK	0x0070
    202#define  GSWIP_MAC_CTRL_0_FCON_AUTO	0x0000
    203#define  GSWIP_MAC_CTRL_0_FCON_RX	0x0010
    204#define  GSWIP_MAC_CTRL_0_FCON_TX	0x0020
    205#define  GSWIP_MAC_CTRL_0_FCON_RXTX	0x0030
    206#define  GSWIP_MAC_CTRL_0_FCON_NONE	0x0040
    207#define  GSWIP_MAC_CTRL_0_FDUP_MASK	0x000C
    208#define  GSWIP_MAC_CTRL_0_FDUP_AUTO	0x0000
    209#define  GSWIP_MAC_CTRL_0_FDUP_EN	0x0004
    210#define  GSWIP_MAC_CTRL_0_FDUP_DIS	0x000C
    211#define  GSWIP_MAC_CTRL_0_GMII_MASK	0x0003
    212#define  GSWIP_MAC_CTRL_0_GMII_AUTO	0x0000
    213#define  GSWIP_MAC_CTRL_0_GMII_MII	0x0001
    214#define  GSWIP_MAC_CTRL_0_GMII_RGMII	0x0002
    215#define GSWIP_MAC_CTRL_2p(p)		(0x905 + ((p) * 0xC))
    216#define GSWIP_MAC_CTRL_2_LCHKL		BIT(2) /* Frame Length Check Long Enable */
    217#define GSWIP_MAC_CTRL_2_MLEN		BIT(3) /* Maximum Untagged Frame Length */
    218
    219/* Ethernet Switch Fetch DMA Port Control Register */
    220#define GSWIP_FDMA_PCTRLp(p)		(0xA80 + ((p) * 0x6))
    221#define  GSWIP_FDMA_PCTRL_EN		BIT(0)	/* FDMA Port Enable */
    222#define  GSWIP_FDMA_PCTRL_STEN		BIT(1)	/* Special Tag Insertion Enable */
    223#define  GSWIP_FDMA_PCTRL_VLANMOD_MASK	GENMASK(4, 3)	/* VLAN Modification Control */
    224#define  GSWIP_FDMA_PCTRL_VLANMOD_SHIFT	3	/* VLAN Modification Control */
    225#define  GSWIP_FDMA_PCTRL_VLANMOD_DIS	(0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
    226#define  GSWIP_FDMA_PCTRL_VLANMOD_PRIO	(0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
    227#define  GSWIP_FDMA_PCTRL_VLANMOD_ID	(0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
    228#define  GSWIP_FDMA_PCTRL_VLANMOD_BOTH	(0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
    229
    230/* Ethernet Switch Store DMA Port Control Register */
    231#define GSWIP_SDMA_PCTRLp(p)		(0xBC0 + ((p) * 0x6))
    232#define  GSWIP_SDMA_PCTRL_EN		BIT(0)	/* SDMA Port Enable */
    233#define  GSWIP_SDMA_PCTRL_FCEN		BIT(1)	/* Flow Control Enable */
    234#define  GSWIP_SDMA_PCTRL_PAUFWD	BIT(3)	/* Pause Frame Forwarding */
    235
    236#define GSWIP_TABLE_ACTIVE_VLAN		0x01
    237#define GSWIP_TABLE_VLAN_MAPPING	0x02
    238#define GSWIP_TABLE_MAC_BRIDGE		0x0b
    239#define  GSWIP_TABLE_MAC_BRIDGE_STATIC	0x01	/* Static, non-aging entry */
    240
    241#define XRX200_GPHY_FW_ALIGN	(16 * 1024)
    242
    243/* Maximum packet size supported by the switch. In theory this should be 10240,
    244 * but long packets currently cause lock-ups with an MTU of over 2526. Medium
    245 * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
    246 * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
    247 * packet reception. This is probably caused by the PPA engine, which is on the
    248 * RX part of the device. Packet transmission works properly up to 10240.
    249 */
    250#define GSWIP_MAX_PACKET_LENGTH	2400
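/*
 * Illustrative example, not part of the driver: with the limit above, the
 * largest MTU a user port can advertise is
 *   GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN
 *   = 2400 - 18 - 4 = 2378 bytes,
 * which is what gswip_port_max_mtu() below returns.
 */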
    251
    252struct gswip_hw_info {
    253	int max_ports;
    254	int cpu_port;
    255	const struct dsa_switch_ops *ops;
    256};
    257
    258struct xway_gphy_match_data {
    259	char *fe_firmware_name;
    260	char *ge_firmware_name;
    261};
    262
    263struct gswip_gphy_fw {
    264	struct clk *clk_gate;
    265	struct reset_control *reset;
    266	u32 fw_addr_offset;
    267	char *fw_name;
    268};
    269
    270struct gswip_vlan {
    271	struct net_device *bridge;
    272	u16 vid;
    273	u8 fid;
    274};
    275
    276struct gswip_priv {
    277	__iomem void *gswip;
    278	__iomem void *mdio;
    279	__iomem void *mii;
    280	const struct gswip_hw_info *hw_info;
    281	const struct xway_gphy_match_data *gphy_fw_name_cfg;
    282	struct dsa_switch *ds;
    283	struct device *dev;
    284	struct regmap *rcu_regmap;
    285	struct gswip_vlan vlans[64];
    286	int num_gphy_fw;
    287	struct gswip_gphy_fw *gphy_fw;
    288	u32 port_vlan_filter;
    289	struct mutex pce_table_lock;
    290};
    291
    292struct gswip_pce_table_entry {
    293	u16 index;      // PCE_TBL_ADDR.ADDR = pData->table_index
    294	u16 table;      // PCE_TBL_CTRL.ADDR = pData->table
    295	u16 key[8];
    296	u16 val[5];
    297	u16 mask;
    298	u8 gmap;
    299	bool type;
    300	bool valid;
    301	bool key_mode;
    302};
    303
    304struct gswip_rmon_cnt_desc {
    305	unsigned int size;
    306	unsigned int offset;
    307	const char *name;
    308};
    309
    310#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}
    311
    312static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
    313	/** Receive Packet Count (only packets that are accepted and not discarded). */
    314	MIB_DESC(1, 0x1F, "RxGoodPkts"),
    315	MIB_DESC(1, 0x23, "RxUnicastPkts"),
    316	MIB_DESC(1, 0x22, "RxMulticastPkts"),
    317	MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
    318	MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
    319	MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
    320	MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
    321	MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
    322	MIB_DESC(1, 0x20, "RxGoodPausePkts"),
    323	MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
    324	MIB_DESC(1, 0x12, "Rx64BytePkts"),
    325	MIB_DESC(1, 0x13, "Rx127BytePkts"),
    326	MIB_DESC(1, 0x14, "Rx255BytePkts"),
    327	MIB_DESC(1, 0x15, "Rx511BytePkts"),
    328	MIB_DESC(1, 0x16, "Rx1023BytePkts"),
    329	/** Receive Size 1024-1522 (or more, if configured) Packet Count. */
    330	MIB_DESC(1, 0x17, "RxMaxBytePkts"),
    331	MIB_DESC(1, 0x18, "RxDroppedPkts"),
    332	MIB_DESC(1, 0x19, "RxFilteredPkts"),
    333	MIB_DESC(2, 0x24, "RxGoodBytes"),
    334	MIB_DESC(2, 0x26, "RxBadBytes"),
    335	MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
    336	MIB_DESC(1, 0x0C, "TxGoodPkts"),
    337	MIB_DESC(1, 0x06, "TxUnicastPkts"),
    338	MIB_DESC(1, 0x07, "TxMulticastPkts"),
    339	MIB_DESC(1, 0x00, "Tx64BytePkts"),
    340	MIB_DESC(1, 0x01, "Tx127BytePkts"),
    341	MIB_DESC(1, 0x02, "Tx255BytePkts"),
    342	MIB_DESC(1, 0x03, "Tx511BytePkts"),
    343	MIB_DESC(1, 0x04, "Tx1023BytePkts"),
    344	/** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
    345	MIB_DESC(1, 0x05, "TxMaxBytePkts"),
    346	MIB_DESC(1, 0x08, "TxSingleCollCount"),
    347	MIB_DESC(1, 0x09, "TxMultCollCount"),
    348	MIB_DESC(1, 0x0A, "TxLateCollCount"),
    349	MIB_DESC(1, 0x0B, "TxExcessCollCount"),
    350	MIB_DESC(1, 0x0D, "TxPauseCount"),
    351	MIB_DESC(1, 0x10, "TxDroppedPkts"),
    352	MIB_DESC(2, 0x0E, "TxGoodBytes"),
    353};
    354
    355static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
    356{
    357	return __raw_readl(priv->gswip + (offset * 4));
    358}
    359
    360static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
    361{
    362	__raw_writel(val, priv->gswip + (offset * 4));
    363}
    364
    365static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
    366			      u32 offset)
    367{
    368	u32 val = gswip_switch_r(priv, offset);
    369
    370	val &= ~(clear);
    371	val |= set;
    372	gswip_switch_w(priv, val, offset);
    373}
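/*
 * Illustrative usage sketch, not part of the driver: register offsets are
 * given in 32-bit words, so the accessors above touch the word at byte
 * offset (offset * 4). Enabling VLAN aware switching, as gswip_setup()
 * does later, is a single read-modify-write:
 *
 *   gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);
 */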
    374
    375static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
    376				  u32 cleared)
    377{
    378	u32 val;
    379
    380	return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
    381				  (val & cleared) == 0, 20, 50000);
    382}
    383
    384static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
    385{
    386	return __raw_readl(priv->mdio + (offset * 4));
    387}
    388
    389static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
    390{
    391	__raw_writel(val, priv->mdio + (offset * 4));
    392}
    393
    394static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
    395			    u32 offset)
    396{
    397	u32 val = gswip_mdio_r(priv, offset);
    398
    399	val &= ~(clear);
    400	val |= set;
    401	gswip_mdio_w(priv, val, offset);
    402}
    403
    404static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
    405{
    406	return __raw_readl(priv->mii + (offset * 4));
    407}
    408
    409static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
    410{
    411	__raw_writel(val, priv->mii + (offset * 4));
    412}
    413
    414static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
    415			   u32 offset)
    416{
    417	u32 val = gswip_mii_r(priv, offset);
    418
    419	val &= ~(clear);
    420	val |= set;
    421	gswip_mii_w(priv, val, offset);
    422}
    423
    424static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
    425			       int port)
    426{
    427	/* There's no MII_CFG register for the CPU port */
    428	if (!dsa_is_cpu_port(priv->ds, port))
    429		gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
    430}
    431
    432static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
    433				int port)
    434{
    435	switch (port) {
    436	case 0:
    437		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
    438		break;
    439	case 1:
    440		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1);
    441		break;
    442	case 5:
    443		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5);
    444		break;
    445	}
    446}
    447
    448static int gswip_mdio_poll(struct gswip_priv *priv)
    449{
    450	int cnt = 100;
    451
    452	while (likely(cnt--)) {
    453		u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL);
    454
    455		if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0)
    456			return 0;
    457		usleep_range(20, 40);
    458	}
    459
    460	return -ETIMEDOUT;
    461}
    462
    463static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
    464{
    465	struct gswip_priv *priv = bus->priv;
    466	int err;
    467
    468	err = gswip_mdio_poll(priv);
    469	if (err) {
    470		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
    471		return err;
    472	}
    473
    474	gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
    475	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
    476		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
    477		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
    478		GSWIP_MDIO_CTRL);
    479
    480	return 0;
    481}
    482
    483static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
    484{
    485	struct gswip_priv *priv = bus->priv;
    486	int err;
    487
    488	err = gswip_mdio_poll(priv);
    489	if (err) {
    490		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
    491		return err;
    492	}
    493
    494	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
    495		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
    496		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
    497		GSWIP_MDIO_CTRL);
    498
    499	err = gswip_mdio_poll(priv);
    500	if (err) {
    501		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
    502		return err;
    503	}
    504
    505	return gswip_mdio_r(priv, GSWIP_MDIO_READ);
    506}
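/*
 * Illustrative sketch, not part of the driver (PHY address and register
 * number are hypothetical): an MDIO read of register 2 on PHY address 0x11
 * via gswip_mdio_rd() programs GSWIP_MDIO_CTRL with
 *
 *   GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD | (0x11 << 5) | 2
 *
 * and returns the value latched in GSWIP_MDIO_READ once BUSY clears.
 */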
    507
    508static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
    509{
    510	struct dsa_switch *ds = priv->ds;
    511	int err;
    512
    513	ds->slave_mii_bus = mdiobus_alloc();
    514	if (!ds->slave_mii_bus)
    515		return -ENOMEM;
    516
    517	ds->slave_mii_bus->priv = priv;
    518	ds->slave_mii_bus->read = gswip_mdio_rd;
    519	ds->slave_mii_bus->write = gswip_mdio_wr;
    520	ds->slave_mii_bus->name = "lantiq,xrx200-mdio";
    521	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
    522		 dev_name(priv->dev));
    523	ds->slave_mii_bus->parent = priv->dev;
    524	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
    525
    526	err = of_mdiobus_register(ds->slave_mii_bus, mdio_np);
    527	if (err)
    528		mdiobus_free(ds->slave_mii_bus);
    529
    530	return err;
    531}
    532
    533static int gswip_pce_table_entry_read(struct gswip_priv *priv,
    534				      struct gswip_pce_table_entry *tbl)
    535{
    536	int i;
    537	int err;
    538	u16 crtl;
    539	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
    540					GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
    541
    542	mutex_lock(&priv->pce_table_lock);
    543
    544	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
    545				     GSWIP_PCE_TBL_CTRL_BAS);
    546	if (err) {
    547		mutex_unlock(&priv->pce_table_lock);
    548		return err;
    549	}
    550
    551	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
    552	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
    553				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
    554			  tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
    555			  GSWIP_PCE_TBL_CTRL);
    556
    557	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
    558				     GSWIP_PCE_TBL_CTRL_BAS);
    559	if (err) {
    560		mutex_unlock(&priv->pce_table_lock);
    561		return err;
    562	}
    563
    564	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
    565		tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
    566
    567	for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
    568		tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));
    569
    570	tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);
    571
    572	crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
    573
    574	tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
    575	tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
    576	tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
    577
    578	mutex_unlock(&priv->pce_table_lock);
    579
    580	return 0;
    581}
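/*
 * Illustrative usage sketch, not part of the driver (the index is
 * hypothetical): reading back Active VLAN entry 7 with the helper above:
 *
 *   struct gswip_pce_table_entry vlan = {0,};
 *
 *   vlan.index = 7;
 *   vlan.table = GSWIP_TABLE_ACTIVE_VLAN;
 *   err = gswip_pce_table_entry_read(priv, &vlan);
 *
 * On success vlan.key[0] holds the VID and vlan.val[0] the FID, matching
 * what gswip_vlan_active_create() writes.
 */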
    582
    583static int gswip_pce_table_entry_write(struct gswip_priv *priv,
    584				       struct gswip_pce_table_entry *tbl)
    585{
    586	int i;
    587	int err;
    588	u16 crtl;
    589	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
    590					GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
    591
    592	mutex_lock(&priv->pce_table_lock);
    593
    594	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
    595				     GSWIP_PCE_TBL_CTRL_BAS);
    596	if (err) {
    597		mutex_unlock(&priv->pce_table_lock);
    598		return err;
    599	}
    600
    601	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
    602	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
    603				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
    604			  tbl->table | addr_mode,
    605			  GSWIP_PCE_TBL_CTRL);
    606
    607	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
    608		gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));
    609
    610	for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
    611		gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));
    612
    613	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
    614				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
    615			  tbl->table | addr_mode,
    616			  GSWIP_PCE_TBL_CTRL);
    617
    618	gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);
    619
    620	crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
    621	crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
    622		  GSWIP_PCE_TBL_CTRL_GMAP_MASK);
    623	if (tbl->type)
    624		crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
    625	if (tbl->valid)
    626		crtl |= GSWIP_PCE_TBL_CTRL_VLD;
    627	crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
    628	crtl |= GSWIP_PCE_TBL_CTRL_BAS;
    629	gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);
    630
    631	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
    632				     GSWIP_PCE_TBL_CTRL_BAS);
    633
    634	mutex_unlock(&priv->pce_table_lock);
    635
    636	return err;
    637}
    638
    639/* Add the LAN port into a bridge with the CPU port by
    640 * default. This prevents automatic forwarding of
    641 * packets between the LAN ports when no explicit
    642 * bridge is configured.
    643 */
    644static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
    645{
    646	struct gswip_pce_table_entry vlan_active = {0,};
    647	struct gswip_pce_table_entry vlan_mapping = {0,};
    648	unsigned int cpu_port = priv->hw_info->cpu_port;
    649	unsigned int max_ports = priv->hw_info->max_ports;
    650	int err;
    651
    652	if (port >= max_ports) {
    653		dev_err(priv->dev, "single port for %i supported\n", port);
    654		return -EIO;
    655	}
    656
    657	vlan_active.index = port + 1;
    658	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
    659	vlan_active.key[0] = 0; /* vid */
    660	vlan_active.val[0] = port + 1 /* fid */;
    661	vlan_active.valid = add;
    662	err = gswip_pce_table_entry_write(priv, &vlan_active);
    663	if (err) {
    664		dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
    665		return err;
    666	}
    667
    668	if (!add)
    669		return 0;
    670
    671	vlan_mapping.index = port + 1;
    672	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
    673	vlan_mapping.val[0] = 0 /* vid */;
    674	vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
    675	vlan_mapping.val[2] = 0;
    676	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
    677	if (err) {
    678		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
    679		return err;
    680	}
    681
    682	return 0;
    683}
    684
    685static int gswip_port_enable(struct dsa_switch *ds, int port,
    686			     struct phy_device *phydev)
    687{
    688	struct gswip_priv *priv = ds->priv;
    689	int err;
    690
    691	if (!dsa_is_user_port(ds, port))
    692		return 0;
    693
    694	if (!dsa_is_cpu_port(ds, port)) {
    695		err = gswip_add_single_port_br(priv, port, true);
    696		if (err)
    697			return err;
    698	}
    699
    700	/* RMON Counter Enable for port */
    701	gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));
    702
    703	/* enable port fetch/store dma & VLAN Modification */
    704	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN |
    705				   GSWIP_FDMA_PCTRL_VLANMOD_BOTH,
    706			 GSWIP_FDMA_PCTRLp(port));
    707	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
    708			  GSWIP_SDMA_PCTRLp(port));
    709
    710	if (!dsa_is_cpu_port(ds, port)) {
    711		u32 mdio_phy = 0;
    712
    713		if (phydev)
    714			mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
    715
    716		gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
    717				GSWIP_MDIO_PHYp(port));
    718	}
    719
    720	return 0;
    721}
    722
    723static void gswip_port_disable(struct dsa_switch *ds, int port)
    724{
    725	struct gswip_priv *priv = ds->priv;
    726
    727	if (!dsa_is_user_port(ds, port))
    728		return;
    729
    730	gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
    731			  GSWIP_FDMA_PCTRLp(port));
    732	gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
    733			  GSWIP_SDMA_PCTRLp(port));
    734}
    735
    736static int gswip_pce_load_microcode(struct gswip_priv *priv)
    737{
    738	int i;
    739	int err;
    740
    741	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
    742				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
    743			  GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
    744	gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);
    745
    746	for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
    747		gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
    748		gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
    749			       GSWIP_PCE_TBL_VAL(0));
    750		gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
    751			       GSWIP_PCE_TBL_VAL(1));
    752		gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
    753			       GSWIP_PCE_TBL_VAL(2));
    754		gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
    755			       GSWIP_PCE_TBL_VAL(3));
    756
    757		/* start the table access: */
    758		gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
    759				  GSWIP_PCE_TBL_CTRL);
    760		err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
    761					     GSWIP_PCE_TBL_CTRL_BAS);
    762		if (err)
    763			return err;
    764	}
    765
    766	/* tell the switch that the microcode is loaded */
    767	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
    768			  GSWIP_PCE_GCTRL_0);
    769
    770	return 0;
    771}
    772
    773static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
    774				     bool vlan_filtering,
    775				     struct netlink_ext_ack *extack)
    776{
    777	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
    778	struct gswip_priv *priv = ds->priv;
    779
    780	/* Do not allow changing the VLAN filtering options while in bridge */
    781	if (bridge && !!(priv->port_vlan_filter & BIT(port)) != vlan_filtering) {
    782		NL_SET_ERR_MSG_MOD(extack,
    783				   "Dynamic toggling of vlan_filtering not supported");
    784		return -EIO;
    785	}
    786
    787	if (vlan_filtering) {
    788		/* Use tag based VLAN */
    789		gswip_switch_mask(priv,
    790				  GSWIP_PCE_VCTRL_VSR,
    791				  GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
    792				  GSWIP_PCE_VCTRL_VEMR,
    793				  GSWIP_PCE_VCTRL(port));
    794		gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
    795				  GSWIP_PCE_PCTRL_0p(port));
    796	} else {
    797		/* Use port based VLAN tag */
    798		gswip_switch_mask(priv,
    799				  GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
    800				  GSWIP_PCE_VCTRL_VEMR,
    801				  GSWIP_PCE_VCTRL_VSR,
    802				  GSWIP_PCE_VCTRL(port));
    803		gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM,
    804				  GSWIP_PCE_PCTRL_0p(port));
    805	}
    806
    807	return 0;
    808}
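/*
 * Illustrative summary, not part of the driver: with vlan_filtering the
 * port enforces VLAN membership (UVR/VIMR/VEMR set, transparent VLAN mode
 * cleared); without it the port runs in transparent VLAN mode with only
 * the VLAN security rule (VSR) set.
 */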
    809
    810static int gswip_setup(struct dsa_switch *ds)
    811{
    812	struct gswip_priv *priv = ds->priv;
    813	unsigned int cpu_port = priv->hw_info->cpu_port;
    814	int i;
    815	int err;
    816
    817	gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
    818	usleep_range(5000, 10000);
    819	gswip_switch_w(priv, 0, GSWIP_SWRES);
    820
    821	/* disable port fetch/store dma on all ports */
    822	for (i = 0; i < priv->hw_info->max_ports; i++) {
    823		gswip_port_disable(ds, i);
    824		gswip_port_vlan_filtering(ds, i, false, NULL);
    825	}
    826
    827	/* enable Switch */
    828	gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);
    829
    830	err = gswip_pce_load_microcode(priv);
    831	if (err) {
    832		dev_err(priv->dev, "writing PCE microcode failed, %i", err);
    833		return err;
    834	}
    835
    836	/* Default unknown Broadcast/Multicast/Unicast port maps */
    837	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
    838	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
    839	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
    840
    841	/* Deactivate MDIO PHY auto polling. Some PHYs like the AR8030 have an
    842	 * interoperability problem with this auto polling mechanism because
    843	 * their status registers report that the link is in a different state
    844	 * than it actually is. The AR8030 has the BMSR_ESTATEN bit set as well
    845	 * as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the auto
    846	 * polling state machine consider the link to be negotiated at
    847	 * 1 Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads
    848	 * to the switch port being completely dead (neither RX nor TX is
    849	 * working).
    850	 * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
    851	 * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes
    852	 * it would work fine for a few minutes or hours and then stop; on other
    853	 * devices no traffic could be sent or received at all.
    854	 * Testing shows that when PHY auto polling is disabled these problems
    855	 * go away.
    856	 */
    857	gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
    858
    859	/* Configure the MDIO Clock 2.5 MHz */
    860	gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
    861
    862	/* Disable the xMII interface and clear its isolation bit */
    863	for (i = 0; i < priv->hw_info->max_ports; i++)
    864		gswip_mii_mask_cfg(priv,
    865				   GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
    866				   0, i);
    867
    868	/* enable special tag insertion on cpu port */
    869	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
    870			  GSWIP_FDMA_PCTRLp(cpu_port));
    871
    872	/* accept special tag in ingress direction */
    873	gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
    874			  GSWIP_PCE_PCTRL_0p(cpu_port));
    875
    876	gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
    877			  GSWIP_BM_QUEUE_GCTRL);
    878
    879	/* VLAN aware Switching */
    880	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);
    881
    882	/* Flush MAC Table */
    883	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0);
    884
    885	err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
    886				     GSWIP_PCE_GCTRL_0_MTFL);
    887	if (err) {
    888		dev_err(priv->dev, "MAC flushing didn't finish\n");
    889		return err;
    890	}
    891
    892	ds->mtu_enforcement_ingress = true;
    893
    894	gswip_port_enable(ds, cpu_port, NULL);
    895
    896	ds->configure_vlan_while_not_filtering = false;
    897
    898	return 0;
    899}
    900
    901static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
    902						    int port,
    903						    enum dsa_tag_protocol mp)
    904{
    905	return DSA_TAG_PROTO_GSWIP;
    906}
    907
    908static int gswip_vlan_active_create(struct gswip_priv *priv,
    909				    struct net_device *bridge,
    910				    int fid, u16 vid)
    911{
    912	struct gswip_pce_table_entry vlan_active = {0,};
    913	unsigned int max_ports = priv->hw_info->max_ports;
    914	int idx = -1;
    915	int err;
    916	int i;
    917
    918	/* Look for a free slot */
    919	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
    920		if (!priv->vlans[i].bridge) {
    921			idx = i;
    922			break;
    923		}
    924	}
    925
    926	if (idx == -1)
    927		return -ENOSPC;
    928
    929	if (fid == -1)
    930		fid = idx;
    931
    932	vlan_active.index = idx;
    933	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
    934	vlan_active.key[0] = vid;
    935	vlan_active.val[0] = fid;
    936	vlan_active.valid = true;
    937
    938	err = gswip_pce_table_entry_write(priv, &vlan_active);
    939	if (err) {
    940		dev_err(priv->dev, "failed to write active VLAN: %d\n",	err);
    941		return err;
    942	}
    943
    944	priv->vlans[idx].bridge = bridge;
    945	priv->vlans[idx].vid = vid;
    946	priv->vlans[idx].fid = fid;
    947
    948	return idx;
    949}
    950
    951static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
    952{
    953	struct gswip_pce_table_entry vlan_active = {0,};
    954	int err;
    955
    956	vlan_active.index = idx;
    957	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
    958	vlan_active.valid = false;
    959	err = gswip_pce_table_entry_write(priv, &vlan_active);
    960	if (err)
    961		dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
    962	priv->vlans[idx].bridge = NULL;
    963
    964	return err;
    965}
    966
    967static int gswip_vlan_add_unaware(struct gswip_priv *priv,
    968				  struct net_device *bridge, int port)
    969{
    970	struct gswip_pce_table_entry vlan_mapping = {0,};
    971	unsigned int max_ports = priv->hw_info->max_ports;
    972	unsigned int cpu_port = priv->hw_info->cpu_port;
    973	bool active_vlan_created = false;
    974	int idx = -1;
    975	int i;
    976	int err;
    977
    978	/* Check if there is already an entry for this bridge */
    979	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
    980		if (priv->vlans[i].bridge == bridge) {
    981			idx = i;
    982			break;
    983		}
    984	}
    985
    986	/* If this bridge is not programmed yet, add an Active VLAN table
    987	 * entry in a free slot and prepare the VLAN mapping table entry.
    988	 */
    989	if (idx == -1) {
    990		idx = gswip_vlan_active_create(priv, bridge, -1, 0);
    991		if (idx < 0)
    992			return idx;
    993		active_vlan_created = true;
    994
    995		vlan_mapping.index = idx;
    996		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
    997		/* VLAN ID, maps to the VLAN ID of the active VLAN table */
    998		vlan_mapping.val[0] = 0;
    999	} else {
   1000		/* Read the existing VLAN mapping entry from the switch */
   1001		vlan_mapping.index = idx;
   1002		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
   1003		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
   1004		if (err) {
   1005			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
   1006				err);
   1007			return err;
   1008		}
   1009	}
   1010
   1011	/* Update the VLAN mapping entry and write it to the switch */
   1012	vlan_mapping.val[1] |= BIT(cpu_port);
   1013	vlan_mapping.val[1] |= BIT(port);
   1014	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
   1015	if (err) {
   1016		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
   1017		/* In case an Active VLAN was created, delete it again */
   1018		if (active_vlan_created)
   1019			gswip_vlan_active_remove(priv, idx);
   1020		return err;
   1021	}
   1022
   1023	gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
   1024	return 0;
   1025}
   1026
   1027static int gswip_vlan_add_aware(struct gswip_priv *priv,
   1028				struct net_device *bridge, int port,
   1029				u16 vid, bool untagged,
   1030				bool pvid)
   1031{
   1032	struct gswip_pce_table_entry vlan_mapping = {0,};
   1033	unsigned int max_ports = priv->hw_info->max_ports;
   1034	unsigned int cpu_port = priv->hw_info->cpu_port;
   1035	bool active_vlan_created = false;
   1036	int idx = -1;
   1037	int fid = -1;
   1038	int i;
   1039	int err;
   1040
   1041	/* Check if there is already an entry for this bridge */
   1042	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
   1043		if (priv->vlans[i].bridge == bridge) {
   1044			if (fid != -1 && fid != priv->vlans[i].fid)
   1045				dev_err(priv->dev, "one bridge with multiple flow ids\n");
   1046			fid = priv->vlans[i].fid;
   1047			if (priv->vlans[i].vid == vid) {
   1048				idx = i;
   1049				break;
   1050			}
   1051		}
   1052	}
   1053
   1054	/* If this bridge is not programmed yet, add an Active VLAN table
   1055	 * entry in a free slot and prepare the VLAN mapping table entry.
   1056	 */
   1057	if (idx == -1) {
   1058		idx = gswip_vlan_active_create(priv, bridge, fid, vid);
   1059		if (idx < 0)
   1060			return idx;
   1061		active_vlan_created = true;
   1062
   1063		vlan_mapping.index = idx;
   1064		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
   1065		/* VLAN ID, maps to the VLAN ID of the active VLAN table */
   1066		vlan_mapping.val[0] = vid;
   1067	} else {
   1068		/* Read the existing VLAN mapping entry from the switch */
   1069		vlan_mapping.index = idx;
   1070		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
   1071		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
   1072		if (err) {
   1073			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
   1074				err);
   1075			return err;
   1076		}
   1077	}
   1078
   1079	vlan_mapping.val[0] = vid;
   1080	/* Update the VLAN mapping entry and write it to the switch */
   1081	vlan_mapping.val[1] |= BIT(cpu_port);
   1082	vlan_mapping.val[2] |= BIT(cpu_port);
   1083	vlan_mapping.val[1] |= BIT(port);
   1084	if (untagged)
   1085		vlan_mapping.val[2] &= ~BIT(port);
   1086	else
   1087		vlan_mapping.val[2] |= BIT(port);
   1088	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
   1089	if (err) {
   1090		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
   1091		/* In case an Active VLAN was created, delete it again */
   1092		if (active_vlan_created)
   1093			gswip_vlan_active_remove(priv, idx);
   1094		return err;
   1095	}
   1096
   1097	if (pvid)
   1098		gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));
   1099
   1100	return 0;
   1101}
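/*
 * Illustrative sketch, not part of the driver (port numbers hypothetical):
 * in the VLAN mapping entries written above, val[1] is the member port map
 * and val[2] the "egress tagged" port map. Adding user port 2 untagged on
 * a switch whose CPU port is 6 ends up as
 *
 *   val[1] |= BIT(6) | BIT(2);   <- members: CPU port and port 2
 *   val[2] |= BIT(6);            <- only the CPU port egresses tagged
 */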
   1102
   1103static int gswip_vlan_remove(struct gswip_priv *priv,
   1104			     struct net_device *bridge, int port,
   1105			     u16 vid, bool pvid, bool vlan_aware)
   1106{
   1107	struct gswip_pce_table_entry vlan_mapping = {0,};
   1108	unsigned int max_ports = priv->hw_info->max_ports;
   1109	unsigned int cpu_port = priv->hw_info->cpu_port;
   1110	int idx = -1;
   1111	int i;
   1112	int err;
   1113
   1114	/* Check if there is already an entry for this bridge */
   1115	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
   1116		if (priv->vlans[i].bridge == bridge &&
   1117		    (!vlan_aware || priv->vlans[i].vid == vid)) {
   1118			idx = i;
   1119			break;
   1120		}
   1121	}
   1122
   1123	if (idx == -1) {
   1124		dev_err(priv->dev, "bridge to leave does not exist\n");
   1125		return -ENOENT;
   1126	}
   1127
   1128	vlan_mapping.index = idx;
   1129	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
   1130	err = gswip_pce_table_entry_read(priv, &vlan_mapping);
   1131	if (err) {
   1132		dev_err(priv->dev, "failed to read VLAN mapping: %d\n",	err);
   1133		return err;
   1134	}
   1135
   1136	vlan_mapping.val[1] &= ~BIT(port);
   1137	vlan_mapping.val[2] &= ~BIT(port);
   1138	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
   1139	if (err) {
   1140		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
   1141		return err;
   1142	}
   1143
   1144	/* In case all ports are removed from the bridge, remove the VLAN */
   1145	if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
   1146		err = gswip_vlan_active_remove(priv, idx);
   1147		if (err) {
   1148			dev_err(priv->dev, "failed to write active VLAN: %d\n",
   1149				err);
   1150			return err;
   1151		}
   1152	}
   1153
   1154	/* GSWIP 2.2 (GRX300) and later program the VID directly here. */
   1155	if (pvid)
   1156		gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
   1157
   1158	return 0;
   1159}
   1160
   1161static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
   1162				  struct dsa_bridge bridge,
   1163				  bool *tx_fwd_offload,
   1164				  struct netlink_ext_ack *extack)
   1165{
   1166	struct net_device *br = bridge.dev;
   1167	struct gswip_priv *priv = ds->priv;
   1168	int err;
   1169
   1170	/* When the bridge uses VLAN filtering we have to configure VLAN
   1171	 * specific bridges. No bridge is configured here.
   1172	 */
   1173	if (!br_vlan_enabled(br)) {
   1174		err = gswip_vlan_add_unaware(priv, br, port);
   1175		if (err)
   1176			return err;
   1177		priv->port_vlan_filter &= ~BIT(port);
   1178	} else {
   1179		priv->port_vlan_filter |= BIT(port);
   1180	}
   1181	return gswip_add_single_port_br(priv, port, false);
   1182}
   1183
   1184static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
   1185				    struct dsa_bridge bridge)
   1186{
   1187	struct net_device *br = bridge.dev;
   1188	struct gswip_priv *priv = ds->priv;
   1189
   1190	gswip_add_single_port_br(priv, port, true);
   1191
   1192	/* When the bridge uses VLAN filtering we have to configure VLAN
   1193	 * specific bridges. No bridge is configured here.
   1194	 */
   1195	if (!br_vlan_enabled(br))
   1196		gswip_vlan_remove(priv, br, port, 0, true, false);
   1197}
   1198
   1199static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
   1200				   const struct switchdev_obj_port_vlan *vlan,
   1201				   struct netlink_ext_ack *extack)
   1202{
   1203	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
   1204	struct gswip_priv *priv = ds->priv;
   1205	unsigned int max_ports = priv->hw_info->max_ports;
   1206	int pos = max_ports;
   1207	int i, idx = -1;
   1208
   1209	/* We only support VLAN filtering on bridges */
   1210	if (!dsa_is_cpu_port(ds, port) && !bridge)
   1211		return -EOPNOTSUPP;
   1212
   1213	/* Check if there is already an entry for this VLAN */
   1214	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
   1215		if (priv->vlans[i].bridge == bridge &&
   1216		    priv->vlans[i].vid == vlan->vid) {
   1217			idx = i;
   1218			break;
   1219		}
   1220	}
   1221
   1222	/* If this VLAN is not programmed yet, we have to reserve
   1223	 * one entry in the VLAN table. Make sure the next search
   1224	 * starts at the following position.
   1225	 */
   1226	if (idx == -1) {
   1227		/* Look for a free slot */
   1228		for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
   1229			if (!priv->vlans[pos].bridge) {
   1230				idx = pos;
   1231				pos++;
   1232				break;
   1233			}
   1234		}
   1235
   1236		if (idx == -1) {
   1237			NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table");
   1238			return -ENOSPC;
   1239		}
   1240	}
   1241
   1242	return 0;
   1243}
   1244
   1245static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
   1246			       const struct switchdev_obj_port_vlan *vlan,
   1247			       struct netlink_ext_ack *extack)
   1248{
   1249	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
   1250	struct gswip_priv *priv = ds->priv;
   1251	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
   1252	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
   1253	int err;
   1254
   1255	err = gswip_port_vlan_prepare(ds, port, vlan, extack);
   1256	if (err)
   1257		return err;
   1258
   1259	/* We have to receive all packets on the CPU port and should not
   1260	 * do any VLAN filtering here. This is also called with bridge
   1261	 * NULL, in which case we do not know which bridge to configure
   1262	 * this for.
   1263	 */
   1264	if (dsa_is_cpu_port(ds, port))
   1265		return 0;
   1266
   1267	return gswip_vlan_add_aware(priv, bridge, port, vlan->vid,
   1268				    untagged, pvid);
   1269}
   1270
   1271static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
   1272			       const struct switchdev_obj_port_vlan *vlan)
   1273{
   1274	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
   1275	struct gswip_priv *priv = ds->priv;
   1276	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
   1277
   1278	/* We have to receive all packets on the CPU port and should not
   1279	 * do any VLAN filtering here. This is also called with bridge
   1280	 * NULL, in which case we do not know which bridge to configure
   1281	 * this for.
   1282	 */
   1283	if (dsa_is_cpu_port(ds, port))
   1284		return 0;
   1285
   1286	return gswip_vlan_remove(priv, bridge, port, vlan->vid, pvid, true);
   1287}
   1288
   1289static void gswip_port_fast_age(struct dsa_switch *ds, int port)
   1290{
   1291	struct gswip_priv *priv = ds->priv;
   1292	struct gswip_pce_table_entry mac_bridge = {0,};
   1293	int i;
   1294	int err;
   1295
   1296	for (i = 0; i < 2048; i++) {
   1297		mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
   1298		mac_bridge.index = i;
   1299
   1300		err = gswip_pce_table_entry_read(priv, &mac_bridge);
   1301		if (err) {
   1302			dev_err(priv->dev, "failed to read mac bridge: %d\n",
   1303				err);
   1304			return;
   1305		}
   1306
   1307		if (!mac_bridge.valid)
   1308			continue;
   1309
   1310		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC)
   1311			continue;
   1312
   1313		if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port)
   1314			continue;
   1315
   1316		mac_bridge.valid = false;
   1317		err = gswip_pce_table_entry_write(priv, &mac_bridge);
   1318		if (err) {
   1319			dev_err(priv->dev, "failed to write mac bridge: %d\n",
   1320				err);
   1321			return;
   1322		}
   1323	}
   1324}
   1325
   1326static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
   1327{
   1328	struct gswip_priv *priv = ds->priv;
   1329	u32 stp_state;
   1330
   1331	switch (state) {
   1332	case BR_STATE_DISABLED:
   1333		gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
   1334				  GSWIP_SDMA_PCTRLp(port));
   1335		return;
   1336	case BR_STATE_BLOCKING:
   1337	case BR_STATE_LISTENING:
   1338		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
   1339		break;
   1340	case BR_STATE_LEARNING:
   1341		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
   1342		break;
   1343	case BR_STATE_FORWARDING:
   1344		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
   1345		break;
   1346	default:
   1347		dev_err(priv->dev, "invalid STP state: %d\n", state);
   1348		return;
   1349	}
   1350
   1351	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
   1352			  GSWIP_SDMA_PCTRLp(port));
   1353	gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state,
   1354			  GSWIP_PCE_PCTRL_0p(port));
   1355}
   1356
   1357static int gswip_port_fdb(struct dsa_switch *ds, int port,
   1358			  const unsigned char *addr, u16 vid, bool add)
   1359{
   1360	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
   1361	struct gswip_priv *priv = ds->priv;
   1362	struct gswip_pce_table_entry mac_bridge = {0,};
   1363	unsigned int max_ports = priv->hw_info->max_ports;
   1364	int fid = -1;
   1365	int i;
   1366	int err;
   1367
   1368	if (!bridge)
   1369		return -EINVAL;
   1370
   1371	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
   1372		if (priv->vlans[i].bridge == bridge) {
   1373			fid = priv->vlans[i].fid;
   1374			break;
   1375		}
   1376	}
   1377
   1378	if (fid == -1) {
   1379		dev_err(priv->dev, "Port not part of a bridge\n");
   1380		return -EINVAL;
   1381	}
   1382
   1383	mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
   1384	mac_bridge.key_mode = true;
   1385	mac_bridge.key[0] = addr[5] | (addr[4] << 8);
   1386	mac_bridge.key[1] = addr[3] | (addr[2] << 8);
   1387	mac_bridge.key[2] = addr[1] | (addr[0] << 8);
   1388	mac_bridge.key[3] = fid;
   1389	mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
   1390	mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC;
   1391	mac_bridge.valid = add;
   1392
   1393	err = gswip_pce_table_entry_write(priv, &mac_bridge);
   1394	if (err)
   1395		dev_err(priv->dev, "failed to write mac bridge: %d\n", err);
   1396
   1397	return err;
   1398}
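/*
 * Illustrative sketch, not part of the driver (address and FID are
 * hypothetical): gswip_port_fdb() above packs the MAC address into three
 * 16-bit key words with the FID in key[3]. For 00:11:22:33:44:55 in FID 2:
 *
 *   key[0] = 0x4455;  key[1] = 0x2233;  key[2] = 0x0011;  key[3] = 2;
 */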
   1399
   1400static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
   1401			      const unsigned char *addr, u16 vid,
   1402			      struct dsa_db db)
   1403{
   1404	return gswip_port_fdb(ds, port, addr, vid, true);
   1405}
   1406
   1407static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
   1408			      const unsigned char *addr, u16 vid,
   1409			      struct dsa_db db)
   1410{
   1411	return gswip_port_fdb(ds, port, addr, vid, false);
   1412}
   1413
   1414static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
   1415			       dsa_fdb_dump_cb_t *cb, void *data)
   1416{
   1417	struct gswip_priv *priv = ds->priv;
   1418	struct gswip_pce_table_entry mac_bridge = {0,};
   1419	unsigned char addr[6];
   1420	int i;
   1421	int err;
   1422
   1423	for (i = 0; i < 2048; i++) {
   1424		mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
   1425		mac_bridge.index = i;
   1426
   1427		err = gswip_pce_table_entry_read(priv, &mac_bridge);
   1428		if (err) {
   1429			dev_err(priv->dev,
   1430				"failed to read mac bridge entry %d: %d\n",
   1431				i, err);
   1432			return err;
   1433		}
   1434
   1435		if (!mac_bridge.valid)
   1436			continue;
   1437
   1438		addr[5] = mac_bridge.key[0] & 0xff;
   1439		addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
   1440		addr[3] = mac_bridge.key[1] & 0xff;
   1441		addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
   1442		addr[1] = mac_bridge.key[2] & 0xff;
   1443		addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
   1444		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
   1445			if (mac_bridge.val[0] & BIT(port)) {
   1446				err = cb(addr, 0, true, data);
   1447				if (err)
   1448					return err;
   1449			}
   1450		} else {
   1451			if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
   1452				err = cb(addr, 0, false, data);
   1453				if (err)
   1454					return err;
   1455			}
   1456		}
   1457	}
   1458	return 0;
   1459}
   1460
   1461static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
   1462{
   1463	/* Includes 8 bytes for special header. */
   1464	return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
   1465}
   1466
   1467static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
   1468{
   1469	struct gswip_priv *priv = ds->priv;
   1470	int cpu_port = priv->hw_info->cpu_port;
   1471
   1472	/* The CPU port always has the maximum MTU of the user ports, so use
   1473	 * it to set the switch frame size, including the 8 byte special header.
   1474	 */
   1475	if (port == cpu_port) {
   1476		new_mtu += 8;
   1477		gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN,
   1478			       GSWIP_MAC_FLEN);
   1479	}
   1480
   1481	/* Enable MLEN for ports with non-standard MTUs, including the special
   1482	 * header on the CPU port added above.
   1483	 */
   1484	if (new_mtu != ETH_DATA_LEN)
   1485		gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
   1486				  GSWIP_MAC_CTRL_2p(port));
   1487	else
   1488		gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0,
   1489				  GSWIP_MAC_CTRL_2p(port));
   1490
   1491	return 0;
   1492}
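/*
 * Illustrative example, not part of the driver: for the default MTU of
 * 1500 on the CPU port, gswip_port_change_mtu() above programs the frame
 * length register with
 *   VLAN_ETH_HLEN + (1500 + 8) + ETH_FCS_LEN = 18 + 1508 + 4 = 1530 bytes.
 */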
   1493
   1494static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
   1495					  struct phylink_config *config)
   1496{
   1497	switch (port) {
   1498	case 0:
   1499	case 1:
   1500		phy_interface_set_rgmii(config->supported_interfaces);
   1501		__set_bit(PHY_INTERFACE_MODE_MII,
   1502			  config->supported_interfaces);
   1503		__set_bit(PHY_INTERFACE_MODE_REVMII,
   1504			  config->supported_interfaces);
   1505		__set_bit(PHY_INTERFACE_MODE_RMII,
   1506			  config->supported_interfaces);
   1507		break;
   1508
   1509	case 2:
   1510	case 3:
   1511	case 4:
   1512		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
   1513			  config->supported_interfaces);
   1514		break;
   1515
   1516	case 5:
   1517		phy_interface_set_rgmii(config->supported_interfaces);
   1518		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
   1519			  config->supported_interfaces);
   1520		break;
   1521	}
   1522
   1523	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
   1524		MAC_10 | MAC_100 | MAC_1000;
   1525}
   1526
   1527static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
   1528					  struct phylink_config *config)
   1529{
   1530	switch (port) {
   1531	case 0:
   1532		phy_interface_set_rgmii(config->supported_interfaces);
   1533		__set_bit(PHY_INTERFACE_MODE_GMII,
   1534			  config->supported_interfaces);
   1535		__set_bit(PHY_INTERFACE_MODE_RMII,
   1536			  config->supported_interfaces);
   1537		break;
   1538
   1539	case 1:
   1540	case 2:
   1541	case 3:
   1542	case 4:
   1543		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
   1544			  config->supported_interfaces);
   1545		break;
   1546
   1547	case 5:
   1548		phy_interface_set_rgmii(config->supported_interfaces);
   1549		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
   1550			  config->supported_interfaces);
   1551		__set_bit(PHY_INTERFACE_MODE_RMII,
   1552			  config->supported_interfaces);
   1553		break;
   1554	}
   1555
   1556	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
   1557		MAC_10 | MAC_100 | MAC_1000;
   1558}
   1559
   1560static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
   1561{
   1562	u32 mdio_phy;
   1563
   1564	if (link)
   1565		mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
   1566	else
   1567		mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
   1568
   1569	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
   1570			GSWIP_MDIO_PHYp(port));
   1571}
   1572
   1573static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
   1574				 phy_interface_t interface)
   1575{
   1576	u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
   1577
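	/* Force the speed in the MDIO PHY register, select the xMII port
	 * clock (2.5/25 MHz for MII at 10/100 Mbit/s, 50 MHz reference for
	 * RMII, 125 MHz for RGMII at 1 Gbit/s) and the matching MAC mode.
	 */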
   1578	switch (speed) {
   1579	case SPEED_10:
   1580		mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
   1581
   1582		if (interface == PHY_INTERFACE_MODE_RMII)
   1583			mii_cfg = GSWIP_MII_CFG_RATE_M50;
   1584		else
   1585			mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
   1586
   1587		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
   1588		break;
   1589
   1590	case SPEED_100:
   1591		mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
   1592
   1593		if (interface == PHY_INTERFACE_MODE_RMII)
   1594			mii_cfg = GSWIP_MII_CFG_RATE_M50;
   1595		else
   1596			mii_cfg = GSWIP_MII_CFG_RATE_M25;
   1597
   1598		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
   1599		break;
   1600
   1601	case SPEED_1000:
   1602		mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
   1603
   1604		mii_cfg = GSWIP_MII_CFG_RATE_M125;
   1605
   1606		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
   1607		break;
   1608	}
   1609
   1610	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
   1611			GSWIP_MDIO_PHYp(port));
   1612	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
   1613	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
   1614			  GSWIP_MAC_CTRL_0p(port));
   1615}
   1616
   1617static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
   1618{
   1619	u32 mac_ctrl_0, mdio_phy;
   1620
   1621	if (duplex == DUPLEX_FULL) {
   1622		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
   1623		mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
   1624	} else {
   1625		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
   1626		mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
   1627	}
   1628
   1629	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
   1630			  GSWIP_MAC_CTRL_0p(port));
   1631	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
   1632			GSWIP_MDIO_PHYp(port));
   1633}
   1634
   1635static void gswip_port_set_pause(struct gswip_priv *priv, int port,
   1636				 bool tx_pause, bool rx_pause)
   1637{
   1638	u32 mac_ctrl_0, mdio_phy;
   1639
   1640	if (tx_pause && rx_pause) {
   1641		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
   1642		mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
   1643			   GSWIP_MDIO_PHY_FCONRX_EN;
   1644	} else if (tx_pause) {
   1645		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
   1646		mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
   1647			   GSWIP_MDIO_PHY_FCONRX_DIS;
   1648	} else if (rx_pause) {
   1649		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
   1650		mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
   1651			   GSWIP_MDIO_PHY_FCONRX_EN;
   1652	} else {
   1653		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
   1654		mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
   1655			   GSWIP_MDIO_PHY_FCONRX_DIS;
   1656	}
   1657
   1658	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
   1659			  mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
   1660	gswip_mdio_mask(priv,
   1661			GSWIP_MDIO_PHY_FCONTX_MASK |
   1662			GSWIP_MDIO_PHY_FCONRX_MASK,
   1663			mdio_phy, GSWIP_MDIO_PHYp(port));
   1664}
   1665
   1666static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
   1667				     unsigned int mode,
   1668				     const struct phylink_link_state *state)
   1669{
   1670	struct gswip_priv *priv = ds->priv;
   1671	u32 miicfg = 0;
   1672
   1673	miicfg |= GSWIP_MII_CFG_LDCLKDIS;
   1674
   1675	switch (state->interface) {
   1676	case PHY_INTERFACE_MODE_MII:
   1677	case PHY_INTERFACE_MODE_INTERNAL:
   1678		miicfg |= GSWIP_MII_CFG_MODE_MIIM;
   1679		break;
   1680	case PHY_INTERFACE_MODE_REVMII:
   1681		miicfg |= GSWIP_MII_CFG_MODE_MIIP;
   1682		break;
   1683	case PHY_INTERFACE_MODE_RMII:
   1684		miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
   1685		break;
   1686	case PHY_INTERFACE_MODE_RGMII:
   1687	case PHY_INTERFACE_MODE_RGMII_ID:
   1688	case PHY_INTERFACE_MODE_RGMII_RXID:
   1689	case PHY_INTERFACE_MODE_RGMII_TXID:
   1690		miicfg |= GSWIP_MII_CFG_MODE_RGMII;
   1691		break;
   1692	case PHY_INTERFACE_MODE_GMII:
   1693		miicfg |= GSWIP_MII_CFG_MODE_GMII;
   1694		break;
   1695	default:
   1696		dev_err(ds->dev,
   1697			"Unsupported interface: %d\n", state->interface);
   1698		return;
   1699	}
   1700
   1701	gswip_mii_mask_cfg(priv,
   1702			   GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
   1703			   GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
   1704			   miicfg, port);
   1705
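	/* For the RGMII internal-delay modes the delays are assumed to be
	 * provided by the PHY, so clear the TX and/or RX clock delay in the
	 * switch for the affected direction(s).
	 */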
   1706	switch (state->interface) {
   1707	case PHY_INTERFACE_MODE_RGMII_ID:
   1708		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
   1709					  GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
   1710		break;
   1711	case PHY_INTERFACE_MODE_RGMII_RXID:
   1712		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
   1713		break;
   1714	case PHY_INTERFACE_MODE_RGMII_TXID:
   1715		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
   1716		break;
   1717	default:
   1718		break;
   1719	}
   1720}
   1721
   1722static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
   1723					unsigned int mode,
   1724					phy_interface_t interface)
   1725{
   1726	struct gswip_priv *priv = ds->priv;
   1727
   1728	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
   1729
   1730	if (!dsa_is_cpu_port(ds, port))
   1731		gswip_port_set_link(priv, port, false);
   1732}
   1733
   1734static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
   1735				      unsigned int mode,
   1736				      phy_interface_t interface,
   1737				      struct phy_device *phydev,
   1738				      int speed, int duplex,
   1739				      bool tx_pause, bool rx_pause)
   1740{
   1741	struct gswip_priv *priv = ds->priv;
   1742
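	/* Link, speed, duplex and pause are only forced for the user ports;
	 * the xMII interface itself is enabled for every port below.
	 */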
   1743	if (!dsa_is_cpu_port(ds, port)) {
   1744		gswip_port_set_link(priv, port, true);
   1745		gswip_port_set_speed(priv, port, speed, interface);
   1746		gswip_port_set_duplex(priv, port, duplex);
   1747		gswip_port_set_pause(priv, port, tx_pause, rx_pause);
   1748	}
   1749
   1750	gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
   1751}
   1752
   1753static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
   1754			      uint8_t *data)
   1755{
   1756	int i;
   1757
   1758	if (stringset != ETH_SS_STATS)
   1759		return;
   1760
   1761	for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
   1762		strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name,
   1763			ETH_GSTRING_LEN);
   1764}
   1765
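/* Read one 32-bit counter word from the BM RAM: select the entry, trigger
 * the access and wait for it to complete, then combine the two 16-bit
 * result registers.
 */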
   1766static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
   1767				    u32 index)
   1768{
   1769	u32 result;
   1770	int err;
   1771
   1772	gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
   1773	gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
   1774				GSWIP_BM_RAM_CTRL_OPMOD,
   1775			      table | GSWIP_BM_RAM_CTRL_BAS,
   1776			      GSWIP_BM_RAM_CTRL);
   1777
   1778	err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
   1779				     GSWIP_BM_RAM_CTRL_BAS);
   1780	if (err) {
   1781		dev_err(priv->dev, "timeout while reading table: %u, index: %u",
   1782			table, index);
   1783		return 0;
   1784	}
   1785
   1786	result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
   1787	result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;
   1788
   1789	return result;
   1790}
   1791
   1792static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
   1793				    uint64_t *data)
   1794{
   1795	struct gswip_priv *priv = ds->priv;
   1796	const struct gswip_rmon_cnt_desc *rmon_cnt;
   1797	int i;
   1798	u64 high;
   1799
   1800	for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
   1801		rmon_cnt = &gswip_rmon_cnt[i];
   1802
   1803		data[i] = gswip_bcm_ram_entry_read(priv, port,
   1804						   rmon_cnt->offset);
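		/* Counters wider than 32 bit span two consecutive RAM
		 * words; read the high half from the next offset.
		 */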
   1805		if (rmon_cnt->size == 2) {
   1806			high = gswip_bcm_ram_entry_read(priv, port,
   1807							rmon_cnt->offset + 1);
   1808			data[i] |= high << 32;
   1809		}
   1810	}
   1811}
   1812
   1813static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
   1814{
   1815	if (sset != ETH_SS_STATS)
   1816		return 0;
   1817
   1818	return ARRAY_SIZE(gswip_rmon_cnt);
   1819}
   1820
   1821static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
   1822	.get_tag_protocol	= gswip_get_tag_protocol,
   1823	.setup			= gswip_setup,
   1824	.port_enable		= gswip_port_enable,
   1825	.port_disable		= gswip_port_disable,
   1826	.port_bridge_join	= gswip_port_bridge_join,
   1827	.port_bridge_leave	= gswip_port_bridge_leave,
   1828	.port_fast_age		= gswip_port_fast_age,
   1829	.port_vlan_filtering	= gswip_port_vlan_filtering,
   1830	.port_vlan_add		= gswip_port_vlan_add,
   1831	.port_vlan_del		= gswip_port_vlan_del,
   1832	.port_stp_state_set	= gswip_port_stp_state_set,
   1833	.port_fdb_add		= gswip_port_fdb_add,
   1834	.port_fdb_del		= gswip_port_fdb_del,
   1835	.port_fdb_dump		= gswip_port_fdb_dump,
   1836	.port_change_mtu	= gswip_port_change_mtu,
   1837	.port_max_mtu		= gswip_port_max_mtu,
   1838	.phylink_get_caps	= gswip_xrx200_phylink_get_caps,
   1839	.phylink_mac_config	= gswip_phylink_mac_config,
   1840	.phylink_mac_link_down	= gswip_phylink_mac_link_down,
   1841	.phylink_mac_link_up	= gswip_phylink_mac_link_up,
   1842	.get_strings		= gswip_get_strings,
   1843	.get_ethtool_stats	= gswip_get_ethtool_stats,
   1844	.get_sset_count		= gswip_get_sset_count,
   1845};
   1846
   1847static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
   1848	.get_tag_protocol	= gswip_get_tag_protocol,
   1849	.setup			= gswip_setup,
   1850	.port_enable		= gswip_port_enable,
   1851	.port_disable		= gswip_port_disable,
   1852	.port_bridge_join	= gswip_port_bridge_join,
   1853	.port_bridge_leave	= gswip_port_bridge_leave,
   1854	.port_fast_age		= gswip_port_fast_age,
   1855	.port_vlan_filtering	= gswip_port_vlan_filtering,
   1856	.port_vlan_add		= gswip_port_vlan_add,
   1857	.port_vlan_del		= gswip_port_vlan_del,
   1858	.port_stp_state_set	= gswip_port_stp_state_set,
   1859	.port_fdb_add		= gswip_port_fdb_add,
   1860	.port_fdb_del		= gswip_port_fdb_del,
   1861	.port_fdb_dump		= gswip_port_fdb_dump,
   1862	.port_change_mtu	= gswip_port_change_mtu,
   1863	.port_max_mtu		= gswip_port_max_mtu,
   1864	.phylink_get_caps	= gswip_xrx300_phylink_get_caps,
   1865	.phylink_mac_config	= gswip_phylink_mac_config,
   1866	.phylink_mac_link_down	= gswip_phylink_mac_link_down,
   1867	.phylink_mac_link_up	= gswip_phylink_mac_link_up,
   1868	.get_strings		= gswip_get_strings,
   1869	.get_ethtool_stats	= gswip_get_ethtool_stats,
   1870	.get_sset_count		= gswip_get_sset_count,
   1871};
   1872
   1873static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
   1874	.fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
   1875	.ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
   1876};
   1877
   1878static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
   1879	.fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
   1880	.ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
   1881};
   1882
   1883static const struct xway_gphy_match_data xrx300_gphy_data = {
   1884	.fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
   1885	.ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
   1886};
   1887
   1888static const struct of_device_id xway_gphy_match[] = {
   1889	{ .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
   1890	{ .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
   1891	{ .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
   1892	{ .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
   1893	{ .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
   1894	{},
   1895};
   1896
   1897static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
   1898{
   1899	struct device *dev = priv->dev;
   1900	const struct firmware *fw;
   1901	void *fw_addr;
   1902	dma_addr_t dma_addr;
   1903	dma_addr_t dev_addr;
   1904	size_t size;
   1905	int ret;
   1906
   1907	ret = clk_prepare_enable(gphy_fw->clk_gate);
   1908	if (ret)
   1909		return ret;
   1910
   1911	reset_control_assert(gphy_fw->reset);
   1912
    1913	/* The vendor BSP uses a 200ms delay after asserting the reset line.
    1914	 * Without this delay some users have observed that the PHY does not
    1915	 * come up on the MDIO bus.
    1916	 */
   1917	msleep(200);
   1918
   1919	ret = request_firmware(&fw, gphy_fw->fw_name, dev);
   1920	if (ret) {
   1921		dev_err(dev, "failed to load firmware: %s, error: %i\n",
   1922			gphy_fw->fw_name, ret);
   1923		return ret;
   1924	}
   1925
   1926	/* GPHY cores need the firmware code in a persistent and contiguous
   1927	 * memory area with a 16 kB boundary aligned start address.
   1928	 */
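	/* Over-allocate by the alignment so that the start address can be
	 * rounded up to the next XRX200_GPHY_FW_ALIGN boundary while the
	 * whole firmware image still fits into the buffer.
	 */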
   1929	size = fw->size + XRX200_GPHY_FW_ALIGN;
   1930
   1931	fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
   1932	if (fw_addr) {
   1933		fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
   1934		dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
   1935		memcpy(fw_addr, fw->data, fw->size);
   1936	} else {
   1937		dev_err(dev, "failed to alloc firmware memory\n");
   1938		release_firmware(fw);
   1939		return -ENOMEM;
   1940	}
   1941
   1942	release_firmware(fw);
   1943
   1944	ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
   1945	if (ret)
   1946		return ret;
   1947
   1948	reset_control_deassert(gphy_fw->reset);
   1949
   1950	return ret;
   1951}
   1952
   1953static int gswip_gphy_fw_probe(struct gswip_priv *priv,
   1954			       struct gswip_gphy_fw *gphy_fw,
   1955			       struct device_node *gphy_fw_np, int i)
   1956{
   1957	struct device *dev = priv->dev;
   1958	u32 gphy_mode;
   1959	int ret;
   1960	char gphyname[10];
   1961
   1962	snprintf(gphyname, sizeof(gphyname), "gphy%d", i);
   1963
   1964	gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
   1965	if (IS_ERR(gphy_fw->clk_gate)) {
   1966		dev_err(dev, "Failed to lookup gate clock\n");
   1967		return PTR_ERR(gphy_fw->clk_gate);
   1968	}
   1969
   1970	ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
   1971	if (ret)
   1972		return ret;
   1973
   1974	ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
   1975	/* Default to GE mode */
   1976	if (ret)
   1977		gphy_mode = GPHY_MODE_GE;
   1978
   1979	switch (gphy_mode) {
   1980	case GPHY_MODE_FE:
   1981		gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
   1982		break;
   1983	case GPHY_MODE_GE:
   1984		gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
   1985		break;
   1986	default:
   1987		dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
   1988		return -EINVAL;
   1989	}
   1990
   1991	gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
   1992	if (IS_ERR(gphy_fw->reset)) {
   1993		if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER)
   1994			dev_err(dev, "Failed to lookup gphy reset\n");
   1995		return PTR_ERR(gphy_fw->reset);
   1996	}
   1997
   1998	return gswip_gphy_fw_load(priv, gphy_fw);
   1999}
   2000
   2001static void gswip_gphy_fw_remove(struct gswip_priv *priv,
   2002				 struct gswip_gphy_fw *gphy_fw)
   2003{
   2004	int ret;
   2005
   2006	/* check if the device was fully probed */
   2007	if (!gphy_fw->fw_name)
   2008		return;
   2009
   2010	ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
   2011	if (ret)
   2012		dev_err(priv->dev, "can not reset GPHY FW pointer");
   2013
   2014	clk_disable_unprepare(gphy_fw->clk_gate);
   2015
   2016	reset_control_put(gphy_fw->reset);
   2017}
   2018
   2019static int gswip_gphy_fw_list(struct gswip_priv *priv,
   2020			      struct device_node *gphy_fw_list_np, u32 version)
   2021{
   2022	struct device *dev = priv->dev;
   2023	struct device_node *gphy_fw_np;
   2024	const struct of_device_id *match;
   2025	int err;
   2026	int i = 0;
   2027
    2028	/* VRX200 rev 1.1 contains GSWIP 2.0 and needs the older GPHY
    2029	 * firmware, while VRX200 rev 1.2 contains GSWIP 2.1 and needs a
    2030	 * different GPHY firmware.
    2031	 */
   2032	if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
   2033		switch (version) {
   2034		case GSWIP_VERSION_2_0:
   2035			priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
   2036			break;
   2037		case GSWIP_VERSION_2_1:
   2038			priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
   2039			break;
   2040		default:
   2041			dev_err(dev, "unknown GSWIP version: 0x%x", version);
   2042			return -ENOENT;
   2043		}
   2044	}
   2045
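	/* The more specific compatibles provide the firmware names via the
	 * match data; the generic lantiq,xrx200-gphy-fw compatible relies on
	 * the version based selection above.
	 */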
   2046	match = of_match_node(xway_gphy_match, gphy_fw_list_np);
   2047	if (match && match->data)
   2048		priv->gphy_fw_name_cfg = match->data;
   2049
   2050	if (!priv->gphy_fw_name_cfg) {
   2051		dev_err(dev, "GPHY compatible type not supported");
   2052		return -ENOENT;
   2053	}
   2054
   2055	priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
   2056	if (!priv->num_gphy_fw)
   2057		return -ENOENT;
   2058
   2059	priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
   2060							   "lantiq,rcu");
   2061	if (IS_ERR(priv->rcu_regmap))
   2062		return PTR_ERR(priv->rcu_regmap);
   2063
   2064	priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
   2065					   sizeof(*priv->gphy_fw),
   2066					   GFP_KERNEL | __GFP_ZERO);
   2067	if (!priv->gphy_fw)
   2068		return -ENOMEM;
   2069
   2070	for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
   2071		err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
   2072					  gphy_fw_np, i);
   2073		if (err) {
   2074			of_node_put(gphy_fw_np);
   2075			goto remove_gphy;
   2076		}
   2077		i++;
   2078	}
   2079
   2080	/* The standalone PHY11G requires 300ms to be fully
   2081	 * initialized and ready for any MDIO communication after being
   2082	 * taken out of reset. For the SoC-internal GPHY variant there
   2083	 * is no (known) documentation for the minimum time after a
   2084	 * reset. Use the same value as for the standalone variant as
   2085	 * some users have reported internal PHYs not being detected
   2086	 * without any delay.
   2087	 */
   2088	msleep(300);
   2089
   2090	return 0;
   2091
   2092remove_gphy:
   2093	for (i = 0; i < priv->num_gphy_fw; i++)
   2094		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
   2095	return err;
   2096}
   2097
   2098static int gswip_probe(struct platform_device *pdev)
   2099{
   2100	struct gswip_priv *priv;
   2101	struct device_node *np, *mdio_np, *gphy_fw_np;
   2102	struct device *dev = &pdev->dev;
   2103	int err;
   2104	int i;
   2105	u32 version;
   2106
   2107	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
   2108	if (!priv)
   2109		return -ENOMEM;
   2110
   2111	priv->gswip = devm_platform_ioremap_resource(pdev, 0);
   2112	if (IS_ERR(priv->gswip))
   2113		return PTR_ERR(priv->gswip);
   2114
   2115	priv->mdio = devm_platform_ioremap_resource(pdev, 1);
   2116	if (IS_ERR(priv->mdio))
   2117		return PTR_ERR(priv->mdio);
   2118
   2119	priv->mii = devm_platform_ioremap_resource(pdev, 2);
   2120	if (IS_ERR(priv->mii))
   2121		return PTR_ERR(priv->mii);
   2122
   2123	priv->hw_info = of_device_get_match_data(dev);
   2124	if (!priv->hw_info)
   2125		return -EINVAL;
   2126
   2127	priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
   2128	if (!priv->ds)
   2129		return -ENOMEM;
   2130
   2131	priv->ds->dev = dev;
   2132	priv->ds->num_ports = priv->hw_info->max_ports;
   2133	priv->ds->priv = priv;
   2134	priv->ds->ops = priv->hw_info->ops;
   2135	priv->dev = dev;
   2136	mutex_init(&priv->pce_table_lock);
   2137	version = gswip_switch_r(priv, GSWIP_VERSION);
   2138
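	/* Reject device trees whose compatible does not match the GSWIP
	 * revision found in hardware.
	 */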
   2139	np = dev->of_node;
   2140	switch (version) {
   2141	case GSWIP_VERSION_2_0:
   2142	case GSWIP_VERSION_2_1:
   2143		if (!of_device_is_compatible(np, "lantiq,xrx200-gswip"))
   2144			return -EINVAL;
   2145		break;
   2146	case GSWIP_VERSION_2_2:
   2147	case GSWIP_VERSION_2_2_ETC:
   2148		if (!of_device_is_compatible(np, "lantiq,xrx300-gswip") &&
   2149		    !of_device_is_compatible(np, "lantiq,xrx330-gswip"))
   2150			return -EINVAL;
   2151		break;
   2152	default:
   2153		dev_err(dev, "unknown GSWIP version: 0x%x", version);
   2154		return -ENOENT;
   2155	}
   2156
    2157	/* load the firmware for the internal GPHYs */
   2158	gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
   2159	if (gphy_fw_np) {
   2160		err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
   2161		of_node_put(gphy_fw_np);
   2162		if (err) {
   2163			dev_err(dev, "gphy fw probe failed\n");
   2164			return err;
   2165		}
   2166	}
   2167
   2168	/* bring up the mdio bus */
   2169	mdio_np = of_get_compatible_child(dev->of_node, "lantiq,xrx200-mdio");
   2170	if (mdio_np) {
   2171		err = gswip_mdio(priv, mdio_np);
   2172		if (err) {
   2173			dev_err(dev, "mdio probe failed\n");
   2174			goto put_mdio_node;
   2175		}
   2176	}
   2177
   2178	err = dsa_register_switch(priv->ds);
   2179	if (err) {
   2180		dev_err(dev, "dsa switch register failed: %i\n", err);
   2181		goto mdio_bus;
   2182	}
   2183	if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
   2184		dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
   2185			priv->hw_info->cpu_port);
   2186		err = -EINVAL;
   2187		goto disable_switch;
   2188	}
   2189
   2190	platform_set_drvdata(pdev, priv);
   2191
   2192	dev_info(dev, "probed GSWIP version %lx mod %lx\n",
   2193		 (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
   2194		 (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
   2195	return 0;
   2196
   2197disable_switch:
   2198	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
   2199	dsa_unregister_switch(priv->ds);
   2200mdio_bus:
   2201	if (mdio_np) {
   2202		mdiobus_unregister(priv->ds->slave_mii_bus);
   2203		mdiobus_free(priv->ds->slave_mii_bus);
   2204	}
   2205put_mdio_node:
   2206	of_node_put(mdio_np);
   2207	for (i = 0; i < priv->num_gphy_fw; i++)
   2208		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
   2209	return err;
   2210}
   2211
   2212static int gswip_remove(struct platform_device *pdev)
   2213{
   2214	struct gswip_priv *priv = platform_get_drvdata(pdev);
   2215	int i;
   2216
   2217	if (!priv)
   2218		return 0;
   2219
   2220	/* disable the switch */
   2221	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
   2222
   2223	dsa_unregister_switch(priv->ds);
   2224
   2225	if (priv->ds->slave_mii_bus) {
   2226		mdiobus_unregister(priv->ds->slave_mii_bus);
   2227		of_node_put(priv->ds->slave_mii_bus->dev.of_node);
   2228		mdiobus_free(priv->ds->slave_mii_bus);
   2229	}
   2230
   2231	for (i = 0; i < priv->num_gphy_fw; i++)
   2232		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
   2233
   2234	platform_set_drvdata(pdev, NULL);
   2235
   2236	return 0;
   2237}
   2238
   2239static void gswip_shutdown(struct platform_device *pdev)
   2240{
   2241	struct gswip_priv *priv = platform_get_drvdata(pdev);
   2242
   2243	if (!priv)
   2244		return;
   2245
   2246	dsa_switch_shutdown(priv->ds);
   2247
   2248	platform_set_drvdata(pdev, NULL);
   2249}
   2250
   2251static const struct gswip_hw_info gswip_xrx200 = {
   2252	.max_ports = 7,
   2253	.cpu_port = 6,
   2254	.ops = &gswip_xrx200_switch_ops,
   2255};
   2256
   2257static const struct gswip_hw_info gswip_xrx300 = {
   2258	.max_ports = 7,
   2259	.cpu_port = 6,
   2260	.ops = &gswip_xrx300_switch_ops,
   2261};
   2262
   2263static const struct of_device_id gswip_of_match[] = {
   2264	{ .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
   2265	{ .compatible = "lantiq,xrx300-gswip", .data = &gswip_xrx300 },
   2266	{ .compatible = "lantiq,xrx330-gswip", .data = &gswip_xrx300 },
   2267	{},
   2268};
   2269MODULE_DEVICE_TABLE(of, gswip_of_match);
   2270
   2271static struct platform_driver gswip_driver = {
   2272	.probe = gswip_probe,
   2273	.remove = gswip_remove,
   2274	.shutdown = gswip_shutdown,
   2275	.driver = {
   2276		.name = "gswip",
   2277		.of_match_table = gswip_of_match,
   2278	},
   2279};
   2280
   2281module_platform_driver(gswip_driver);
   2282
   2283MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
   2284MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
   2285MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
   2286MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
   2287MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
   2288MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
   2289MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
   2290MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
   2291MODULE_LICENSE("GPL v2");