cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ocelot.c (94085B)


      1// SPDX-License-Identifier: (GPL-2.0 OR MIT)
      2/*
      3 * Microsemi Ocelot Switch driver
      4 *
      5 * Copyright (c) 2017 Microsemi Corporation
      6 */
      7#include <linux/dsa/ocelot.h>
      8#include <linux/if_bridge.h>
      9#include <linux/ptp_classify.h>
     10#include <soc/mscc/ocelot_vcap.h>
     11#include "ocelot.h"
     12#include "ocelot_vcap.h"
     13
     14#define TABLE_UPDATE_SLEEP_US 10
     15#define TABLE_UPDATE_TIMEOUT_US 100000
     16#define OCELOT_RSV_VLAN_RANGE_START 4000
     17
     18struct ocelot_mact_entry {
     19	u8 mac[ETH_ALEN];
     20	u16 vid;
     21	enum macaccess_entry_type type;
     22};
     23
     24/* Caller must hold &ocelot->mact_lock */
     25static inline u32 ocelot_mact_read_macaccess(struct ocelot *ocelot)
     26{
     27	return ocelot_read(ocelot, ANA_TABLES_MACACCESS);
     28}
     29
     30/* Caller must hold &ocelot->mact_lock */
     31static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
     32{
     33	u32 val;
     34
     35	return readx_poll_timeout(ocelot_mact_read_macaccess,
     36		ocelot, val,
     37		(val & ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M) ==
     38		MACACCESS_CMD_IDLE,
     39		TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
     40}
     41
     42/* Caller must hold &ocelot->mact_lock */
     43static void ocelot_mact_select(struct ocelot *ocelot,
     44			       const unsigned char mac[ETH_ALEN],
     45			       unsigned int vid)
     46{
     47	u32 macl = 0, mach = 0;
     48
      49	/* Set the MAC address to handle and the associated VLAN in a format
     50	 * understood by the hardware.
     51	 */
     52	mach |= vid    << 16;
     53	mach |= mac[0] << 8;
     54	mach |= mac[1] << 0;
     55	macl |= mac[2] << 24;
     56	macl |= mac[3] << 16;
     57	macl |= mac[4] << 8;
     58	macl |= mac[5] << 0;
     59
     60	ocelot_write(ocelot, macl, ANA_TABLES_MACLDATA);
     61	ocelot_write(ocelot, mach, ANA_TABLES_MACHDATA);
     62
     63}
     64
     65static int __ocelot_mact_learn(struct ocelot *ocelot, int port,
     66			       const unsigned char mac[ETH_ALEN],
     67			       unsigned int vid, enum macaccess_entry_type type)
     68{
     69	u32 cmd = ANA_TABLES_MACACCESS_VALID |
     70		ANA_TABLES_MACACCESS_DEST_IDX(port) |
     71		ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
     72		ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
     73	unsigned int mc_ports;
     74	int err;
     75
     76	/* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
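	/* For MACv4/MACv6 multicast entries, the destination port mask is
	 * encoded in the low bytes of the MAC address, so the CPU port
	 * membership is recovered from there rather than from DEST_IDX.
	 */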
     77	if (type == ENTRYTYPE_MACv4)
     78		mc_ports = (mac[1] << 8) | mac[2];
     79	else if (type == ENTRYTYPE_MACv6)
     80		mc_ports = (mac[0] << 8) | mac[1];
     81	else
     82		mc_ports = 0;
     83
     84	if (mc_ports & BIT(ocelot->num_phys_ports))
     85		cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
     86
     87	ocelot_mact_select(ocelot, mac, vid);
     88
     89	/* Issue a write command */
     90	ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);
     91
     92	err = ocelot_mact_wait_for_completion(ocelot);
     93
     94	return err;
     95}
     96
     97int ocelot_mact_learn(struct ocelot *ocelot, int port,
     98		      const unsigned char mac[ETH_ALEN],
     99		      unsigned int vid, enum macaccess_entry_type type)
    100{
    101	int ret;
    102
    103	mutex_lock(&ocelot->mact_lock);
    104	ret = __ocelot_mact_learn(ocelot, port, mac, vid, type);
    105	mutex_unlock(&ocelot->mact_lock);
    106
    107	return ret;
    108}
    109EXPORT_SYMBOL(ocelot_mact_learn);
    110
    111int ocelot_mact_forget(struct ocelot *ocelot,
    112		       const unsigned char mac[ETH_ALEN], unsigned int vid)
    113{
    114	int err;
    115
    116	mutex_lock(&ocelot->mact_lock);
    117
    118	ocelot_mact_select(ocelot, mac, vid);
    119
    120	/* Issue a forget command */
    121	ocelot_write(ocelot,
    122		     ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_FORGET),
    123		     ANA_TABLES_MACACCESS);
    124
    125	err = ocelot_mact_wait_for_completion(ocelot);
    126
    127	mutex_unlock(&ocelot->mact_lock);
    128
    129	return err;
    130}
    131EXPORT_SYMBOL(ocelot_mact_forget);
    132
    133int ocelot_mact_lookup(struct ocelot *ocelot, int *dst_idx,
    134		       const unsigned char mac[ETH_ALEN],
    135		       unsigned int vid, enum macaccess_entry_type *type)
    136{
    137	int val;
    138
    139	mutex_lock(&ocelot->mact_lock);
    140
    141	ocelot_mact_select(ocelot, mac, vid);
    142
    143	/* Issue a read command with MACACCESS_VALID=1. */
    144	ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID |
    145		     ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ),
    146		     ANA_TABLES_MACACCESS);
    147
    148	if (ocelot_mact_wait_for_completion(ocelot)) {
    149		mutex_unlock(&ocelot->mact_lock);
    150		return -ETIMEDOUT;
    151	}
    152
    153	/* Read back the entry flags */
    154	val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
    155
    156	mutex_unlock(&ocelot->mact_lock);
    157
    158	if (!(val & ANA_TABLES_MACACCESS_VALID))
    159		return -ENOENT;
    160
    161	*dst_idx = ANA_TABLES_MACACCESS_DEST_IDX_X(val);
    162	*type = ANA_TABLES_MACACCESS_ENTRYTYPE_X(val);
    163
    164	return 0;
    165}
    166EXPORT_SYMBOL(ocelot_mact_lookup);
    167
    168int ocelot_mact_learn_streamdata(struct ocelot *ocelot, int dst_idx,
    169				 const unsigned char mac[ETH_ALEN],
    170				 unsigned int vid,
    171				 enum macaccess_entry_type type,
    172				 int sfid, int ssid)
    173{
    174	int ret;
    175
    176	mutex_lock(&ocelot->mact_lock);
    177
    178	ocelot_write(ocelot,
    179		     (sfid < 0 ? 0 : ANA_TABLES_STREAMDATA_SFID_VALID) |
    180		     ANA_TABLES_STREAMDATA_SFID(sfid) |
    181		     (ssid < 0 ? 0 : ANA_TABLES_STREAMDATA_SSID_VALID) |
    182		     ANA_TABLES_STREAMDATA_SSID(ssid),
    183		     ANA_TABLES_STREAMDATA);
    184
    185	ret = __ocelot_mact_learn(ocelot, dst_idx, mac, vid, type);
    186
    187	mutex_unlock(&ocelot->mact_lock);
    188
    189	return ret;
    190}
    191EXPORT_SYMBOL(ocelot_mact_learn_streamdata);
    192
    193static void ocelot_mact_init(struct ocelot *ocelot)
    194{
     195	/* Configure the attributes of the learning mode entries:
     196	 * - Do not copy the frame to the CPU extraction queues.
     197	 * - Use the vlan and mac_copy for dmac lookup.
     198	 */
    199	ocelot_rmw(ocelot, 0,
    200		   ANA_AGENCTRL_LEARN_CPU_COPY | ANA_AGENCTRL_IGNORE_DMAC_FLAGS
    201		   | ANA_AGENCTRL_LEARN_FWD_KILL
    202		   | ANA_AGENCTRL_LEARN_IGNORE_VLAN,
    203		   ANA_AGENCTRL);
    204
    205	/* Clear the MAC table. We are not concurrent with anyone, so
    206	 * holding &ocelot->mact_lock is pointless.
    207	 */
    208	ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
    209}
    210
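/* Enable the VCAP TCAM lookups for this port: IS2 (including its IPv6
 * handling mode), IS1, and the ES0 egress rewriter block.
 */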
    211static void ocelot_vcap_enable(struct ocelot *ocelot, int port)
    212{
    213	ocelot_write_gix(ocelot, ANA_PORT_VCAP_S2_CFG_S2_ENA |
    214			 ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(0xa),
    215			 ANA_PORT_VCAP_S2_CFG, port);
    216
    217	ocelot_write_gix(ocelot, ANA_PORT_VCAP_CFG_S1_ENA,
    218			 ANA_PORT_VCAP_CFG, port);
    219
    220	ocelot_rmw_gix(ocelot, REW_PORT_CFG_ES0_EN,
    221		       REW_PORT_CFG_ES0_EN,
    222		       REW_PORT_CFG, port);
    223}
    224
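/* The switch can offload only one VLAN-aware bridge at a time; reject any
 * configuration in which ports are members of more than one such bridge.
 */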
    225static int ocelot_single_vlan_aware_bridge(struct ocelot *ocelot,
    226					   struct netlink_ext_ack *extack)
    227{
    228	struct net_device *bridge = NULL;
    229	int port;
    230
    231	for (port = 0; port < ocelot->num_phys_ports; port++) {
    232		struct ocelot_port *ocelot_port = ocelot->ports[port];
    233
    234		if (!ocelot_port || !ocelot_port->bridge ||
    235		    !br_vlan_enabled(ocelot_port->bridge))
    236			continue;
    237
    238		if (!bridge) {
    239			bridge = ocelot_port->bridge;
    240			continue;
    241		}
    242
    243		if (bridge == ocelot_port->bridge)
    244			continue;
    245
    246		NL_SET_ERR_MSG_MOD(extack,
    247				   "Only one VLAN-aware bridge is supported");
    248		return -EBUSY;
    249	}
    250
    251	return 0;
    252}
    253
    254static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
    255{
    256	return ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
    257}
    258
    259static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
    260{
    261	u32 val;
    262
    263	return readx_poll_timeout(ocelot_vlant_read_vlanaccess,
    264		ocelot,
    265		val,
    266		(val & ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M) ==
    267		ANA_TABLES_VLANACCESS_CMD_IDLE,
    268		TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
    269}
    270
    271static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
    272{
    273	/* Select the VID to configure */
    274	ocelot_write(ocelot, ANA_TABLES_VLANTIDX_V_INDEX(vid),
    275		     ANA_TABLES_VLANTIDX);
    276	/* Set the vlan port members mask and issue a write command */
    277	ocelot_write(ocelot, ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(mask) |
    278			     ANA_TABLES_VLANACCESS_CMD_WRITE,
    279		     ANA_TABLES_VLANACCESS);
    280
    281	return ocelot_vlant_wait_for_completion(ocelot);
    282}
    283
    284static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port)
    285{
    286	struct ocelot_bridge_vlan *vlan;
    287	int num_untagged = 0;
    288
    289	list_for_each_entry(vlan, &ocelot->vlans, list) {
    290		if (!(vlan->portmask & BIT(port)))
    291			continue;
    292
    293		if (vlan->untagged & BIT(port))
    294			num_untagged++;
    295	}
    296
    297	return num_untagged;
    298}
    299
    300static int ocelot_port_num_tagged_vlans(struct ocelot *ocelot, int port)
    301{
    302	struct ocelot_bridge_vlan *vlan;
    303	int num_tagged = 0;
    304
    305	list_for_each_entry(vlan, &ocelot->vlans, list) {
    306		if (!(vlan->portmask & BIT(port)))
    307			continue;
    308
    309		if (!(vlan->untagged & BIT(port)))
    310			num_tagged++;
    311	}
    312
    313	return num_tagged;
    314}
    315
    316/* We use native VLAN when we have to mix egress-tagged VLANs with exactly
    317 * _one_ egress-untagged VLAN (_the_ native VLAN)
    318 */
    319static bool ocelot_port_uses_native_vlan(struct ocelot *ocelot, int port)
    320{
    321	return ocelot_port_num_tagged_vlans(ocelot, port) &&
    322	       ocelot_port_num_untagged_vlans(ocelot, port) == 1;
    323}
    324
    325static struct ocelot_bridge_vlan *
    326ocelot_port_find_native_vlan(struct ocelot *ocelot, int port)
    327{
    328	struct ocelot_bridge_vlan *vlan;
    329
    330	list_for_each_entry(vlan, &ocelot->vlans, list)
    331		if (vlan->portmask & BIT(port) && vlan->untagged & BIT(port))
    332			return vlan;
    333
    334	return NULL;
    335}
    336
     337/* Keep REW_TAG_CFG_TAG_CFG and, if applicable, REW_PORT_VLAN_CFG_PORT_VID
     338 * in sync with the bridge VLAN table and the VLAN awareness state of the
     339 * port.
     340 */
    341static void ocelot_port_manage_port_tag(struct ocelot *ocelot, int port)
    342{
    343	struct ocelot_port *ocelot_port = ocelot->ports[port];
    344	enum ocelot_port_tag_config tag_cfg;
    345	bool uses_native_vlan = false;
    346
    347	if (ocelot_port->vlan_aware) {
    348		uses_native_vlan = ocelot_port_uses_native_vlan(ocelot, port);
    349
    350		if (uses_native_vlan)
    351			tag_cfg = OCELOT_PORT_TAG_NATIVE;
    352		else if (ocelot_port_num_untagged_vlans(ocelot, port))
    353			tag_cfg = OCELOT_PORT_TAG_DISABLED;
    354		else
    355			tag_cfg = OCELOT_PORT_TAG_TRUNK;
    356	} else {
    357		tag_cfg = OCELOT_PORT_TAG_DISABLED;
    358	}
    359
    360	ocelot_rmw_gix(ocelot, REW_TAG_CFG_TAG_CFG(tag_cfg),
    361		       REW_TAG_CFG_TAG_CFG_M,
    362		       REW_TAG_CFG, port);
    363
    364	if (uses_native_vlan) {
    365		struct ocelot_bridge_vlan *native_vlan;
    366
    367		/* Not having a native VLAN is impossible, because
    368		 * ocelot_port_num_untagged_vlans has returned 1.
    369		 * So there is no use in checking for NULL here.
    370		 */
    371		native_vlan = ocelot_port_find_native_vlan(ocelot, port);
    372
    373		ocelot_rmw_gix(ocelot,
    374			       REW_PORT_VLAN_CFG_PORT_VID(native_vlan->vid),
    375			       REW_PORT_VLAN_CFG_PORT_VID_M,
    376			       REW_PORT_VLAN_CFG, port);
    377	}
    378}
    379
    380int ocelot_bridge_num_find(struct ocelot *ocelot,
    381			   const struct net_device *bridge)
    382{
    383	int port;
    384
    385	for (port = 0; port < ocelot->num_phys_ports; port++) {
    386		struct ocelot_port *ocelot_port = ocelot->ports[port];
    387
    388		if (ocelot_port && ocelot_port->bridge == bridge)
    389			return ocelot_port->bridge_num;
    390	}
    391
    392	return -1;
    393}
    394EXPORT_SYMBOL_GPL(ocelot_bridge_num_find);
    395
    396static u16 ocelot_vlan_unaware_pvid(struct ocelot *ocelot,
    397				    const struct net_device *bridge)
    398{
    399	int bridge_num;
    400
    401	/* Standalone ports use VID 0 */
    402	if (!bridge)
    403		return 0;
    404
    405	bridge_num = ocelot_bridge_num_find(ocelot, bridge);
    406	if (WARN_ON(bridge_num < 0))
    407		return 0;
    408
    409	/* VLAN-unaware bridges use a reserved VID going from 4095 downwards */
    410	return VLAN_N_VID - bridge_num - 1;
    411}
    412
     413/* Default VLAN to classify untagged frames to (may be zero) */
    414static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
    415				 const struct ocelot_bridge_vlan *pvid_vlan)
    416{
    417	struct ocelot_port *ocelot_port = ocelot->ports[port];
    418	u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge);
    419	u32 val = 0;
    420
    421	ocelot_port->pvid_vlan = pvid_vlan;
    422
    423	if (ocelot_port->vlan_aware && pvid_vlan)
    424		pvid = pvid_vlan->vid;
    425
    426	ocelot_rmw_gix(ocelot,
    427		       ANA_PORT_VLAN_CFG_VLAN_VID(pvid),
    428		       ANA_PORT_VLAN_CFG_VLAN_VID_M,
    429		       ANA_PORT_VLAN_CFG, port);
    430
    431	/* If there's no pvid, we should drop not only untagged traffic (which
    432	 * happens automatically), but also 802.1p traffic which gets
    433	 * classified to VLAN 0, but that is always in our RX filter, so it
    434	 * would get accepted were it not for this setting.
    435	 */
    436	if (!pvid_vlan && ocelot_port->vlan_aware)
    437		val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
    438		      ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
    439
    440	ocelot_rmw_gix(ocelot, val,
    441		       ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
    442		       ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
    443		       ANA_PORT_DROP_CFG, port);
    444}
    445
    446static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot,
    447							  u16 vid)
    448{
    449	struct ocelot_bridge_vlan *vlan;
    450
    451	list_for_each_entry(vlan, &ocelot->vlans, list)
    452		if (vlan->vid == vid)
    453			return vlan;
    454
    455	return NULL;
    456}
    457
    458static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid,
    459				  bool untagged)
    460{
    461	struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid);
    462	unsigned long portmask;
    463	int err;
    464
    465	if (vlan) {
    466		portmask = vlan->portmask | BIT(port);
    467
    468		err = ocelot_vlant_set_mask(ocelot, vid, portmask);
    469		if (err)
    470			return err;
    471
    472		vlan->portmask = portmask;
     473		/* Bridge VLANs can be overwritten with a different
     474		 * egress-tagging setting, so make sure an existing untagged
     475		 * VID is overridden with a tagged one when that happens.
     476		 */
    477		if (untagged)
    478			vlan->untagged |= BIT(port);
    479		else
    480			vlan->untagged &= ~BIT(port);
    481
    482		return 0;
    483	}
    484
    485	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
    486	if (!vlan)
    487		return -ENOMEM;
    488
    489	portmask = BIT(port);
    490
    491	err = ocelot_vlant_set_mask(ocelot, vid, portmask);
    492	if (err) {
    493		kfree(vlan);
    494		return err;
    495	}
    496
    497	vlan->vid = vid;
    498	vlan->portmask = portmask;
    499	if (untagged)
    500		vlan->untagged = BIT(port);
    501	INIT_LIST_HEAD(&vlan->list);
    502	list_add_tail(&vlan->list, &ocelot->vlans);
    503
    504	return 0;
    505}
    506
    507static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid)
    508{
    509	struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid);
    510	unsigned long portmask;
    511	int err;
    512
    513	if (!vlan)
    514		return 0;
    515
    516	portmask = vlan->portmask & ~BIT(port);
    517
    518	err = ocelot_vlant_set_mask(ocelot, vid, portmask);
    519	if (err)
    520		return err;
    521
    522	vlan->portmask = portmask;
    523	if (vlan->portmask)
    524		return 0;
    525
    526	list_del(&vlan->list);
    527	kfree(vlan);
    528
    529	return 0;
    530}
    531
    532static int ocelot_add_vlan_unaware_pvid(struct ocelot *ocelot, int port,
    533					const struct net_device *bridge)
    534{
    535	u16 vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
    536
    537	return ocelot_vlan_member_add(ocelot, port, vid, true);
    538}
    539
    540static int ocelot_del_vlan_unaware_pvid(struct ocelot *ocelot, int port,
    541					const struct net_device *bridge)
    542{
    543	u16 vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
    544
    545	return ocelot_vlan_member_del(ocelot, port, vid);
    546}
    547
    548int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
    549			       bool vlan_aware, struct netlink_ext_ack *extack)
    550{
    551	struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
    552	struct ocelot_port *ocelot_port = ocelot->ports[port];
    553	struct ocelot_vcap_filter *filter;
    554	int err = 0;
    555	u32 val;
    556
    557	list_for_each_entry(filter, &block->rules, list) {
    558		if (filter->ingress_port_mask & BIT(port) &&
    559		    filter->action.vid_replace_ena) {
    560			NL_SET_ERR_MSG_MOD(extack,
    561					   "Cannot change VLAN state with vlan modify rules active");
    562			return -EBUSY;
    563		}
    564	}
    565
    566	err = ocelot_single_vlan_aware_bridge(ocelot, extack);
    567	if (err)
    568		return err;
    569
    570	if (vlan_aware)
    571		err = ocelot_del_vlan_unaware_pvid(ocelot, port,
    572						   ocelot_port->bridge);
    573	else if (ocelot_port->bridge)
    574		err = ocelot_add_vlan_unaware_pvid(ocelot, port,
    575						   ocelot_port->bridge);
    576	if (err)
    577		return err;
    578
    579	ocelot_port->vlan_aware = vlan_aware;
    580
    581	if (vlan_aware)
    582		val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
    583		      ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
    584	else
    585		val = 0;
    586	ocelot_rmw_gix(ocelot, val,
    587		       ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
    588		       ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
    589		       ANA_PORT_VLAN_CFG, port);
    590
    591	ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
    592	ocelot_port_manage_port_tag(ocelot, port);
    593
    594	return 0;
    595}
    596EXPORT_SYMBOL(ocelot_port_vlan_filtering);
    597
    598int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid,
    599			bool untagged, struct netlink_ext_ack *extack)
    600{
    601	if (untagged) {
     602		/* We are adding an egress-untagged VLAN */
    603		if (ocelot_port_uses_native_vlan(ocelot, port)) {
    604			NL_SET_ERR_MSG_MOD(extack,
    605					   "Port with egress-tagged VLANs cannot have more than one egress-untagged (native) VLAN");
    606			return -EBUSY;
    607		}
    608	} else {
    609		/* We are adding an egress-tagged VLAN */
    610		if (ocelot_port_num_untagged_vlans(ocelot, port) > 1) {
    611			NL_SET_ERR_MSG_MOD(extack,
    612					   "Port with more than one egress-untagged VLAN cannot have egress-tagged VLANs");
    613			return -EBUSY;
    614		}
    615	}
    616
    617	if (vid > OCELOT_RSV_VLAN_RANGE_START) {
    618		NL_SET_ERR_MSG_MOD(extack,
    619				   "VLAN range 4000-4095 reserved for VLAN-unaware bridging");
    620		return -EBUSY;
    621	}
    622
    623	return 0;
    624}
    625EXPORT_SYMBOL(ocelot_vlan_prepare);
    626
    627int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
    628		    bool untagged)
    629{
    630	int err;
    631
    632	/* Ignore VID 0 added to our RX filter by the 8021q module, since
    633	 * that collides with OCELOT_STANDALONE_PVID and changes it from
    634	 * egress-untagged to egress-tagged.
    635	 */
    636	if (!vid)
    637		return 0;
    638
    639	err = ocelot_vlan_member_add(ocelot, port, vid, untagged);
    640	if (err)
    641		return err;
    642
    643	/* Default ingress vlan classification */
    644	if (pvid)
    645		ocelot_port_set_pvid(ocelot, port,
    646				     ocelot_bridge_vlan_find(ocelot, vid));
    647
     648	/* Untagged egress VLAN classification */
    649	ocelot_port_manage_port_tag(ocelot, port);
    650
    651	return 0;
    652}
    653EXPORT_SYMBOL(ocelot_vlan_add);
    654
    655int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
    656{
    657	struct ocelot_port *ocelot_port = ocelot->ports[port];
    658	bool del_pvid = false;
    659	int err;
    660
    661	if (!vid)
    662		return 0;
    663
    664	if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
    665		del_pvid = true;
    666
    667	err = ocelot_vlan_member_del(ocelot, port, vid);
    668	if (err)
    669		return err;
    670
    671	/* Ingress */
    672	if (del_pvid)
    673		ocelot_port_set_pvid(ocelot, port, NULL);
    674
    675	/* Egress */
    676	ocelot_port_manage_port_tag(ocelot, port);
    677
    678	return 0;
    679}
    680EXPORT_SYMBOL(ocelot_vlan_del);
    681
    682static void ocelot_vlan_init(struct ocelot *ocelot)
    683{
    684	unsigned long all_ports = GENMASK(ocelot->num_phys_ports - 1, 0);
    685	u16 port, vid;
    686
    687	/* Clear VLAN table, by default all ports are members of all VLANs */
    688	ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT,
    689		     ANA_TABLES_VLANACCESS);
    690	ocelot_vlant_wait_for_completion(ocelot);
    691
    692	/* Configure the port VLAN memberships */
    693	for (vid = 1; vid < VLAN_N_VID; vid++)
    694		ocelot_vlant_set_mask(ocelot, vid, 0);
    695
    696	/* We need VID 0 to get traffic on standalone ports.
    697	 * It is added automatically if the 8021q module is loaded, but we
    698	 * can't rely on that since it might not be.
    699	 */
    700	ocelot_vlant_set_mask(ocelot, OCELOT_STANDALONE_PVID, all_ports);
    701
    702	/* Set vlan ingress filter mask to all ports but the CPU port by
    703	 * default.
    704	 */
    705	ocelot_write(ocelot, all_ports, ANA_VLANMASK);
    706
    707	for (port = 0; port < ocelot->num_phys_ports; port++) {
    708		ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port);
    709		ocelot_write_gix(ocelot, 0, REW_TAG_CFG, port);
    710	}
    711}
    712
    713static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
    714{
    715	return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port);
    716}
    717
    718static int ocelot_port_flush(struct ocelot *ocelot, int port)
    719{
    720	unsigned int pause_ena;
    721	int err, val;
    722
    723	/* Disable dequeuing from the egress queues */
    724	ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS,
    725		       QSYS_PORT_MODE_DEQUEUE_DIS,
    726		       QSYS_PORT_MODE, port);
    727
    728	/* Disable flow control */
    729	ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena);
    730	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
    731
    732	/* Disable priority flow control */
    733	ocelot_fields_write(ocelot, port,
    734			    QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0);
    735
    736	/* Wait at least the time it takes to receive a frame of maximum length
    737	 * at the port.
    738	 * Worst-case delays for 10 kilobyte jumbo frames are:
    739	 * 8 ms on a 10M port
    740	 * 800 μs on a 100M port
    741	 * 80 μs on a 1G port
    742	 * 32 μs on a 2.5G port
    743	 */
    744	usleep_range(8000, 10000);
    745
    746	/* Disable half duplex backpressure. */
    747	ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE,
    748		       SYS_FRONT_PORT_MODE, port);
    749
    750	/* Flush the queues associated with the port. */
    751	ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA,
    752		       REW_PORT_CFG, port);
    753
    754	/* Enable dequeuing from the egress queues. */
    755	ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE,
    756		       port);
    757
    758	/* Wait until flushing is complete. */
    759	err = read_poll_timeout(ocelot_read_eq_avail, val, !val,
    760				100, 2000000, false, ocelot, port);
    761
    762	/* Clear flushing again. */
    763	ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
    764
    765	/* Re-enable flow control */
    766	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena);
    767
    768	return err;
    769}
    770
    771void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port,
    772				  unsigned int link_an_mode,
    773				  phy_interface_t interface,
    774				  unsigned long quirks)
    775{
    776	struct ocelot_port *ocelot_port = ocelot->ports[port];
    777	int err;
    778
    779	ocelot_port->speed = SPEED_UNKNOWN;
    780
    781	ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA,
    782			 DEV_MAC_ENA_CFG);
    783
    784	if (ocelot->ops->cut_through_fwd) {
    785		mutex_lock(&ocelot->fwd_domain_lock);
    786		ocelot->ops->cut_through_fwd(ocelot);
    787		mutex_unlock(&ocelot->fwd_domain_lock);
    788	}
    789
    790	ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);
    791
    792	err = ocelot_port_flush(ocelot, port);
    793	if (err)
    794		dev_err(ocelot->dev, "failed to flush port %d: %d\n",
    795			port, err);
    796
    797	/* Put the port in reset. */
    798	if (interface != PHY_INTERFACE_MODE_QSGMII ||
    799	    !(quirks & OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP))
    800		ocelot_port_rmwl(ocelot_port,
    801				 DEV_CLOCK_CFG_MAC_TX_RST |
    802				 DEV_CLOCK_CFG_MAC_RX_RST,
    803				 DEV_CLOCK_CFG_MAC_TX_RST |
    804				 DEV_CLOCK_CFG_MAC_RX_RST,
    805				 DEV_CLOCK_CFG);
    806}
    807EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_down);
    808
    809void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
    810				struct phy_device *phydev,
    811				unsigned int link_an_mode,
    812				phy_interface_t interface,
    813				int speed, int duplex,
    814				bool tx_pause, bool rx_pause,
    815				unsigned long quirks)
    816{
    817	struct ocelot_port *ocelot_port = ocelot->ports[port];
    818	int mac_speed, mode = 0;
    819	u32 mac_fc_cfg;
    820
    821	ocelot_port->speed = speed;
    822
    823	/* The MAC might be integrated in systems where the MAC speed is fixed
     824	 * and it is the PCS that performs the rate adaptation, so we have
    825	 * to write "1000Mbps" into the LINK_SPEED field of DEV_CLOCK_CFG
    826	 * (which is also its default value).
    827	 */
    828	if ((quirks & OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION) ||
    829	    speed == SPEED_1000) {
    830		mac_speed = OCELOT_SPEED_1000;
    831		mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
    832	} else if (speed == SPEED_2500) {
    833		mac_speed = OCELOT_SPEED_2500;
    834		mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
    835	} else if (speed == SPEED_100) {
    836		mac_speed = OCELOT_SPEED_100;
    837	} else {
    838		mac_speed = OCELOT_SPEED_10;
    839	}
    840
    841	if (duplex == DUPLEX_FULL)
    842		mode |= DEV_MAC_MODE_CFG_FDX_ENA;
    843
    844	ocelot_port_writel(ocelot_port, mode, DEV_MAC_MODE_CFG);
    845
    846	/* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and
    847	 * PORT_RST bits in DEV_CLOCK_CFG.
    848	 */
    849	ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(mac_speed),
    850			   DEV_CLOCK_CFG);
    851
    852	switch (speed) {
    853	case SPEED_10:
    854		mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_10);
    855		break;
    856	case SPEED_100:
    857		mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_100);
    858		break;
    859	case SPEED_1000:
    860	case SPEED_2500:
    861		mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_1000);
    862		break;
    863	default:
    864		dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n",
    865			port, speed);
    866		return;
    867	}
    868
     869	/* Handle RX pause in all cases; with 2500base-X this is used for rate
    870	 * adaptation.
    871	 */
    872	mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA;
    873
    874	if (tx_pause)
    875		mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA |
    876			      SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
    877			      SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
    878			      SYS_MAC_FC_CFG_ZERO_PAUSE_ENA;
    879
    880	/* Flow control. Link speed is only used here to evaluate the time
    881	 * specification in incoming pause frames.
    882	 */
    883	ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port);
    884
    885	ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
    886
    887	/* Don't attempt to send PAUSE frames on the NPI port, it's broken */
    888	if (port != ocelot->npi)
    889		ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA,
    890				    tx_pause);
    891
    892	/* Undo the effects of ocelot_phylink_mac_link_down:
    893	 * enable MAC module
    894	 */
    895	ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
    896			   DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
    897
    898	/* If the port supports cut-through forwarding, update the masks before
    899	 * enabling forwarding on the port.
    900	 */
    901	if (ocelot->ops->cut_through_fwd) {
    902		mutex_lock(&ocelot->fwd_domain_lock);
    903		ocelot->ops->cut_through_fwd(ocelot);
    904		mutex_unlock(&ocelot->fwd_domain_lock);
    905	}
    906
    907	/* Core: Enable port for frame transfer */
    908	ocelot_fields_write(ocelot, port,
    909			    QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
    910}
    911EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up);
    912
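/* Queue a clone of a PTP packet for two-step TX timestamping. The clone is
 * tagged with a per-port timestamp ID and held on the port's tx_skbs queue
 * until the matching timestamp is read back in ocelot_get_txtstamp().
 */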
    913static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
    914					struct sk_buff *clone)
    915{
    916	struct ocelot_port *ocelot_port = ocelot->ports[port];
    917	unsigned long flags;
    918
    919	spin_lock_irqsave(&ocelot->ts_id_lock, flags);
    920
    921	if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
    922	    ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
    923		spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
    924		return -EBUSY;
    925	}
    926
    927	skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
    928	/* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
    929	OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
    930
    931	ocelot_port->ts_id++;
    932	if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
    933		ocelot_port->ts_id = 0;
    934
    935	ocelot_port->ptp_skbs_in_flight++;
    936	ocelot->ptp_skbs_in_flight++;
    937
    938	skb_queue_tail(&ocelot_port->tx_skbs, clone);
    939
    940	spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
    941
    942	return 0;
    943}
    944
    945static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb,
    946				       unsigned int ptp_class)
    947{
    948	struct ptp_header *hdr;
    949	u8 msgtype, twostep;
    950
    951	hdr = ptp_parse_header(skb, ptp_class);
    952	if (!hdr)
    953		return false;
    954
    955	msgtype = ptp_get_msgtype(hdr, ptp_class);
    956	twostep = hdr->flag_field[0] & 0x2;
    957
    958	if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0)
    959		return true;
    960
    961	return false;
    962}
    963
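/* Prepare TX timestamping for a PTP packet: one-step sync frames are stamped
 * by the hardware rewriter, anything else falls back to two-step, in which
 * case a clone is queued for later matching against the timestamp FIFO.
 */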
    964int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
    965				 struct sk_buff *skb,
    966				 struct sk_buff **clone)
    967{
    968	struct ocelot_port *ocelot_port = ocelot->ports[port];
    969	u8 ptp_cmd = ocelot_port->ptp_cmd;
    970	unsigned int ptp_class;
    971	int err;
    972
     973	/* Don't do anything if PTP timestamping is not enabled */
    974	if (!ptp_cmd)
    975		return 0;
    976
    977	ptp_class = ptp_classify_raw(skb);
    978	if (ptp_class == PTP_CLASS_NONE)
    979		return -EINVAL;
    980
    981	/* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
    982	if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
    983		if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
    984			OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
    985			return 0;
    986		}
    987
    988		/* Fall back to two-step timestamping */
    989		ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
    990	}
    991
    992	if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
    993		*clone = skb_clone_sk(skb);
    994		if (!(*clone))
    995			return -ENOMEM;
    996
    997		err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
    998		if (err)
    999			return err;
   1000
   1001		OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
   1002		OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
   1003	}
   1004
   1005	return 0;
   1006}
   1007EXPORT_SYMBOL(ocelot_port_txtstamp_request);
   1008
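/* Read one TX timestamp from the hardware: the nanoseconds come from the
 * SYS_PTP_TXSTAMP FIFO, while the seconds are taken from the current PTP
 * time, corrected if the seconds counter incremented after the stamp was
 * captured.
 */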
   1009static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
   1010				   struct timespec64 *ts)
   1011{
   1012	unsigned long flags;
   1013	u32 val;
   1014
   1015	spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
   1016
   1017	/* Read current PTP time to get seconds */
   1018	val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
   1019
   1020	val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
   1021	val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
   1022	ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
   1023	ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
   1024
   1025	/* Read packet HW timestamp from FIFO */
   1026	val = ocelot_read(ocelot, SYS_PTP_TXSTAMP);
   1027	ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val);
   1028
   1029	/* Sec has incremented since the ts was registered */
   1030	if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC))
   1031		ts->tv_sec--;
   1032
   1033	spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
   1034}
   1035
   1036static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
   1037{
   1038	struct ptp_header *hdr;
   1039
   1040	hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
   1041	if (WARN_ON(!hdr))
   1042		return false;
   1043
   1044	return seqid == ntohs(hdr->sequence_id);
   1045}
   1046
   1047void ocelot_get_txtstamp(struct ocelot *ocelot)
   1048{
   1049	int budget = OCELOT_PTP_QUEUE_SZ;
   1050
   1051	while (budget--) {
   1052		struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
   1053		struct skb_shared_hwtstamps shhwtstamps;
   1054		u32 val, id, seqid, txport;
   1055		struct ocelot_port *port;
   1056		struct timespec64 ts;
   1057		unsigned long flags;
   1058
   1059		val = ocelot_read(ocelot, SYS_PTP_STATUS);
   1060
   1061		/* Check if a timestamp can be retrieved */
   1062		if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD))
   1063			break;
   1064
   1065		WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL);
   1066
   1067		/* Retrieve the ts ID and Tx port */
   1068		id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
   1069		txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
   1070		seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
   1071
   1072		port = ocelot->ports[txport];
   1073
   1074		spin_lock(&ocelot->ts_id_lock);
   1075		port->ptp_skbs_in_flight--;
   1076		ocelot->ptp_skbs_in_flight--;
   1077		spin_unlock(&ocelot->ts_id_lock);
   1078
   1079		/* Retrieve its associated skb */
   1080try_again:
   1081		spin_lock_irqsave(&port->tx_skbs.lock, flags);
   1082
   1083		skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
   1084			if (OCELOT_SKB_CB(skb)->ts_id != id)
   1085				continue;
   1086			__skb_unlink(skb, &port->tx_skbs);
   1087			skb_match = skb;
   1088			break;
   1089		}
   1090
   1091		spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
   1092
   1093		if (WARN_ON(!skb_match))
   1094			continue;
   1095
   1096		if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
   1097			dev_err_ratelimited(ocelot->dev,
   1098					    "port %d received stale TX timestamp for seqid %d, discarding\n",
   1099					    txport, seqid);
   1100			dev_kfree_skb_any(skb);
   1101			goto try_again;
   1102		}
   1103
   1104		/* Get the h/w timestamp */
   1105		ocelot_get_hwtimestamp(ocelot, &ts);
   1106
   1107		/* Set the timestamp into the skb */
   1108		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
   1109		shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
   1110		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
   1111
   1112		/* Next ts */
   1113		ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
   1114	}
   1115}
   1116EXPORT_SYMBOL(ocelot_get_txtstamp);
   1117
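/* Read one word from the CPU extraction queue, decoding the inline control
 * codes (EOF, ESCAPE, PRUNED, ABORT). Returns the number of valid bytes
 * stored in *rval, or a negative error code.
 */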
   1118static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh,
   1119				u32 *rval)
   1120{
   1121	u32 bytes_valid, val;
   1122
   1123	val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
   1124	if (val == XTR_NOT_READY) {
   1125		if (ifh)
   1126			return -EIO;
   1127
   1128		do {
   1129			val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
   1130		} while (val == XTR_NOT_READY);
   1131	}
   1132
   1133	switch (val) {
   1134	case XTR_ABORT:
   1135		return -EIO;
   1136	case XTR_EOF_0:
   1137	case XTR_EOF_1:
   1138	case XTR_EOF_2:
   1139	case XTR_EOF_3:
   1140	case XTR_PRUNED:
   1141		bytes_valid = XTR_VALID_BYTES(val);
   1142		val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
   1143		if (val == XTR_ESCAPE)
   1144			*rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
   1145		else
   1146			*rval = val;
   1147
   1148		return bytes_valid;
   1149	case XTR_ESCAPE:
   1150		*rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
   1151
   1152		return 4;
   1153	default:
   1154		*rval = val;
   1155
   1156		return 4;
   1157	}
   1158}
   1159
   1160static int ocelot_xtr_poll_xfh(struct ocelot *ocelot, int grp, u32 *xfh)
   1161{
   1162	int i, err = 0;
   1163
   1164	for (i = 0; i < OCELOT_TAG_LEN / 4; i++) {
   1165		err = ocelot_rx_frame_word(ocelot, grp, true, &xfh[i]);
   1166		if (err != 4)
   1167			return (err < 0) ? err : -EIO;
   1168	}
   1169
   1170	return 0;
   1171}
   1172
   1173void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
   1174			     u64 timestamp)
   1175{
   1176	struct skb_shared_hwtstamps *shhwtstamps;
   1177	u64 tod_in_ns, full_ts_in_ns;
   1178	struct timespec64 ts;
   1179
   1180	ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
   1181
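	/* The hardware provides only the low 32 bits of the RX timestamp;
	 * reconstruct the full 64-bit value from the current PTP time of day,
	 * compensating for a possible rollover of the lower word between
	 * frame reception and readout.
	 */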
   1182	tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
   1183	if ((tod_in_ns & 0xffffffff) < timestamp)
   1184		full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
   1185				timestamp;
   1186	else
   1187		full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
   1188				timestamp;
   1189
   1190	shhwtstamps = skb_hwtstamps(skb);
   1191	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
   1192	shhwtstamps->hwtstamp = full_ts_in_ns;
   1193}
   1194EXPORT_SYMBOL(ocelot_ptp_rx_timestamp);
   1195
   1196int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
   1197{
   1198	u64 timestamp, src_port, len;
   1199	u32 xfh[OCELOT_TAG_LEN / 4];
   1200	struct net_device *dev;
   1201	struct sk_buff *skb;
   1202	int sz, buf_len;
   1203	u32 val, *buf;
   1204	int err;
   1205
   1206	err = ocelot_xtr_poll_xfh(ocelot, grp, xfh);
   1207	if (err)
   1208		return err;
   1209
   1210	ocelot_xfh_get_src_port(xfh, &src_port);
   1211	ocelot_xfh_get_len(xfh, &len);
   1212	ocelot_xfh_get_rew_val(xfh, &timestamp);
   1213
   1214	if (WARN_ON(src_port >= ocelot->num_phys_ports))
   1215		return -EINVAL;
   1216
   1217	dev = ocelot->ops->port_to_netdev(ocelot, src_port);
   1218	if (!dev)
   1219		return -EINVAL;
   1220
   1221	skb = netdev_alloc_skb(dev, len);
   1222	if (unlikely(!skb)) {
   1223		netdev_err(dev, "Unable to allocate sk_buff\n");
   1224		return -ENOMEM;
   1225	}
   1226
   1227	buf_len = len - ETH_FCS_LEN;
   1228	buf = (u32 *)skb_put(skb, buf_len);
   1229
   1230	len = 0;
   1231	do {
   1232		sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
   1233		if (sz < 0) {
   1234			err = sz;
   1235			goto out_free_skb;
   1236		}
   1237		*buf++ = val;
   1238		len += sz;
   1239	} while (len < buf_len);
   1240
   1241	/* Read the FCS */
   1242	sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
   1243	if (sz < 0) {
   1244		err = sz;
   1245		goto out_free_skb;
   1246	}
   1247
   1248	/* Update the statistics if part of the FCS was read before */
   1249	len -= ETH_FCS_LEN - sz;
   1250
   1251	if (unlikely(dev->features & NETIF_F_RXFCS)) {
   1252		buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
   1253		*buf = val;
   1254	}
   1255
   1256	if (ocelot->ptp)
   1257		ocelot_ptp_rx_timestamp(ocelot, skb, timestamp);
   1258
   1259	/* Everything we see on an interface that is in the HW bridge
   1260	 * has already been forwarded.
   1261	 */
   1262	if (ocelot->ports[src_port]->bridge)
   1263		skb->offload_fwd_mark = 1;
   1264
   1265	skb->protocol = eth_type_trans(skb, dev);
   1266
   1267	*nskb = skb;
   1268
   1269	return 0;
   1270
   1271out_free_skb:
   1272	kfree_skb(skb);
   1273	return err;
   1274}
   1275EXPORT_SYMBOL(ocelot_xtr_poll_frame);
   1276
   1277bool ocelot_can_inject(struct ocelot *ocelot, int grp)
   1278{
   1279	u32 val = ocelot_read(ocelot, QS_INJ_STATUS);
   1280
   1281	if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))))
   1282		return false;
   1283	if (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp)))
   1284		return false;
   1285
   1286	return true;
   1287}
   1288EXPORT_SYMBOL(ocelot_can_inject);
   1289
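/* Populate the injection frame header: bypass the analyzer and send the frame
 * straight to the given destination port, optionally applying a rewriter
 * operation and a VLAN tag.
 */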
   1290void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag)
   1291{
   1292	ocelot_ifh_set_bypass(ifh, 1);
   1293	ocelot_ifh_set_dest(ifh, BIT_ULL(port));
   1294	ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
   1295	if (vlan_tag)
   1296		ocelot_ifh_set_vlan_tci(ifh, vlan_tag);
   1297	if (rew_op)
   1298		ocelot_ifh_set_rew_op(ifh, rew_op);
   1299}
   1300EXPORT_SYMBOL(ocelot_ifh_port_set);
   1301
   1302void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
   1303			      u32 rew_op, struct sk_buff *skb)
   1304{
   1305	u32 ifh[OCELOT_TAG_LEN / 4] = {0};
   1306	unsigned int i, count, last;
   1307
   1308	ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
   1309			 QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
   1310
   1311	ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
   1312
   1313	for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
   1314		ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
   1315
   1316	count = DIV_ROUND_UP(skb->len, 4);
   1317	last = skb->len % 4;
   1318	for (i = 0; i < count; i++)
   1319		ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp);
   1320
   1321	/* Add padding */
   1322	while (i < (OCELOT_BUFFER_CELL_SZ / 4)) {
   1323		ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
   1324		i++;
   1325	}
   1326
   1327	/* Indicate EOF and valid bytes in last word */
   1328	ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
   1329			 QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) |
   1330			 QS_INJ_CTRL_EOF,
   1331			 QS_INJ_CTRL, grp);
   1332
   1333	/* Add dummy CRC */
   1334	ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
   1335	skb_tx_timestamp(skb);
   1336
   1337	skb->dev->stats.tx_packets++;
   1338	skb->dev->stats.tx_bytes += skb->len;
   1339}
   1340EXPORT_SYMBOL(ocelot_port_inject_frame);
   1341
   1342void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
   1343{
   1344	while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
   1345		ocelot_read_rix(ocelot, QS_XTR_RD, grp);
   1346}
   1347EXPORT_SYMBOL(ocelot_drain_cpu_queue);
   1348
   1349int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr,
   1350		   u16 vid, const struct net_device *bridge)
   1351{
   1352	if (!vid)
   1353		vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
   1354
   1355	return ocelot_mact_learn(ocelot, port, addr, vid, ENTRYTYPE_LOCKED);
   1356}
   1357EXPORT_SYMBOL(ocelot_fdb_add);
   1358
   1359int ocelot_fdb_del(struct ocelot *ocelot, int port, const unsigned char *addr,
   1360		   u16 vid, const struct net_device *bridge)
   1361{
   1362	if (!vid)
   1363		vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
   1364
   1365	return ocelot_mact_forget(ocelot, addr, vid);
   1366}
   1367EXPORT_SYMBOL(ocelot_fdb_del);
   1368
   1369int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
   1370			    bool is_static, void *data)
   1371{
   1372	struct ocelot_dump_ctx *dump = data;
   1373	u32 portid = NETLINK_CB(dump->cb->skb).portid;
   1374	u32 seq = dump->cb->nlh->nlmsg_seq;
   1375	struct nlmsghdr *nlh;
   1376	struct ndmsg *ndm;
   1377
   1378	if (dump->idx < dump->cb->args[2])
   1379		goto skip;
   1380
   1381	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
   1382			sizeof(*ndm), NLM_F_MULTI);
   1383	if (!nlh)
   1384		return -EMSGSIZE;
   1385
   1386	ndm = nlmsg_data(nlh);
   1387	ndm->ndm_family  = AF_BRIDGE;
   1388	ndm->ndm_pad1    = 0;
   1389	ndm->ndm_pad2    = 0;
   1390	ndm->ndm_flags   = NTF_SELF;
   1391	ndm->ndm_type    = 0;
   1392	ndm->ndm_ifindex = dump->dev->ifindex;
   1393	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;
   1394
   1395	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
   1396		goto nla_put_failure;
   1397
   1398	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
   1399		goto nla_put_failure;
   1400
   1401	nlmsg_end(dump->skb, nlh);
   1402
   1403skip:
   1404	dump->idx++;
   1405	return 0;
   1406
   1407nla_put_failure:
   1408	nlmsg_cancel(dump->skb, nlh);
   1409	return -EMSGSIZE;
   1410}
   1411EXPORT_SYMBOL(ocelot_port_fdb_do_dump);
   1412
   1413/* Caller must hold &ocelot->mact_lock */
   1414static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
   1415			    struct ocelot_mact_entry *entry)
   1416{
   1417	u32 val, dst, macl, mach;
   1418	char mac[ETH_ALEN];
   1419
   1420	/* Set row and column to read from */
   1421	ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_M_INDEX, row);
   1422	ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_BUCKET, col);
   1423
   1424	/* Issue a read command */
   1425	ocelot_write(ocelot,
   1426		     ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ),
   1427		     ANA_TABLES_MACACCESS);
   1428
   1429	if (ocelot_mact_wait_for_completion(ocelot))
   1430		return -ETIMEDOUT;
   1431
   1432	/* Read the entry flags */
   1433	val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
   1434	if (!(val & ANA_TABLES_MACACCESS_VALID))
   1435		return -EINVAL;
   1436
   1437	/* If the entry read has another port configured as its destination,
   1438	 * do not report it.
   1439	 */
   1440	dst = (val & ANA_TABLES_MACACCESS_DEST_IDX_M) >> 3;
   1441	if (dst != port)
   1442		return -EINVAL;
   1443
   1444	/* Get the entry's MAC address and VLAN id */
   1445	macl = ocelot_read(ocelot, ANA_TABLES_MACLDATA);
   1446	mach = ocelot_read(ocelot, ANA_TABLES_MACHDATA);
   1447
   1448	mac[0] = (mach >> 8)  & 0xff;
   1449	mac[1] = (mach >> 0)  & 0xff;
   1450	mac[2] = (macl >> 24) & 0xff;
   1451	mac[3] = (macl >> 16) & 0xff;
   1452	mac[4] = (macl >> 8)  & 0xff;
   1453	mac[5] = (macl >> 0)  & 0xff;
   1454
   1455	entry->vid = (mach >> 16) & 0xfff;
   1456	ether_addr_copy(entry->mac, mac);
   1457
   1458	return 0;
   1459}
   1460
   1461int ocelot_mact_flush(struct ocelot *ocelot, int port)
   1462{
   1463	int err;
   1464
   1465	mutex_lock(&ocelot->mact_lock);
   1466
   1467	/* Program ageing filter for a single port */
   1468	ocelot_write(ocelot, ANA_ANAGEFIL_PID_EN | ANA_ANAGEFIL_PID_VAL(port),
   1469		     ANA_ANAGEFIL);
   1470
   1471	/* Flushing dynamic FDB entries requires two successive age scans */
   1472	ocelot_write(ocelot,
   1473		     ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_AGE),
   1474		     ANA_TABLES_MACACCESS);
   1475
   1476	err = ocelot_mact_wait_for_completion(ocelot);
   1477	if (err) {
   1478		mutex_unlock(&ocelot->mact_lock);
   1479		return err;
   1480	}
   1481
   1482	/* And second... */
   1483	ocelot_write(ocelot,
   1484		     ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_AGE),
   1485		     ANA_TABLES_MACACCESS);
   1486
   1487	err = ocelot_mact_wait_for_completion(ocelot);
   1488
   1489	/* Restore ageing filter */
   1490	ocelot_write(ocelot, 0, ANA_ANAGEFIL);
   1491
   1492	mutex_unlock(&ocelot->mact_lock);
   1493
   1494	return err;
   1495}
   1496EXPORT_SYMBOL_GPL(ocelot_mact_flush);
   1497
   1498int ocelot_fdb_dump(struct ocelot *ocelot, int port,
   1499		    dsa_fdb_dump_cb_t *cb, void *data)
   1500{
   1501	int err = 0;
   1502	int i, j;
   1503
   1504	/* We could take the lock just around ocelot_mact_read, but doing so
   1505	 * thousands of times in a row seems rather pointless and inefficient.
   1506	 */
   1507	mutex_lock(&ocelot->mact_lock);
   1508
   1509	/* Loop through all the mac tables entries. */
   1510	for (i = 0; i < ocelot->num_mact_rows; i++) {
   1511		for (j = 0; j < 4; j++) {
   1512			struct ocelot_mact_entry entry;
   1513			bool is_static;
   1514
   1515			err = ocelot_mact_read(ocelot, port, i, j, &entry);
   1516			/* If the entry is invalid (wrong port, invalid...),
   1517			 * skip it.
   1518			 */
   1519			if (err == -EINVAL)
   1520				continue;
   1521			else if (err)
   1522				break;
   1523
   1524			is_static = (entry.type == ENTRYTYPE_LOCKED);
   1525
   1526			/* Hide the reserved VLANs used for
   1527			 * VLAN-unaware bridging.
   1528			 */
   1529			if (entry.vid > OCELOT_RSV_VLAN_RANGE_START)
   1530				entry.vid = 0;
   1531
   1532			err = cb(entry.mac, entry.vid, is_static, data);
   1533			if (err)
   1534				break;
   1535		}
   1536	}
   1537
   1538	mutex_unlock(&ocelot->mact_lock);
   1539
   1540	return err;
   1541}
   1542EXPORT_SYMBOL(ocelot_fdb_dump);
   1543
   1544static void ocelot_populate_l2_ptp_trap_key(struct ocelot_vcap_filter *trap)
   1545{
   1546	trap->key_type = OCELOT_VCAP_KEY_ETYPE;
   1547	*(__be16 *)trap->key.etype.etype.value = htons(ETH_P_1588);
   1548	*(__be16 *)trap->key.etype.etype.mask = htons(0xffff);
   1549}
   1550
   1551static void
   1552ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
   1553{
   1554	trap->key_type = OCELOT_VCAP_KEY_IPV4;
   1555	trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
   1556	trap->key.ipv4.proto.mask[0] = 0xff;
   1557	trap->key.ipv4.dport.value = PTP_EV_PORT;
   1558	trap->key.ipv4.dport.mask = 0xffff;
   1559}
   1560
   1561static void
   1562ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
   1563{
   1564	trap->key_type = OCELOT_VCAP_KEY_IPV6;
   1565	trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
   1566	trap->key.ipv4.proto.mask[0] = 0xff;
   1567	trap->key.ipv6.dport.value = PTP_EV_PORT;
   1568	trap->key.ipv6.dport.mask = 0xffff;
   1569}
   1570
   1571static void
   1572ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
   1573{
   1574	trap->key_type = OCELOT_VCAP_KEY_IPV4;
   1575	trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
   1576	trap->key.ipv4.proto.mask[0] = 0xff;
   1577	trap->key.ipv4.dport.value = PTP_GEN_PORT;
   1578	trap->key.ipv4.dport.mask = 0xffff;
   1579}
   1580
   1581static void
   1582ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
   1583{
   1584	trap->key_type = OCELOT_VCAP_KEY_IPV6;
   1585	trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
   1586	trap->key.ipv4.proto.mask[0] = 0xff;
   1587	trap->key.ipv6.dport.value = PTP_GEN_PORT;
   1588	trap->key.ipv6.dport.mask = 0xffff;
   1589}
   1590
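/* Install (or update) a VCAP IS2 rule identified by 'cookie' which traps
 * matching frames to the CPU. The rule is shared between ports: each caller
 * only adds its own port to the rule's ingress port mask.
 */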
   1591int ocelot_trap_add(struct ocelot *ocelot, int port,
   1592		    unsigned long cookie, bool take_ts,
   1593		    void (*populate)(struct ocelot_vcap_filter *f))
   1594{
   1595	struct ocelot_vcap_block *block_vcap_is2;
   1596	struct ocelot_vcap_filter *trap;
   1597	bool new = false;
   1598	int err;
   1599
   1600	block_vcap_is2 = &ocelot->block[VCAP_IS2];
   1601
   1602	trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie,
   1603						   false);
   1604	if (!trap) {
   1605		trap = kzalloc(sizeof(*trap), GFP_KERNEL);
   1606		if (!trap)
   1607			return -ENOMEM;
   1608
   1609		populate(trap);
   1610		trap->prio = 1;
   1611		trap->id.cookie = cookie;
   1612		trap->id.tc_offload = false;
   1613		trap->block_id = VCAP_IS2;
   1614		trap->type = OCELOT_VCAP_FILTER_OFFLOAD;
   1615		trap->lookup = 0;
   1616		trap->action.cpu_copy_ena = true;
   1617		trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
   1618		trap->action.port_mask = 0;
   1619		trap->take_ts = take_ts;
   1620		trap->is_trap = true;
   1621		new = true;
   1622	}
   1623
   1624	trap->ingress_port_mask |= BIT(port);
   1625
   1626	if (new)
   1627		err = ocelot_vcap_filter_add(ocelot, trap, NULL);
   1628	else
   1629		err = ocelot_vcap_filter_replace(ocelot, trap);
   1630	if (err) {
   1631		trap->ingress_port_mask &= ~BIT(port);
   1632		if (!trap->ingress_port_mask)
   1633			kfree(trap);
   1634		return err;
   1635	}
   1636
   1637	return 0;
   1638}
   1639
   1640int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie)
   1641{
   1642	struct ocelot_vcap_block *block_vcap_is2;
   1643	struct ocelot_vcap_filter *trap;
   1644
   1645	block_vcap_is2 = &ocelot->block[VCAP_IS2];
   1646
   1647	trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie,
   1648						   false);
   1649	if (!trap)
   1650		return 0;
   1651
   1652	trap->ingress_port_mask &= ~BIT(port);
   1653	if (!trap->ingress_port_mask)
   1654		return ocelot_vcap_filter_del(ocelot, trap);
   1655
   1656	return ocelot_vcap_filter_replace(ocelot, trap);
   1657}
   1658
   1659static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port)
   1660{
   1661	unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
   1662
   1663	return ocelot_trap_add(ocelot, port, l2_cookie, true,
   1664			       ocelot_populate_l2_ptp_trap_key);
   1665}
   1666
   1667static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port)
   1668{
   1669	unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
   1670
   1671	return ocelot_trap_del(ocelot, port, l2_cookie);
   1672}
   1673
   1674static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port)
   1675{
   1676	unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
   1677	unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
   1678	int err;
   1679
   1680	err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, true,
   1681			      ocelot_populate_ipv4_ptp_event_trap_key);
   1682	if (err)
   1683		return err;
   1684
   1685	err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, false,
   1686			      ocelot_populate_ipv4_ptp_general_trap_key);
   1687	if (err)
   1688		ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
   1689
   1690	return err;
   1691}
   1692
   1693static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port)
   1694{
   1695	unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
   1696	unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
   1697	int err;
   1698
   1699	err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
   1700	err |= ocelot_trap_del(ocelot, port, ipv4_gen_cookie);
   1701	return err;
   1702}
   1703
   1704static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port)
   1705{
   1706	unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
   1707	unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
   1708	int err;
   1709
   1710	err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, true,
   1711			      ocelot_populate_ipv6_ptp_event_trap_key);
   1712	if (err)
   1713		return err;
   1714
   1715	err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, false,
   1716			      ocelot_populate_ipv6_ptp_general_trap_key);
   1717	if (err)
   1718		ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
   1719
   1720	return err;
   1721}
   1722
   1723static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port)
   1724{
   1725	unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
   1726	unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
   1727	int err;
   1728
   1729	err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
   1730	err |= ocelot_trap_del(ocelot, port, ipv6_gen_cookie);
   1731	return err;
   1732}
   1733
   1734static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port,
   1735				  bool l2, bool l4)
   1736{
   1737	int err;
   1738
   1739	if (l2)
   1740		err = ocelot_l2_ptp_trap_add(ocelot, port);
   1741	else
   1742		err = ocelot_l2_ptp_trap_del(ocelot, port);
   1743	if (err)
   1744		return err;
   1745
   1746	if (l4) {
   1747		err = ocelot_ipv4_ptp_trap_add(ocelot, port);
   1748		if (err)
   1749			goto err_ipv4;
   1750
   1751		err = ocelot_ipv6_ptp_trap_add(ocelot, port);
   1752		if (err)
   1753			goto err_ipv6;
   1754	} else {
   1755		err = ocelot_ipv4_ptp_trap_del(ocelot, port);
   1756
   1757		err |= ocelot_ipv6_ptp_trap_del(ocelot, port);
   1758	}
   1759	if (err)
   1760		return err;
   1761
   1762	return 0;
   1763
   1764err_ipv6:
   1765	ocelot_ipv4_ptp_trap_del(ocelot, port);
   1766err_ipv4:
   1767	if (l2)
   1768		ocelot_l2_ptp_trap_del(ocelot, port);
   1769	return err;
   1770}
   1771
   1772int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
   1773{
   1774	return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
   1775			    sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
   1776}
   1777EXPORT_SYMBOL(ocelot_hwstamp_get);
   1778
   1779int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
   1780{
   1781	struct ocelot_port *ocelot_port = ocelot->ports[port];
   1782	bool l2 = false, l4 = false;
   1783	struct hwtstamp_config cfg;
   1784	int err;
   1785
   1786	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
   1787		return -EFAULT;
   1788
   1789	/* Tx type sanity check */
   1790	switch (cfg.tx_type) {
   1791	case HWTSTAMP_TX_ON:
   1792		ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
   1793		break;
   1794	case HWTSTAMP_TX_ONESTEP_SYNC:
    1795		/* IFH_REW_OP_ONE_STEP_PTP updates the correction field, but we
    1796		 * need to update the origin time.
    1797		 */
   1798		ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
   1799		break;
   1800	case HWTSTAMP_TX_OFF:
   1801		ocelot_port->ptp_cmd = 0;
   1802		break;
   1803	default:
   1804		return -ERANGE;
   1805	}
   1806
   1807	mutex_lock(&ocelot->ptp_lock);
   1808
   1809	switch (cfg.rx_filter) {
   1810	case HWTSTAMP_FILTER_NONE:
   1811		break;
   1812	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
   1813	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
   1814	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
   1815		l4 = true;
   1816		break;
   1817	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
   1818	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
   1819	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
   1820		l2 = true;
   1821		break;
   1822	case HWTSTAMP_FILTER_PTP_V2_EVENT:
   1823	case HWTSTAMP_FILTER_PTP_V2_SYNC:
   1824	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
   1825		l2 = true;
   1826		l4 = true;
   1827		break;
   1828	default:
   1829		mutex_unlock(&ocelot->ptp_lock);
   1830		return -ERANGE;
   1831	}
   1832
   1833	err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
   1834	if (err) {
   1835		mutex_unlock(&ocelot->ptp_lock);
   1836		return err;
   1837	}
   1838
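        	/* Report back the RX filter that was actually applied, which may
        	 * be more generic than the one that was requested.
        	 */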
   1839	if (l2 && l4)
   1840		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
   1841	else if (l2)
   1842		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
   1843	else if (l4)
   1844		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
   1845	else
   1846		cfg.rx_filter = HWTSTAMP_FILTER_NONE;
   1847
   1848	/* Commit back the result & save it */
   1849	memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
   1850	mutex_unlock(&ocelot->ptp_lock);
   1851
   1852	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
   1853}
   1854EXPORT_SYMBOL(ocelot_hwstamp_set);
   1855
   1856void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
   1857{
   1858	int i;
   1859
   1860	if (sset != ETH_SS_STATS)
   1861		return;
   1862
   1863	for (i = 0; i < ocelot->num_stats; i++)
   1864		memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
   1865		       ETH_GSTRING_LEN);
   1866}
   1867EXPORT_SYMBOL(ocelot_get_strings);
   1868
   1869/* Caller must hold &ocelot->stats_lock */
   1870static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
   1871{
   1872	unsigned int idx = port * ocelot->num_stats;
   1873	struct ocelot_stats_region *region;
   1874	int err, j;
   1875
   1876	/* Configure the port to read the stats from */
   1877	ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG);
   1878
   1879	list_for_each_entry(region, &ocelot->stats_regions, node) {
   1880		err = ocelot_bulk_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
   1881					   region->offset, region->buf,
   1882					   region->count);
   1883		if (err)
   1884			return err;
   1885
   1886		for (j = 0; j < region->count; j++) {
   1887			u64 *stat = &ocelot->stats[idx + j];
   1888			u64 val = region->buf[j];
   1889
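        			/* Hardware counters are 32-bit; detect wraparound by
        			 * comparing the fresh reading with the low 32 bits of
        			 * the accumulated value, and carry into the upper half.
        			 */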
   1890			if (val < (*stat & U32_MAX))
   1891				*stat += (u64)1 << 32;
   1892
   1893			*stat = (*stat & ~(u64)U32_MAX) + val;
   1894		}
   1895
   1896		idx += region->count;
   1897	}
   1898
   1899	return err;
   1900}
   1901
   1902static void ocelot_check_stats_work(struct work_struct *work)
   1903{
   1904	struct delayed_work *del_work = to_delayed_work(work);
   1905	struct ocelot *ocelot = container_of(del_work, struct ocelot,
   1906					     stats_work);
   1907	int i, err;
   1908
   1909	mutex_lock(&ocelot->stats_lock);
   1910	for (i = 0; i < ocelot->num_phys_ports; i++) {
   1911		err = ocelot_port_update_stats(ocelot, i);
   1912		if (err)
   1913			break;
   1914	}
   1915	mutex_unlock(&ocelot->stats_lock);
   1916
   1917	if (err)
    1918		dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
   1919
   1920	queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
   1921			   OCELOT_STATS_CHECK_DELAY);
   1922}
   1923
   1924void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
   1925{
   1926	int i, err;
   1927
   1928	mutex_lock(&ocelot->stats_lock);
   1929
   1930	/* check and update now */
   1931	err = ocelot_port_update_stats(ocelot, port);
   1932
   1933	/* Copy all counters */
   1934	for (i = 0; i < ocelot->num_stats; i++)
   1935		*data++ = ocelot->stats[port * ocelot->num_stats + i];
   1936
   1937	mutex_unlock(&ocelot->stats_lock);
   1938
   1939	if (err)
   1940		dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
   1941}
   1942EXPORT_SYMBOL(ocelot_get_ethtool_stats);
   1943
   1944int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
   1945{
   1946	if (sset != ETH_SS_STATS)
   1947		return -EOPNOTSUPP;
   1948
   1949	return ocelot->num_stats;
   1950}
   1951EXPORT_SYMBOL(ocelot_get_sset_count);
   1952
   1953static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
   1954{
   1955	struct ocelot_stats_region *region = NULL;
   1956	unsigned int last;
   1957	int i;
   1958
   1959	INIT_LIST_HEAD(&ocelot->stats_regions);
   1960
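        	/* Coalesce counters with consecutive register offsets into a
        	 * single region, so that each region can be fetched with one
        	 * bulk read.
        	 */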
   1961	for (i = 0; i < ocelot->num_stats; i++) {
   1962		if (region && ocelot->stats_layout[i].offset == last + 1) {
   1963			region->count++;
   1964		} else {
   1965			region = devm_kzalloc(ocelot->dev, sizeof(*region),
   1966					      GFP_KERNEL);
   1967			if (!region)
   1968				return -ENOMEM;
   1969
   1970			region->offset = ocelot->stats_layout[i].offset;
   1971			region->count = 1;
   1972			list_add_tail(&region->node, &ocelot->stats_regions);
   1973		}
   1974
   1975		last = ocelot->stats_layout[i].offset;
   1976	}
   1977
   1978	list_for_each_entry(region, &ocelot->stats_regions, node) {
   1979		region->buf = devm_kcalloc(ocelot->dev, region->count,
   1980					   sizeof(*region->buf), GFP_KERNEL);
   1981		if (!region->buf)
   1982			return -ENOMEM;
   1983	}
   1984
   1985	return 0;
   1986}
   1987
   1988int ocelot_get_ts_info(struct ocelot *ocelot, int port,
   1989		       struct ethtool_ts_info *info)
   1990{
   1991	info->phc_index = ocelot->ptp_clock ?
   1992			  ptp_clock_index(ocelot->ptp_clock) : -1;
   1993	if (info->phc_index == -1) {
   1994		info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
   1995					 SOF_TIMESTAMPING_RX_SOFTWARE |
   1996					 SOF_TIMESTAMPING_SOFTWARE;
   1997		return 0;
   1998	}
   1999	info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
   2000				 SOF_TIMESTAMPING_RX_SOFTWARE |
   2001				 SOF_TIMESTAMPING_SOFTWARE |
   2002				 SOF_TIMESTAMPING_TX_HARDWARE |
   2003				 SOF_TIMESTAMPING_RX_HARDWARE |
   2004				 SOF_TIMESTAMPING_RAW_HARDWARE;
   2005	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
   2006			 BIT(HWTSTAMP_TX_ONESTEP_SYNC);
   2007	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
   2008			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
   2009			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
   2010			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
   2011
   2012	return 0;
   2013}
   2014EXPORT_SYMBOL(ocelot_get_ts_info);
   2015
   2016static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
   2017{
   2018	u32 mask = 0;
   2019	int port;
   2020
   2021	lockdep_assert_held(&ocelot->fwd_domain_lock);
   2022
   2023	for (port = 0; port < ocelot->num_phys_ports; port++) {
   2024		struct ocelot_port *ocelot_port = ocelot->ports[port];
   2025
   2026		if (!ocelot_port)
   2027			continue;
   2028
   2029		if (ocelot_port->bond == bond)
   2030			mask |= BIT(port);
   2031	}
   2032
   2033	return mask;
   2034}
   2035
   2036/* The logical port number of a LAG is equal to the lowest numbered physical
   2037 * port ID present in that LAG. It may change if that port ever leaves the LAG.
   2038 */
   2039static int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond)
   2040{
   2041	int bond_mask = ocelot_get_bond_mask(ocelot, bond);
   2042
   2043	if (!bond_mask)
   2044		return -ENOENT;
   2045
   2046	return __ffs(bond_mask);
   2047}
   2048
   2049static u32 ocelot_dsa_8021q_cpu_assigned_ports(struct ocelot *ocelot,
   2050					       struct ocelot_port *cpu)
   2051{
   2052	u32 mask = 0;
   2053	int port;
   2054
   2055	for (port = 0; port < ocelot->num_phys_ports; port++) {
   2056		struct ocelot_port *ocelot_port = ocelot->ports[port];
   2057
   2058		if (!ocelot_port)
   2059			continue;
   2060
   2061		if (ocelot_port->dsa_8021q_cpu == cpu)
   2062			mask |= BIT(port);
   2063	}
   2064
   2065	return mask;
   2066}
   2067
   2068u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port)
   2069{
   2070	struct ocelot_port *ocelot_port = ocelot->ports[port];
   2071	struct ocelot_port *cpu_port = ocelot_port->dsa_8021q_cpu;
   2072
   2073	if (!cpu_port)
   2074		return 0;
   2075
   2076	return BIT(cpu_port->index);
   2077}
   2078EXPORT_SYMBOL_GPL(ocelot_port_assigned_dsa_8021q_cpu_mask);
   2079
   2080u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port)
   2081{
   2082	struct ocelot_port *ocelot_port = ocelot->ports[src_port];
   2083	const struct net_device *bridge;
   2084	u32 mask = 0;
   2085	int port;
   2086
   2087	if (!ocelot_port || ocelot_port->stp_state != BR_STATE_FORWARDING)
   2088		return 0;
   2089
   2090	bridge = ocelot_port->bridge;
   2091	if (!bridge)
   2092		return 0;
   2093
   2094	for (port = 0; port < ocelot->num_phys_ports; port++) {
   2095		ocelot_port = ocelot->ports[port];
   2096
   2097		if (!ocelot_port)
   2098			continue;
   2099
   2100		if (ocelot_port->stp_state == BR_STATE_FORWARDING &&
   2101		    ocelot_port->bridge == bridge)
   2102			mask |= BIT(port);
   2103	}
   2104
   2105	return mask;
   2106}
   2107EXPORT_SYMBOL_GPL(ocelot_get_bridge_fwd_mask);
   2108
   2109static void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
   2110{
   2111	int port;
   2112
   2113	lockdep_assert_held(&ocelot->fwd_domain_lock);
   2114
   2115	/* If cut-through forwarding is supported, update the masks before a
   2116	 * port joins the forwarding domain, to avoid potential underruns if it
   2117	 * has the highest speed from the new domain.
    2118	 * has the highest speed in the new domain.
   2119	if (joining && ocelot->ops->cut_through_fwd)
   2120		ocelot->ops->cut_through_fwd(ocelot);
   2121
   2122	/* Apply FWD mask. The loop is needed to add/remove the current port as
   2123	 * a source for the other ports.
   2124	 */
   2125	for (port = 0; port < ocelot->num_phys_ports; port++) {
   2126		struct ocelot_port *ocelot_port = ocelot->ports[port];
   2127		unsigned long mask;
   2128
   2129		if (!ocelot_port) {
   2130			/* Unused ports can't send anywhere */
   2131			mask = 0;
   2132		} else if (ocelot_port->is_dsa_8021q_cpu) {
   2133			/* The DSA tag_8021q CPU ports need to be able to
   2134			 * forward packets to all ports assigned to them.
   2135			 */
   2136			mask = ocelot_dsa_8021q_cpu_assigned_ports(ocelot,
   2137								   ocelot_port);
   2138		} else if (ocelot_port->bridge) {
   2139			struct net_device *bond = ocelot_port->bond;
   2140
   2141			mask = ocelot_get_bridge_fwd_mask(ocelot, port);
   2142			mask &= ~BIT(port);
   2143
   2144			mask |= ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot,
   2145									port);
   2146
   2147			if (bond)
   2148				mask &= ~ocelot_get_bond_mask(ocelot, bond);
   2149		} else {
   2150			/* Standalone ports forward only to DSA tag_8021q CPU
   2151			 * ports (if those exist), or to the hardware CPU port
   2152			 * module otherwise.
   2153			 */
   2154			mask = ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot,
   2155								       port);
   2156		}
   2157
   2158		ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port);
   2159	}
   2160
   2161	/* If cut-through forwarding is supported and a port is leaving, there
   2162	 * is a chance that cut-through was disabled on the other ports due to
   2163	 * the port which is leaving (it has a higher link speed). We need to
   2164	 * update the cut-through masks of the remaining ports no earlier than
   2165	 * after the port has left, to prevent underruns from happening between
   2166	 * the cut-through update and the forwarding domain update.
   2167	 */
   2168	if (!joining && ocelot->ops->cut_through_fwd)
   2169		ocelot->ops->cut_through_fwd(ocelot);
   2170}
   2171
   2172/* Update PGID_CPU which is the destination port mask used for whitelisting
   2173 * unicast addresses filtered towards the host. In the normal and NPI modes,
   2174 * this points to the analyzer entry for the CPU port module, while in DSA
   2175 * tag_8021q mode, it is a bit mask of all active CPU ports.
   2176 * PGID_SRC will take care of forwarding a packet from one user port to
   2177 * no more than a single CPU port.
   2178 */
   2179static void ocelot_update_pgid_cpu(struct ocelot *ocelot)
   2180{
   2181	int pgid_cpu = 0;
   2182	int port;
   2183
   2184	for (port = 0; port < ocelot->num_phys_ports; port++) {
   2185		struct ocelot_port *ocelot_port = ocelot->ports[port];
   2186
   2187		if (!ocelot_port || !ocelot_port->is_dsa_8021q_cpu)
   2188			continue;
   2189
   2190		pgid_cpu |= BIT(port);
   2191	}
   2192
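        	/* Without any DSA tag_8021q CPU port, fall back to the hardware
        	 * CPU port module, whose PGID bit is ocelot->num_phys_ports.
        	 */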
   2193	if (!pgid_cpu)
   2194		pgid_cpu = BIT(ocelot->num_phys_ports);
   2195
   2196	ocelot_write_rix(ocelot, pgid_cpu, ANA_PGID_PGID, PGID_CPU);
   2197}
   2198
   2199void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port,
   2200				      int cpu)
   2201{
   2202	struct ocelot_port *cpu_port = ocelot->ports[cpu];
   2203	u16 vid;
   2204
   2205	mutex_lock(&ocelot->fwd_domain_lock);
   2206
   2207	ocelot->ports[port]->dsa_8021q_cpu = cpu_port;
   2208
   2209	if (!cpu_port->is_dsa_8021q_cpu) {
   2210		cpu_port->is_dsa_8021q_cpu = true;
   2211
   2212		for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
   2213			ocelot_vlan_member_add(ocelot, cpu, vid, true);
   2214
   2215		ocelot_update_pgid_cpu(ocelot);
   2216	}
   2217
   2218	ocelot_apply_bridge_fwd_mask(ocelot, true);
   2219
   2220	mutex_unlock(&ocelot->fwd_domain_lock);
   2221}
   2222EXPORT_SYMBOL_GPL(ocelot_port_assign_dsa_8021q_cpu);
   2223
   2224void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port)
   2225{
   2226	struct ocelot_port *cpu_port = ocelot->ports[port]->dsa_8021q_cpu;
   2227	bool keep = false;
   2228	u16 vid;
   2229	int p;
   2230
   2231	mutex_lock(&ocelot->fwd_domain_lock);
   2232
   2233	ocelot->ports[port]->dsa_8021q_cpu = NULL;
   2234
   2235	for (p = 0; p < ocelot->num_phys_ports; p++) {
   2236		if (!ocelot->ports[p])
   2237			continue;
   2238
   2239		if (ocelot->ports[p]->dsa_8021q_cpu == cpu_port) {
   2240			keep = true;
   2241			break;
   2242		}
   2243	}
   2244
   2245	if (!keep) {
   2246		cpu_port->is_dsa_8021q_cpu = false;
   2247
   2248		for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
   2249			ocelot_vlan_member_del(ocelot, cpu_port->index, vid);
   2250
   2251		ocelot_update_pgid_cpu(ocelot);
   2252	}
   2253
   2254	ocelot_apply_bridge_fwd_mask(ocelot, true);
   2255
   2256	mutex_unlock(&ocelot->fwd_domain_lock);
   2257}
   2258EXPORT_SYMBOL_GPL(ocelot_port_unassign_dsa_8021q_cpu);
   2259
   2260void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
   2261{
   2262	struct ocelot_port *ocelot_port = ocelot->ports[port];
   2263	u32 learn_ena = 0;
   2264
   2265	mutex_lock(&ocelot->fwd_domain_lock);
   2266
   2267	ocelot_port->stp_state = state;
   2268
   2269	if ((state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING) &&
   2270	    ocelot_port->learn_ena)
   2271		learn_ena = ANA_PORT_PORT_CFG_LEARN_ENA;
   2272
   2273	ocelot_rmw_gix(ocelot, learn_ena, ANA_PORT_PORT_CFG_LEARN_ENA,
   2274		       ANA_PORT_PORT_CFG, port);
   2275
   2276	ocelot_apply_bridge_fwd_mask(ocelot, state == BR_STATE_FORWARDING);
   2277
   2278	mutex_unlock(&ocelot->fwd_domain_lock);
   2279}
   2280EXPORT_SYMBOL(ocelot_bridge_stp_state_set);
   2281
   2282void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs)
   2283{
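        	/* AGE_PERIOD is programmed in seconds, and an entry is aged out
        	 * after 2 * AGE_PERIOD, hence the division by 2000.
        	 */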
   2284	unsigned int age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000);
   2285
   2286	/* Setting AGE_PERIOD to zero effectively disables automatic aging,
    2287	 * which is clearly not our intention. So avoid that.
   2288	 */
   2289	if (!age_period)
   2290		age_period = 1;
   2291
   2292	ocelot_rmw(ocelot, age_period, ANA_AUTOAGE_AGE_PERIOD_M, ANA_AUTOAGE);
   2293}
   2294EXPORT_SYMBOL(ocelot_set_ageing_time);
   2295
   2296static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot,
   2297						     const unsigned char *addr,
   2298						     u16 vid)
   2299{
   2300	struct ocelot_multicast *mc;
   2301
   2302	list_for_each_entry(mc, &ocelot->multicast, list) {
   2303		if (ether_addr_equal(mc->addr, addr) && mc->vid == vid)
   2304			return mc;
   2305	}
   2306
   2307	return NULL;
   2308}
   2309
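        /* Classify an MDB address by its well-known multicast MAC prefix:
         * 01:00:5e is IPv4 multicast, 33:33 is IPv6 multicast, anything else
         * is installed as a regular locked entry.
         */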
   2310static enum macaccess_entry_type ocelot_classify_mdb(const unsigned char *addr)
   2311{
   2312	if (addr[0] == 0x01 && addr[1] == 0x00 && addr[2] == 0x5e)
   2313		return ENTRYTYPE_MACv4;
   2314	if (addr[0] == 0x33 && addr[1] == 0x33)
   2315		return ENTRYTYPE_MACv6;
   2316	return ENTRYTYPE_LOCKED;
   2317}
   2318
   2319static struct ocelot_pgid *ocelot_pgid_alloc(struct ocelot *ocelot, int index,
   2320					     unsigned long ports)
   2321{
   2322	struct ocelot_pgid *pgid;
   2323
   2324	pgid = kzalloc(sizeof(*pgid), GFP_KERNEL);
   2325	if (!pgid)
   2326		return ERR_PTR(-ENOMEM);
   2327
   2328	pgid->ports = ports;
   2329	pgid->index = index;
   2330	refcount_set(&pgid->refcount, 1);
   2331	list_add_tail(&pgid->list, &ocelot->pgids);
   2332
   2333	return pgid;
   2334}
   2335
   2336static void ocelot_pgid_free(struct ocelot *ocelot, struct ocelot_pgid *pgid)
   2337{
   2338	if (!refcount_dec_and_test(&pgid->refcount))
   2339		return;
   2340
   2341	list_del(&pgid->list);
   2342	kfree(pgid);
   2343}
   2344
   2345static struct ocelot_pgid *ocelot_mdb_get_pgid(struct ocelot *ocelot,
   2346					       const struct ocelot_multicast *mc)
   2347{
   2348	struct ocelot_pgid *pgid;
   2349	int index;
   2350
   2351	/* According to VSC7514 datasheet 3.9.1.5 IPv4 Multicast Entries and
   2352	 * 3.9.1.6 IPv6 Multicast Entries, "Instead of a lookup in the
   2353	 * destination mask table (PGID), the destination set is programmed as
   2354	 * part of the entry MAC address.", and the DEST_IDX is set to 0.
   2355	 */
   2356	if (mc->entry_type == ENTRYTYPE_MACv4 ||
   2357	    mc->entry_type == ENTRYTYPE_MACv6)
   2358		return ocelot_pgid_alloc(ocelot, 0, mc->ports);
   2359
   2360	list_for_each_entry(pgid, &ocelot->pgids, list) {
   2361		/* When searching for a nonreserved multicast PGID, ignore the
   2362		 * dummy PGID of zero that we have for MACv4/MACv6 entries
   2363		 */
   2364		if (pgid->index && pgid->ports == mc->ports) {
   2365			refcount_inc(&pgid->refcount);
   2366			return pgid;
   2367		}
   2368	}
   2369
   2370	/* Search for a free index in the nonreserved multicast PGID area */
   2371	for_each_nonreserved_multicast_dest_pgid(ocelot, index) {
   2372		bool used = false;
   2373
   2374		list_for_each_entry(pgid, &ocelot->pgids, list) {
   2375			if (pgid->index == index) {
   2376				used = true;
   2377				break;
   2378			}
   2379		}
   2380
   2381		if (!used)
   2382			return ocelot_pgid_alloc(ocelot, index, mc->ports);
   2383	}
   2384
   2385	return ERR_PTR(-ENOSPC);
   2386}
   2387
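        /* For MACv4/MACv6 entries, the destination port set is encoded into
         * the MAC address itself (see the datasheet quote in
         * ocelot_mdb_get_pgid()), overwriting the leading address bytes.
         */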
   2388static void ocelot_encode_ports_to_mdb(unsigned char *addr,
   2389				       struct ocelot_multicast *mc)
   2390{
   2391	ether_addr_copy(addr, mc->addr);
   2392
   2393	if (mc->entry_type == ENTRYTYPE_MACv4) {
   2394		addr[0] = 0;
   2395		addr[1] = mc->ports >> 8;
   2396		addr[2] = mc->ports & 0xff;
   2397	} else if (mc->entry_type == ENTRYTYPE_MACv6) {
   2398		addr[0] = mc->ports >> 8;
   2399		addr[1] = mc->ports & 0xff;
   2400	}
   2401}
   2402
   2403int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
   2404			const struct switchdev_obj_port_mdb *mdb,
   2405			const struct net_device *bridge)
   2406{
   2407	unsigned char addr[ETH_ALEN];
   2408	struct ocelot_multicast *mc;
   2409	struct ocelot_pgid *pgid;
   2410	u16 vid = mdb->vid;
   2411
   2412	if (!vid)
   2413		vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
   2414
   2415	mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
   2416	if (!mc) {
   2417		/* New entry */
   2418		mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL);
   2419		if (!mc)
   2420			return -ENOMEM;
   2421
   2422		mc->entry_type = ocelot_classify_mdb(mdb->addr);
   2423		ether_addr_copy(mc->addr, mdb->addr);
   2424		mc->vid = vid;
   2425
   2426		list_add_tail(&mc->list, &ocelot->multicast);
   2427	} else {
   2428		/* Existing entry. Clean up the current port mask from
   2429		 * hardware now, because we'll be modifying it.
   2430		 */
   2431		ocelot_pgid_free(ocelot, mc->pgid);
   2432		ocelot_encode_ports_to_mdb(addr, mc);
   2433		ocelot_mact_forget(ocelot, addr, vid);
   2434	}
   2435
   2436	mc->ports |= BIT(port);
   2437
   2438	pgid = ocelot_mdb_get_pgid(ocelot, mc);
   2439	if (IS_ERR(pgid)) {
   2440		dev_err(ocelot->dev,
   2441			"Cannot allocate PGID for mdb %pM vid %d\n",
   2442			mc->addr, mc->vid);
   2443		devm_kfree(ocelot->dev, mc);
   2444		return PTR_ERR(pgid);
   2445	}
   2446	mc->pgid = pgid;
   2447
   2448	ocelot_encode_ports_to_mdb(addr, mc);
   2449
   2450	if (mc->entry_type != ENTRYTYPE_MACv4 &&
   2451	    mc->entry_type != ENTRYTYPE_MACv6)
   2452		ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
   2453				 pgid->index);
   2454
   2455	return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
   2456				 mc->entry_type);
   2457}
   2458EXPORT_SYMBOL(ocelot_port_mdb_add);
   2459
   2460int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
   2461			const struct switchdev_obj_port_mdb *mdb,
   2462			const struct net_device *bridge)
   2463{
   2464	unsigned char addr[ETH_ALEN];
   2465	struct ocelot_multicast *mc;
   2466	struct ocelot_pgid *pgid;
   2467	u16 vid = mdb->vid;
   2468
   2469	if (!vid)
   2470		vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
   2471
   2472	mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
   2473	if (!mc)
   2474		return -ENOENT;
   2475
   2476	ocelot_encode_ports_to_mdb(addr, mc);
   2477	ocelot_mact_forget(ocelot, addr, vid);
   2478
   2479	ocelot_pgid_free(ocelot, mc->pgid);
   2480	mc->ports &= ~BIT(port);
   2481	if (!mc->ports) {
   2482		list_del(&mc->list);
   2483		devm_kfree(ocelot->dev, mc);
   2484		return 0;
   2485	}
   2486
   2487	/* We have a PGID with fewer ports now */
   2488	pgid = ocelot_mdb_get_pgid(ocelot, mc);
   2489	if (IS_ERR(pgid))
   2490		return PTR_ERR(pgid);
   2491	mc->pgid = pgid;
   2492
   2493	ocelot_encode_ports_to_mdb(addr, mc);
   2494
   2495	if (mc->entry_type != ENTRYTYPE_MACv4 &&
   2496	    mc->entry_type != ENTRYTYPE_MACv6)
   2497		ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
   2498				 pgid->index);
   2499
   2500	return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
   2501				 mc->entry_type);
   2502}
   2503EXPORT_SYMBOL(ocelot_port_mdb_del);
   2504
   2505int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
   2506			    struct net_device *bridge, int bridge_num,
   2507			    struct netlink_ext_ack *extack)
   2508{
   2509	struct ocelot_port *ocelot_port = ocelot->ports[port];
   2510	int err;
   2511
   2512	err = ocelot_single_vlan_aware_bridge(ocelot, extack);
   2513	if (err)
   2514		return err;
   2515
   2516	mutex_lock(&ocelot->fwd_domain_lock);
   2517
   2518	ocelot_port->bridge = bridge;
   2519	ocelot_port->bridge_num = bridge_num;
   2520
   2521	ocelot_apply_bridge_fwd_mask(ocelot, true);
   2522
   2523	mutex_unlock(&ocelot->fwd_domain_lock);
   2524
   2525	if (br_vlan_enabled(bridge))
   2526		return 0;
   2527
   2528	return ocelot_add_vlan_unaware_pvid(ocelot, port, bridge);
   2529}
   2530EXPORT_SYMBOL(ocelot_port_bridge_join);
   2531
   2532void ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
   2533			      struct net_device *bridge)
   2534{
   2535	struct ocelot_port *ocelot_port = ocelot->ports[port];
   2536
   2537	mutex_lock(&ocelot->fwd_domain_lock);
   2538
   2539	if (!br_vlan_enabled(bridge))
   2540		ocelot_del_vlan_unaware_pvid(ocelot, port, bridge);
   2541
   2542	ocelot_port->bridge = NULL;
   2543	ocelot_port->bridge_num = -1;
   2544
   2545	ocelot_port_set_pvid(ocelot, port, NULL);
   2546	ocelot_port_manage_port_tag(ocelot, port);
   2547	ocelot_apply_bridge_fwd_mask(ocelot, false);
   2548
   2549	mutex_unlock(&ocelot->fwd_domain_lock);
   2550}
   2551EXPORT_SYMBOL(ocelot_port_bridge_leave);
   2552
   2553static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
   2554{
   2555	unsigned long visited = GENMASK(ocelot->num_phys_ports - 1, 0);
   2556	int i, port, lag;
   2557
   2558	/* Reset destination and aggregation PGIDS */
   2559	for_each_unicast_dest_pgid(ocelot, port)
   2560		ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
   2561
   2562	for_each_aggr_pgid(ocelot, i)
   2563		ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
   2564				 ANA_PGID_PGID, i);
   2565
   2566	/* The visited ports bitmask holds the list of ports offloading any
   2567	 * bonding interface. Initially we mark all these ports as unvisited,
   2568	 * then every time we visit a port in this bitmask, we know that it is
   2569	 * the lowest numbered port, i.e. the one whose logical ID == physical
   2570	 * port ID == LAG ID. So we mark as visited all further ports in the
   2571	 * bitmask that are offloading the same bonding interface. This way,
   2572	 * we set up the aggregation PGIDs only once per bonding interface.
   2573	 */
   2574	for (port = 0; port < ocelot->num_phys_ports; port++) {
   2575		struct ocelot_port *ocelot_port = ocelot->ports[port];
   2576
   2577		if (!ocelot_port || !ocelot_port->bond)
   2578			continue;
   2579
   2580		visited &= ~BIT(port);
   2581	}
   2582
   2583	/* Now, set PGIDs for each active LAG */
   2584	for (lag = 0; lag < ocelot->num_phys_ports; lag++) {
   2585		struct net_device *bond = ocelot->ports[lag]->bond;
   2586		int num_active_ports = 0;
   2587		unsigned long bond_mask;
   2588		u8 aggr_idx[16];
   2589
   2590		if (!bond || (visited & BIT(lag)))
   2591			continue;
   2592
   2593		bond_mask = ocelot_get_bond_mask(ocelot, bond);
   2594
   2595		for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
   2596			struct ocelot_port *ocelot_port = ocelot->ports[port];
   2597
    2598			/* Destination mask */
   2599			ocelot_write_rix(ocelot, bond_mask,
   2600					 ANA_PGID_PGID, port);
   2601
   2602			if (ocelot_port->lag_tx_active)
   2603				aggr_idx[num_active_ports++] = port;
   2604		}
   2605
   2606		for_each_aggr_pgid(ocelot, i) {
   2607			u32 ac;
   2608
   2609			ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i);
   2610			ac &= ~bond_mask;
   2611			/* Don't do division by zero if there was no active
   2612			 * port. Just make all aggregation codes zero.
   2613			 */
   2614			if (num_active_ports)
   2615				ac |= BIT(aggr_idx[i % num_active_ports]);
   2616			ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i);
   2617		}
   2618
   2619		/* Mark all ports in the same LAG as visited to avoid applying
   2620		 * the same config again.
   2621		 */
   2622		for (port = lag; port < ocelot->num_phys_ports; port++) {
   2623			struct ocelot_port *ocelot_port = ocelot->ports[port];
   2624
   2625			if (!ocelot_port)
   2626				continue;
   2627
   2628			if (ocelot_port->bond == bond)
   2629				visited |= BIT(port);
   2630		}
   2631	}
   2632}
   2633
   2634/* When offloading a bonding interface, the switch ports configured under the
   2635 * same bond must have the same logical port ID, equal to the physical port ID
   2636 * of the lowest numbered physical port in that bond. Otherwise, in standalone/
   2637 * bridged mode, each port has a logical port ID equal to its physical port ID.
   2638 */
   2639static void ocelot_setup_logical_port_ids(struct ocelot *ocelot)
   2640{
   2641	int port;
   2642
   2643	for (port = 0; port < ocelot->num_phys_ports; port++) {
   2644		struct ocelot_port *ocelot_port = ocelot->ports[port];
   2645		struct net_device *bond;
   2646
   2647		if (!ocelot_port)
   2648			continue;
   2649
   2650		bond = ocelot_port->bond;
   2651		if (bond) {
   2652			int lag = ocelot_bond_get_id(ocelot, bond);
   2653
   2654			ocelot_rmw_gix(ocelot,
   2655				       ANA_PORT_PORT_CFG_PORTID_VAL(lag),
   2656				       ANA_PORT_PORT_CFG_PORTID_VAL_M,
   2657				       ANA_PORT_PORT_CFG, port);
   2658		} else {
   2659			ocelot_rmw_gix(ocelot,
   2660				       ANA_PORT_PORT_CFG_PORTID_VAL(port),
   2661				       ANA_PORT_PORT_CFG_PORTID_VAL_M,
   2662				       ANA_PORT_PORT_CFG, port);
   2663		}
   2664	}
   2665}
   2666
   2667static int ocelot_migrate_mc(struct ocelot *ocelot, struct ocelot_multicast *mc,
   2668			     unsigned long from_mask, unsigned long to_mask)
   2669{
   2670	unsigned char addr[ETH_ALEN];
   2671	struct ocelot_pgid *pgid;
   2672	u16 vid = mc->vid;
   2673
   2674	dev_dbg(ocelot->dev,
   2675		"Migrating multicast %pM vid %d from port mask 0x%lx to 0x%lx\n",
   2676		mc->addr, mc->vid, from_mask, to_mask);
   2677
   2678	/* First clean up the current port mask from hardware, because
   2679	 * we'll be modifying it.
   2680	 */
   2681	ocelot_pgid_free(ocelot, mc->pgid);
   2682	ocelot_encode_ports_to_mdb(addr, mc);
   2683	ocelot_mact_forget(ocelot, addr, vid);
   2684
   2685	mc->ports &= ~from_mask;
   2686	mc->ports |= to_mask;
   2687
   2688	pgid = ocelot_mdb_get_pgid(ocelot, mc);
   2689	if (IS_ERR(pgid)) {
   2690		dev_err(ocelot->dev,
   2691			"Cannot allocate PGID for mdb %pM vid %d\n",
   2692			mc->addr, mc->vid);
   2693		devm_kfree(ocelot->dev, mc);
   2694		return PTR_ERR(pgid);
   2695	}
   2696	mc->pgid = pgid;
   2697
   2698	ocelot_encode_ports_to_mdb(addr, mc);
   2699
   2700	if (mc->entry_type != ENTRYTYPE_MACv4 &&
   2701	    mc->entry_type != ENTRYTYPE_MACv6)
   2702		ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
   2703				 pgid->index);
   2704
   2705	return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
   2706				 mc->entry_type);
   2707}
   2708
   2709int ocelot_migrate_mdbs(struct ocelot *ocelot, unsigned long from_mask,
   2710			unsigned long to_mask)
   2711{
   2712	struct ocelot_multicast *mc;
   2713	int err;
   2714
   2715	list_for_each_entry(mc, &ocelot->multicast, list) {
   2716		if (!(mc->ports & from_mask))
   2717			continue;
   2718
   2719		err = ocelot_migrate_mc(ocelot, mc, from_mask, to_mask);
   2720		if (err)
   2721			return err;
   2722	}
   2723
   2724	return 0;
   2725}
   2726EXPORT_SYMBOL_GPL(ocelot_migrate_mdbs);
   2727
   2728/* Documentation for PORTID_VAL says:
   2729 *     Logical port number for front port. If port is not a member of a LLAG,
   2730 *     then PORTID must be set to the physical port number.
   2731 *     If port is a member of a LLAG, then PORTID must be set to the common
   2732 *     PORTID_VAL used for all member ports of the LLAG.
   2733 *     The value must not exceed the number of physical ports on the device.
   2734 *
   2735 * This means we have little choice but to migrate FDB entries pointing towards
   2736 * a logical port when that changes.
   2737 */
   2738static void ocelot_migrate_lag_fdbs(struct ocelot *ocelot,
   2739				    struct net_device *bond,
   2740				    int lag)
   2741{
   2742	struct ocelot_lag_fdb *fdb;
   2743	int err;
   2744
   2745	lockdep_assert_held(&ocelot->fwd_domain_lock);
   2746
   2747	list_for_each_entry(fdb, &ocelot->lag_fdbs, list) {
   2748		if (fdb->bond != bond)
   2749			continue;
   2750
   2751		err = ocelot_mact_forget(ocelot, fdb->addr, fdb->vid);
   2752		if (err) {
   2753			dev_err(ocelot->dev,
   2754				"failed to delete LAG %s FDB %pM vid %d: %pe\n",
   2755				bond->name, fdb->addr, fdb->vid, ERR_PTR(err));
   2756		}
   2757
   2758		err = ocelot_mact_learn(ocelot, lag, fdb->addr, fdb->vid,
   2759					ENTRYTYPE_LOCKED);
   2760		if (err) {
   2761			dev_err(ocelot->dev,
   2762				"failed to migrate LAG %s FDB %pM vid %d: %pe\n",
   2763				bond->name, fdb->addr, fdb->vid, ERR_PTR(err));
   2764		}
   2765	}
   2766}
   2767
   2768int ocelot_port_lag_join(struct ocelot *ocelot, int port,
   2769			 struct net_device *bond,
   2770			 struct netdev_lag_upper_info *info)
   2771{
   2772	if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
   2773		return -EOPNOTSUPP;
   2774
   2775	mutex_lock(&ocelot->fwd_domain_lock);
   2776
   2777	ocelot->ports[port]->bond = bond;
   2778
   2779	ocelot_setup_logical_port_ids(ocelot);
   2780	ocelot_apply_bridge_fwd_mask(ocelot, true);
   2781	ocelot_set_aggr_pgids(ocelot);
   2782
   2783	mutex_unlock(&ocelot->fwd_domain_lock);
   2784
   2785	return 0;
   2786}
   2787EXPORT_SYMBOL(ocelot_port_lag_join);
   2788
   2789void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
   2790			   struct net_device *bond)
   2791{
   2792	int old_lag_id, new_lag_id;
   2793
   2794	mutex_lock(&ocelot->fwd_domain_lock);
   2795
   2796	old_lag_id = ocelot_bond_get_id(ocelot, bond);
   2797
   2798	ocelot->ports[port]->bond = NULL;
   2799
   2800	ocelot_setup_logical_port_ids(ocelot);
   2801	ocelot_apply_bridge_fwd_mask(ocelot, false);
   2802	ocelot_set_aggr_pgids(ocelot);
   2803
   2804	new_lag_id = ocelot_bond_get_id(ocelot, bond);
   2805
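        	/* If the port that left was the one lending its physical ID to
        	 * the LAG, the logical port ID changes and the bond's FDB entries
        	 * must be migrated to follow it.
        	 */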
   2806	if (new_lag_id >= 0 && old_lag_id != new_lag_id)
   2807		ocelot_migrate_lag_fdbs(ocelot, bond, new_lag_id);
   2808
   2809	mutex_unlock(&ocelot->fwd_domain_lock);
   2810}
   2811EXPORT_SYMBOL(ocelot_port_lag_leave);
   2812
   2813void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active)
   2814{
   2815	struct ocelot_port *ocelot_port = ocelot->ports[port];
   2816
   2817	mutex_lock(&ocelot->fwd_domain_lock);
   2818
   2819	ocelot_port->lag_tx_active = lag_tx_active;
   2820
   2821	/* Rebalance the LAGs */
   2822	ocelot_set_aggr_pgids(ocelot);
   2823
   2824	mutex_unlock(&ocelot->fwd_domain_lock);
   2825}
   2826EXPORT_SYMBOL(ocelot_port_lag_change);
   2827
   2828int ocelot_lag_fdb_add(struct ocelot *ocelot, struct net_device *bond,
   2829		       const unsigned char *addr, u16 vid,
   2830		       const struct net_device *bridge)
   2831{
   2832	struct ocelot_lag_fdb *fdb;
   2833	int lag, err;
   2834
   2835	fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
   2836	if (!fdb)
   2837		return -ENOMEM;
   2838
   2839	mutex_lock(&ocelot->fwd_domain_lock);
   2840
   2841	if (!vid)
   2842		vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
   2843
   2844	ether_addr_copy(fdb->addr, addr);
   2845	fdb->vid = vid;
   2846	fdb->bond = bond;
   2847
   2848	lag = ocelot_bond_get_id(ocelot, bond);
   2849
   2850	err = ocelot_mact_learn(ocelot, lag, addr, vid, ENTRYTYPE_LOCKED);
   2851	if (err) {
   2852		mutex_unlock(&ocelot->fwd_domain_lock);
   2853		kfree(fdb);
   2854		return err;
   2855	}
   2856
   2857	list_add_tail(&fdb->list, &ocelot->lag_fdbs);
   2858	mutex_unlock(&ocelot->fwd_domain_lock);
   2859
   2860	return 0;
   2861}
   2862EXPORT_SYMBOL_GPL(ocelot_lag_fdb_add);
   2863
   2864int ocelot_lag_fdb_del(struct ocelot *ocelot, struct net_device *bond,
   2865		       const unsigned char *addr, u16 vid,
   2866		       const struct net_device *bridge)
   2867{
   2868	struct ocelot_lag_fdb *fdb, *tmp;
   2869
   2870	mutex_lock(&ocelot->fwd_domain_lock);
   2871
   2872	if (!vid)
   2873		vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
   2874
   2875	list_for_each_entry_safe(fdb, tmp, &ocelot->lag_fdbs, list) {
   2876		if (!ether_addr_equal(fdb->addr, addr) || fdb->vid != vid ||
   2877		    fdb->bond != bond)
   2878			continue;
   2879
   2880		ocelot_mact_forget(ocelot, addr, vid);
   2881		list_del(&fdb->list);
   2882		mutex_unlock(&ocelot->fwd_domain_lock);
   2883		kfree(fdb);
   2884
   2885		return 0;
   2886	}
   2887
   2888	mutex_unlock(&ocelot->fwd_domain_lock);
   2889
   2890	return -ENOENT;
   2891}
   2892EXPORT_SYMBOL_GPL(ocelot_lag_fdb_del);
   2893
   2894/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
   2895 * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
   2896 * In the special case that it's the NPI port that we're configuring, the
   2897 * length of the tag and optional prefix needs to be accounted for privately,
   2898 * in order to be able to sustain communication at the requested @sdu.
   2899 */
   2900void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
   2901{
   2902	struct ocelot_port *ocelot_port = ocelot->ports[port];
   2903	int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;
   2904	int pause_start, pause_stop;
   2905	int atop, atop_tot;
   2906
   2907	if (port == ocelot->npi) {
   2908		maxlen += OCELOT_TAG_LEN;
   2909
   2910		if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT)
   2911			maxlen += OCELOT_SHORT_PREFIX_LEN;
   2912		else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG)
   2913			maxlen += OCELOT_LONG_PREFIX_LEN;
   2914	}
   2915
   2916	ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG);
   2917
   2918	/* Set Pause watermark hysteresis */
   2919	pause_start = 6 * maxlen / OCELOT_BUFFER_CELL_SZ;
   2920	pause_stop = 4 * maxlen / OCELOT_BUFFER_CELL_SZ;
   2921	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_START,
   2922			    pause_start);
   2923	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_STOP,
   2924			    pause_stop);
   2925
   2926	/* Tail dropping watermarks */
   2927	atop_tot = (ocelot->packet_buffer_size - 9 * maxlen) /
   2928		   OCELOT_BUFFER_CELL_SZ;
   2929	atop = (9 * maxlen) / OCELOT_BUFFER_CELL_SZ;
   2930	ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port);
   2931	ocelot_write(ocelot, ocelot->ops->wm_enc(atop_tot), SYS_ATOP_TOT_CFG);
   2932}
   2933EXPORT_SYMBOL(ocelot_port_set_maxlen);
   2934
   2935int ocelot_get_max_mtu(struct ocelot *ocelot, int port)
   2936{
   2937	int max_mtu = 65535 - ETH_HLEN - ETH_FCS_LEN;
   2938
   2939	if (port == ocelot->npi) {
   2940		max_mtu -= OCELOT_TAG_LEN;
   2941
   2942		if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT)
   2943			max_mtu -= OCELOT_SHORT_PREFIX_LEN;
   2944		else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG)
   2945			max_mtu -= OCELOT_LONG_PREFIX_LEN;
   2946	}
   2947
   2948	return max_mtu;
   2949}
   2950EXPORT_SYMBOL(ocelot_get_max_mtu);
   2951
   2952static void ocelot_port_set_learning(struct ocelot *ocelot, int port,
   2953				     bool enabled)
   2954{
   2955	struct ocelot_port *ocelot_port = ocelot->ports[port];
   2956	u32 val = 0;
   2957
   2958	if (enabled)
   2959		val = ANA_PORT_PORT_CFG_LEARN_ENA;
   2960
   2961	ocelot_rmw_gix(ocelot, val, ANA_PORT_PORT_CFG_LEARN_ENA,
   2962		       ANA_PORT_PORT_CFG, port);
   2963
   2964	ocelot_port->learn_ena = enabled;
   2965}
   2966
   2967static void ocelot_port_set_ucast_flood(struct ocelot *ocelot, int port,
   2968					bool enabled)
   2969{
   2970	u32 val = 0;
   2971
   2972	if (enabled)
   2973		val = BIT(port);
   2974
   2975	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_UC);
   2976}
   2977
   2978static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
   2979					bool enabled)
   2980{
   2981	u32 val = 0;
   2982
   2983	if (enabled)
   2984		val = BIT(port);
   2985
   2986	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC);
   2987	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV4);
   2988	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV6);
   2989}
   2990
   2991static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,
   2992					bool enabled)
   2993{
   2994	u32 val = 0;
   2995
   2996	if (enabled)
   2997		val = BIT(port);
   2998
   2999	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_BC);
   3000}
   3001
   3002int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port,
   3003				 struct switchdev_brport_flags flags)
   3004{
   3005	if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
   3006			   BR_BCAST_FLOOD))
   3007		return -EINVAL;
   3008
   3009	return 0;
   3010}
   3011EXPORT_SYMBOL(ocelot_port_pre_bridge_flags);
   3012
   3013void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
   3014			      struct switchdev_brport_flags flags)
   3015{
   3016	if (flags.mask & BR_LEARNING)
   3017		ocelot_port_set_learning(ocelot, port,
   3018					 !!(flags.val & BR_LEARNING));
   3019
   3020	if (flags.mask & BR_FLOOD)
   3021		ocelot_port_set_ucast_flood(ocelot, port,
   3022					    !!(flags.val & BR_FLOOD));
   3023
   3024	if (flags.mask & BR_MCAST_FLOOD)
   3025		ocelot_port_set_mcast_flood(ocelot, port,
   3026					    !!(flags.val & BR_MCAST_FLOOD));
   3027
   3028	if (flags.mask & BR_BCAST_FLOOD)
   3029		ocelot_port_set_bcast_flood(ocelot, port,
   3030					    !!(flags.val & BR_BCAST_FLOOD));
   3031}
   3032EXPORT_SYMBOL(ocelot_port_bridge_flags);
   3033
   3034int ocelot_port_get_default_prio(struct ocelot *ocelot, int port)
   3035{
   3036	int val = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
   3037
   3038	return ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(val);
   3039}
   3040EXPORT_SYMBOL_GPL(ocelot_port_get_default_prio);
   3041
   3042int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio)
   3043{
   3044	if (prio >= OCELOT_NUM_TC)
   3045		return -ERANGE;
   3046
   3047	ocelot_rmw_gix(ocelot,
   3048		       ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL(prio),
   3049		       ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_M,
   3050		       ANA_PORT_QOS_CFG,
   3051		       port);
   3052
   3053	return 0;
   3054}
   3055EXPORT_SYMBOL_GPL(ocelot_port_set_default_prio);
   3056
   3057int ocelot_port_get_dscp_prio(struct ocelot *ocelot, int port, u8 dscp)
   3058{
   3059	int qos_cfg = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
   3060	int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
   3061
   3062	/* Return error if DSCP prioritization isn't enabled */
   3063	if (!(qos_cfg & ANA_PORT_QOS_CFG_QOS_DSCP_ENA))
   3064		return -EOPNOTSUPP;
   3065
   3066	if (qos_cfg & ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA) {
   3067		dscp = ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_X(dscp_cfg);
   3068		/* Re-read ANA_DSCP_CFG for the translated DSCP */
   3069		dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
   3070	}
   3071
   3072	/* If the DSCP value is not trusted, the QoS classification falls back
   3073	 * to VLAN PCP or port-based default.
   3074	 */
   3075	if (!(dscp_cfg & ANA_DSCP_CFG_DSCP_TRUST_ENA))
   3076		return -EOPNOTSUPP;
   3077
   3078	return ANA_DSCP_CFG_QOS_DSCP_VAL_X(dscp_cfg);
   3079}
   3080EXPORT_SYMBOL_GPL(ocelot_port_get_dscp_prio);
   3081
   3082int ocelot_port_add_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio)
   3083{
   3084	int mask, val;
   3085
   3086	if (prio >= OCELOT_NUM_TC)
   3087		return -ERANGE;
   3088
   3089	/* There is at least one app table priority (this one), so we need to
   3090	 * make sure DSCP prioritization is enabled on the port.
   3091	 * Also make sure DSCP translation is disabled
   3092	 * (dcbnl doesn't support it).
   3093	 */
   3094	mask = ANA_PORT_QOS_CFG_QOS_DSCP_ENA |
   3095	       ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA;
   3096
   3097	ocelot_rmw_gix(ocelot, ANA_PORT_QOS_CFG_QOS_DSCP_ENA, mask,
   3098		       ANA_PORT_QOS_CFG, port);
   3099
   3100	/* Trust this DSCP value and map it to the given QoS class */
   3101	val = ANA_DSCP_CFG_DSCP_TRUST_ENA | ANA_DSCP_CFG_QOS_DSCP_VAL(prio);
   3102
   3103	ocelot_write_rix(ocelot, val, ANA_DSCP_CFG, dscp);
   3104
   3105	return 0;
   3106}
   3107EXPORT_SYMBOL_GPL(ocelot_port_add_dscp_prio);
   3108
   3109int ocelot_port_del_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio)
   3110{
   3111	int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
   3112	int mask, i;
   3113
   3114	/* During a "dcb app replace" command, the new app table entry will be
   3115	 * added first, then the old one will be deleted. But the hardware only
   3116	 * supports one QoS class per DSCP value (duh), so if we blindly delete
   3117	 * the app table entry for this DSCP value, we end up deleting the
   3118	 * entry with the new priority. Avoid that by checking whether user
   3119	 * space wants to delete the priority which is currently configured, or
   3120	 * something else which is no longer current.
   3121	 */
   3122	if (ANA_DSCP_CFG_QOS_DSCP_VAL_X(dscp_cfg) != prio)
   3123		return 0;
   3124
   3125	/* Untrust this DSCP value */
   3126	ocelot_write_rix(ocelot, 0, ANA_DSCP_CFG, dscp);
   3127
   3128	for (i = 0; i < 64; i++) {
   3129		int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, i);
   3130
   3131		/* There are still app table entries on the port, so we need to
   3132		 * keep DSCP enabled, nothing to do.
   3133		 */
   3134		if (dscp_cfg & ANA_DSCP_CFG_DSCP_TRUST_ENA)
   3135			return 0;
   3136	}
   3137
   3138	/* Disable DSCP QoS classification if there isn't any trusted
   3139	 * DSCP value left.
   3140	 */
   3141	mask = ANA_PORT_QOS_CFG_QOS_DSCP_ENA |
   3142	       ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA;
   3143
   3144	ocelot_rmw_gix(ocelot, 0, mask, ANA_PORT_QOS_CFG, port);
   3145
   3146	return 0;
   3147}
   3148EXPORT_SYMBOL_GPL(ocelot_port_del_dscp_prio);
   3149
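        /* The driver supports a single mirror-to port at a time. Keep a
         * refcounted handle to it and reject requests towards a different
         * egress port.
         */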
   3150struct ocelot_mirror *ocelot_mirror_get(struct ocelot *ocelot, int to,
   3151					struct netlink_ext_ack *extack)
   3152{
   3153	struct ocelot_mirror *m = ocelot->mirror;
   3154
   3155	if (m) {
   3156		if (m->to != to) {
   3157			NL_SET_ERR_MSG_MOD(extack,
   3158					   "Mirroring already configured towards different egress port");
   3159			return ERR_PTR(-EBUSY);
   3160		}
   3161
   3162		refcount_inc(&m->refcount);
   3163		return m;
   3164	}
   3165
   3166	m = kzalloc(sizeof(*m), GFP_KERNEL);
   3167	if (!m)
   3168		return ERR_PTR(-ENOMEM);
   3169
   3170	m->to = to;
   3171	refcount_set(&m->refcount, 1);
   3172	ocelot->mirror = m;
   3173
   3174	/* Program the mirror port to hardware */
   3175	ocelot_write(ocelot, BIT(to), ANA_MIRRORPORTS);
   3176
   3177	return m;
   3178}
   3179
   3180void ocelot_mirror_put(struct ocelot *ocelot)
   3181{
   3182	struct ocelot_mirror *m = ocelot->mirror;
   3183
   3184	if (!refcount_dec_and_test(&m->refcount))
   3185		return;
   3186
   3187	ocelot_write(ocelot, 0, ANA_MIRRORPORTS);
   3188	ocelot->mirror = NULL;
   3189	kfree(m);
   3190}
   3191
   3192int ocelot_port_mirror_add(struct ocelot *ocelot, int from, int to,
   3193			   bool ingress, struct netlink_ext_ack *extack)
   3194{
   3195	struct ocelot_mirror *m = ocelot_mirror_get(ocelot, to, extack);
   3196
   3197	if (IS_ERR(m))
   3198		return PTR_ERR(m);
   3199
   3200	if (ingress) {
   3201		ocelot_rmw_gix(ocelot, ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
   3202			       ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
   3203			       ANA_PORT_PORT_CFG, from);
   3204	} else {
   3205		ocelot_rmw(ocelot, BIT(from), BIT(from),
   3206			   ANA_EMIRRORPORTS);
   3207	}
   3208
   3209	return 0;
   3210}
   3211EXPORT_SYMBOL_GPL(ocelot_port_mirror_add);
   3212
   3213void ocelot_port_mirror_del(struct ocelot *ocelot, int from, bool ingress)
   3214{
   3215	if (ingress) {
   3216		ocelot_rmw_gix(ocelot, 0, ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
   3217			       ANA_PORT_PORT_CFG, from);
   3218	} else {
   3219		ocelot_rmw(ocelot, 0, BIT(from), ANA_EMIRRORPORTS);
   3220	}
   3221
   3222	ocelot_mirror_put(ocelot);
   3223}
   3224EXPORT_SYMBOL_GPL(ocelot_port_mirror_del);
   3225
   3226void ocelot_init_port(struct ocelot *ocelot, int port)
   3227{
   3228	struct ocelot_port *ocelot_port = ocelot->ports[port];
   3229
   3230	skb_queue_head_init(&ocelot_port->tx_skbs);
   3231
   3232	/* Basic L2 initialization */
   3233
   3234	/* Set MAC IFG Gaps
   3235	 * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0
   3236	 * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5
   3237	 */
   3238	ocelot_port_writel(ocelot_port, DEV_MAC_IFG_CFG_TX_IFG(5),
   3239			   DEV_MAC_IFG_CFG);
   3240
    3241	/* Load seed (0) and set MAC HDX late collision */
   3242	ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) |
   3243			   DEV_MAC_HDX_CFG_SEED_LOAD,
   3244			   DEV_MAC_HDX_CFG);
   3245	mdelay(1);
   3246	ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67),
   3247			   DEV_MAC_HDX_CFG);
   3248
   3249	/* Set Max Length and maximum tags allowed */
   3250	ocelot_port_set_maxlen(ocelot, port, ETH_DATA_LEN);
   3251	ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
   3252			   DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
   3253			   DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA |
   3254			   DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
   3255			   DEV_MAC_TAGS_CFG);
   3256
   3257	/* Set SMAC of Pause frame (00:00:00:00:00:00) */
   3258	ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_HIGH_CFG);
   3259	ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_LOW_CFG);
   3260
   3261	/* Enable transmission of pause frames */
   3262	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
   3263
   3264	/* Drop frames with multicast source address */
   3265	ocelot_rmw_gix(ocelot, ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
   3266		       ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
   3267		       ANA_PORT_DROP_CFG, port);
   3268
   3269	/* Set default VLAN and tag type to 8021Q. */
   3270	ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q),
   3271		       REW_PORT_VLAN_CFG_PORT_TPID_M,
   3272		       REW_PORT_VLAN_CFG, port);
   3273
   3274	/* Disable source address learning for standalone mode */
   3275	ocelot_port_set_learning(ocelot, port, false);
   3276
   3277	/* Set the port's initial logical port ID value, enable receiving
   3278	 * frames on it, and configure the MAC address learning type to
   3279	 * automatic.
   3280	 */
   3281	ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
   3282			 ANA_PORT_PORT_CFG_RECV_ENA |
   3283			 ANA_PORT_PORT_CFG_PORTID_VAL(port),
   3284			 ANA_PORT_PORT_CFG, port);
   3285
   3286	/* Enable vcap lookups */
   3287	ocelot_vcap_enable(ocelot, port);
   3288}
   3289EXPORT_SYMBOL(ocelot_init_port);
   3290
   3291/* Configure and enable the CPU port module, which is a set of queues
   3292 * accessible through register MMIO, frame DMA or Ethernet (in case
   3293 * NPI mode is used).
   3294 */
   3295static void ocelot_cpu_port_init(struct ocelot *ocelot)
   3296{
   3297	int cpu = ocelot->num_phys_ports;
   3298
   3299	/* The unicast destination PGID for the CPU port module is unused */
   3300	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
   3301	/* Instead set up a multicast destination PGID for traffic copied to
   3302	 * the CPU. Whitelisted MAC addresses like the port netdevice MAC
   3303	 * addresses will be copied to the CPU via this PGID.
   3304	 */
   3305	ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
   3306	ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
   3307			 ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
   3308			 ANA_PORT_PORT_CFG, cpu);
   3309
   3310	/* Enable CPU port module */
   3311	ocelot_fields_write(ocelot, cpu, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
   3312	/* CPU port Injection/Extraction configuration */
   3313	ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_XTR_HDR,
   3314			    OCELOT_TAG_PREFIX_NONE);
   3315	ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_INJ_HDR,
   3316			    OCELOT_TAG_PREFIX_NONE);
   3317
   3318	/* Configure the CPU port to be VLAN aware */
   3319	ocelot_write_gix(ocelot,
   3320			 ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_STANDALONE_PVID) |
   3321			 ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
   3322			 ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
   3323			 ANA_PORT_VLAN_CFG, cpu);
   3324}
   3325
   3326static void ocelot_detect_features(struct ocelot *ocelot)
   3327{
   3328	int mmgt, eq_ctrl;
   3329
   3330	/* For Ocelot, Felix, Seville, Serval etc, SYS:MMGT:MMGT:FREECNT holds
   3331	 * the number of 240-byte free memory words (aka 4-cell chunks) and not
   3332	 * 192 bytes as the documentation incorrectly says.
   3333	 */
   3334	mmgt = ocelot_read(ocelot, SYS_MMGT);
   3335	ocelot->packet_buffer_size = 240 * SYS_MMGT_FREECNT(mmgt);
   3336
   3337	eq_ctrl = ocelot_read(ocelot, QSYS_EQ_CTRL);
   3338	ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl);
   3339}
   3340
   3341int ocelot_init(struct ocelot *ocelot)
   3342{
   3343	const struct ocelot_stat_layout *stat;
   3344	char queue_name[32];
   3345	int i, ret;
   3346	u32 port;
   3347
   3348	if (ocelot->ops->reset) {
   3349		ret = ocelot->ops->reset(ocelot);
   3350		if (ret) {
   3351			dev_err(ocelot->dev, "Switch reset failed\n");
   3352			return ret;
   3353		}
   3354	}
   3355
   3356	ocelot->num_stats = 0;
   3357	for_each_stat(ocelot, stat)
   3358		ocelot->num_stats++;
   3359
   3360	ocelot->stats = devm_kcalloc(ocelot->dev,
   3361				     ocelot->num_phys_ports * ocelot->num_stats,
   3362				     sizeof(u64), GFP_KERNEL);
   3363	if (!ocelot->stats)
   3364		return -ENOMEM;
   3365
   3366	mutex_init(&ocelot->stats_lock);
   3367	mutex_init(&ocelot->ptp_lock);
   3368	mutex_init(&ocelot->mact_lock);
   3369	mutex_init(&ocelot->fwd_domain_lock);
   3370	spin_lock_init(&ocelot->ptp_clock_lock);
   3371	spin_lock_init(&ocelot->ts_id_lock);
   3372	snprintf(queue_name, sizeof(queue_name), "%s-stats",
   3373		 dev_name(ocelot->dev));
   3374	ocelot->stats_queue = create_singlethread_workqueue(queue_name);
   3375	if (!ocelot->stats_queue)
   3376		return -ENOMEM;
   3377
   3378	ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0);
   3379	if (!ocelot->owq) {
   3380		destroy_workqueue(ocelot->stats_queue);
   3381		return -ENOMEM;
   3382	}
   3383
   3384	INIT_LIST_HEAD(&ocelot->multicast);
   3385	INIT_LIST_HEAD(&ocelot->pgids);
   3386	INIT_LIST_HEAD(&ocelot->vlans);
   3387	INIT_LIST_HEAD(&ocelot->lag_fdbs);
   3388	ocelot_detect_features(ocelot);
   3389	ocelot_mact_init(ocelot);
   3390	ocelot_vlan_init(ocelot);
   3391	ocelot_vcap_init(ocelot);
   3392	ocelot_cpu_port_init(ocelot);
   3393
   3394	if (ocelot->ops->psfp_init)
   3395		ocelot->ops->psfp_init(ocelot);
   3396
   3397	for (port = 0; port < ocelot->num_phys_ports; port++) {
   3398		/* Clear all counters (5 groups) */
   3399		ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) |
   3400				     SYS_STAT_CFG_STAT_CLEAR_SHOT(0x7f),
   3401			     SYS_STAT_CFG);
   3402	}
   3403
   3404	/* Only use S-Tag */
   3405	ocelot_write(ocelot, ETH_P_8021AD, SYS_VLAN_ETYPE_CFG);
   3406
   3407	/* Aggregation mode */
   3408	ocelot_write(ocelot, ANA_AGGR_CFG_AC_SMAC_ENA |
   3409			     ANA_AGGR_CFG_AC_DMAC_ENA |
   3410			     ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA |
   3411			     ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA |
   3412			     ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA |
   3413			     ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA,
   3414			     ANA_AGGR_CFG);
   3415
    3416	/* Set the MAC ageing time to its default value. An entry is aged
    3417	 * out after 2 * AGE_PERIOD.
    3418	 */
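	/* With the bridge default ageing time of 300 seconds
	 * (BR_DEFAULT_AGEING_TIME == 300 * HZ), AGE_PERIOD is programmed to
	 * 150, so entries are aged out after 2 * 150 = 300 seconds.
	 */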
   3419	ocelot_write(ocelot,
   3420		     ANA_AUTOAGE_AGE_PERIOD(BR_DEFAULT_AGEING_TIME / 2 / HZ),
   3421		     ANA_AUTOAGE);
   3422
   3423	/* Disable learning for frames discarded by VLAN ingress filtering */
   3424	regmap_field_write(ocelot->regfields[ANA_ADVLEARN_VLAN_CHK], 1);
   3425
    3426	/* Set up frame ageing - fixed value "2 sec" - in 6.5 us units */
   3427	ocelot_write(ocelot, SYS_FRM_AGING_AGE_TX_ENA |
   3428		     SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING);
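	/* 307692 units * 6.5 us/unit ~= 2,000,000 us, i.e. the fixed 2 second
	 * ageing time mentioned above.
	 */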
   3429
   3430	/* Setup flooding PGIDs */
   3431	for (i = 0; i < ocelot->num_flooding_pgids; i++)
   3432		ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
   3433				 ANA_FLOODING_FLD_BROADCAST(PGID_BC) |
   3434				 ANA_FLOODING_FLD_UNICAST(PGID_UC),
   3435				 ANA_FLOODING, i);
   3436	ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
   3437		     ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) |
   3438		     ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) |
   3439		     ANA_FLOODING_IPMC_FLD_MC4_CTRL(PGID_MC),
   3440		     ANA_FLOODING_IPMC);
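	/* Unknown unicast, multicast and broadcast are flooded according to
	 * PGID_UC, PGID_MC and PGID_BC respectively; IPv4/IPv6 multicast data
	 * frames use dedicated PGIDs while their control frames follow
	 * PGID_MC.
	 */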
   3441
   3442	for (port = 0; port < ocelot->num_phys_ports; port++) {
   3443		/* Transmit the frame to the local port. */
   3444		ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
   3445		/* Do not forward BPDU frames to the front ports. */
   3446		ocelot_write_gix(ocelot,
   3447				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
   3448				 ANA_PORT_CPU_FWD_BPDU_CFG,
   3449				 port);
   3450		/* Ensure bridging is disabled */
   3451		ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port);
   3452	}
   3453
   3454	for_each_nonreserved_multicast_dest_pgid(ocelot, i) {
   3455		u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0));
   3456
   3457		ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
   3458	}
   3459
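	/* An empty port mask for PGID_BLACKHOLE means frames classified to it
	 * are forwarded nowhere, i.e. dropped.
	 */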
   3460	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_BLACKHOLE);
   3461
   3462	/* Allow broadcast and unknown L2 multicast to the CPU. */
   3463	ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
   3464		       ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
   3465		       ANA_PGID_PGID, PGID_MC);
   3466	ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
   3467		       ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
   3468		       ANA_PGID_PGID, PGID_BC);
   3469	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4);
   3470	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6);
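	/* In the PGID port masks, BIT(ocelot->num_phys_ports) is the CPU port
	 * module; unknown IPv4/IPv6 multicast data is not flooded anywhere by
	 * default (masks cleared above).
	 */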
   3471
    3472	/* Allow manual injection via the DEVCPU_QS registers, and byte-swap
    3473	 * the endianness of data accessed through these registers.
    3474	 */
   3475	ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_BYTE_SWAP |
   3476			 QS_INJ_GRP_CFG_MODE(1), QS_INJ_GRP_CFG, 0);
   3477	ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_BYTE_SWAP |
   3478			 QS_XTR_GRP_CFG_MODE(1), QS_XTR_GRP_CFG, 0);
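	/* Map frames copied or redirected to the CPU onto extraction queues:
	 * queue 2 for learning, mirroring and MAC table copies, queue 6 for
	 * control protocols (BPDUs, GARP, IGMP, MLD, ...).
	 */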
   3479	ocelot_write(ocelot, ANA_CPUQ_CFG_CPUQ_MIRROR(2) |
   3480		     ANA_CPUQ_CFG_CPUQ_LRN(2) |
   3481		     ANA_CPUQ_CFG_CPUQ_MAC_COPY(2) |
   3482		     ANA_CPUQ_CFG_CPUQ_SRC_COPY(2) |
   3483		     ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(2) |
   3484		     ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(6) |
   3485		     ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(6) |
   3486		     ANA_CPUQ_CFG_CPUQ_IGMP(6) |
   3487		     ANA_CPUQ_CFG_CPUQ_MLD(6), ANA_CPUQ_CFG);
   3488	for (i = 0; i < 16; i++)
   3489		ocelot_write_rix(ocelot, ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(6) |
   3490				 ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
   3491				 ANA_CPUQ_8021_CFG, i);
   3492
   3493	ret = ocelot_prepare_stats_regions(ocelot);
   3494	if (ret) {
   3495		destroy_workqueue(ocelot->stats_queue);
   3496		destroy_workqueue(ocelot->owq);
   3497		return ret;
   3498	}
   3499
   3500	INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
   3501	queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
   3502			   OCELOT_STATS_CHECK_DELAY);
   3503
   3504	return 0;
   3505}
   3506EXPORT_SYMBOL(ocelot_init);
   3507
   3508void ocelot_deinit(struct ocelot *ocelot)
   3509{
   3510	cancel_delayed_work(&ocelot->stats_work);
   3511	destroy_workqueue(ocelot->stats_queue);
   3512	destroy_workqueue(ocelot->owq);
   3513	mutex_destroy(&ocelot->stats_lock);
   3514}
   3515EXPORT_SYMBOL(ocelot_deinit);
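
/* Illustrative usage sketch, not part of this file: a switch driver built on
 * this library is expected to fill in struct ocelot (dev, register maps, ops,
 * num_phys_ports, ...) and call ocelot_init() before registering its ports,
 * undoing it with ocelot_deinit() on the error and remove paths. The function
 * below is a hypothetical example (name and setup details are made up), kept
 * under #if 0 so it is never compiled.
 */
#if 0
static int example_switch_probe(struct platform_device *pdev)
{
	struct ocelot *ocelot;
	int err;

	ocelot = devm_kzalloc(&pdev->dev, sizeof(*ocelot), GFP_KERNEL);
	if (!ocelot)
		return -ENOMEM;

	ocelot->dev = &pdev->dev;
	/* ... map register targets, set ocelot->ops and num_phys_ports ... */

	err = ocelot_init(ocelot);
	if (err)
		return err;

	/* ... ocelot_init_port() per front port, register net devices ... */

	return 0;
}
#endif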
   3516
   3517void ocelot_deinit_port(struct ocelot *ocelot, int port)
   3518{
   3519	struct ocelot_port *ocelot_port = ocelot->ports[port];
   3520
   3521	skb_queue_purge(&ocelot_port->tx_skbs);
   3522}
   3523EXPORT_SYMBOL(ocelot_deinit_port);
   3524
   3525MODULE_LICENSE("Dual MIT/GPL");