cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

spectrum.c (150951B)


      1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
      2/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
      3
      4#include <linux/kernel.h>
      5#include <linux/module.h>
      6#include <linux/types.h>
      7#include <linux/pci.h>
      8#include <linux/netdevice.h>
      9#include <linux/etherdevice.h>
     10#include <linux/ethtool.h>
     11#include <linux/slab.h>
     12#include <linux/device.h>
     13#include <linux/skbuff.h>
     14#include <linux/if_vlan.h>
     15#include <linux/if_bridge.h>
     16#include <linux/workqueue.h>
     17#include <linux/jiffies.h>
     18#include <linux/bitops.h>
     19#include <linux/list.h>
     20#include <linux/notifier.h>
     21#include <linux/dcbnl.h>
     22#include <linux/inetdevice.h>
     23#include <linux/netlink.h>
     24#include <linux/jhash.h>
     25#include <linux/log2.h>
     26#include <linux/refcount.h>
     27#include <linux/rhashtable.h>
     28#include <net/switchdev.h>
     29#include <net/pkt_cls.h>
     30#include <net/netevent.h>
     31#include <net/addrconf.h>
     32
     33#include "spectrum.h"
     34#include "pci.h"
     35#include "core.h"
     36#include "core_env.h"
     37#include "reg.h"
     38#include "port.h"
     39#include "trap.h"
     40#include "txheader.h"
     41#include "spectrum_cnt.h"
     42#include "spectrum_dpipe.h"
     43#include "spectrum_acl_flex_actions.h"
     44#include "spectrum_span.h"
     45#include "spectrum_ptp.h"
     46#include "spectrum_trap.h"
     47
/* All Spectrum generations share the same minor and subminor firmware
 * revision numbers; only the major revision differs per ASIC generation.
 */
#define MLXSW_SP_FWREV_MINOR 2010
#define MLXSW_SP_FWREV_SUBMINOR 1006

#define MLXSW_SP1_FWREV_MAJOR 13
/* NOTE(review): presumably the lowest minor revision from which a FW
 * reset is possible on Spectrum-1 — confirm against core FW flashing code.
 */
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Firmware file name, built from the revision numbers above so the
 * requested file always matches the expected revision.
 */
#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

/* Line-card INI bundle file name; versioned by the shared minor/subminor. */
#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
	"mellanox/lc_ini_bundle_" \
	__stringify(MLXSW_SP_FWREV_MINOR) "_" \
	__stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"

/* Driver name strings, one per Spectrum ASIC generation. */
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";

/* NOTE(review): per-generation MAC masks — presumably mask off the bits
 * that vary between per-port addresses derived from the base MAC; confirm
 * against the consumers of these tables.
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* The MLXSW_ITEM32 definitions below describe the fields of the mlxsw TX
 * header that is prepended to every transmitted packet; offsets and bit
 * positions are relative to the start of that header.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
    176
    177int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
    178			      unsigned int counter_index, u64 *packets,
    179			      u64 *bytes)
    180{
    181	char mgpc_pl[MLXSW_REG_MGPC_LEN];
    182	int err;
    183
    184	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
    185			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
    186	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
    187	if (err)
    188		return err;
    189	if (packets)
    190		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
    191	if (bytes)
    192		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
    193	return 0;
    194}
    195
    196static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
    197				       unsigned int counter_index)
    198{
    199	char mgpc_pl[MLXSW_REG_MGPC_LEN];
    200
    201	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
    202			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
    203	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
    204}
    205
    206int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
    207				unsigned int *p_counter_index)
    208{
    209	int err;
    210
    211	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
    212				     p_counter_index);
    213	if (err)
    214		return err;
    215	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
    216	if (err)
    217		goto err_counter_clear;
    218	return 0;
    219
    220err_counter_clear:
    221	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
    222			      *p_counter_index);
    223	return err;
    224}
    225
    226void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
    227				unsigned int counter_index)
    228{
    229	 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
    230			       counter_index);
    231}
    232
    233static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
    234				     const struct mlxsw_tx_info *tx_info)
    235{
    236	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
    237
    238	memset(txhdr, 0, MLXSW_TXHDR_LEN);
    239
    240	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
    241	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
    242	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
    243	mlxsw_tx_hdr_swid_set(txhdr, 0);
    244	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
    245	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
    246	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
    247}
    248
    249enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
    250{
    251	switch (state) {
    252	case BR_STATE_FORWARDING:
    253		return MLXSW_REG_SPMS_STATE_FORWARDING;
    254	case BR_STATE_LEARNING:
    255		return MLXSW_REG_SPMS_STATE_LEARNING;
    256	case BR_STATE_LISTENING:
    257	case BR_STATE_DISABLED:
    258	case BR_STATE_BLOCKING:
    259		return MLXSW_REG_SPMS_STATE_DISCARDING;
    260	default:
    261		BUG();
    262	}
    263}
    264
    265int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
    266			      u8 state)
    267{
    268	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
    269	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
    270	char *spms_pl;
    271	int err;
    272
    273	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
    274	if (!spms_pl)
    275		return -ENOMEM;
    276	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
    277	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
    278
    279	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
    280	kfree(spms_pl);
    281	return err;
    282}
    283
    284static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
    285{
    286	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
    287	int err;
    288
    289	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
    290	if (err)
    291		return err;
    292	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
    293	return 0;
    294}
    295
    296int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
    297				   bool is_up)
    298{
    299	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
    300	char paos_pl[MLXSW_REG_PAOS_LEN];
    301
    302	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
    303			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
    304			    MLXSW_PORT_ADMIN_STATUS_DOWN);
    305	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
    306}
    307
    308static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
    309				      const unsigned char *addr)
    310{
    311	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
    312	char ppad_pl[MLXSW_REG_PPAD_LEN];
    313
    314	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
    315	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
    316	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
    317}
    318
    319static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
    320{
    321	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
    322
    323	eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
    324			mlxsw_sp_port->local_port);
    325	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
    326					  mlxsw_sp_port->dev->dev_addr);
    327}
    328
    329static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
    330{
    331	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
    332	char pmtu_pl[MLXSW_REG_PMTU_LEN];
    333	int err;
    334
    335	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
    336	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
    337	if (err)
    338		return err;
    339
    340	*p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
    341	return 0;
    342}
    343
    344static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
    345{
    346	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
    347	char pmtu_pl[MLXSW_REG_PMTU_LEN];
    348
    349	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
    350	if (mtu > mlxsw_sp_port->max_mtu)
    351		return -EINVAL;
    352
    353	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
    354	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
    355}
    356
    357static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
    358				  u16 local_port, u8 swid)
    359{
    360	char pspa_pl[MLXSW_REG_PSPA_LEN];
    361
    362	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
    363	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
    364}
    365
    366int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
    367{
    368	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
    369	char svpe_pl[MLXSW_REG_SVPE_LEN];
    370
    371	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
    372	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
    373}
    374
    375int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
    376				   bool learn_enable)
    377{
    378	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
    379	char *spvmlr_pl;
    380	int err;
    381
    382	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
    383	if (!spvmlr_pl)
    384		return -ENOMEM;
    385	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
    386			      learn_enable);
    387	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
    388	kfree(spvmlr_pl);
    389	return err;
    390}
    391
    392int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
    393{
    394	switch (ethtype) {
    395	case ETH_P_8021Q:
    396		*p_sver_type = 0;
    397		break;
    398	case ETH_P_8021AD:
    399		*p_sver_type = 1;
    400		break;
    401	default:
    402		return -EINVAL;
    403	}
    404
    405	return 0;
    406}
    407
    408int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
    409				     u16 ethtype)
    410{
    411	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
    412	char spevet_pl[MLXSW_REG_SPEVET_LEN];
    413	u8 sver_type;
    414	int err;
    415
    416	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
    417	if (err)
    418		return err;
    419
    420	mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
    421	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
    422}
    423
    424static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
    425				    u16 vid, u16 ethtype)
    426{
    427	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
    428	char spvid_pl[MLXSW_REG_SPVID_LEN];
    429	u8 sver_type;
    430	int err;
    431
    432	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
    433	if (err)
    434		return err;
    435
    436	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
    437			     sver_type);
    438
    439	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
    440}
    441
    442static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
    443					    bool allow)
    444{
    445	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
    446	char spaft_pl[MLXSW_REG_SPAFT_LEN];
    447
    448	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
    449	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
    450}
    451
    452int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
    453			   u16 ethtype)
    454{
    455	int err;
    456
    457	if (!vid) {
    458		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
    459		if (err)
    460			return err;
    461	} else {
    462		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
    463		if (err)
    464			return err;
    465		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
    466		if (err)
    467			goto err_port_allow_untagged_set;
    468	}
    469
    470	mlxsw_sp_port->pvid = vid;
    471	return 0;
    472
    473err_port_allow_untagged_set:
    474	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
    475	return err;
    476}
    477
    478static int
    479mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
    480{
    481	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
    482	char sspr_pl[MLXSW_REG_SSPR_LEN];
    483
    484	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
    485	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
    486}
    487
/* Parse a queried PMLP payload into @port_mapping, validating that the
 * lane-to-module assignment is one the driver supports: a power-of-2
 * width, a single module and slot index across all lanes, matching TX/RX
 * lanes when they are reported separately, and sequential lane numbers.
 * Returns -EINVAL (with a message) for any unsupported configuration.
 */
static int
mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
				u16 local_port, char *pmlp_pl,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	bool separate_rxtx;
	u8 first_lane;
	u8 slot_index;
	u8 module;
	u8 width;
	int i;

	/* Lane 0 provides the reference values the other lanes are
	 * checked against below.
	 */
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
	first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);

	/* width == 0 (unmapped port) is allowed; the loop below is a no-op. */
	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->slot_index = slot_index;
	port_mapping->width = width;
	port_mapping->module_width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
    544
    545static int
    546mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
    547			      struct mlxsw_sp_port_mapping *port_mapping)
    548{
    549	char pmlp_pl[MLXSW_REG_PMLP_LEN];
    550	int err;
    551
    552	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
    553	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
    554	if (err)
    555		return err;
    556	return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
    557					       pmlp_pl, port_mapping);
    558}
    559
    560static int
    561mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
    562			 const struct mlxsw_sp_port_mapping *port_mapping)
    563{
    564	char pmlp_pl[MLXSW_REG_PMLP_LEN];
    565	int i, err;
    566
    567	mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
    568				  port_mapping->module);
    569
    570	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
    571	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
    572	for (i = 0; i < port_mapping->width; i++) {
    573		mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
    574					      port_mapping->slot_index);
    575		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
    576		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
    577	}
    578
    579	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
    580	if (err)
    581		goto err_pmlp_write;
    582	return 0;
    583
    584err_pmlp_write:
    585	mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
    586				    port_mapping->module);
    587	return err;
    588}
    589
    590static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
    591				       u8 slot_index, u8 module)
    592{
    593	char pmlp_pl[MLXSW_REG_PMLP_LEN];
    594
    595	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
    596	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
    597	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
    598	mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
    599}
    600
    601static int mlxsw_sp_port_open(struct net_device *dev)
    602{
    603	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
    604	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
    605	int err;
    606
    607	err = mlxsw_env_module_port_up(mlxsw_sp->core,
    608				       mlxsw_sp_port->mapping.slot_index,
    609				       mlxsw_sp_port->mapping.module);
    610	if (err)
    611		return err;
    612	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
    613	if (err)
    614		goto err_port_admin_status_set;
    615	netif_start_queue(dev);
    616	return 0;
    617
    618err_port_admin_status_set:
    619	mlxsw_env_module_port_down(mlxsw_sp->core,
    620				   mlxsw_sp_port->mapping.slot_index,
    621				   mlxsw_sp_port->mapping.module);
    622	return err;
    623}
    624
    625static int mlxsw_sp_port_stop(struct net_device *dev)
    626{
    627	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
    628	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
    629
    630	netif_stop_queue(dev);
    631	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
    632	mlxsw_env_module_port_down(mlxsw_sp->core,
    633				   mlxsw_sp_port->mapping.slot_index,
    634				   mlxsw_sp_port->mapping.module);
    635	return 0;
    636}
    637
/* ndo_start_xmit: transmit an skb through the switch by prepending the
 * mlxsw TX header and handing the packet to the core. Drops (with the
 * tx_dropped counter bumped) on headroom-expansion or padding failure;
 * returns NETDEV_TX_BUSY without freeing the skb when the core cannot
 * accept it.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	/* Make sure there is room for the TX header in front of the data. */
	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	/* skb is intentionally not freed here: NETDEV_TX_BUSY asks the
	 * stack to requeue it.
	 */
	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* eth_skb_pad() frees the skb itself on failure. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
    690
/* ndo_set_rx_mode: intentionally empty — presumably RX filtering is
 * handled by the switch pipeline rather than per-netdev; confirm before
 * relying on this.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
    694
    695static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
    696{
    697	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
    698	struct sockaddr *addr = p;
    699	int err;
    700
    701	if (!is_valid_ether_addr(addr->sa_data))
    702		return -EADDRNOTAVAIL;
    703
    704	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
    705	if (err)
    706		return err;
    707	eth_hw_addr_set(dev, addr->sa_data);
    708	return 0;
    709}
    710
    711static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
    712{
    713	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
    714	struct mlxsw_sp_hdroom orig_hdroom;
    715	struct mlxsw_sp_hdroom hdroom;
    716	int err;
    717
    718	orig_hdroom = *mlxsw_sp_port->hdroom;
    719
    720	hdroom = orig_hdroom;
    721	hdroom.mtu = mtu;
    722	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
    723
    724	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
    725	if (err) {
    726		netdev_err(dev, "Failed to configure port's headroom\n");
    727		return err;
    728	}
    729
    730	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
    731	if (err)
    732		goto err_port_mtu_set;
    733	dev->mtu = mtu;
    734	return 0;
    735
    736err_port_mtu_set:
    737	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
    738	return err;
    739}
    740
/* Sum the per-CPU software counters of a port into @stats. The 64-bit
 * counters are read under the u64_stats sequence retry loop so a torn
 * read on 32-bit systems is retried rather than reported.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			/* Snapshot all four counters consistently. */
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return 0;
}
    772
    773static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
    774{
    775	switch (attr_id) {
    776	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
    777		return true;
    778	}
    779
    780	return false;
    781}
    782
    783static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
    784					   void *sp)
    785{
    786	switch (attr_id) {
    787	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
    788		return mlxsw_sp_port_get_sw_stats64(dev, sp);
    789	}
    790
    791	return -EINVAL;
    792}
    793
    794int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
    795				int prio, char *ppcnt_pl)
    796{
    797	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
    798	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
    799
    800	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
    801	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
    802}
    803
    804static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
    805				      struct rtnl_link_stats64 *stats)
    806{
    807	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
    808	int err;
    809
    810	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
    811					  0, ppcnt_pl);
    812	if (err)
    813		goto out;
    814
    815	stats->tx_packets =
    816		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
    817	stats->rx_packets =
    818		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
    819	stats->tx_bytes =
    820		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
    821	stats->rx_bytes =
    822		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
    823	stats->multicast =
    824		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
    825
    826	stats->rx_crc_errors =
    827		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
    828	stats->rx_frame_errors =
    829		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
    830
    831	stats->rx_length_errors = (
    832		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
    833		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
    834		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
    835
    836	stats->rx_errors = (stats->rx_crc_errors +
    837		stats->rx_frame_errors + stats->rx_length_errors);
    838
    839out:
    840	return err;
    841}
    842
/* Fill @xstats from several PPCNT counter groups: extended (ECN),
 * per-TC congestion, per-TC queue and per-priority counters. Each query
 * is best-effort — on failure the affected fields are simply left as-is
 * and the remaining groups are still read.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_CNT,
						  i, ppcnt_pl);
		/* On failure skip only the congestion counters; the TC
		 * counters below are still read for this queue.
		 */
		if (err)
			goto tc_cnt;

		xstats->wred_drop[i] =
			mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
		xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

tc_cnt:
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
    888
    889static void update_stats_cache(struct work_struct *work)
    890{
    891	struct mlxsw_sp_port *mlxsw_sp_port =
    892		container_of(work, struct mlxsw_sp_port,
    893			     periodic_hw_stats.update_dw.work);
    894
    895	if (!netif_carrier_ok(mlxsw_sp_port->dev))
    896		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
    897		 * necessary when port goes down.
    898		 */
    899		goto out;
    900
    901	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
    902				   &mlxsw_sp_port->periodic_hw_stats.stats);
    903	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
    904				    &mlxsw_sp_port->periodic_hw_stats.xstats);
    905
    906out:
    907	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
    908			       MLXSW_HW_STATS_UPDATE_TIME);
    909}
    910
    911/* Return the stats from a cache that is updated periodically,
    912 * as this function might get called in an atomic context.
    913 */
    914static void
    915mlxsw_sp_port_get_stats64(struct net_device *dev,
    916			  struct rtnl_link_stats64 *stats)
    917{
    918	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
    919
    920	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
    921}
    922
    923static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
    924				    u16 vid_begin, u16 vid_end,
    925				    bool is_member, bool untagged)
    926{
    927	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
    928	char *spvm_pl;
    929	int err;
    930
    931	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
    932	if (!spvm_pl)
    933		return -ENOMEM;
    934
    935	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port,	vid_begin,
    936			    vid_end, is_member, untagged);
    937	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
    938	kfree(spvm_pl);
    939	return err;
    940}
    941
    942int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
    943			   u16 vid_end, bool is_member, bool untagged)
    944{
    945	u16 vid, vid_e;
    946	int err;
    947
    948	for (vid = vid_begin; vid <= vid_end;
    949	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
    950		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
    951			    vid_end);
    952
    953		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
    954					       is_member, untagged);
    955		if (err)
    956			return err;
    957	}
    958
    959	return 0;
    960}
    961
    962static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
    963				     bool flush_default)
    964{
    965	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
    966
    967	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
    968				 &mlxsw_sp_port->vlans_list, list) {
    969		if (!flush_default &&
    970		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
    971			continue;
    972		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
    973	}
    974}
    975
    976static void
    977mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
    978{
    979	if (mlxsw_sp_port_vlan->bridge_port)
    980		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
    981	else if (mlxsw_sp_port_vlan->fid)
    982		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
    983}
    984
    985struct mlxsw_sp_port_vlan *
    986mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
    987{
    988	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
    989	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
    990	int err;
    991
    992	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
    993	if (mlxsw_sp_port_vlan)
    994		return ERR_PTR(-EEXIST);
    995
    996	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
    997	if (err)
    998		return ERR_PTR(err);
    999
   1000	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
   1001	if (!mlxsw_sp_port_vlan) {
   1002		err = -ENOMEM;
   1003		goto err_port_vlan_alloc;
   1004	}
   1005
   1006	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
   1007	mlxsw_sp_port_vlan->vid = vid;
   1008	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
   1009
   1010	return mlxsw_sp_port_vlan;
   1011
   1012err_port_vlan_alloc:
   1013	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
   1014	return ERR_PTR(err);
   1015}
   1016
   1017void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
   1018{
   1019	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
   1020	u16 vid = mlxsw_sp_port_vlan->vid;
   1021
   1022	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
   1023	list_del(&mlxsw_sp_port_vlan->list);
   1024	kfree(mlxsw_sp_port_vlan);
   1025	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
   1026}
   1027
   1028static int mlxsw_sp_port_add_vid(struct net_device *dev,
   1029				 __be16 __always_unused proto, u16 vid)
   1030{
   1031	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
   1032
   1033	/* VLAN 0 is added to HW filter when device goes up, but it is
   1034	 * reserved in our case, so simply return.
   1035	 */
   1036	if (!vid)
   1037		return 0;
   1038
   1039	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
   1040}
   1041
   1042static int mlxsw_sp_port_kill_vid(struct net_device *dev,
   1043				  __be16 __always_unused proto, u16 vid)
   1044{
   1045	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
   1046	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
   1047
   1048	/* VLAN 0 is removed from HW filter when device goes down, but
   1049	 * it is reserved in our case, so simply return.
   1050	 */
   1051	if (!vid)
   1052		return 0;
   1053
   1054	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
   1055	if (!mlxsw_sp_port_vlan)
   1056		return 0;
   1057	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
   1058
   1059	return 0;
   1060}
   1061
   1062static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
   1063				   struct flow_block_offload *f)
   1064{
   1065	switch (f->binder_type) {
   1066	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
   1067		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
   1068	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
   1069		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
   1070	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
   1071		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
   1072	case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
   1073		return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
   1074	default:
   1075		return -EOPNOTSUPP;
   1076	}
   1077}
   1078
   1079static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
   1080			     void *type_data)
   1081{
   1082	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
   1083
   1084	switch (type) {
   1085	case TC_SETUP_BLOCK:
   1086		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
   1087	case TC_SETUP_QDISC_RED:
   1088		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
   1089	case TC_SETUP_QDISC_PRIO:
   1090		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
   1091	case TC_SETUP_QDISC_ETS:
   1092		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
   1093	case TC_SETUP_QDISC_TBF:
   1094		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
   1095	case TC_SETUP_QDISC_FIFO:
   1096		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
   1097	default:
   1098		return -EOPNOTSUPP;
   1099	}
   1100}
   1101
   1102static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
   1103{
   1104	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
   1105
   1106	if (!enable) {
   1107		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
   1108		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
   1109			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
   1110			return -EINVAL;
   1111		}
   1112		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
   1113		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
   1114	} else {
   1115		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
   1116		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
   1117	}
   1118	return 0;
   1119}
   1120
   1121static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
   1122{
   1123	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
   1124	char pplr_pl[MLXSW_REG_PPLR_LEN];
   1125	int err;
   1126
   1127	if (netif_running(dev))
   1128		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
   1129
   1130	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
   1131	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
   1132			      pplr_pl);
   1133
   1134	if (netif_running(dev))
   1135		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
   1136
   1137	return err;
   1138}
   1139
   1140typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
   1141
   1142static int mlxsw_sp_handle_feature(struct net_device *dev,
   1143				   netdev_features_t wanted_features,
   1144				   netdev_features_t feature,
   1145				   mlxsw_sp_feature_handler feature_handler)
   1146{
   1147	netdev_features_t changes = wanted_features ^ dev->features;
   1148	bool enable = !!(wanted_features & feature);
   1149	int err;
   1150
   1151	if (!(changes & feature))
   1152		return 0;
   1153
   1154	err = feature_handler(dev, enable);
   1155	if (err) {
   1156		netdev_err(dev, "%s feature %pNF failed, err %d\n",
   1157			   enable ? "Enable" : "Disable", &feature, err);
   1158		return err;
   1159	}
   1160
   1161	if (enable)
   1162		dev->features |= feature;
   1163	else
   1164		dev->features &= ~feature;
   1165
   1166	return 0;
   1167}
   1168static int mlxsw_sp_set_features(struct net_device *dev,
   1169				 netdev_features_t features)
   1170{
   1171	netdev_features_t oper_features = dev->features;
   1172	int err = 0;
   1173
   1174	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
   1175				       mlxsw_sp_feature_hw_tc);
   1176	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
   1177				       mlxsw_sp_feature_loopback);
   1178
   1179	if (err) {
   1180		dev->features = oper_features;
   1181		return -EINVAL;
   1182	}
   1183
   1184	return 0;
   1185}
   1186
   1187static struct devlink_port *
   1188mlxsw_sp_port_get_devlink_port(struct net_device *dev)
   1189{
   1190	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
   1191	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
   1192
   1193	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
   1194						mlxsw_sp_port->local_port);
   1195}
   1196
   1197static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
   1198				      struct ifreq *ifr)
   1199{
   1200	struct hwtstamp_config config;
   1201	int err;
   1202
   1203	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
   1204		return -EFAULT;
   1205
   1206	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
   1207							     &config);
   1208	if (err)
   1209		return err;
   1210
   1211	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
   1212		return -EFAULT;
   1213
   1214	return 0;
   1215}
   1216
   1217static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
   1218				      struct ifreq *ifr)
   1219{
   1220	struct hwtstamp_config config;
   1221	int err;
   1222
   1223	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
   1224							     &config);
   1225	if (err)
   1226		return err;
   1227
   1228	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
   1229		return -EFAULT;
   1230
   1231	return 0;
   1232}
   1233
   1234static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
   1235{
   1236	struct hwtstamp_config config = {0};
   1237
   1238	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
   1239}
   1240
   1241static int
   1242mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
   1243{
   1244	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
   1245
   1246	switch (cmd) {
   1247	case SIOCSHWTSTAMP:
   1248		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
   1249	case SIOCGHWTSTAMP:
   1250		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
   1251	default:
   1252		return -EOPNOTSUPP;
   1253	}
   1254}
   1255
/* Netdev entry points for a Spectrum front-panel port netdevice. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc           = mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_eth_ioctl		= mlxsw_sp_port_ioctl,
};
   1273
   1274static int
   1275mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
   1276{
   1277	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
   1278	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
   1279	const struct mlxsw_sp_port_type_speed_ops *ops;
   1280	char ptys_pl[MLXSW_REG_PTYS_LEN];
   1281	u32 eth_proto_cap_masked;
   1282	int err;
   1283
   1284	ops = mlxsw_sp->port_type_speed_ops;
   1285
   1286	/* Set advertised speeds to speeds supported by both the driver
   1287	 * and the device.
   1288	 */
   1289	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
   1290			       0, false);
   1291	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
   1292	if (err)
   1293		return err;
   1294
   1295	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
   1296				 &eth_proto_admin, &eth_proto_oper);
   1297	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
   1298	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
   1299			       eth_proto_cap_masked,
   1300			       mlxsw_sp_port->link.autoneg);
   1301	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
   1302}
   1303
   1304int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
   1305{
   1306	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
   1307	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
   1308	char ptys_pl[MLXSW_REG_PTYS_LEN];
   1309	u32 eth_proto_oper;
   1310	int err;
   1311
   1312	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
   1313	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
   1314					       mlxsw_sp_port->local_port, 0,
   1315					       false);
   1316	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
   1317	if (err)
   1318		return err;
   1319	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
   1320						 &eth_proto_oper);
   1321	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
   1322	return 0;
   1323}
   1324
   1325int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
   1326			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
   1327			  bool dwrr, u8 dwrr_weight)
   1328{
   1329	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
   1330	char qeec_pl[MLXSW_REG_QEEC_LEN];
   1331
   1332	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
   1333			    next_index);
   1334	mlxsw_reg_qeec_de_set(qeec_pl, true);
   1335	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
   1336	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
   1337	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
   1338}
   1339
   1340int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
   1341				  enum mlxsw_reg_qeec_hr hr, u8 index,
   1342				  u8 next_index, u32 maxrate, u8 burst_size)
   1343{
   1344	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
   1345	char qeec_pl[MLXSW_REG_QEEC_LEN];
   1346
   1347	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
   1348			    next_index);
   1349	mlxsw_reg_qeec_mase_set(qeec_pl, true);
   1350	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
   1351	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
   1352	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
   1353}
   1354
   1355static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
   1356				    enum mlxsw_reg_qeec_hr hr, u8 index,
   1357				    u8 next_index, u32 minrate)
   1358{
   1359	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
   1360	char qeec_pl[MLXSW_REG_QEEC_LEN];
   1361
   1362	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
   1363			    next_index);
   1364	mlxsw_reg_qeec_mise_set(qeec_pl, true);
   1365	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
   1366
   1367	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
   1368}
   1369
   1370int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
   1371			      u8 switch_prio, u8 tclass)
   1372{
   1373	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
   1374	char qtct_pl[MLXSW_REG_QTCT_LEN];
   1375
   1376	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
   1377			    tclass);
   1378	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
   1379}
   1380
/* Build the initial ETS scheduling hierarchy for a port:
 * TC -> subgroup -> group, disable max shapers everywhere they exist, set
 * min shapers on the TCs programmed at index i + 8, and map all priorities
 * to traffic class 0. Returns 0 on success or a register-write error.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* TCs i + 8 are linked to subgroup i with DWRR enabled. */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
   1466
   1467static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
   1468					bool enable)
   1469{
   1470	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
   1471	char qtctm_pl[MLXSW_REG_QTCTM_LEN];
   1472
   1473	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
   1474	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
   1475}
   1476
   1477static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
   1478{
   1479	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
   1480	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
   1481	u8 module = mlxsw_sp_port->mapping.module;
   1482	u64 overheat_counter;
   1483	int err;
   1484
   1485	err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
   1486						    module, &overheat_counter);
   1487	if (err)
   1488		return err;
   1489
   1490	mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
   1491	return 0;
   1492}
   1493
   1494int
   1495mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
   1496				      bool is_8021ad_tagged,
   1497				      bool is_8021q_tagged)
   1498{
   1499	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
   1500	char spvc_pl[MLXSW_REG_SPVC_LEN];
   1501
   1502	mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
   1503			    is_8021ad_tagged, is_8021q_tagged);
   1504	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
   1505}
   1506
   1507static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
   1508					u16 local_port, u8 *port_number,
   1509					u8 *split_port_subnumber,
   1510					u8 *slot_index)
   1511{
   1512	char pllp_pl[MLXSW_REG_PLLP_LEN];
   1513	int err;
   1514
   1515	mlxsw_reg_pllp_pack(pllp_pl, local_port);
   1516	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
   1517	if (err)
   1518		return err;
   1519	mlxsw_reg_pllp_unpack(pllp_pl, port_number,
   1520			      split_port_subnumber, slot_index);
   1521	return 0;
   1522}
   1523
/* Create and register the netdevice for one front-panel port: map the
 * module, configure SWID/speed/MTU/buffers/ETS/DCB/FIDs/qdiscs/VLANs and
 * finally register the netdev and kick off the periodic stats worker.
 * The error ladder below unwinds each step in strict reverse order, so any
 * change to the setup sequence must keep the two in sync.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				bool split,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	u8 split_port_subnumber;
	struct net_device *dev;
	u8 port_number;
	u8 slot_index;
	bool splittable;
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			local_port);
		return err;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
					   &split_port_subnumber, &slot_index);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
			local_port);
		goto err_port_label_info_get;
	}

	/* A multi-lane port that is not already split can be split further. */
	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
				   port_number, split, split_port_subnumber,
				   splittable, lanes, mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		goto err_core_port_init;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	/* Start with an empty VLAN filter; VIDs are added individually. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* Set SPVC.et0=true and SPVC.et1=false to make the local port to treat
	 * only packets with 802.1q header as tagged packets.
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	/* Schedule the first stats refresh immediately. */
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
err_core_port_init:
err_port_label_info_get:
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
				   port_mapping->slot_index,
				   port_mapping->module);
	return err;
}
   1816
    1817	static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
    1818	{
    1819		struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	/* Snapshot the module mapping now; the port struct is freed below. */
    1820		u8 slot_index = mlxsw_sp_port->mapping.slot_index;
    1821		u8 module = mlxsw_sp_port->mapping.module;
    1822	
	/* Quiesce all asynchronous work before dismantling state it uses. */
    1823		cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
    1824		cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
    1825		mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
    1826		mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
    1827		unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
    1828		mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
    1829		mlxsw_sp->ports[local_port] = NULL;
	/* Unwind the remaining state in reverse order of port creation. */
    1830		mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
    1831		mlxsw_sp_port_nve_fini(mlxsw_sp_port);
    1832		mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
    1833		mlxsw_sp_port_fids_fini(mlxsw_sp_port);
    1834		mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
    1835		mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
    1836		mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
    1837		free_percpu(mlxsw_sp_port->pcpu_stats);
	/* The VLAN flush above must have emptied the list. */
    1838		WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
    1839		free_netdev(mlxsw_sp_port->dev);
    1840		mlxsw_core_port_fini(mlxsw_sp->core, local_port);
    1841		mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
    1842			       MLXSW_PORT_SWID_DISABLED_PORT);
    1843		mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
    1844	}
   1845
   1846static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
   1847{
   1848	struct mlxsw_sp_port *mlxsw_sp_port;
   1849	int err;
   1850
   1851	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
   1852	if (!mlxsw_sp_port)
   1853		return -ENOMEM;
   1854
   1855	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
   1856	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
   1857
   1858	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
   1859				       mlxsw_sp_port,
   1860				       mlxsw_sp->base_mac,
   1861				       sizeof(mlxsw_sp->base_mac));
   1862	if (err) {
   1863		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
   1864		goto err_core_cpu_port_init;
   1865	}
   1866
   1867	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
   1868	return 0;
   1869
   1870err_core_cpu_port_init:
   1871	kfree(mlxsw_sp_port);
   1872	return err;
   1873}
   1874
   1875static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
   1876{
   1877	struct mlxsw_sp_port *mlxsw_sp_port =
   1878				mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
   1879
   1880	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
   1881	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
   1882	kfree(mlxsw_sp_port);
   1883}
   1884
   1885static bool mlxsw_sp_local_port_valid(u16 local_port)
   1886{
   1887	return local_port != MLXSW_PORT_CPU_PORT;
   1888}
   1889
   1890static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
   1891{
   1892	if (!mlxsw_sp_local_port_valid(local_port))
   1893		return false;
   1894	return mlxsw_sp->ports[local_port] != NULL;
   1895}
   1896
	/* Enable or disable generation of port-mapping change events for
	 * @local_port via the PMECR register (consumed through the PMLPE
	 * listener, see mlxsw_sp2_listener[]).
	 */
    1897	static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
    1898						   u16 local_port, bool enable)
    1899	{
    1900		char pmecr_pl[MLXSW_REG_PMECR_LEN];
    1901	
    1902		mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
    1903			     enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
    1904				      MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
    1905		return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
    1906	}
   1907
	/* A deferred copy of one PMLPE (port mapping change) payload, queued
	 * from trap context and consumed by the mapping events worker.
	 */
    1908	struct mlxsw_sp_port_mapping_event {
    1909		struct list_head list;
    1910		char pmlp_pl[MLXSW_REG_PMLP_LEN];
    1911	};
   1912
	/* Process-context worker that consumes queued PMLPE events and
	 * instantiates the corresponding ports.
	 */
    1913	static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
    1914	{
    1915		struct mlxsw_sp_port_mapping_event *event, *next_event;
    1916		struct mlxsw_sp_port_mapping_events *events;
    1917		struct mlxsw_sp_port_mapping port_mapping;
    1918		struct mlxsw_sp *mlxsw_sp;
    1919		struct devlink *devlink;
    1920		LIST_HEAD(event_queue);
    1921		u16 local_port;
    1922		int err;
    1923	
    1924		events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
    1925		mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
    1926		devlink = priv_to_devlink(mlxsw_sp->core);
    1927	
	/* Detach the whole pending queue under the lock and process it
	 * afterwards, so new events may keep arriving meanwhile.
	 */
    1928		spin_lock_bh(&events->queue_lock);
    1929		list_splice_init(&events->queue, &event_queue);
    1930		spin_unlock_bh(&events->queue_lock);
    1931	
    1932		list_for_each_entry_safe(event, next_event, &event_queue, list) {
    1933			local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
    1934			err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
    1935							      event->pmlp_pl, &port_mapping);
	/* Unparsable or zero-width mapping: drop this event. */
    1936			if (err)
    1937				goto out;
    1938	
    1939			if (WARN_ON_ONCE(!port_mapping.width))
    1940				goto out;
    1941	
    1942			devl_lock(devlink);
    1943	
	/* A mapping event for an already created port is unexpected. */
    1944			if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
    1945				mlxsw_sp_port_create(mlxsw_sp, local_port,
    1946						     false, &port_mapping);
    1947			else
    1948				WARN_ON_ONCE(1);
    1949	
    1950			devl_unlock(devlink);
    1951	
	/* Cache the new mapping; split/unsplit paths read this cache. */
    1952			mlxsw_sp->port_mapping[local_port] = port_mapping;
    1953	
    1954	out:
    1955			kfree(event);
    1956		}
    1957	}
   1958
	/* PMLPE trap handler. Runs in atomic context, so it only copies the
	 * register payload onto a queue and defers the real work to
	 * mlxsw_sp_port_mapping_events_work().
	 */
    1959	static void
    1960	mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
    1961					    char *pmlp_pl, void *priv)
    1962	{
    1963		struct mlxsw_sp_port_mapping_events *events;
    1964		struct mlxsw_sp_port_mapping_event *event;
    1965		struct mlxsw_sp *mlxsw_sp = priv;
    1966		u16 local_port;
    1967	
    1968		local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
    1969		if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
    1970			return;
    1971	
    1972		events = &mlxsw_sp->port_mapping_events;
	/* GFP_ATOMIC: no sleeping here. On allocation failure the event is
	 * silently dropped.
	 */
    1973		event = kmalloc(sizeof(*event), GFP_ATOMIC);
    1974		if (!event)
    1975			return;
    1976		memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
    1977		spin_lock(&events->queue_lock);
    1978		list_add_tail(&event->list, &events->queue);
    1979		spin_unlock(&events->queue_lock);
    1980		mlxsw_core_schedule_work(&events->work);
    1981	}
   1982
   1983static void
   1984__mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
   1985{
   1986	struct mlxsw_sp_port_mapping_event *event, *next_event;
   1987	struct mlxsw_sp_port_mapping_events *events;
   1988
   1989	events = &mlxsw_sp->port_mapping_events;
   1990
   1991	/* Caller needs to make sure that no new event is going to appear. */
   1992	cancel_work_sync(&events->work);
   1993	list_for_each_entry_safe(event, next_event, &events->queue, list) {
   1994		list_del(&event->list);
   1995		kfree(event);
   1996	}
   1997}
   1998
	/* Tear down all ports: first stop the device from generating new
	 * mapping events and drain any pending ones, then remove every
	 * created port (and the CPU port) under the devlink lock.
	 */
    1999	static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
    2000	{
    2001		unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
    2002		struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
    2003		int i;
    2004	
    2005		for (i = 1; i < max_ports; i++)
    2006			mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
    2007		/* Make sure all scheduled events are processed */
    2008		__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
    2009	
    2010		devl_lock(devlink);
    2011		for (i = 1; i < max_ports; i++)
    2012			if (mlxsw_sp_port_created(mlxsw_sp, i))
    2013				mlxsw_sp_port_remove(mlxsw_sp, i);
    2014		mlxsw_sp_cpu_port_remove(mlxsw_sp);
    2015		devl_unlock(devlink);
    2016		kfree(mlxsw_sp->ports);
    2017		mlxsw_sp->ports = NULL;
    2018	}
   2019
   2020static void
   2021mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
   2022			       bool (*selector)(void *priv, u16 local_port),
   2023			       void *priv)
   2024{
   2025	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
   2026	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
   2027	int i;
   2028
   2029	for (i = 1; i < max_ports; i++)
   2030		if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
   2031			mlxsw_sp_port_remove(mlxsw_sp, i);
   2032}
   2033
	/* Create the CPU port and every front-panel port that has a cached
	 * module mapping, with mapping-change events enabled first so no
	 * change is missed. On failure everything is unwound.
	 */
    2034	static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
    2035	{
    2036		unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
    2037		struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
    2038		struct mlxsw_sp_port_mapping_events *events;
    2039		struct mlxsw_sp_port_mapping *port_mapping;
    2040		size_t alloc_size;
    2041		int i;
    2042		int err;
    2043	
    2044		alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
    2045		mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
    2046		if (!mlxsw_sp->ports)
    2047			return -ENOMEM;
    2048	
    2049		events = &mlxsw_sp->port_mapping_events;
    2050		INIT_LIST_HEAD(&events->queue);
    2051		spin_lock_init(&events->queue_lock);
    2052		INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);
    2053	
    2054		for (i = 1; i < max_ports; i++) {
    2055			err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
    2056			if (err)
    2057				goto err_event_enable;
    2058		}
    2059	
    2060		devl_lock(devlink);
    2061		err = mlxsw_sp_cpu_port_create(mlxsw_sp);
    2062		if (err)
    2063			goto err_cpu_port_create;
    2064	
	/* Local port 0 is the CPU port, so front-panel ports start at 1.
	 * Ports without a cached mapping (no module) are skipped.
	 */
    2065		for (i = 1; i < max_ports; i++) {
    2066			port_mapping = &mlxsw_sp->port_mapping[i];
    2067			if (!port_mapping->width)
    2068				continue;
    2069			err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
    2070			if (err)
    2071				goto err_port_create;
    2072		}
    2073		devl_unlock(devlink);
    2074		return 0;
    2075	
    2076	err_port_create:
    2077		for (i--; i >= 1; i--)
    2078			if (mlxsw_sp_port_created(mlxsw_sp, i))
    2079				mlxsw_sp_port_remove(mlxsw_sp, i);
	/* Reset i so the err_event_enable loop below disables mapping
	 * events for ALL ports, not just up to the port-create failure.
	 */
    2080		i = max_ports;
    2081		mlxsw_sp_cpu_port_remove(mlxsw_sp);
    2082	err_cpu_port_create:
    2083		devl_unlock(devlink);
    2084	err_event_enable:
    2085		for (i--; i >= 1; i--)
    2086			mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
    2087		/* Make sure all scheduled events are processed */
    2088		__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
    2089		kfree(mlxsw_sp->ports);
    2090		mlxsw_sp->ports = NULL;
    2091		return err;
    2092	}
   2093
   2094static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
   2095{
   2096	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
   2097	struct mlxsw_sp_port_mapping *port_mapping;
   2098	int i;
   2099	int err;
   2100
   2101	mlxsw_sp->port_mapping = kcalloc(max_ports,
   2102					 sizeof(struct mlxsw_sp_port_mapping),
   2103					 GFP_KERNEL);
   2104	if (!mlxsw_sp->port_mapping)
   2105		return -ENOMEM;
   2106
   2107	for (i = 1; i < max_ports; i++) {
   2108		if (mlxsw_core_port_is_xm(mlxsw_sp->core, i))
   2109			continue;
   2110
   2111		port_mapping = &mlxsw_sp->port_mapping[i];
   2112		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
   2113		if (err)
   2114			goto err_port_module_info_get;
   2115	}
   2116	return 0;
   2117
   2118err_port_module_info_get:
   2119	kfree(mlxsw_sp->port_mapping);
   2120	return err;
   2121}
   2122
	/* Release the cached per-port module mapping array. */
    2123	static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
    2124	{
    2125		kfree(mlxsw_sp->port_mapping);
    2126	}
   2127
	/* Create @count split ports from @port_mapping. Each split port gets
	 * an equal share of the parent's lanes; the local port numbers come
	 * from the queried PMTDB record. Rolls back on failure.
	 */
    2128	static int
    2129	mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
    2130				   struct mlxsw_sp_port_mapping *port_mapping,
    2131				   unsigned int count, const char *pmtdb_pl)
    2132	{
    2133		struct mlxsw_sp_port_mapping split_port_mapping;
    2134		int err, i;
    2135	
    2136		split_port_mapping = *port_mapping;
    2137		split_port_mapping.width /= count;
    2138		for (i = 0; i < count; i++) {
    2139			u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
    2140	
    2141			if (!mlxsw_sp_local_port_valid(s_local_port))
    2142				continue;
    2143	
    2144			err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
    2145						   true, &split_port_mapping);
    2146			if (err)
    2147				goto err_port_create;
	/* Each successive split port starts at the next lane group. */
    2148			split_port_mapping.lane += split_port_mapping.width;
    2149		}
    2150	
    2151		return 0;
    2152	
    2153	err_port_create:
    2154		for (i--; i >= 0; i--) {
    2155			u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
    2156	
    2157			if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
    2158				mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
    2159		}
    2160		return err;
    2161	}
   2162
   2163static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
   2164					 unsigned int count,
   2165					 const char *pmtdb_pl)
   2166{
   2167	struct mlxsw_sp_port_mapping *port_mapping;
   2168	int i;
   2169
   2170	/* Go over original unsplit ports in the gap and recreate them. */
   2171	for (i = 0; i < count; i++) {
   2172		u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
   2173
   2174		port_mapping = &mlxsw_sp->port_mapping[local_port];
   2175		if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
   2176			continue;
   2177		mlxsw_sp_port_create(mlxsw_sp, local_port,
   2178				     false, port_mapping);
   2179	}
   2180}
   2181
   2182static struct mlxsw_sp_port *
   2183mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
   2184{
   2185	if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
   2186		return mlxsw_sp->ports[local_port];
   2187	return NULL;
   2188}
   2189
	/* devlink port-split handler: validate the request against PMTDB,
	 * remove the existing ports occupying the target lanes and create
	 * @count split ports in their place.
	 */
    2190	static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
    2191				       unsigned int count,
    2192				       struct netlink_ext_ack *extack)
    2193	{
    2194		struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
    2195		struct mlxsw_sp_port_mapping port_mapping;
    2196		struct mlxsw_sp_port *mlxsw_sp_port;
    2197		enum mlxsw_reg_pmtdb_status status;
    2198		char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
    2199		int i;
    2200		int err;
    2201	
    2202		mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
    2203		if (!mlxsw_sp_port) {
    2204			dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
    2205				local_port);
    2206			NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
    2207			return -EINVAL;
    2208		}
    2209	
    2210		if (mlxsw_sp_port->split) {
    2211			NL_SET_ERR_MSG_MOD(extack, "Port is already split");
    2212			return -EINVAL;
    2213		}
    2214	
	/* Ask the device which local ports are involved in this split and
	 * whether the configuration is supported at all.
	 */
    2215		mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
    2216				     mlxsw_sp_port->mapping.module,
    2217				     mlxsw_sp_port->mapping.module_width / count,
    2218				     count);
    2219		err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
    2220		if (err) {
    2221			NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
    2222			return err;
    2223		}
    2224	
    2225		status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
    2226		if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
    2227			NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
    2228			return -EINVAL;
    2229		}
    2230	
	/* Keep a copy of the mapping; mlxsw_sp_port_remove() below frees
	 * the port struct it lives in.
	 */
    2231		port_mapping = mlxsw_sp_port->mapping;
    2232	
    2233		for (i = 0; i < count; i++) {
    2234			u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
    2235	
    2236			if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
    2237				mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
    2238		}
    2239	
    2240		err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
    2241						 count, pmtdb_pl);
    2242		if (err) {
    2243			dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
    2244			goto err_port_split_create;
    2245		}
    2246	
    2247		return 0;
    2248	
    2249	err_port_split_create:
	/* Best effort: bring back the original unsplit ports. */
    2250		mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
    2251	
    2252		return err;
    2253	}
   2254
	/* devlink port-unsplit handler: remove the split ports derived from
	 * the same module and recreate the original unsplit port(s).
	 */
    2255	static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
    2256					 struct netlink_ext_ack *extack)
    2257	{
    2258		struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
    2259		struct mlxsw_sp_port *mlxsw_sp_port;
    2260		char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
    2261		unsigned int count;
    2262		int i;
    2263		int err;
    2264	
    2265		mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
    2266		if (!mlxsw_sp_port) {
    2267			dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
    2268				local_port);
    2269			NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
    2270			return -EINVAL;
    2271		}
    2272	
    2273		if (!mlxsw_sp_port->split) {
    2274			NL_SET_ERR_MSG_MOD(extack, "Port was not split");
    2275			return -EINVAL;
    2276		}
    2277	
	/* Derive how many split siblings exist from the width ratio. */
    2278		count = mlxsw_sp_port->mapping.module_width /
    2279			mlxsw_sp_port->mapping.width;
    2280	
    2281		mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
    2282				     mlxsw_sp_port->mapping.module,
    2283				     mlxsw_sp_port->mapping.module_width / count,
    2284				     count);
    2285		err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
    2286		if (err) {
    2287			NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
    2288			return err;
    2289		}
    2290	
    2291		for (i = 0; i < count; i++) {
    2292			u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
    2293	
    2294			if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
    2295				mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
    2296		}
    2297	
    2298		mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
    2299	
    2300		return 0;
    2301	}
   2302
   2303static void
   2304mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
   2305{
   2306	int i;
   2307
   2308	for (i = 0; i < TC_MAX_QUEUE; i++)
   2309		mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
   2310}
   2311
	/* PUDE (port up/down event) handler: reflect the operational status
	 * into the netdev carrier state.
	 */
    2312	static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
    2313					     char *pude_pl, void *priv)
    2314	{
    2315		struct mlxsw_sp *mlxsw_sp = priv;
    2316		struct mlxsw_sp_port *mlxsw_sp_port;
    2317		enum mlxsw_reg_pude_oper_status status;
    2318		u16 local_port;
    2319	
    2320		local_port = mlxsw_reg_pude_local_port_get(pude_pl);
    2321	
    2322		if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
    2323			return;
	/* The port may already have been removed; nothing to update then. */
    2324		mlxsw_sp_port = mlxsw_sp->ports[local_port];
    2325		if (!mlxsw_sp_port)
    2326			return;
    2327	
    2328		status = mlxsw_reg_pude_oper_status_get(pude_pl);
    2329		if (status == MLXSW_PORT_OPER_STATUS_UP) {
    2330			netdev_info(mlxsw_sp_port->dev, "link up\n");
    2331			netif_carrier_on(mlxsw_sp_port->dev);
	/* Re-evaluate the PTP shaper whenever the link comes up. */
    2332			mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
    2333		} else {
    2334			netdev_info(mlxsw_sp_port->dev, "link down\n");
    2335			netif_carrier_off(mlxsw_sp_port->dev);
    2336			mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
    2337		}
    2338	}
   2339
	/* Unpack every timestamp record carried by an MTPPTR event and hand
	 * each one to the SP1 PTP code. @ingress distinguishes the RX and TX
	 * timestamp FIFOs.
	 */
    2340	static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
    2341						  char *mtpptr_pl, bool ingress)
    2342	{
    2343		u16 local_port;
    2344		u8 num_rec;
    2345		int i;
    2346	
    2347		local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
    2348		num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
    2349		for (i = 0; i < num_rec; i++) {
    2350			u8 domain_number;
    2351			u8 message_type;
    2352			u16 sequence_id;
    2353			u64 timestamp;
    2354	
    2355			mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
    2356						&domain_number, &sequence_id,
    2357						&timestamp);
    2358			mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
    2359						    message_type, domain_number,
    2360						    sequence_id, timestamp);
    2361		}
    2362	}
   2363
   2364static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
   2365					      char *mtpptr_pl, void *priv)
   2366{
   2367	struct mlxsw_sp *mlxsw_sp = priv;
   2368
   2369	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
   2370}
   2371
   2372static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
   2373					      char *mtpptr_pl, void *priv)
   2374{
   2375	struct mlxsw_sp *mlxsw_sp = priv;
   2376
   2377	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
   2378}
   2379
	/* Default RX trap handler: attribute the skb to the ingress port's
	 * netdev, account it in the per-CPU stats and inject it into the
	 * network stack.
	 */
    2380	void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
    2381					       u16 local_port, void *priv)
    2382	{
    2383		struct mlxsw_sp *mlxsw_sp = priv;
    2384		struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
    2385		struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
    2386	
	/* The port may have been removed while the packet was in flight. */
    2387		if (unlikely(!mlxsw_sp_port)) {
    2388			dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
    2389					     local_port);
    2390			return;
    2391		}
    2392	
    2393		skb->dev = mlxsw_sp_port->dev;
    2394	
	/* u64_stats syncp guards the 64-bit counters on 32-bit machines. */
    2395		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
    2396		u64_stats_update_begin(&pcpu_stats->syncp);
    2397		pcpu_stats->rx_packets++;
    2398		pcpu_stats->rx_bytes += skb->len;
    2399		u64_stats_update_end(&pcpu_stats->syncp);
    2400	
    2401		skb->protocol = eth_type_trans(skb, skb->dev);
    2402		netif_receive_skb(skb);
    2403	}
   2404
   2405static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
   2406					   void *priv)
   2407{
   2408	skb->offload_fwd_mark = 1;
   2409	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
   2410}
   2411
   2412static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
   2413					      u16 local_port, void *priv)
   2414{
   2415	skb->offload_l3_fwd_mark = 1;
   2416	skb->offload_fwd_mark = 1;
   2417	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
   2418}
   2419
	/* Dispatch a received PTP packet to the ASIC-specific handler. */
    2420	void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
    2421				  u16 local_port)
    2422	{
    2423		mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
    2424	}
   2425
	/* RX listener: deliver without any offload-forwarded marking. */
    2426	#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
    2427		MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
    2428			  _is_ctrl, SP_##_trap_group, DISCARD)
    2429	
	/* RX listener: set offload_fwd_mark before delivery. */
    2430	#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
    2431		MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
    2432			_is_ctrl, SP_##_trap_group, DISCARD)
    2433	
	/* RX listener: set both offload_fwd_mark and offload_l3_fwd_mark. */
    2434	#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
    2435		MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
    2436			_is_ctrl, SP_##_trap_group, DISCARD)
    2437	
	/* Event listener bound to the SP_EVENT trap group. */
    2438	#define MLXSW_SP_EVENTL(_func, _trap_id)		\
    2439		MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
   2440
	/* Traps/events registered for every Spectrum generation; per-ASIC
	 * extras live in mlxsw_sp1_listener[]/mlxsw_sp2_listener[].
	 */
    2441	static const struct mlxsw_listener mlxsw_sp_listener[] = {
    2442		/* Events */
    2443		MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
    2444		/* L2 traps */
    2445		MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
    2446		/* L3 traps */
    2447		MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
    2448				  false),
    2449		MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
    2450		MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
    2451			  false),
    2452		MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
    2453			     ROUTER_EXP, false),
    2454		MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
    2455			     ROUTER_EXP, false),
    2456		MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
    2457			     ROUTER_EXP, false),
    2458		MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
    2459			     ROUTER_EXP, false),
    2460		/* Multicast Router Traps */
    2461		MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
    2462		MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
    2463		/* NVE traps */
    2464		MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
    2465	};
   2466
	/* Spectrum-1 specific listeners: PTP timestamp FIFO events. */
    2467	static const struct mlxsw_listener mlxsw_sp1_listener[] = {
    2468		/* Events */
    2469		MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
    2470		MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
    2471	};
   2472
	/* Spectrum-2+ specific listeners: port mapping change events. */
    2473	static const struct mlxsw_listener mlxsw_sp2_listener[] = {
    2474		/* Events */
    2475		MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
    2476	};
   2477
	/* Program rate limiters (QPCR) for the trap groups that police
	 * traffic destined to the CPU, and record which policer IDs are in
	 * use in trap->policers_usage.
	 */
    2478	static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
    2479	{
    2480		struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
    2481		char qpcr_pl[MLXSW_REG_QPCR_LEN];
    2482		enum mlxsw_reg_qpcr_ir_units ir_units;
    2483		int max_cpu_policers;
    2484		bool is_bytes;
    2485		u8 burst_size;
    2486		u32 rate;
    2487		int i, err;
    2488	
    2489		if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
    2490			return -EIO;
    2491	
    2492		max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
    2493	
    2494		ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
    2495		for (i = 0; i < max_cpu_policers; i++) {
    2496			is_bytes = false;
	/* Policer ID == trap group ID; only the groups below get one. */
    2497			switch (i) {
    2498			case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
    2499			case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
    2500			case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
    2501				rate = 1024;
    2502				burst_size = 7;
    2503				break;
    2504			default:
    2505				continue;
    2506			}
    2507	
    2508			__set_bit(i, mlxsw_sp->trap->policers_usage);
    2509			mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
    2510					    burst_size);
    2511			err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
    2512			if (err)
    2513				return err;
    2514		}
    2515	
    2516		return 0;
    2517	}
   2518
	/* Configure the CPU trap groups (HTGT): assign each handled group a
	 * priority, a traffic class and the policer set up by
	 * mlxsw_sp_cpu_policers_set(); the EVENT group runs unpoliced.
	 */
    2519	static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
    2520	{
    2521		char htgt_pl[MLXSW_REG_HTGT_LEN];
    2522		enum mlxsw_reg_htgt_trap_group i;
    2523		int max_cpu_policers;
    2524		int max_trap_groups;
    2525		u8 priority, tc;
    2526		u16 policer_id;
    2527		int err;
    2528	
    2529		if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
    2530			return -EIO;
    2531	
    2532		max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
    2533		max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
    2534	
    2535		for (i = 0; i < max_trap_groups; i++) {
	/* By convention the policer ID equals the trap group ID. */
    2536			policer_id = i;
    2537			switch (i) {
    2538			case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
    2539			case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
    2540			case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
    2541				priority = 1;
    2542				tc = 1;
    2543				break;
    2544			case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
    2545				priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
    2546				tc = MLXSW_REG_HTGT_DEFAULT_TC;
    2547				policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
    2548				break;
    2549			default:
    2550				continue;
    2551			}
    2552	
	/* Sanity: a real policer ID must exist in the device. */
    2553			if (max_cpu_policers <= policer_id &&
    2554			    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
    2555				return -EIO;
    2556	
    2557			mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
    2558			err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
    2559			if (err)
    2560				return err;
    2561		}
    2562	
    2563		return 0;
    2564	}
   2565
	/* Initialize trap handling: allocate the policer-usage bitmap, set
	 * up CPU policers and trap groups, then register the common and the
	 * per-ASIC listener tables. Unwinds fully on failure.
	 */
    2566	static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
    2567	{
    2568		struct mlxsw_sp_trap *trap;
    2569		u64 max_policers;
    2570		int err;
    2571	
    2572		if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
    2573			return -EIO;
    2574		max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	/* Flexible-array allocation: one bit per available policer. */
    2575		trap = kzalloc(struct_size(trap, policers_usage,
    2576					   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
    2577		if (!trap)
    2578			return -ENOMEM;
    2579		trap->max_policers = max_policers;
    2580		mlxsw_sp->trap = trap;
    2581	
    2582		err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
    2583		if (err)
    2584			goto err_cpu_policers_set;
    2585	
    2586		err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
    2587		if (err)
    2588			goto err_trap_groups_set;
    2589	
    2590		err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
    2591						ARRAY_SIZE(mlxsw_sp_listener),
    2592						mlxsw_sp);
    2593		if (err)
    2594			goto err_traps_register;
    2595	
	/* ASIC-specific listeners (mlxsw_sp1/2_listener) set at probe. */
    2596		err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
    2597						mlxsw_sp->listeners_count, mlxsw_sp);
    2598		if (err)
    2599			goto err_extra_traps_init;
    2600	
    2601		return 0;
    2602	
    2603	err_extra_traps_init:
    2604		mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
    2605					    ARRAY_SIZE(mlxsw_sp_listener),
    2606					    mlxsw_sp);
    2607	err_traps_register:
    2608	err_trap_groups_set:
    2609	err_cpu_policers_set:
    2610		kfree(trap);
    2611		return err;
    2612	}
   2613
	/* Unregister listeners in reverse order of registration and free the
	 * trap state.
	 */
    2614	static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
    2615	{
    2616		mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
    2617					    mlxsw_sp->listeners_count,
    2618					    mlxsw_sp);
    2619		mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
    2620					    ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
    2621		kfree(mlxsw_sp->trap);
    2622	}
   2623
	/* Initial value fed into jhash() when deriving the LAG hash seed. */
    2624	#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
    2625	
	/* Configure LAG hashing (SLCR) with a seed derived from the base MAC
	 * so different switches hash differently, and allocate the per-LAG
	 * upper-device tracking array.
	 */
    2626	static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
    2627	{
    2628		char slcr_pl[MLXSW_REG_SLCR_LEN];
    2629		u32 seed;
    2630		int err;
    2631	
    2632		seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
    2633			     MLXSW_SP_LAG_SEED_INIT);
	/* Hash over the full L2/L3/L4 header field set. */
    2634		mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
    2635					     MLXSW_REG_SLCR_LAG_HASH_DMAC |
    2636					     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
    2637					     MLXSW_REG_SLCR_LAG_HASH_VLANID |
    2638					     MLXSW_REG_SLCR_LAG_HASH_SIP |
    2639					     MLXSW_REG_SLCR_LAG_HASH_DIP |
    2640					     MLXSW_REG_SLCR_LAG_HASH_SPORT |
    2641					     MLXSW_REG_SLCR_LAG_HASH_DPORT |
    2642					     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
    2643		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
    2644		if (err)
    2645			return err;
    2646	
    2647		if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
    2648		    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
    2649			return -EIO;
    2650	
    2651		mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
    2652					 sizeof(struct mlxsw_sp_upper),
    2653					 GFP_KERNEL);
    2654		if (!mlxsw_sp->lags)
    2655			return -ENOMEM;
    2656	
    2657		return 0;
    2658	}
   2659
	/* Release the per-LAG tracking array allocated by mlxsw_sp_lag_init(). */
    2660	static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
    2661	{
    2662		kfree(mlxsw_sp->lags);
    2663	}
   2664
	/* PTP implementation hooks for Spectrum-1 ASICs. */
    2665	static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
    2666		.clock_init	= mlxsw_sp1_ptp_clock_init,
    2667		.clock_fini	= mlxsw_sp1_ptp_clock_fini,
    2668		.init		= mlxsw_sp1_ptp_init,
    2669		.fini		= mlxsw_sp1_ptp_fini,
    2670		.receive	= mlxsw_sp1_ptp_receive,
    2671		.transmitted	= mlxsw_sp1_ptp_transmitted,
    2672		.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
    2673		.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
    2674		.shaper_work	= mlxsw_sp1_ptp_shaper_work,
    2675		.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
    2676		.get_stats_count = mlxsw_sp1_get_stats_count,
    2677		.get_stats_strings = mlxsw_sp1_get_stats_strings,
    2678		.get_stats	= mlxsw_sp1_get_stats,
    2679	};
   2680
	/* PTP implementation hooks for Spectrum-2 and later ASICs. */
    2681	static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
    2682		.clock_init	= mlxsw_sp2_ptp_clock_init,
    2683		.clock_fini	= mlxsw_sp2_ptp_clock_fini,
    2684		.init		= mlxsw_sp2_ptp_init,
    2685		.fini		= mlxsw_sp2_ptp_fini,
    2686		.receive	= mlxsw_sp2_ptp_receive,
    2687		.transmitted	= mlxsw_sp2_ptp_transmitted,
    2688		.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
    2689		.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
    2690		.shaper_work	= mlxsw_sp2_ptp_shaper_work,
    2691		.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
    2692		.get_stats_count = mlxsw_sp2_get_stats_count,
    2693		.get_stats_strings = mlxsw_sp2_get_stats_strings,
    2694		.get_stats	= mlxsw_sp2_get_stats,
    2695	};
   2696
/* A sampling trigger together with its packet sampling parameters.
 * Nodes live in mlxsw_sp->sample_trigger_ht, keyed by 'trigger', and are
 * reference counted so several users can share one trigger.
 */
struct mlxsw_sp_sample_trigger_node {
	struct mlxsw_sp_sample_trigger trigger;
	struct mlxsw_sp_sample_params params;
	struct rhash_head ht_node;
	struct rcu_head rcu;	/* freed via kfree_rcu(); lookups run under RCU */
	refcount_t refcount;
};
   2704
/* rhashtable layout for sampling trigger nodes: the embedded trigger
 * struct is the (memcmp-compared) key.
 */
static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
	.head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_sample_trigger),
	.automatic_shrinking = true,
};
   2711
/* Canonicalize @trigger into @key for hash table operations. The memset
 * zeroes the whole object - including any padding bytes - which matters
 * because the rhashtable compares keys bytewise over key_len.
 */
static void
mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
				 const struct mlxsw_sp_sample_trigger *trigger)
{
	memset(key, 0, sizeof(*key));
	key->type = trigger->type;
	key->local_port = trigger->local_port;
}
   2720
   2721/* RCU read lock must be held */
   2722struct mlxsw_sp_sample_params *
   2723mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
   2724				      const struct mlxsw_sp_sample_trigger *trigger)
   2725{
   2726	struct mlxsw_sp_sample_trigger_node *trigger_node;
   2727	struct mlxsw_sp_sample_trigger key;
   2728
   2729	mlxsw_sp_sample_trigger_key_init(&key, trigger);
   2730	trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
   2731					 mlxsw_sp_sample_trigger_ht_params);
   2732	if (!trigger_node)
   2733		return NULL;
   2734
   2735	return &trigger_node->params;
   2736}
   2737
   2738static int
   2739mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
   2740				  const struct mlxsw_sp_sample_trigger *trigger,
   2741				  const struct mlxsw_sp_sample_params *params)
   2742{
   2743	struct mlxsw_sp_sample_trigger_node *trigger_node;
   2744	int err;
   2745
   2746	trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
   2747	if (!trigger_node)
   2748		return -ENOMEM;
   2749
   2750	trigger_node->trigger = *trigger;
   2751	trigger_node->params = *params;
   2752	refcount_set(&trigger_node->refcount, 1);
   2753
   2754	err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
   2755				     &trigger_node->ht_node,
   2756				     mlxsw_sp_sample_trigger_ht_params);
   2757	if (err)
   2758		goto err_rhashtable_insert;
   2759
   2760	return 0;
   2761
   2762err_rhashtable_insert:
   2763	kfree(trigger_node);
   2764	return err;
   2765}
   2766
/* Remove @trigger_node from the hash table and free it after an RCU
 * grace period, so concurrent RCU readers from
 * mlxsw_sp_sample_trigger_params_lookup() stay safe.
 */
static void
mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_sample_trigger_node *trigger_node)
{
	rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
			       &trigger_node->ht_node,
			       mlxsw_sp_sample_trigger_ht_params);
	kfree_rcu(trigger_node, rcu);
}
   2776
/* Associate packet sampling parameters with a sampling trigger, creating
 * the trigger node on first use or taking another reference on an
 * existing one. Must be called under RTNL.
 *
 * Returns 0 on success. Fails with -EINVAL if the existing trigger is
 * bound to a port (per-port sampling cannot be enabled twice) or if
 * @params differ from the parameters already set for the trigger.
 */
int
mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_sample_trigger *trigger,
				   const struct mlxsw_sp_sample_params *params,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
							 params);

	if (trigger_node->trigger.local_port) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
		return -EINVAL;
	}

	/* Sharing a trigger is only allowed when all parameters match. */
	if (trigger_node->params.psample_group != params->psample_group ||
	    trigger_node->params.truncate != params->truncate ||
	    trigger_node->params.rate != params->rate ||
	    trigger_node->params.trunc_size != params->trunc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
		return -EINVAL;
	}

	refcount_inc(&trigger_node->refcount);

	return 0;
}
   2814
   2815void
   2816mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
   2817				     const struct mlxsw_sp_sample_trigger *trigger)
   2818{
   2819	struct mlxsw_sp_sample_trigger_node *trigger_node;
   2820	struct mlxsw_sp_sample_trigger key;
   2821
   2822	ASSERT_RTNL();
   2823
   2824	mlxsw_sp_sample_trigger_key_init(&key, trigger);
   2825
   2826	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
   2827					      &key,
   2828					      mlxsw_sp_sample_trigger_ht_params);
   2829	if (!trigger_node)
   2830		return;
   2831
   2832	if (!refcount_dec_and_test(&trigger_node->refcount))
   2833		return;
   2834
   2835	mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
   2836}
   2837
   2838static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
   2839				    unsigned long event, void *ptr);
   2840
   2841#define MLXSW_SP_DEFAULT_PARSING_DEPTH 96
   2842#define MLXSW_SP_INCREASED_PARSING_DEPTH 128
   2843#define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789
   2844
/* Set up default packet parsing state (parsing depth and VXLAN UDP
 * destination port) and the mutex protecting it.
 */
static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
	mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
	mutex_init(&mlxsw_sp->parsing.lock);
}
   2851
/* Tear down the parsing state lock set up by mlxsw_sp_parsing_init(). */
static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->parsing.lock);
}
   2856
/* Reference-counted cache entry mapping an IPv6 address to the KVDL
 * index where it is programmed in the device.
 */
struct mlxsw_sp_ipv6_addr_node {
	struct in6_addr key;
	struct rhash_head ht_node;
	u32 kvdl_index;
	refcount_t refcount;
};
   2863
/* rhashtable layout for IPv6 address nodes, keyed by the address itself. */
static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
	.head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
	.key_len = sizeof(struct in6_addr),
	.automatic_shrinking = true,
};
   2870
/* Allocate a KVDL entry for @addr6, program the address into it via the
 * RIPS register, and track it with a new reference-counted hash table
 * node. On success *p_kvdl_index holds the allocated index.
 *
 * Error unwind: node allocation and RIPS write failures both fall
 * through to freeing the KVDL entry; a hash table insertion failure
 * additionally frees the node first.
 */
static int
mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
			u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	char rips_pl[MLXSW_REG_RIPS_LEN];
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
				  MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
				  p_kvdl_index);
	if (err)
		return err;

	mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
	if (err)
		goto err_rips_write;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		err = -ENOMEM;
		goto err_node_alloc;
	}

	node->key = *addr6;
	node->kvdl_index = *p_kvdl_index;
	refcount_set(&node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
				     &node->ht_node,
				     mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(node);
err_node_alloc:
err_rips_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   *p_kvdl_index);
	return err;
}
   2916
/* Remove an IPv6 address node from the hash table and release its KVDL
 * entry. The index is saved before kfree() since the node is gone by the
 * time the KVDL entry is freed.
 */
static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipv6_addr_node *node)
{
	u32 kvdl_index = node->kvdl_index;

	rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
			       mlxsw_sp_ipv6_addr_ht_params);
	kfree(node);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   kvdl_index);
}
   2928
   2929int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
   2930				      const struct in6_addr *addr6,
   2931				      u32 *p_kvdl_index)
   2932{
   2933	struct mlxsw_sp_ipv6_addr_node *node;
   2934	int err = 0;
   2935
   2936	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
   2937	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
   2938				      mlxsw_sp_ipv6_addr_ht_params);
   2939	if (node) {
   2940		refcount_inc(&node->refcount);
   2941		*p_kvdl_index = node->kvdl_index;
   2942		goto out_unlock;
   2943	}
   2944
   2945	err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);
   2946
   2947out_unlock:
   2948	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
   2949	return err;
   2950}
   2951
   2952void
   2953mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
   2954{
   2955	struct mlxsw_sp_ipv6_addr_node *node;
   2956
   2957	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
   2958	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
   2959				      mlxsw_sp_ipv6_addr_ht_params);
   2960	if (WARN_ON(!node))
   2961		goto out_unlock;
   2962
   2963	if (!refcount_dec_and_test(&node->refcount))
   2964		goto out_unlock;
   2965
   2966	mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);
   2967
   2968out_unlock:
   2969	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
   2970}
   2971
   2972static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
   2973{
   2974	int err;
   2975
   2976	err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
   2977			      &mlxsw_sp_ipv6_addr_ht_params);
   2978	if (err)
   2979		return err;
   2980
   2981	mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
   2982	return 0;
   2983}
   2984
/* Tear down the IPv6 address hash table and its mutex. */
static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
	rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
}
   2990
/* Common initialization for all Spectrum generations. Called after the
 * generation-specific init routine has bound the per-ASIC operation
 * tables. Sub-systems are brought up in dependency order; on failure,
 * already-initialized sub-systems are unwound in reverse order through
 * the goto ladder at the bottom. mlxsw_sp_fini() mirrors this order.
 *
 * Returns 0 on success or a negative errno from the failing sub-system.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_sp_parsing_init(mlxsw_sp);
	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
		goto err_ipv6_addr_ht_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* PTP support is only set up on buses that can read the free-running
	 * counter; both clock and state pointers stay NULL otherwise.
	 */
	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after SPAN is initialized, so that the
	 * event handler can call SPAN respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
			      &mlxsw_sp_sample_trigger_ht_params);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
		goto err_sample_trigger_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
err_sample_trigger_init:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
err_ipv6_addr_ht_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
	return err;
}
   3211
/* Spectrum-1 init: bind the SP1-specific operation tables and constants,
 * then run the common initialization.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
   3242
/* Spectrum-2 init: bind the SP2-specific operation tables and constants,
 * then run the common initialization.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
   3274
/* Spectrum-3 init: mostly reuses the SP2 operation tables, overriding
 * only the shared buffer ops, SPAN ops and lowest shaper burst size.
 */
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
   3306
/* Spectrum-4 init: reuses SP2/SP3 operation tables, overriding the flex
 * key ops, ACL Bloom filter ops and lowest shaper burst size.
 */
static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
   3338
/* Driver teardown: dismantle all sub-systems in the exact reverse order
 * of mlxsw_sp_init(). Do not reorder these calls independently of init.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	/* clock is only non-NULL when PTP was set up in mlxsw_sp_init() */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
}
   3370
   3371/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
   3372 * 802.1Q FIDs
   3373 */
   3374#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
   3375					 VLAN_VID_MASK - 1)
   3376
/* Device configuration profile applied to Spectrum-1 at init time,
 * including the explicit KVD partitioning (linear size plus the
 * single/double hash split ratio used by the resource registration code).
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
   3400
/* Device configuration profile for Spectrum-2 and later. Unlike SP1,
 * no explicit KVD partitioning; the XLT cache mode is enabled instead.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvh_xlt_cache_mode	= 1,
	.kvh_xlt_cache_mode		= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
   3422
/* Fill in the devlink size parameters for the KVD resource and its three
 * partitions. Each partition's maximum is the total KVD size minus the
 * minimum sizes of the other two partitions, so any valid repartition
 * still fits; all sizes are in entries at KVD granularity.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}
   3458
   3459static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
   3460{
   3461	struct devlink *devlink = priv_to_devlink(mlxsw_core);
   3462	struct devlink_resource_size_params hash_single_size_params;
   3463	struct devlink_resource_size_params hash_double_size_params;
   3464	struct devlink_resource_size_params linear_size_params;
   3465	struct devlink_resource_size_params kvd_size_params;
   3466	u32 kvd_size, single_size, double_size, linear_size;
   3467	const struct mlxsw_config_profile *profile;
   3468	int err;
   3469
   3470	profile = &mlxsw_sp1_config_profile;
   3471	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
   3472		return -EIO;
   3473
   3474	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
   3475					      &linear_size_params,
   3476					      &hash_double_size_params,
   3477					      &hash_single_size_params);
   3478
   3479	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
   3480	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
   3481					kvd_size, MLXSW_SP_RESOURCE_KVD,
   3482					DEVLINK_RESOURCE_ID_PARENT_TOP,
   3483					&kvd_size_params);
   3484	if (err)
   3485		return err;
   3486
   3487	linear_size = profile->kvd_linear_size;
   3488	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
   3489					linear_size,
   3490					MLXSW_SP_RESOURCE_KVD_LINEAR,
   3491					MLXSW_SP_RESOURCE_KVD,
   3492					&linear_size_params);
   3493	if (err)
   3494		return err;
   3495
   3496	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
   3497	if  (err)
   3498		return err;
   3499
   3500	double_size = kvd_size - linear_size;
   3501	double_size *= profile->kvd_hash_double_parts;
   3502	double_size /= profile->kvd_hash_double_parts +
   3503		       profile->kvd_hash_single_parts;
   3504	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
   3505	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
   3506					double_size,
   3507					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
   3508					MLXSW_SP_RESOURCE_KVD,
   3509					&hash_double_size_params);
   3510	if (err)
   3511		return err;
   3512
   3513	single_size = kvd_size - double_size - linear_size;
   3514	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
   3515					single_size,
   3516					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
   3517					MLXSW_SP_RESOURCE_KVD,
   3518					&hash_single_size_params);
   3519	if (err)
   3520		return err;
   3521
   3522	return 0;
   3523}
   3524
/* Register the KVD devlink resource for Spectrum-2 and later. Unlike
 * SP1, the KVD is registered as a single fixed-size resource with no
 * child partitions.
 *
 * Returns 0 on success, -EIO if the device does not report KVD_SIZE, or
 * the devlink_resource_register() error.
 */
static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					 kvd_size, MLXSW_SP_RESOURCE_KVD,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &kvd_size_params);
}
   3544
/* Register the SPAN (port mirroring agents) devlink resource, sized by
 * the device-reported MAX_SPAN.
 *
 * Returns 0 on success, -EIO if MAX_SPAN is not reported, or the
 * devlink_resource_register() error.
 */
static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params span_size_params;
	u32 max_span;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
		return -EIO;

	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
					 max_span, MLXSW_SP_RESOURCE_SPAN,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &span_size_params);
}
   3563
   3564static int
   3565mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
   3566{
   3567	struct devlink *devlink = priv_to_devlink(mlxsw_core);
   3568	struct devlink_resource_size_params size_params;
   3569	u8 max_rif_mac_profiles;
   3570
   3571	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
   3572		max_rif_mac_profiles = 1;
   3573	else
   3574		max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
   3575							  MAX_RIF_MAC_PROFILES);
   3576	devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
   3577					  max_rif_mac_profiles, 1,
   3578					  DEVLINK_RESOURCE_UNIT_ENTRY);
   3579
   3580	return devlink_resource_register(devlink,
   3581					 "rif_mac_profiles",
   3582					 max_rif_mac_profiles,
   3583					 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
   3584					 DEVLINK_RESOURCE_ID_PARENT_TOP,
   3585					 &size_params);
   3586}
   3587
   3588static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
   3589{
   3590	int err;
   3591
   3592	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
   3593	if (err)
   3594		return err;
   3595
   3596	err = mlxsw_sp_resources_span_register(mlxsw_core);
   3597	if (err)
   3598		goto err_resources_span_register;
   3599
   3600	err = mlxsw_sp_counter_resources_register(mlxsw_core);
   3601	if (err)
   3602		goto err_resources_counter_register;
   3603
   3604	err = mlxsw_sp_policer_resources_register(mlxsw_core);
   3605	if (err)
   3606		goto err_policer_resources_register;
   3607
   3608	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
   3609	if (err)
   3610		goto err_resources_rif_mac_profile_register;
   3611
   3612	return 0;
   3613
   3614err_resources_rif_mac_profile_register:
   3615err_policer_resources_register:
   3616err_resources_counter_register:
   3617err_resources_span_register:
   3618	devlink_resources_unregister(priv_to_devlink(mlxsw_core));
   3619	return err;
   3620}
   3621
   3622static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
   3623{
   3624	int err;
   3625
   3626	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
   3627	if (err)
   3628		return err;
   3629
   3630	err = mlxsw_sp_resources_span_register(mlxsw_core);
   3631	if (err)
   3632		goto err_resources_span_register;
   3633
   3634	err = mlxsw_sp_counter_resources_register(mlxsw_core);
   3635	if (err)
   3636		goto err_resources_counter_register;
   3637
   3638	err = mlxsw_sp_policer_resources_register(mlxsw_core);
   3639	if (err)
   3640		goto err_policer_resources_register;
   3641
   3642	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
   3643	if (err)
   3644		goto err_resources_rif_mac_profile_register;
   3645
   3646	return 0;
   3647
   3648err_resources_rif_mac_profile_register:
   3649err_policer_resources_register:
   3650err_resources_counter_register:
   3651err_resources_span_register:
   3652	devlink_resources_unregister(priv_to_devlink(mlxsw_core));
   3653	return err;
   3654}
   3655
/* Determine the linear/double-hash/single-hash KVD partition sizes.
 * User-configured sizes are read via devlink; for any partition the user
 * did not configure, the size is derived from the config profile.
 * Results are sanity-checked against the device's minimum capabilities.
 *
 * Returns 0 on success, -EIO if required capabilities are missing or the
 * computed sizes violate the device minimums.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what is left of the KVD without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiple of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		/* Not user-configured; use the profile default. */
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		/* Split the remaining (hash) space by the profile's
		 * double:single parts ratio, rounded down to granularity.
		 */
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		/* Single hash gets whatever is left over. */
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}
   3710
/* devlink param get callback: report the current ACL region rehash
 * interval (u32) from the ACL core.
 */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
	return 0;
}
   3721
/* devlink param set callback: apply a new ACL region rehash interval.
 * Validation and the actual update are delegated to the ACL core.
 */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
}
   3731
/* devlink parameters exposed on Spectrum-2 and later: a single runtime
 * u32 parameter, "acl_region_rehash_interval", wired to the get/set
 * callbacks defined in this file.
 */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};
   3741
/* Register the Spectrum-2 devlink parameters and initialize the rehash
 * interval's driverinit value to 0.
 *
 * Returns 0 on success or the devlink registration error.
 */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		return err;

	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;
}
   3759
/* Unregister the Spectrum-2 devlink parameters registered by
 * mlxsw_sp2_params_register().
 */
static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp2_devlink_params,
				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
}
   3766
/* Core callback invoked for a transmitted PTP skb: strip the mlxsw Tx
 * header and hand the skb to the generation-specific PTP handler.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u16 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}
   3775
/* mlxsw driver ops for Spectrum-1 devices. Uses the Spectrum-1 firmware
 * revision/filename, resource registration and config profile; the
 * kvd_sizes_get callback reflects Spectrum-1's partitioned KVD (not set
 * on later generations).
 */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp1_fw_rev,
	.fw_filename			= MLXSW_SP1_FW_FILENAME,
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp1_config_profile,
};
   3811
/* mlxsw driver ops for Spectrum-2 devices. Compared to Spectrum-1:
 * flat KVD (no kvd_sizes_get), devlink params register/unregister, and
 * the ports_remove_selected callback.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp2_fw_rev,
	.fw_filename			= MLXSW_SP2_FW_FILENAME,
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.ports_remove_selected		= mlxsw_sp_ports_remove_selected,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
};
   3849
/* mlxsw driver ops for Spectrum-3 devices. Identical to the Spectrum-2
 * ops except for the SP3-specific kind, firmware rev/filename and init
 * callback; resources, params and config profile are shared with SP2.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind				= mlxsw_sp3_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp3_fw_rev,
	.fw_filename			= MLXSW_SP3_FW_FILENAME,
	.init				= mlxsw_sp3_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.ports_remove_selected		= mlxsw_sp_ports_remove_selected,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
};
   3887
/* mlxsw driver ops for Spectrum-4 devices. Shares the Spectrum-2
 * resources/params/profile. Note: fw_req_rev and fw_filename are not
 * set here — presumably no firmware flashing requirement for SP4 at
 * this point; confirm against core firmware-loading logic.
 */
static struct mlxsw_driver mlxsw_sp4_driver = {
	.kind				= mlxsw_sp4_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp4_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.ports_remove_selected		= mlxsw_sp_ports_remove_selected,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
};
   3923
/* Return true if @dev is an mlxsw_sp port netdev (identified by its
 * netdev_ops pointer).
 */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
   3928
   3929static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
   3930				   struct netdev_nested_priv *priv)
   3931{
   3932	int ret = 0;
   3933
   3934	if (mlxsw_sp_port_dev_check(lower_dev)) {
   3935		priv->data = (void *)netdev_priv(lower_dev);
   3936		ret = 1;
   3937	}
   3938
   3939	return ret;
   3940}
   3941
   3942struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
   3943{
   3944	struct netdev_nested_priv priv = {
   3945		.data = NULL,
   3946	};
   3947
   3948	if (mlxsw_sp_port_dev_check(dev))
   3949		return netdev_priv(dev);
   3950
   3951	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);
   3952
   3953	return (struct mlxsw_sp_port *)priv.data;
   3954}
   3955
/* Return the mlxsw_sp instance that @dev (or one of its lower devices)
 * belongs to, or NULL if @dev is unrelated to this driver.
 */
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}
   3963
   3964struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
   3965{
   3966	struct netdev_nested_priv priv = {
   3967		.data = NULL,
   3968	};
   3969
   3970	if (mlxsw_sp_port_dev_check(dev))
   3971		return netdev_priv(dev);
   3972
   3973	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
   3974				      &priv);
   3975
   3976	return (struct mlxsw_sp_port *)priv.data;
   3977}
   3978
   3979struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
   3980{
   3981	struct mlxsw_sp_port *mlxsw_sp_port;
   3982
   3983	rcu_read_lock();
   3984	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
   3985	if (mlxsw_sp_port)
   3986		dev_hold(mlxsw_sp_port->dev);
   3987	rcu_read_unlock();
   3988	return mlxsw_sp_port;
   3989}
   3990
/* Drop the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
   3995
/* Take a reference on the increased parsing depth. The first caller
 * programs the device (MPRS register) to the increased depth; later
 * callers only bump the refcount. Serialized by parsing.lock.
 *
 * Returns 0 on success or the register-write error (in which case the
 * refcount stays at zero).
 */
int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err = 0;

	mutex_lock(&mlxsw_sp->parsing.lock);

	/* Already increased by someone else; just take a reference. */
	if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	/* Cache the new depth and publish the first reference. */
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}
   4019
/* Drop a reference on the increased parsing depth. When the last
 * reference goes away, restore the device to the default depth.
 * The restoring register write is best-effort (return value ignored),
 * but the cached depth is updated regardless. Serialized by
 * parsing.lock.
 */
void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
}
   4037
/* Program the VxLAN UDP destination port used by the parser (MPRS
 * register), keeping the currently configured parsing depth. The cached
 * dport is only updated after the register write succeeds. Serialized
 * by parsing.lock.
 *
 * @udp_dport: port in network byte order.
 * Returns 0 on success or the register-write error.
 */
int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
					 __be16 udp_dport)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err;

	mutex_lock(&mlxsw_sp->parsing.lock);

	mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
			    be16_to_cpu(udp_dport));
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}
   4058
/* Make the port leave any bridge reachable through @lag_dev: the LAG
 * itself if it is a bridge port, and every directly linked upper of the
 * LAG that is a bridge port.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		/* The upper's master is the bridge it is enslaved to. */
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}
   4077
/* Create LAG @lag_id in hardware via the SLDR register. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
   4085
/* Destroy LAG @lag_id in hardware via the SLDR register. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
   4093
/* Add the port to LAG @lag_id at member slot @port_index as a collector
 * port (SLCOR register).
 */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
   4104
/* Remove the port from LAG @lag_id's collector set (SLCOR register). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
   4115
/* Enable collection on the port for LAG @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
   4126
/* Disable collection on the port for LAG @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
   4137
   4138static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
   4139				  struct net_device *lag_dev,
   4140				  u16 *p_lag_id)
   4141{
   4142	struct mlxsw_sp_upper *lag;
   4143	int free_lag_id = -1;
   4144	u64 max_lag;
   4145	int i;
   4146
   4147	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
   4148	for (i = 0; i < max_lag; i++) {
   4149		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
   4150		if (lag->ref_count) {
   4151			if (lag->dev == lag_dev) {
   4152				*p_lag_id = i;
   4153				return 0;
   4154			}
   4155		} else if (free_lag_id < 0) {
   4156			free_lag_id = i;
   4157		}
   4158	}
   4159	if (free_lag_id < 0)
   4160		return -EBUSY;
   4161	*p_lag_id = free_lag_id;
   4162	return 0;
   4163}
   4164
   4165static bool
   4166mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
   4167			  struct net_device *lag_dev,
   4168			  struct netdev_lag_upper_info *lag_upper_info,
   4169			  struct netlink_ext_ack *extack)
   4170{
   4171	u16 lag_id;
   4172
   4173	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
   4174		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
   4175		return false;
   4176	}
   4177	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
   4178		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
   4179		return false;
   4180	}
   4181	return true;
   4182}
   4183
   4184static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
   4185				       u16 lag_id, u8 *p_port_index)
   4186{
   4187	u64 max_lag_members;
   4188	int i;
   4189
   4190	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
   4191					     MAX_LAG_MEMBERS);
   4192	for (i = 0; i < max_lag_members; i++) {
   4193		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
   4194			*p_port_index = i;
   4195			return 0;
   4196		}
   4197	}
   4198	return -EBUSY;
   4199}
   4200
/* Enslave the port to @lag_dev: resolve (or create) the hardware LAG,
 * add the port as a collector member, record the LAG mapping in the
 * core, and move the router interface from the port to the LAG.
 * On failure, every completed step is rolled back in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		/* First member: create the LAG in hardware. */
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	/* Join a router interface configured on the LAG, if exists */
	err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
					     lag_dev, extack);
	if (err)
		goto err_router_join;

	return 0;

err_router_join:
	/* Undo the membership bookkeeping and the collector add. */
	lag->ref_count--;
	mlxsw_sp_port->lagged = 0;
	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	/* If we created the LAG above and nobody else joined, remove it. */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
   4258
/* Release the port from @lag_dev: remove it from the hardware LAG,
 * flush its VLANs, detach the LAG's bridge memberships, destroy the LAG
 * when this is the last member, and restore the default PVID so
 * untagged traffic keeps flowing. No-op if the port is not lagged.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are member in
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	/* Last member leaving: tear down the hardware LAG. */
	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}
   4293
/* Add the port to LAG @lag_id's distributor (egress) set via SLDR. */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
   4304
/* Remove the port from LAG @lag_id's distributor set via SLDR. */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
   4315
/* Enable both collection and distribution for the port on its LAG.
 * If enabling distribution fails, collection is rolled back so the two
 * states stay consistent.
 */
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}
   4336
/* Disable both distribution and collection for the port on its LAG
 * (reverse order of enable). If disabling collection fails, the port is
 * re-added to the distributor set to keep the two states consistent.
 */
static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}
   4358
   4359static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
   4360				     struct netdev_lag_lower_state_info *info)
   4361{
   4362	if (info->tx_enabled)
   4363		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
   4364	else
   4365		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
   4366}
   4367
   4368static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
   4369				 bool enable)
   4370{
   4371	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
   4372	enum mlxsw_reg_spms_state spms_state;
   4373	char *spms_pl;
   4374	u16 vid;
   4375	int err;
   4376
   4377	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
   4378			      MLXSW_REG_SPMS_STATE_DISCARDING;
   4379
   4380	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
   4381	if (!spms_pl)
   4382		return -ENOMEM;
   4383	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
   4384
   4385	for (vid = 0; vid < VLAN_N_VID; vid++)
   4386		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
   4387
   4388	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
   4389	kfree(spms_pl);
   4390	return err;
   4391}
   4392
/* Prepare the port for Open vSwitch: switch to Virtual Port mode, force
 * STP forwarding, enable VIDs 1..VLAN_N_VID-2 (untagged), and disable
 * learning on VIDs 1..VLAN_N_VID-1. Every completed step is rolled back
 * in reverse order on failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning only for the VIDs we already disabled;
	 * vid currently points at the one that failed.
	 */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
   4427
   4428static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
   4429{
   4430	u16 vid;
   4431
   4432	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
   4433		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
   4434					       vid, true);
   4435
   4436	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
   4437			       false, false);
   4438	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
   4439	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
   4440}
   4441
   4442static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
   4443{
   4444	unsigned int num_vxlans = 0;
   4445	struct net_device *dev;
   4446	struct list_head *iter;
   4447
   4448	netdev_for_each_lower_dev(br_dev, dev, iter) {
   4449		if (netif_is_vxlan(dev))
   4450			num_vxlans++;
   4451	}
   4452
   4453	return num_vxlans > 1;
   4454}
   4455
   4456static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
   4457{
   4458	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
   4459	struct net_device *dev;
   4460	struct list_head *iter;
   4461
   4462	netdev_for_each_lower_dev(br_dev, dev, iter) {
   4463		u16 pvid;
   4464		int err;
   4465
   4466		if (!netif_is_vxlan(dev))
   4467			continue;
   4468
   4469		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
   4470		if (err || !pvid)
   4471			continue;
   4472
   4473		if (test_and_set_bit(pvid, vlans))
   4474			return false;
   4475	}
   4476
   4477	return true;
   4478}
   4479
   4480static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
   4481					   struct netlink_ext_ack *extack)
   4482{
   4483	if (br_multicast_enabled(br_dev)) {
   4484		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
   4485		return false;
   4486	}
   4487
   4488	if (!br_vlan_enabled(br_dev) &&
   4489	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
   4490		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
   4491		return false;
   4492	}
   4493
   4494	if (br_vlan_enabled(br_dev) &&
   4495	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
   4496		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
   4497		return false;
   4498	}
   4499
   4500	return true;
   4501}
   4502
/* Handle PRECHANGEUPPER/CHANGEUPPER notifications for a port netdev.
 * PRECHANGEUPPER vetoes topologies the device cannot offload (returning
 * an error with an extack message); CHANGEUPPER reflects the accepted
 * change by joining/leaving bridges, LAGs and OVS, or cleaning up after
 * macvlan/VLAN uppers. lower_dev is the device directly under the new
 * upper (the port itself or e.g. a LAG it is a member of).
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	u16 proto;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only these upper device types are supported on a port. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* The remaining vetoes apply only when linking a new upper. */
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* An upper that itself has uppers is only allowed when it is
		 * a bridge we already offload.
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		/* VLAN-aware bridges are supported with 802.1q / 802.1ad
		 * protocols only, and an 802.1ad bridge cannot take a port
		 * that already has VLAN uppers.
		 */
		if (netif_is_bridge_master(upper_dev)) {
			br_vlan_get_proto(upper_dev, &proto);
			if (br_vlan_enabled(upper_dev) &&
			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
				return -EOPNOTSUPP;
			}
			if (vlan_uses_dev(lower_dev) &&
			    br_vlan_enabled(upper_dev) &&
			    proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

			if (br_vlan_enabled(br_dev)) {
				br_vlan_get_proto(br_dev, &proto);
				if (proto == ETH_P_8021AD) {
					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
					return -EOPNOTSUPP;
				}
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		/* The change was already vetted; apply it in the device. */
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev, extack);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			/* Only interested in a VLAN upper being unlinked from
			 * a bridge it was a port of.
			 */
			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
   4648
   4649static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
   4650					       unsigned long event, void *ptr)
   4651{
   4652	struct netdev_notifier_changelowerstate_info *info;
   4653	struct mlxsw_sp_port *mlxsw_sp_port;
   4654	int err;
   4655
   4656	mlxsw_sp_port = netdev_priv(dev);
   4657	info = ptr;
   4658
   4659	switch (event) {
   4660	case NETDEV_CHANGELOWERSTATE:
   4661		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
   4662			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
   4663							info->lower_state_info);
   4664			if (err)
   4665				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
   4666		}
   4667		break;
   4668	}
   4669
   4670	return 0;
   4671}
   4672
   4673static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
   4674					 struct net_device *port_dev,
   4675					 unsigned long event, void *ptr)
   4676{
   4677	switch (event) {
   4678	case NETDEV_PRECHANGEUPPER:
   4679	case NETDEV_CHANGEUPPER:
   4680		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
   4681							   event, ptr);
   4682	case NETDEV_CHANGELOWERSTATE:
   4683		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
   4684							   ptr);
   4685	}
   4686
   4687	return 0;
   4688}
   4689
   4690static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
   4691					unsigned long event, void *ptr)
   4692{
   4693	struct net_device *dev;
   4694	struct list_head *iter;
   4695	int ret;
   4696
   4697	netdev_for_each_lower_dev(lag_dev, dev, iter) {
   4698		if (mlxsw_sp_port_dev_check(dev)) {
   4699			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
   4700							    ptr);
   4701			if (ret)
   4702				return ret;
   4703		}
   4704	}
   4705
   4706	return 0;
   4707}
   4708
/* Handle PRECHANGEUPPER/CHANGEUPPER for a VLAN device (vlan_dev) whose
 * real device is the port dev. PRECHANGEUPPER vetoes unsupported uppers;
 * CHANGEUPPER joins/leaves a bridge or cleans up a macvlan upper.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only bridge, macvlan and VRF uppers are supported. */
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Remaining vetoes apply only when linking a new upper. */
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* An upper that itself has uppers is only allowed when it is
		 * a bridge we already offload.
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		}
		break;
	}

	return err;
}
   4773
   4774static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
   4775						  struct net_device *lag_dev,
   4776						  unsigned long event,
   4777						  void *ptr, u16 vid)
   4778{
   4779	struct net_device *dev;
   4780	struct list_head *iter;
   4781	int ret;
   4782
   4783	netdev_for_each_lower_dev(lag_dev, dev, iter) {
   4784		if (mlxsw_sp_port_dev_check(dev)) {
   4785			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
   4786								 event, ptr,
   4787								 vid);
   4788			if (ret)
   4789				return ret;
   4790		}
   4791	}
   4792
   4793	return 0;
   4794}
   4795
   4796static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
   4797						struct net_device *br_dev,
   4798						unsigned long event, void *ptr,
   4799						u16 vid)
   4800{
   4801	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
   4802	struct netdev_notifier_changeupper_info *info = ptr;
   4803	struct netlink_ext_ack *extack;
   4804	struct net_device *upper_dev;
   4805
   4806	if (!mlxsw_sp)
   4807		return 0;
   4808
   4809	extack = netdev_notifier_info_to_extack(&info->info);
   4810
   4811	switch (event) {
   4812	case NETDEV_PRECHANGEUPPER:
   4813		upper_dev = info->upper_dev;
   4814		if (!netif_is_macvlan(upper_dev) &&
   4815		    !netif_is_l3_master(upper_dev)) {
   4816			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
   4817			return -EOPNOTSUPP;
   4818		}
   4819		if (!info->linking)
   4820			break;
   4821		if (netif_is_macvlan(upper_dev) &&
   4822		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
   4823			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
   4824			return -EOPNOTSUPP;
   4825		}
   4826		break;
   4827	case NETDEV_CHANGEUPPER:
   4828		upper_dev = info->upper_dev;
   4829		if (info->linking)
   4830			break;
   4831		if (netif_is_macvlan(upper_dev))
   4832			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
   4833		break;
   4834	}
   4835
   4836	return 0;
   4837}
   4838
   4839static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
   4840					 unsigned long event, void *ptr)
   4841{
   4842	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
   4843	u16 vid = vlan_dev_vlan_id(vlan_dev);
   4844
   4845	if (mlxsw_sp_port_dev_check(real_dev))
   4846		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
   4847							  event, ptr, vid);
   4848	else if (netif_is_lag_master(real_dev))
   4849		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
   4850							      real_dev, event,
   4851							      ptr, vid);
   4852	else if (netif_is_bridge_master(real_dev))
   4853		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
   4854							    event, ptr, vid);
   4855
   4856	return 0;
   4857}
   4858
/* Handle PRECHANGEUPPER/CHANGEUPPER for a bridge device itself.
 * PRECHANGEUPPER vetoes unsupported uppers (only VLAN, macvlan and VRF
 * are allowed, with further restrictions below); CHANGEUPPER cleans up
 * router state when a VLAN or macvlan upper is unlinked.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	u16 proto;

	/* Ignore bridges with no lower owned by this driver instance. */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		/* Remaining vetoes apply only when linking a new upper. */
		if (!info->linking)
			break;
		if (br_vlan_enabled(br_dev)) {
			br_vlan_get_proto(br_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
				return -EOPNOTSUPP;
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only unlinking requires action here. */
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
   4915
   4916static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
   4917					    unsigned long event, void *ptr)
   4918{
   4919	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
   4920	struct netdev_notifier_changeupper_info *info = ptr;
   4921	struct netlink_ext_ack *extack;
   4922	struct net_device *upper_dev;
   4923
   4924	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
   4925		return 0;
   4926
   4927	extack = netdev_notifier_info_to_extack(&info->info);
   4928	upper_dev = info->upper_dev;
   4929
   4930	if (!netif_is_l3_master(upper_dev)) {
   4931		NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
   4932		return -EOPNOTSUPP;
   4933	}
   4934
   4935	return 0;
   4936}
   4937
/* Handle netdev events for a VxLAN device. On CHANGEUPPER, validate the
 * bridge configuration and join/leave the VxLAN device to/from the
 * offloaded bridge; on PRE_UP/DOWN, join/leave based on the existing
 * master. Events for bridges not offloaded by this instance are
 * ignored.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			/* Joining happens when the device comes up; see
			 * NETDEV_PRE_UP below.
			 */
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}
   5005
/* Top-level netdevice notifier: invalidates SPAN entries pointing at an
 * unregistering netdev, re-resolves all SPAN entries, then dispatches
 * the event by device type. The first matching type wins, so a VxLAN
 * device is never handled as a plain port.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
   5037
/* Validators run before an IPv4/IPv6 address is installed on a netdev;
 * registered once for the module in mlxsw_sp_module_init().
 */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

/* One PCI id table and driver per Spectrum ASIC generation (SP1-SP4). */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

static const struct pci_device_id mlxsw_sp4_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0},
	{0, },
};

static struct pci_driver mlxsw_sp4_pci_driver = {
	.name = mlxsw_sp4_driver_name,
	.id_table = mlxsw_sp4_pci_id_table,
};
   5085
/* Module entry point: register the address validators, then the core
 * and PCI drivers for all four Spectrum generations. On any failure,
 * unwind everything already registered in exact reverse order via the
 * goto ladder below.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp4_driver);
	if (err)
		goto err_sp4_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver);
	if (err)
		goto err_sp4_pci_driver_register;

	return 0;

	/* Error unwind: each label undoes the step before its trigger. */
err_sp4_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
err_sp4_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}
   5146
/* Module exit: tear down in exact reverse order of
 * mlxsw_sp_module_init().
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
   5160
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Expose the PCI id tables for module autoloading (modalias). */
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
/* Firmware images requested by the driver at probe time. */
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME);