cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

en_fs.c (36570B)


      1/*
      2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
      3 *
      4 * This software is available to you under a choice of one of two
      5 * licenses.  You may choose to be licensed under the terms of the GNU
      6 * General Public License (GPL) Version 2, available from the file
      7 * COPYING in the main directory of this source tree, or the
      8 * OpenIB.org BSD license below:
      9 *
     10 *     Redistribution and use in source and binary forms, with or
     11 *     without modification, are permitted provided that the following
     12 *     conditions are met:
     13 *
     14 *      - Redistributions of source code must retain the above
     15 *        copyright notice, this list of conditions and the following
     16 *        disclaimer.
     17 *
     18 *      - Redistributions in binary form must reproduce the above
     19 *        copyright notice, this list of conditions and the following
     20 *        disclaimer in the documentation and/or other materials
     21 *        provided with the distribution.
     22 *
     23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
     24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
     26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
     27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
     28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
     29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     30 * SOFTWARE.
     31 */
     32
     33#include <linux/list.h>
     34#include <linux/ip.h>
     35#include <linux/ipv6.h>
     36#include <linux/tcp.h>
     37#include <linux/mlx5/fs.h>
     38#include <linux/mlx5/mpfs.h>
     39#include "en.h"
     40#include "en_rep.h"
     41#include "lib/mpfs.h"
     42#include "en/ptp.h"
     43
     44static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
     45				  struct mlx5e_l2_rule *ai, int type);
     46static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
     47				   struct mlx5e_l2_rule *ai);
     48
/* L2 rule match scope: exact DMAC match vs. catch-all multicast match. */
enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI  = 1,
};
     53
/* Classes of L2 addresses handled by the driver's address tracking. */
enum {
	MLX5E_UC        = 0,
	MLX5E_MC_IPV4   = 1,
	MLX5E_MC_IPV6   = 2,
	MLX5E_MC_OTHER  = 3,
};
     60
/* Pending sync action recorded on an L2 hash node; consumed by
 * mlx5e_execute_l2_action().
 */
enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD  = 1,
	MLX5E_ACTION_DEL  = 2,
};
     66
/* One tracked L2 address: its hash linkage, pending ADD/DEL/NONE action,
 * the steering rule state (ai), and whether the address was also
 * programmed into the MPFS table (mpfs).
 */
struct mlx5e_l2_hash_node {
	struct hlist_node          hlist;
	u8                         action;
	struct mlx5e_l2_rule ai;
	bool   mpfs;
};
     73
     74static inline int mlx5e_hash_l2(const u8 *addr)
     75{
     76	return addr[5];
     77}
     78
/* Record @addr in the L2 hash @hash for the next sync pass.
 *
 * If the address is already present its action is reset to NONE so it
 * survives the mark-and-sweep in mlx5e_handle_netdev_addr(); otherwise a
 * new node is added marked ACTION_ADD so its rules get installed.
 * Allocation failure is silently ignored — the address is simply dropped
 * from this sync round.
 */
static void mlx5e_add_l2_to_hash(struct hlist_head *hash, const u8 *addr)
{
	struct mlx5e_l2_hash_node *hn;
	int ix = mlx5e_hash_l2(addr);
	int found = 0;

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			found = 1;
			break;
		}

	if (found) {
		hn->action = MLX5E_ACTION_NONE;
		return;
	}

	/* GFP_ATOMIC: callers hold netif_addr_lock_bh() */
	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}
    105
/* Unlink an L2 hash node from its bucket and free it. */
static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}
    111
/* VLAN steering state: the flow table itself, bitmaps of active C/S-tag
 * VIDs with their per-VID rule handles, plus the untagged, any-tag and
 * trap catch-all rules.
 */
struct mlx5e_vlan_table {
	struct mlx5e_flow_table		ft;
	DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
	DECLARE_BITMAP(active_svlans, VLAN_N_VID);
	struct mlx5_flow_handle	*active_cvlans_rule[VLAN_N_VID];
	struct mlx5_flow_handle	*active_svlans_rule[VLAN_N_VID];
	struct mlx5_flow_handle	*untagged_rule;
	struct mlx5_flow_handle	*any_cvlan_rule;
	struct mlx5_flow_handle	*any_svlan_rule;
	struct mlx5_flow_handle	*trap_rule;
	bool			cvlan_filter_disabled;
};
    124
/* Accessor: bitmap of currently active S-tagged VLAN IDs. */
unsigned long *mlx5e_vlan_get_active_svlans(struct mlx5e_vlan_table *vlan)
{
	return vlan->active_svlans;
}
    129
/* Accessor: the underlying device flow table of the VLAN table. */
struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan)
{
	return vlan->ft.t;
}
    134
/* Push the list of active C-tag VLANs to the device's vport context.
 *
 * If more VLANs are active than the firmware supports
 * (log_max_vlan_list), the list is truncated with a warning.
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	/* Count the active C-tag VIDs */
	list_size = 0;
	for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		netdev_warn(ndev,
			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kvcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	i = 0;
	for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
		if (i >= list_size) /* list was truncated above */
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kvfree(vlans);
	return err;
}
    177
/* Kinds of rules that can live in the VLAN flow table. */
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
};
    185
/* Fill @spec according to @rule_type/@vid and install the rule into the
 * VLAN flow table, forwarding matches to the L2 table.  The resulting
 * handle is cached in the per-type slot of priv->fs.vlan; installing over
 * an already-present handle is a no-op (WARNs once).  Returns 0 or a
 * negative errno.
 */
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.l2.ft.t;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (untagged of both)
		 */
		rule_p = &priv->fs.vlan->untagged_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		/* any packet carrying a C-tag, regardless of VID */
		rule_p = &priv->fs.vlan->any_cvlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		/* any packet carrying an S-tag, regardless of VID */
		rule_p = &priv->fs.vlan->any_svlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		/* S-tag with an exact VID match */
		rule_p = &priv->fs.vlan->active_svlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
		/* C-tag with an exact VID match */
		rule_p = &priv->fs.vlan->active_cvlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	if (WARN_ONCE(*rule_p, "VLAN rule already exists type %d", rule_type))
		return 0;

	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	return err;
}
    258
/* Allocate a flow spec and install one VLAN rule of @rule_type for @vid.
 * For C-tag VID rules the vport VLAN list is refreshed first so the
 * device context stays in sync with the active_cvlans bitmap.
 */
static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
		mlx5e_vport_context_update_vlans(priv);

	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);

	kvfree(spec);

	return err;
}
    278
/* Remove the cached rule for @rule_type/@vid if one is installed, and
 * clear its slot.  For C-tag VID rules the vport VLAN list is refreshed
 * afterwards to drop the VID from the device context.
 */
static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->fs.vlan->untagged_rule) {
			mlx5_del_flow_rules(priv->fs.vlan->untagged_rule);
			priv->fs.vlan->untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->fs.vlan->any_cvlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan->any_cvlan_rule);
			priv->fs.vlan->any_cvlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->fs.vlan->any_svlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan->any_svlan_rule);
			priv->fs.vlan->any_svlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		if (priv->fs.vlan->active_svlans_rule[vid]) {
			mlx5_del_flow_rules(priv->fs.vlan->active_svlans_rule[vid]);
			priv->fs.vlan->active_svlans_rule[vid] = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
		if (priv->fs.vlan->active_cvlans_rule[vid]) {
			mlx5_del_flow_rules(priv->fs.vlan->active_cvlans_rule[vid]);
			priv->fs.vlan->active_cvlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}
    316
/* Remove both "any C-tag" and "any S-tag" catch-all rules. */
static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
    322
    323static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
    324{
    325	int err;
    326
    327	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
    328	if (err)
    329		return err;
    330
    331	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
    332}
    333
/* Install a catch-all rule in @ft that tags matching packets with
 * @trap_id (flow tag) and steers them to TIR @tir_num.  The spec is left
 * empty so the rule matches every packet.  Returns the rule handle or an
 * ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
{
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);
	spec->flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
	spec->flow_context.flow_tag = trap_id;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tir_num;

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);
	return rule;
}
    354
    355int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
    356{
    357	struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
    358	struct mlx5_flow_handle *rule;
    359	int err;
    360
    361	rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
    362	if (IS_ERR(rule)) {
    363		err = PTR_ERR(rule);
    364		priv->fs.vlan->trap_rule = NULL;
    365		netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n",
    366			   __func__, err);
    367		return err;
    368	}
    369	priv->fs.vlan->trap_rule = rule;
    370	return 0;
    371}
    372
    373void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
    374{
    375	if (priv->fs.vlan->trap_rule) {
    376		mlx5_del_flow_rules(priv->fs.vlan->trap_rule);
    377		priv->fs.vlan->trap_rule = NULL;
    378	}
    379}
    380
    381int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
    382{
    383	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
    384	struct mlx5_flow_handle *rule;
    385	int err;
    386
    387	rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
    388	if (IS_ERR(rule)) {
    389		err = PTR_ERR(rule);
    390		priv->fs.l2.trap_rule = NULL;
    391		netdev_err(priv->netdev, "%s: add MAC trap rule failed, err %d\n",
    392			   __func__, err);
    393		return err;
    394	}
    395	priv->fs.l2.trap_rule = rule;
    396	return 0;
    397}
    398
    399void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
    400{
    401	if (priv->fs.l2.trap_rule) {
    402		mlx5_del_flow_rules(priv->fs.l2.trap_rule);
    403		priv->fs.l2.trap_rule = NULL;
    404	}
    405}
    406
    407void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
    408{
    409	if (!priv->fs.vlan->cvlan_filter_disabled)
    410		return;
    411
    412	priv->fs.vlan->cvlan_filter_disabled = false;
    413	if (priv->netdev->flags & IFF_PROMISC)
    414		return;
    415	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
    416}
    417
    418void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
    419{
    420	if (priv->fs.vlan->cvlan_filter_disabled)
    421		return;
    422
    423	priv->fs.vlan->cvlan_filter_disabled = true;
    424	if (priv->netdev->flags & IFF_PROMISC)
    425		return;
    426	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
    427}
    428
/* Mark @vid active and install its C-tag match rule; the bitmap is
 * rolled back if rule installation fails.
 */
static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
{
	int err;

	set_bit(vid, priv->fs.vlan->active_cvlans);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
	if (err)
		clear_bit(vid, priv->fs.vlan->active_cvlans);

	return err;
}
    441
/* Mark @vid active and install its S-tag match rule; rolls the bitmap
 * back on failure.  On success the netdev features are re-evaluated
 * (S-tag presence affects offload capabilities).
 */
static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
{
	struct net_device *netdev = priv->netdev;
	int err;

	set_bit(vid, priv->fs.vlan->active_svlans);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
	if (err) {
		clear_bit(vid, priv->fs.vlan->active_svlans);
		return err;
	}

	/* Need to fix some features.. */
	netdev_update_features(netdev);
	return err;
}
    459
    460int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
    461{
    462	struct mlx5e_priv *priv = netdev_priv(dev);
    463
    464	if (mlx5e_is_uplink_rep(priv))
    465		return 0; /* no vlan table for uplink rep */
    466
    467	if (be16_to_cpu(proto) == ETH_P_8021Q)
    468		return mlx5e_vlan_rx_add_cvid(priv, vid);
    469	else if (be16_to_cpu(proto) == ETH_P_8021AD)
    470		return mlx5e_vlan_rx_add_svid(priv, vid);
    471
    472	return -EOPNOTSUPP;
    473}
    474
    475int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
    476{
    477	struct mlx5e_priv *priv = netdev_priv(dev);
    478
    479	if (mlx5e_is_uplink_rep(priv))
    480		return 0; /* no vlan table for uplink rep */
    481
    482	if (be16_to_cpu(proto) == ETH_P_8021Q) {
    483		clear_bit(vid, priv->fs.vlan->active_cvlans);
    484		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
    485	} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
    486		clear_bit(vid, priv->fs.vlan->active_svlans);
    487		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
    488		netdev_update_features(dev);
    489	}
    490
    491	return 0;
    492}
    493
/* (Re)install all VLAN rules: untagged, every active C/S-tag VID, and —
 * when C-tag filtering is disabled — the any-VID catch-alls.
 */
static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
	}

	for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

	if (priv->fs.vlan->cvlan_filter_disabled)
		mlx5e_add_any_vid_rules(priv);
}
    510
/* Remove every VLAN rule.  Expected to run only while the interface is
 * being destroyed (WARNs otherwise); see the ordering comment below.
 */
static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
	}

	for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

	WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));

	mlx5e_remove_vlan_trap(priv);

	/* must be called after DESTROY bit is set and
	 * set_rx_mode is called and flushed
	 */
	if (priv->fs.vlan->cvlan_filter_disabled)
		mlx5e_del_any_vid_rules(priv);
}
    534
/* Iterate over every node of an L2 address hash table; uses the _safe
 * list walker so the current node may be removed during iteration.
 */
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
    538
/* Apply a hash node's pending action: install or remove its L2 steering
 * rule and, for unicast addresses, mirror the change into the MPFS
 * table.  The MAC is copied out first because the DEL path frees @hn
 * before the final log message.
 */
static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
				    struct mlx5e_l2_hash_node *hn)
{
	u8 action = hn->action;
	u8 mac_addr[ETH_ALEN];
	int l2_err = 0;

	ether_addr_copy(mac_addr, hn->ai.addr);

	switch (action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		if (!is_multicast_ether_addr(mac_addr)) {
			l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
			hn->mpfs = !l2_err; /* remembered for the DEL path */
		}
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
			l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
		mlx5e_del_l2_flow_rule(priv, &hn->ai);
		mlx5e_del_l2_from_hash(hn);
		break;
	}

	if (l2_err)
		netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
			    action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
}
    570
/* Snapshot the netdev's own address plus its UC and MC address lists
 * into the driver's hash tables, under the netdev address lock.
 */
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
			     priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}
    589
/* Flatten the UC or MC address hash into @addr_array, capped at @size
 * entries.  For UC the device's own address is pushed first; for MC the
 * broadcast address leads when broadcast is enabled.  The own address is
 * skipped while walking the hash since it was already pushed.
 */
static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->fs.l2.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}
    616
/* Push the UC or MC address list to the device's vport context.  The
 * list is truncated with a warning when it exceeds the firmware limit
 * (log_max_current_{uc,mc}_list).  For MC, broadcast counts as one extra
 * entry when enabled.
 */
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}
    663
/* Sync the vport context with driver state: UC/MC address lists plus
 * the allmulti and promisc flags.
 */
static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *ea = &priv->fs.l2;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}
    674
/* Execute the pending ADD/DEL action of every node in the UC and MC
 * address hash tables.
 */
static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		mlx5e_execute_l2_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		mlx5e_execute_l2_action(priv, hn);
}
    687
/* Mark-and-sweep address sync: mark every known address for deletion,
 * re-mark the ones still present on the netdev (skipped when the
 * interface is being destroyed, so everything gets deleted), then apply
 * the resulting ADD/DEL actions.
 */
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}
    704
/* The promisc table holds a single catch-all rule in one group. */
#define MLX5E_PROMISC_GROUP0_SIZE BIT(0)
#define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE
    707
/* Install the single catch-all rule of the promisc table, steering all
 * traffic straight to the TTC table.  The spec is left empty so every
 * packet matches.
 */
static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table *ft = priv->fs.promisc.ft.t;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc);

	rule_p = &priv->fs.promisc.rule;
	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add promiscuous rule failed\n", __func__);
	}
	kvfree(spec);
	return err;
}
    733
/* Create the one-group promisc flow table and install its catch-all
 * rule; the table is torn down again if rule installation fails.
 */
static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fs.promisc.ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		netdev_err(priv->netdev, "fail to create promisc table err=%d\n", err);
		return err;
	}

	err = mlx5e_add_promisc_rule(priv);
	if (err)
		goto err_destroy_promisc_table;

	return 0;

err_destroy_promisc_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}
    764
    765static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv)
    766{
    767	if (WARN(!priv->fs.promisc.rule, "Trying to remove non-existing promiscuous rule"))
    768		return;
    769	mlx5_del_flow_rules(priv->fs.promisc.rule);
    770	priv->fs.promisc.rule = NULL;
    771}
    772
/* Destroy the promisc table and its rule; WARNs if the table does not
 * exist.
 */
static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv)
{
	if (WARN(!priv->fs.promisc.ft.t, "Trying to remove non-existing promiscuous table"))
		return;
	mlx5e_del_promisc_rule(priv);
	mlx5_destroy_flow_table(priv->fs.promisc.ft.t);
	priv->fs.promisc.ft.t = NULL;
}
    781
/* Work handler that syncs the device RX mode with the netdev state.
 *
 * Computes which of promisc/allmulti/broadcast changed, creates or
 * destroys the corresponding rules/tables, runs the mark-and-sweep
 * address sync, records the new state, and finally pushes everything to
 * the vport context.  While MLX5E_STATE_DESTROYING is set every mode is
 * treated as disabled, so this tears everything down.
 */
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_l2_table *ea = &priv->fs.l2;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;
	int err;

	if (enable_promisc) {
		err = mlx5e_create_promisc_table(priv);
		if (err)
			enable_promisc = false;
		if (!priv->channels.params.vlan_strip_disable && !err)
			netdev_warn_once(ndev,
					 "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
	if (disable_promisc)
		mlx5e_destroy_promisc_table(priv);

	ea->promisc_enabled   = promisc_enabled;
	ea->allmulti_enabled  = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}
    831
    832static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
    833{
    834	int i;
    835
    836	for (i = ft->num_groups - 1; i >= 0; i--) {
    837		if (!IS_ERR_OR_NULL(ft->g[i]))
    838			mlx5_destroy_flow_group(ft->g[i]);
    839		ft->g[i] = NULL;
    840	}
    841	ft->num_groups = 0;
    842}
    843
/* Cache the netdev broadcast address in the L2 table state. */
void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}
    848
/* Destroy a driver flow table: its groups, the group array, and the
 * underlying device flow table.
 */
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}
    856
/* Initialize TTC parameters for the inner-header (tunnel) TTC table:
 * kernel namespace, inner-TTC level/prio, and per-traffic-type TIR
 * destinations — inner RSS TIRs, except MLX5_TT_ANY which gets the
 * first direct TIR.
 */
static void mlx5e_set_inner_ttc_params(struct mlx5e_priv *priv,
				       struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));
	ttc_params->ns = mlx5_get_flow_namespace(priv->mdev,
						 MLX5_FLOW_NAMESPACE_KERNEL);
	ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_NIC_PRIO;

	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0) :
				mlx5e_rx_res_get_tirn_rss_inner(priv->rx_res,
								tt);
	}
}
    878
/* Initialize TTC parameters for the outer TTC table: kernel namespace,
 * TTC level/prio, and per-traffic-type TIR destinations (RSS TIRs,
 * except MLX5_TT_ANY which gets the first direct TIR).  When @tunnel is
 * set and the device supports inner flow tables, every tunnel traffic
 * type is additionally steered to the inner TTC table.
 */
void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
			  struct ttc_params *ttc_params, bool tunnel)

{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));
	ttc_params->ns = mlx5_get_flow_namespace(priv->mdev,
						 MLX5_FLOW_NAMESPACE_KERNEL);
	ft_attr->level = MLX5E_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_NIC_PRIO;

	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0) :
				mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt);
	}

	ttc_params->inner_ttc = tunnel;
	if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->mdev))
		return;

	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		ttc_params->tunnel_dests[tt].type =
			MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		ttc_params->tunnel_dests[tt].ft =
			mlx5_get_ttc_flow_table(priv->fs.inner_ttc);
	}
}
    911
    912static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
    913				   struct mlx5e_l2_rule *ai)
    914{
    915	if (!IS_ERR_OR_NULL(ai->rule)) {
    916		mlx5_del_flow_rules(ai->rule);
    917		ai->rule = NULL;
    918	}
    919}
    920
    921static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
    922				  struct mlx5e_l2_rule *ai, int type)
    923{
    924	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
    925	struct mlx5_flow_destination dest = {};
    926	MLX5_DECLARE_FLOW_ACT(flow_act);
    927	struct mlx5_flow_spec *spec;
    928	int err = 0;
    929	u8 *mc_dmac;
    930	u8 *mv_dmac;
    931
    932	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
    933	if (!spec)
    934		return -ENOMEM;
    935
    936	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
    937			       outer_headers.dmac_47_16);
    938	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
    939			       outer_headers.dmac_47_16);
    940
    941	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
    942	dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
    943
    944	switch (type) {
    945	case MLX5E_FULLMATCH:
    946		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
    947		eth_broadcast_addr(mc_dmac);
    948		ether_addr_copy(mv_dmac, ai->addr);
    949		break;
    950
    951	case MLX5E_ALLMULTI:
    952		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
    953		mc_dmac[0] = 0x01;
    954		mv_dmac[0] = 0x01;
    955		break;
    956	}
    957
    958	ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
    959	if (IS_ERR(ai->rule)) {
    960		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
    961			   __func__, mv_dmac);
    962		err = PTR_ERR(ai->rule);
    963		ai->rule = NULL;
    964	}
    965
    966	kvfree(spec);
    967
    968	return err;
    969}
    970
/* L2 (DMAC) table layout: a large group for exact-DMAC rules, a single
 * entry for the allmulti rule and a single trailing entry for L2 traps.
 */
#define MLX5E_NUM_L2_GROUPS	   3
#define MLX5E_L2_GROUP1_SIZE	   BIT(15)	/* exact DMAC match rules */
#define MLX5E_L2_GROUP2_SIZE	   BIT(0)	/* allmulti rule */
#define MLX5E_L2_GROUP_TRAP_SIZE   BIT(0) /* must be last */
#define MLX5E_L2_TABLE_SIZE	   (MLX5E_L2_GROUP1_SIZE +\
				    MLX5E_L2_GROUP2_SIZE +\
				    MLX5E_L2_GROUP_TRAP_SIZE)
/* Create the three flow groups of the L2 table (full-DMAC match, allmulti,
 * trap). On failure, destroys any groups created so far and frees ft->g.
 */
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int ix = 0;
	u8 *mc_dmac;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
			       outer_headers.dmac_47_16);
	/* Flow Group for full match */
	eth_broadcast_addr(mc_dmac);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for allmulti */
	/* Reuses @in: criteria-enable stays set, only the DMAC mask changes
	 * to cover just the multicast bit.
	 */
	eth_zero_addr(mc_dmac);
	mc_dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for l2 traps */
	/* Cleared entirely: the trap group carries no match criteria. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP_TRAP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_destroy_groups:
	/* Clear the ERR_PTR slot so mlx5e_destroy_groups() only sees
	 * valid groups, then release everything.
	 */
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);
	kvfree(in);
	kfree(ft->g);

	return err;
}
   1044
/* Tear down the L2 (DMAC) flow table, its groups and group array. */
static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}
   1049
   1050static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
   1051{
   1052	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
   1053	struct mlx5e_flow_table *ft = &l2_table->ft;
   1054	struct mlx5_flow_table_attr ft_attr = {};
   1055	int err;
   1056
   1057	ft->num_groups = 0;
   1058
   1059	ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
   1060	ft_attr.level = MLX5E_L2_FT_LEVEL;
   1061	ft_attr.prio = MLX5E_NIC_PRIO;
   1062
   1063	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
   1064	if (IS_ERR(ft->t)) {
   1065		err = PTR_ERR(ft->t);
   1066		ft->t = NULL;
   1067		return err;
   1068	}
   1069
   1070	err = mlx5e_create_l2_table_groups(l2_table);
   1071	if (err)
   1072		goto err_destroy_flow_table;
   1073
   1074	return 0;
   1075
   1076err_destroy_flow_table:
   1077	mlx5_destroy_flow_table(ft->t);
   1078	ft->t = NULL;
   1079
   1080	return err;
   1081}
   1082
/* VLAN table layout: per-VID groups for C-tagged and S-tagged frames,
 * tag-presence-only groups, and a single trailing trap entry.
 */
#define MLX5E_NUM_VLAN_GROUPS	5
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)	/* C-tag + VID matches */
#define MLX5E_VLAN_GROUP1_SIZE	BIT(12)	/* S-tag + VID matches */
#define MLX5E_VLAN_GROUP2_SIZE	BIT(1)	/* C-tag presence only */
#define MLX5E_VLAN_GROUP3_SIZE	BIT(0)	/* S-tag presence only */
#define MLX5E_VLAN_GROUP_TRAP_SIZE BIT(0) /* must be last */
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE +\
				 MLX5E_VLAN_GROUP3_SIZE +\
				 MLX5E_VLAN_GROUP_TRAP_SIZE)
   1094
   1095static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
   1096					    int inlen)
   1097{
   1098	int err;
   1099	int ix = 0;
   1100	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
   1101
   1102	memset(in, 0, inlen);
   1103	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
   1104	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
   1105	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
   1106	MLX5_SET_CFG(in, start_flow_index, ix);
   1107	ix += MLX5E_VLAN_GROUP0_SIZE;
   1108	MLX5_SET_CFG(in, end_flow_index, ix - 1);
   1109	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
   1110	if (IS_ERR(ft->g[ft->num_groups]))
   1111		goto err_destroy_groups;
   1112	ft->num_groups++;
   1113
   1114	memset(in, 0, inlen);
   1115	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
   1116	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
   1117	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
   1118	MLX5_SET_CFG(in, start_flow_index, ix);
   1119	ix += MLX5E_VLAN_GROUP1_SIZE;
   1120	MLX5_SET_CFG(in, end_flow_index, ix - 1);
   1121	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
   1122	if (IS_ERR(ft->g[ft->num_groups]))
   1123		goto err_destroy_groups;
   1124	ft->num_groups++;
   1125
   1126	memset(in, 0, inlen);
   1127	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
   1128	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
   1129	MLX5_SET_CFG(in, start_flow_index, ix);
   1130	ix += MLX5E_VLAN_GROUP2_SIZE;
   1131	MLX5_SET_CFG(in, end_flow_index, ix - 1);
   1132	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
   1133	if (IS_ERR(ft->g[ft->num_groups]))
   1134		goto err_destroy_groups;
   1135	ft->num_groups++;
   1136
   1137	memset(in, 0, inlen);
   1138	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
   1139	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
   1140	MLX5_SET_CFG(in, start_flow_index, ix);
   1141	ix += MLX5E_VLAN_GROUP3_SIZE;
   1142	MLX5_SET_CFG(in, end_flow_index, ix - 1);
   1143	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
   1144	if (IS_ERR(ft->g[ft->num_groups]))
   1145		goto err_destroy_groups;
   1146	ft->num_groups++;
   1147
   1148	memset(in, 0, inlen);
   1149	MLX5_SET_CFG(in, start_flow_index, ix);
   1150	ix += MLX5E_VLAN_GROUP_TRAP_SIZE;
   1151	MLX5_SET_CFG(in, end_flow_index, ix - 1);
   1152	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
   1153	if (IS_ERR(ft->g[ft->num_groups]))
   1154		goto err_destroy_groups;
   1155	ft->num_groups++;
   1156
   1157	return 0;
   1158
   1159err_destroy_groups:
   1160	err = PTR_ERR(ft->g[ft->num_groups]);
   1161	ft->g[ft->num_groups] = NULL;
   1162	mlx5e_destroy_groups(ft);
   1163
   1164	return err;
   1165}
   1166
   1167static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
   1168{
   1169	u32 *in;
   1170	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
   1171	int err;
   1172
   1173	in = kvzalloc(inlen, GFP_KERNEL);
   1174	if (!in)
   1175		return -ENOMEM;
   1176
   1177	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);
   1178
   1179	kvfree(in);
   1180	return err;
   1181}
   1182
   1183static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
   1184{
   1185	struct mlx5_flow_table_attr ft_attr = {};
   1186	struct mlx5e_flow_table *ft;
   1187	int err;
   1188
   1189	ft = &priv->fs.vlan->ft;
   1190	ft->num_groups = 0;
   1191
   1192	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
   1193	ft_attr.level = MLX5E_VLAN_FT_LEVEL;
   1194	ft_attr.prio = MLX5E_NIC_PRIO;
   1195
   1196	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
   1197	if (IS_ERR(ft->t))
   1198		return PTR_ERR(ft->t);
   1199
   1200	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
   1201	if (!ft->g) {
   1202		err = -ENOMEM;
   1203		goto err_destroy_vlan_table;
   1204	}
   1205
   1206	err = mlx5e_create_vlan_table_groups(ft);
   1207	if (err)
   1208		goto err_free_g;
   1209
   1210	mlx5e_add_vlan_rules(priv);
   1211
   1212	return 0;
   1213
   1214err_free_g:
   1215	kfree(ft->g);
   1216err_destroy_vlan_table:
   1217	mlx5_destroy_flow_table(ft->t);
   1218
   1219	return err;
   1220}
   1221
/* Remove the VLAN steering rules, then destroy the table they live in. */
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rules(priv);
	mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
}
   1227
   1228static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
   1229{
   1230	if (!mlx5_tunnel_inner_ft_supported(priv->mdev))
   1231		return;
   1232	mlx5_destroy_ttc_table(priv->fs.inner_ttc);
   1233}
   1234
/* Destroy the outer traffic-type classification (TTC) table. */
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
	mlx5_destroy_ttc_table(priv->fs.ttc);
}
   1239
   1240static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
   1241{
   1242	struct ttc_params ttc_params = {};
   1243
   1244	if (!mlx5_tunnel_inner_ft_supported(priv->mdev))
   1245		return 0;
   1246
   1247	mlx5e_set_inner_ttc_params(priv, &ttc_params);
   1248	priv->fs.inner_ttc = mlx5_create_inner_ttc_table(priv->mdev,
   1249							 &ttc_params);
   1250	if (IS_ERR(priv->fs.inner_ttc))
   1251		return PTR_ERR(priv->fs.inner_ttc);
   1252	return 0;
   1253}
   1254
   1255int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
   1256{
   1257	struct ttc_params ttc_params = {};
   1258
   1259	mlx5e_set_ttc_params(priv, &ttc_params, true);
   1260	priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
   1261	if (IS_ERR(priv->fs.ttc))
   1262		return PTR_ERR(priv->fs.ttc);
   1263	return 0;
   1264}
   1265
   1266int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
   1267{
   1268	int err;
   1269
   1270	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
   1271					       MLX5_FLOW_NAMESPACE_KERNEL);
   1272
   1273	if (!priv->fs.ns)
   1274		return -EOPNOTSUPP;
   1275
   1276	err = mlx5e_arfs_create_tables(priv);
   1277	if (err) {
   1278		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
   1279			   err);
   1280		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
   1281	}
   1282
   1283	err = mlx5e_create_inner_ttc_table(priv);
   1284	if (err) {
   1285		netdev_err(priv->netdev,
   1286			   "Failed to create inner ttc table, err=%d\n",
   1287			   err);
   1288		goto err_destroy_arfs_tables;
   1289	}
   1290
   1291	err = mlx5e_create_ttc_table(priv);
   1292	if (err) {
   1293		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
   1294			   err);
   1295		goto err_destroy_inner_ttc_table;
   1296	}
   1297
   1298	err = mlx5e_create_l2_table(priv);
   1299	if (err) {
   1300		netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
   1301			   err);
   1302		goto err_destroy_ttc_table;
   1303	}
   1304
   1305	err = mlx5e_create_vlan_table(priv);
   1306	if (err) {
   1307		netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
   1308			   err);
   1309		goto err_destroy_l2_table;
   1310	}
   1311
   1312	err = mlx5e_ptp_alloc_rx_fs(priv);
   1313	if (err)
   1314		goto err_destory_vlan_table;
   1315
   1316	mlx5e_ethtool_init_steering(priv);
   1317
   1318	return 0;
   1319
   1320err_destory_vlan_table:
   1321	mlx5e_destroy_vlan_table(priv);
   1322err_destroy_l2_table:
   1323	mlx5e_destroy_l2_table(priv);
   1324err_destroy_ttc_table:
   1325	mlx5e_destroy_ttc_table(priv);
   1326err_destroy_inner_ttc_table:
   1327	mlx5e_destroy_inner_ttc_table(priv);
   1328err_destroy_arfs_tables:
   1329	mlx5e_arfs_destroy_tables(priv);
   1330
   1331	return err;
   1332}
   1333
/* Tear down the RX flow-steering state built by
 * mlx5e_create_flow_steering(); tables are destroyed in reverse creation
 * order, with ethtool steering cleaned up last.
 */
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_ptp_free_rx_fs(priv);
	mlx5e_destroy_vlan_table(priv);
	mlx5e_destroy_l2_table(priv);
	mlx5e_destroy_ttc_table(priv);
	mlx5e_destroy_inner_ttc_table(priv);
	mlx5e_arfs_destroy_tables(priv);
	mlx5e_ethtool_cleanup_steering(priv);
}
   1344
   1345int mlx5e_fs_init(struct mlx5e_priv *priv)
   1346{
   1347	priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
   1348	if (!priv->fs.vlan)
   1349		return -ENOMEM;
   1350	return 0;
   1351}
   1352
/* Free the VLAN steering context allocated by mlx5e_fs_init(). */
void mlx5e_fs_cleanup(struct mlx5e_priv *priv)
{
	kvfree(priv->fs.vlan);
	priv->fs.vlan = NULL;	/* guard against stale-pointer reuse */
}