cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

spectrum_flower.c (23233B)


// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/log2.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

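/* Validate that a police action is offloadable: the exceed action must
 * be drop, the conform action must be pipe or ok (and ok only as the
 * last action), and peakrate/avrate/overhead as well as
 * packets-per-second rates are not supported.
 */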
static int mlxsw_sp_policer_validate(const struct flow_action *action,
				     const struct flow_action_entry *act,
				     struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	if (act->police.rate_pkt_ps) {
		NL_SET_ERR_MSG_MOD(extack,
				   "QoS offload does not support packets per second");
		return -EOPNOTSUPP;
	}

	return 0;
}

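/* Translate the flow_action list of a flower rule into mlxsw ACL rule
 * actions. A count action is prepended when immediate HW stats are
 * requested; unsupported combinations (e.g. more than one mirror,
 * police or sample action per rule) are rejected with -EOPNOTSUPP.
 */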
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_flow_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int mirror_act_count = 0;
	int police_act_count = 0;
	int sample_act_count = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return 0;
	if (!flow_action_mixed_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	act = flow_action_first_entry_get(flow_action);
	if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
		/* Nothing to do */
	} else if (act->hw_stats & FLOW_ACTION_HW_STATS_IMMEDIATE) {
		/* Count action is inserted first */
		err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
		if (err)
			return err;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
			break;
		case FLOW_ACTION_DROP: {
			bool ingress;

			if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
				return -EOPNOTSUPP;
			}
			ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
			err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
							  act->cookie, extack);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}

			/* Forbid block with this rulei to be bound
			 * to ingress/egress in future. Ingress rule is
			 * a blocker for egress and vice versa.
			 */
			if (ingress)
				rulei->egress_bind_blocker = 1;
			else
				rulei->ingress_bind_blocker = 1;
			}
			break;
		case FLOW_ACTION_TRAP:
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
			}
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			if (mlxsw_sp_flow_block_is_egress_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
				return -EOPNOTSUPP;
			}

			/* Forbid block with this rulei to be bound
			 * to egress in future.
			 */
			rulei->egress_bind_blocker = 1;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = act->dev;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;

			if (mirror_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

			err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							  act->id, vid,
							  proto, prio, extack);
			if (err)
				return err;
			break;
			}
		case FLOW_ACTION_PRIORITY:
			err = mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
							      act->priority,
							      extack);
			if (err)
				return err;
			break;
		case FLOW_ACTION_MANGLE: {
			enum flow_action_mangle_base htype = act->mangle.htype;
			__be32 be_mask = (__force __be32) act->mangle.mask;
			__be32 be_val = (__force __be32) act->mangle.val;
			u32 offset = act->mangle.offset;
			u32 mask = be32_to_cpu(be_mask);
			u32 val = be32_to_cpu(be_val);

			err = mlxsw_sp_acl_rulei_act_mangle(mlxsw_sp, rulei,
							    htype, offset,
							    mask, val, extack);
			if (err)
				return err;
			break;
			}
		case FLOW_ACTION_POLICE: {
			u32 burst;

			if (police_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple police actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_policer_validate(flow_action, act, extack);
			if (err)
				return err;

			/* The kernel might adjust the requested burst size so
			 * that it is not exactly a power of two. Re-adjust it
			 * here since the hardware only supports burst sizes
			 * that are a power of two.
			 */
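			/* e.g. a requested burst of 10000 bytes is
			 * rounded up to 16384 (2^14) here.
			 */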
			burst = roundup_pow_of_two(act->police.burst);
			err = mlxsw_sp_acl_rulei_act_police(mlxsw_sp, rulei,
							    act->hw_index,
							    act->police.rate_bytes_ps,
							    burst, extack);
			if (err)
				return err;
			break;
			}
		case FLOW_ACTION_SAMPLE: {
			if (sample_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple sample actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_sample(mlxsw_sp, rulei,
							    block,
							    act->sample.psample_group,
							    act->sample.rate,
							    act->sample.trunc_size,
							    act->sample.truncate,
							    extack);
			if (err)
				return err;
			break;
			}
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}

	if (rulei->ipv6_valid) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
		return -EOPNOTSUPP;
	}

	return 0;
}

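/* Match on the ingress ifindex from the META key by translating the
 * netdev to its mlxsw local port. Only an exact ifindex mask is
 * supported, and the port must belong to the same mlxsw instance.
 */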
static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_flow_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}

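/* IPv4 source and destination addresses map directly onto the 32-bit
 * SRC_IP_0_31 and DST_IP_0_31 flex key elements.
 */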
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
}

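/* IPv6 addresses are programmed as four 32-bit chunks each, most
 * significant word first (*_IP_96_127 down to *_IP_0_31).
 */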
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
}

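/* L4 port matching is only offloaded when the rule also matches on
 * ip_proto TCP or UDP; otherwise the match is rejected with -EINVAL.
 */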
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}

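/* TCP flags matching. Masks that touch the reserved flag bits
 * (0x0E00 in network byte order) cannot be offloaded.
 */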
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct flow_cls_offload *f,
				     u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	flow_rule_match_tcp(rule, &match);

	if (match.mask->flags & htons(0x0E00)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
		dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
	return 0;
}

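/* TTL and TOS matching for IPv4/IPv6. The TOS byte is split into its
 * ECN (low two bits) and DSCP (upper six bits) key elements.
 */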
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct flow_cls_offload *f,
				    u16 n_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ip match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	flow_rule_match_ip(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       match.key->ttl, match.mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       match.key->tos >> 2,
				       match.mask->tos >> 2);

	return 0;
}

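/* Top-level parser for a flower rule: reject rules using dissector
 * keys outside the set below, translate each supported key into flex
 * key elements and finally parse the actions.
 */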
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_flow_block_is_egress_bound(block) &&
		    match.mask->vlan_id) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}

		/* Forbid block with this rulei to be bound
		 * to egress in future.
		 */
		rulei->egress_bind_blocker = 1;

		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}

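/* Flower rules may not be added in front of existing matchall rules
 * on ingress, nor behind them on egress; -ENOENT from the priority
 * lookup means no matchall filters are installed on the chain.
 */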
static int mlxsw_sp_flower_mall_prio_check(struct mlxsw_sp_flow_block *block,
					   struct flow_cls_offload *f)
{
	bool ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
	unsigned int mall_min_prio;
	unsigned int mall_max_prio;
	int err;

	err = mlxsw_sp_mall_prio_get(block, f->common.chain_index,
				     &mall_min_prio, &mall_max_prio);
	if (err) {
		if (err == -ENOENT)
			/* No matchall filters installed on this chain. */
			return 0;
		NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
		return err;
	}
	if (ingress && f->common.prio <= mall_min_prio) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing matchall rules");
		return -EOPNOTSUPP;
	}
	if (!ingress && f->common.prio >= mall_max_prio) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing matchall rules");
		return -EOPNOTSUPP;
	}
	return 0;
}

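/* Install a flower rule: look up the ruleset for the chain, create a
 * rule keyed by the flower cookie, parse matches and actions into it,
 * commit the rule info and add the rule to the hardware.
 */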
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	err = mlxsw_sp_flower_mall_prio_check(block, f);
	if (err)
		return err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

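/* Remove a flower rule previously installed by
 * mlxsw_sp_flower_replace(), identified by its cookie.
 */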
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_flow_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

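/* Fetch hardware counters (packets, bytes, drops, last use) for a
 * flower rule and report them to the TC core via flow_stats_update().
 */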
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_flow_block *block,
			  struct flow_cls_offload *f)
{
	enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	u64 drops;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &drops, &lastuse, &used_hw_stats);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, drops, lastuse,
			  used_hw_stats);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

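/* Create a chain template: parse the rule only to learn its element
 * usage, then take a ruleset reference that is deliberately kept
 * until mlxsw_sp_flower_tmplt_destroy().
 */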
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}

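/* Drop both the reference taken by the ruleset lookup in this
 * function and the one kept by mlxsw_sp_flower_tmplt_create(), hence
 * the double put.
 */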
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_flow_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

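/* Report the minimum and maximum priority of flower rules on a chain;
 * -ENOENT indicates there are no flower rules to check against.
 */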
int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_flow_block *block,
			     u32 chain_index, unsigned int *p_min_prio,
			     unsigned int *p_max_prio)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
					      chain_index,
					      MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		/* In case there are no flower rules, the caller
		 * receives -ENOENT to indicate there is no need
		 * to check the priorities.
		 */
		return PTR_ERR(ruleset);
	mlxsw_sp_acl_ruleset_prio_get(ruleset, p_min_prio, p_max_prio);
	return 0;
}
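
/* Usage note (illustrative only, not part of the driver): the entry
 * points above are reached through the kernel's flow_cls_offload
 * path, i.e. from tc flower filters. A rule of roughly this shape,
 * with "swp1" standing in for a hypothetical mlxsw port,
 *
 *   tc filter add dev swp1 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 *
 * would arrive at mlxsw_sp_flower_replace(), be parsed by
 * mlxsw_sp_flower_parse() and be programmed into the device's ACL
 * tables.
 */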